diff --git a/.github/workflows/libc-fullbuild-tests.yml b/.github/workflows/libc-fullbuild-tests.yml index 2c88da653aae4..d93ac84116240 100644 --- a/.github/workflows/libc-fullbuild-tests.yml +++ b/.github/workflows/libc-fullbuild-tests.yml @@ -15,6 +15,7 @@ jobs: strategy: fail-fast: false matrix: + build_type: [Debug, Release, MinSizeRel] include: - os: ubuntu-24.04 ccache-variant: sccache @@ -68,7 +69,7 @@ jobs: cmake -B ${{ steps.strings.outputs.build-output-dir }} -DCMAKE_CXX_COMPILER=${{ matrix.cpp_compiler }} -DCMAKE_C_COMPILER=${{ matrix.c_compiler }} - -DCMAKE_BUILD_TYPE=MinSizeRel + -DCMAKE_BUILD_TYPE=${{ matrix.build_type }} -DCMAKE_C_COMPILER_LAUNCHER=${{ matrix.ccache-variant }} -DCMAKE_CXX_COMPILER_LAUNCHER=${{ matrix.ccache-variant }} -DCMAKE_INSTALL_PREFIX=${{ steps.strings.outputs.build-install-dir }} diff --git a/.github/workflows/libc-overlay-tests.yml b/.github/workflows/libc-overlay-tests.yml index 0a0916084b18c..de4b58c008ee4 100644 --- a/.github/workflows/libc-overlay-tests.yml +++ b/.github/workflows/libc-overlay-tests.yml @@ -16,6 +16,7 @@ jobs: # Set fail-fast to false to ensure that feedback is delivered for all matrix combinations. fail-fast: false matrix: + build_type: [Debug, Release, MinSizeRel] include: # TODO: add linux gcc when it is fixed - os: ubuntu-24.04 @@ -95,7 +96,7 @@ jobs: cmake -B ${{ steps.strings.outputs.build-output-dir }} -DCMAKE_CXX_COMPILER=${{ matrix.compiler.cpp_compiler }} -DCMAKE_C_COMPILER=${{ matrix.compiler.c_compiler }} - -DCMAKE_BUILD_TYPE=MinSizeRel + -DCMAKE_BUILD_TYPE=${{ matrix.build_type }} -DCMAKE_C_COMPILER_LAUNCHER=${{ matrix.ccache-variant }} -DCMAKE_CXX_COMPILER_LAUNCHER=${{ matrix.ccache-variant }} -DCMAKE_POLICY_DEFAULT_CMP0141=NEW diff --git a/.github/workflows/premerge.yaml b/.github/workflows/premerge.yaml index b268f1faab989..178ab191a58be 100644 --- a/.github/workflows/premerge.yaml +++ b/.github/workflows/premerge.yaml @@ -21,15 +21,16 @@ on: - 'main' - 'release/**' +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }} + cancel-in-progress: true + jobs: premerge-checks-linux: if: >- github.repository_owner == 'llvm' && (github.event_name != 'pull_request' || github.event.action != 'closed') runs-on: llvm-premerge-linux-runners - concurrency: - group: ${{ github.workflow }}-linux-${{ github.event.pull_request.number || github.sha }} - cancel-in-progress: true steps: - name: Checkout LLVM uses: actions/checkout@v4 @@ -88,9 +89,6 @@ jobs: github.repository_owner == 'llvm' && (github.event_name != 'pull_request' || github.event.action != 'closed') runs-on: llvm-premerge-windows-runners - concurrency: - group: ${{ github.workflow }}-windows-${{ github.event.pull_request.number || github.sha }} - cancel-in-progress: true defaults: run: shell: bash @@ -148,9 +146,6 @@ jobs: permerge-check-macos: runs-on: macos-14 - concurrency: - group: ${{ github.workflow }}-macos-${{ github.event.pull_request.number || github.sha }} - cancel-in-progress: true if: >- github.repository_owner == 'llvm' && (startswith(github.ref_name, 'release/') || diff --git a/bolt/lib/RuntimeLibs/RuntimeLibrary.cpp b/bolt/lib/RuntimeLibs/RuntimeLibrary.cpp index 336c6768a7f71..8f5719e84ecea 100644 --- a/bolt/lib/RuntimeLibs/RuntimeLibrary.cpp +++ b/bolt/lib/RuntimeLibs/RuntimeLibrary.cpp @@ -18,6 +18,7 @@ #include "llvm/Object/Archive.h" #include "llvm/Object/ObjectFile.h" #include "llvm/Support/Path.h" +#include "llvm/Support/Program.h" #define DEBUG_TYPE "bolt-rtlib" @@ -38,6 +39,23 @@ std::string 
RuntimeLibrary::getLibPathByToolPath(StringRef ToolPath, llvm::sys::path::append(LibPath, "lib" LLVM_LIBDIR_SUFFIX); } llvm::sys::path::append(LibPath, LibFileName); + if (!llvm::sys::fs::exists(LibPath)) { + // If it is a symlink, check the directory that the symlink points to. + if (llvm::sys::fs::is_symlink_file(ToolPath)) { + SmallString<256> RealPath; + llvm::sys::fs::real_path(ToolPath, RealPath); + if (llvm::ErrorOr P = + llvm::sys::findProgramByName(RealPath)) { + outs() << "BOLT-INFO: library not found: " << LibPath << "\n" + << "BOLT-INFO: " << ToolPath << " is a symlink; will look up " + << LibFileName + << " at the target directory that the symlink points to\n"; + return getLibPath(*P, LibFileName); + } + } + errs() << "BOLT-ERROR: library not found: " << LibPath << "\n"; + exit(1); + } return std::string(LibPath); } diff --git a/clang/docs/BoundsSafety.rst b/clang/docs/BoundsSafety.rst index e24c69d8c7855..8635bec6e17c7 100644 --- a/clang/docs/BoundsSafety.rst +++ b/clang/docs/BoundsSafety.rst @@ -959,7 +959,8 @@ that has the define. #if defined(__has_feature) && __has_feature(bounds_safety) #define __counted_by(T) __attribute__((__counted_by__(T))) // ... other bounds annotations - #else #define __counted_by(T) // defined as nothing + #else + #define __counted_by(T) // defined as nothing // ... other bounds annotations #endif @@ -987,7 +988,7 @@ and it does not guarantee other types of memory safety properties. Consequently, it may not prevent some of the secondary bounds safety violations caused by other types of safety violations such as type confusion. For instance, ``-fbounds-safety`` does not perform type-safety checks on conversions between -`__single`` pointers of different pointee types (e.g., ``char *__single`` → +``__single`` pointers of different pointee types (e.g., ``char *__single`` → ``void *__single`` → ``int *__single``) beyond what the foundation languages (C/C++) already offer. @@ -1003,4 +1004,4 @@ Try it out Your feedback on the programming model is valuable. You may want to follow the instruction in :doc:`BoundsSafetyAdoptionGuide` to play with ``-fbounds-safety`` -and please send your feedback to `Yeoul Na `_. \ No newline at end of file +and please send your feedback to `Yeoul Na `_. diff --git a/clang/docs/ClangFormatStyleOptions.rst b/clang/docs/ClangFormatStyleOptions.rst index ce38a3a9ba1f7..bf6dd9e13915f 100644 --- a/clang/docs/ClangFormatStyleOptions.rst +++ b/clang/docs/ClangFormatStyleOptions.rst @@ -2182,6 +2182,24 @@ the configuration (without a prefix: ``Auto``). aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa); } +.. _BinPackLongBracedList: + +**BinPackLongBracedList** (``Boolean``) :versionbadge:`clang-format 21` :ref:`¶ ` + If ``BinPackLongBracedList`` is ``true`` it overrides + ``BinPackArguments`` if there are 20 or more items in a braced + initializer list. + + .. code-block:: c++ + + BinPackLongBracedList: false vs. BinPackLongBracedList: true + vector x{ vector x{1, 2, ..., + 20, 21}; + 1, + 2, + ..., + 20, + 21}; + .. _BinPackParameters: **BinPackParameters** (``BinPackParametersStyle``) :versionbadge:`clang-format 3.7` :ref:`¶ ` diff --git a/clang/docs/OpenMPSupport.rst b/clang/docs/OpenMPSupport.rst index c31d6e90ecb08..725624ee8c66c 100644 --- a/clang/docs/OpenMPSupport.rst +++ b/clang/docs/OpenMPSupport.rst @@ -416,9 +416,9 @@ implementation. 
+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+ | safe_sync and progress with identifier and API | :none:`unclaimed` | :none:`unclaimed` | | +-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+ -| OpenMP directives in concurrent loop regions | :none:`unclaimed` | :none:`unclaimed` | | +| OpenMP directives in concurrent loop regions | :good:`done` | :none:`unclaimed` | https://github.com/llvm/llvm-project/pull/125621 | +-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+ -| atomics constructs on concurrent loop regions | :none:`unclaimed` | :none:`unclaimed` | | +| atomics constructs on concurrent loop regions | :good:`done` | :none:`unclaimed` | https://github.com/llvm/llvm-project/pull/125621 | +-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+ | Loop construct with DO CONCURRENT | :none:`unclaimed` | :none:`unclaimed` | | +-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+ @@ -456,9 +456,7 @@ implementation. +-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+ | map-type modifiers in arbitrary position | :none:`unclaimed` | :none:`unclaimed` | | +-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+ -| atomic constructs in loop region | :none:`unclaimed` | :none:`unclaimed` | | -+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+ -| Lift nesting restriction on concurrent loop | :none:`unclaimed` | :none:`unclaimed` | | +| Lift nesting restriction on concurrent loop | :good:`done` | :none:`unclaimed` | https://github.com/llvm/llvm-project/pull/125621 | +-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+ | priority clause for target constructs | :none:`unclaimed` | :none:`unclaimed` | | +-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+ diff --git a/clang/docs/ReleaseNotes.rst b/clang/docs/ReleaseNotes.rst index 6d7eb84927a97..50d3bbbc97e91 100644 --- a/clang/docs/ReleaseNotes.rst +++ b/clang/docs/ReleaseNotes.rst @@ -127,6 +127,7 @@ Improvements to Clang's diagnostics - The ``-Wunique-object-duplication`` warning has been added to warn about objects which are supposed to only exist once per program, but may get duplicated when built into a shared library. 
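For context, once the BoundsSafety.rst hunk further above is applied, the fallback guard for ``__counted_by`` reads as shown below; the ``Buffer`` struct is a hypothetical use added here purely for illustration and is not part of the patch.

.. code-block:: c++

    #if defined(__has_feature) && __has_feature(bounds_safety)
    #define __counted_by(T) __attribute__((__counted_by__(T)))
    // ... other bounds annotations
    #else
    #define __counted_by(T) // defined as nothing
    // ... other bounds annotations
    #endif

    // Hypothetical use: the annotation records that `data` holds `len` elements
    // and expands to nothing on compilers without -fbounds-safety.
    struct Buffer {
      int *__counted_by(len) data;
      int len;
    };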
+- Fixed a bug where Clang's Analysis did not correctly model the destructor behavior of ``union`` members (#GH119415). Improvements to Clang's time-trace ---------------------------------- @@ -150,6 +151,8 @@ Bug Fixes to C++ Support ^^^^^^^^^^^^^^^^^^^^^^^^ - Clang is now better at keeping track of friend function template instance contexts. (#GH55509) +- The initialization kind of elements of structured bindings + direct-list-initialized from an array is corrected to direct-initialization. Bug Fixes to AST Handling ^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -236,6 +239,8 @@ clang-format ------------ - Adds ``BreakBeforeTemplateCloser`` option. +- Adds ``BinPackLongBracedList`` option to override bin packing options in + long (20 item or more) braced list initializer lists. libclang -------- diff --git a/clang/docs/UsersManual.rst b/clang/docs/UsersManual.rst index 0f2f313ad184a..d977868b8a2c6 100644 --- a/clang/docs/UsersManual.rst +++ b/clang/docs/UsersManual.rst @@ -3125,6 +3125,24 @@ indexed format, regardeless whether it is produced by frontend or the IR pass. overhead. ``prefer-atomic`` will be transformed to ``atomic`` when supported by the target, or ``single`` otherwise. +.. option:: -fprofile-continuous + + Enables the continuous instrumentation profiling where profile counter updates + are continuously synced to a file. This option sets any neccessary modifiers + (currently ``%c``) in the default profile filename and passes any necessary + flags to the middle-end to support this mode. Value profiling is not supported + in continuous mode. + + .. code-block:: console + + $ clang++ -O2 -fprofile-generate -fprofile-continuous code.cc -o code + + Running ``./code`` will collect the profile and write it to the + ``default_xxxx.profraw`` file. However, if ``./code`` abruptly terminates or + does not call ``exit()``, in continuous mode the profile collected up to the + point of termination will be available in ``default_xxxx.profraw`` while in + the non-continuous mode, no profile file is generated. + .. option:: -ftemporal-profile Enables the temporal profiling extension for IRPGO to improve startup time by diff --git a/clang/include/clang/AST/DeclTemplate.h b/clang/include/clang/AST/DeclTemplate.h index a30ae798a99bc..b82f75dd63fa5 100644 --- a/clang/include/clang/AST/DeclTemplate.h +++ b/clang/include/clang/AST/DeclTemplate.h @@ -1960,6 +1960,8 @@ class ClassTemplateSpecializationDecl : public CXXRecordDecl, bool hasStrictPackMatch() const { return StrictPackMatch; } + void setStrictPackMatch(bool Val) { StrictPackMatch = Val; } + /// Get the point of instantiation (if any), or null if none. SourceLocation getPointOfInstantiation() const { return PointOfInstantiation; diff --git a/clang/include/clang/AST/OperationKinds.def b/clang/include/clang/AST/OperationKinds.def index 8788b8ff0ef0a..b3dc7c3d8dc77 100644 --- a/clang/include/clang/AST/OperationKinds.def +++ b/clang/include/clang/AST/OperationKinds.def @@ -367,6 +367,9 @@ CAST_OPERATION(HLSLVectorTruncation) // Non-decaying array RValue cast (HLSL only). CAST_OPERATION(HLSLArrayRValue) +// Aggregate by Value cast (HLSL only). +CAST_OPERATION(HLSLElementwiseCast) + //===- Binary Operations -------------------------------------------------===// // Operators listed in order of precedence. 
// Note that additions to this should also update the StmtVisitor class, diff --git a/clang/include/clang/ASTMatchers/ASTMatchers.h b/clang/include/clang/ASTMatchers/ASTMatchers.h index 239fcba4e5e05..0f7e3a8a01762 100644 --- a/clang/include/clang/ASTMatchers/ASTMatchers.h +++ b/clang/include/clang/ASTMatchers/ASTMatchers.h @@ -2489,7 +2489,28 @@ extern const internal::VariadicDynCastAllOfMatcher extern const internal::VariadicDynCastAllOfMatcher imaginaryLiteral; -/// Matches fixed point literals +/// Matches fixed-point literals eg. +/// 0.5r, 0.5hr, 0.5lr, 0.5uhr, 0.5ur, 0.5ulr +/// 1.0k, 1.0hk, 1.0lk, 1.0uhk, 1.0uk, 1.0ulk +/// Exponents 1.0e10k +/// Hexadecimal numbers 0x0.2p2r +/// +/// Does not match implicit conversions such as first two lines: +/// \code +/// short _Accum sa = 2; +/// _Accum a = 12.5; +/// _Accum b = 1.25hk; +/// _Fract c = 0.25hr; +/// _Fract v = 0.35uhr; +/// _Accum g = 1.45uhk; +/// _Accum decexp1 = 1.575e1k; +/// \endcode +/// \compile_args{-ffixed-point;-std=c99} +/// +/// The matcher \matcher{fixedPointLiteral()} matches +/// \match{1.25hk}, \match{0.25hr}, \match{0.35uhr}, +/// \match{1.45uhk}, \match{1.575e1k}, but does not +/// match \nomatch{12.5} and \nomatch{2} from the code block. extern const internal::VariadicDynCastAllOfMatcher fixedPointLiteral; diff --git a/clang/include/clang/Basic/CodeGenOptions.def b/clang/include/clang/Basic/CodeGenOptions.def index cf0474470c08b..a7f5f1abbb825 100644 --- a/clang/include/clang/Basic/CodeGenOptions.def +++ b/clang/include/clang/Basic/CodeGenOptions.def @@ -221,6 +221,7 @@ AFFECTING_VALUE_CODEGENOPT(OptimizationLevel, 2, 0) ///< The -O[0-3] option spec AFFECTING_VALUE_CODEGENOPT(OptimizeSize, 2, 0) ///< If -Os (==1) or -Oz (==2) is specified. CODEGENOPT(AtomicProfileUpdate , 1, 0) ///< Set -fprofile-update=atomic +CODEGENOPT(ContinuousProfileSync, 1, 0) ///< Enable continuous instrumentation profiling /// Choose profile instrumenation kind or no instrumentation. ENUM_CODEGENOPT(ProfileInstr, ProfileInstrKind, 2, ProfileNone) /// Choose profile kind for PGO use compilation. @@ -320,6 +321,7 @@ CODEGENOPT(TimePassesPerRun , 1, 0) ///< Set when -ftime-report=per-pass-run is CODEGENOPT(TimeTrace , 1, 0) ///< Set when -ftime-trace is enabled. VALUE_CODEGENOPT(TimeTraceGranularity, 32, 500) ///< Minimum time granularity (in microseconds), ///< traced by time profiler +CODEGENOPT(InterchangeLoops , 1, 0) ///< Run loop-interchange. CODEGENOPT(UnrollLoops , 1, 0) ///< Control whether loops are unrolled. CODEGENOPT(RerollLoops , 1, 0) ///< Control whether loops are rerolled. CODEGENOPT(NoUseJumpTables , 1, 0) ///< Set when -fno-jump-tables is enabled. 
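The new documentation for ``fixedPointLiteral`` above spells out what the matcher does and does not match; the sketch below shows one common way such a matcher is consumed from a ``MatchFinder`` callback. ``FixedPointDumper`` and ``registerFixedPointMatcher`` are illustrative names, not part of this change, and the code being matched must be compiled with ``-ffixed-point``.

.. code-block:: c++

    #include "clang/AST/Expr.h"
    #include "clang/ASTMatchers/ASTMatchFinder.h"
    #include "clang/ASTMatchers/ASTMatchers.h"

    using namespace clang;
    using namespace clang::ast_matchers;

    // Illustrative callback: dumps every explicit fixed-point literal.
    class FixedPointDumper : public MatchFinder::MatchCallback {
    public:
      void run(const MatchFinder::MatchResult &Result) override {
        // Explicit literals such as 1.25hk or 0x0.2p2r are bound here;
        // implicit conversions like `_Accum a = 12.5;` are not matched.
        if (const auto *Lit = Result.Nodes.getNodeAs<FixedPointLiteral>("fixed"))
          Lit->dump();
      }
    };

    // Illustrative registration; wiring the finder into a ClangTool is omitted.
    static void registerFixedPointMatcher(MatchFinder &Finder,
                                          FixedPointDumper &Dumper) {
      Finder.addMatcher(fixedPointLiteral().bind("fixed"), &Dumper);
    }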
diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td index 77ca2d2aac31b..c9d192a20ff1f 100644 --- a/clang/include/clang/Driver/Options.td +++ b/clang/include/clang/Driver/Options.td @@ -1795,6 +1795,11 @@ def fprofile_update_EQ : Joined<["-"], "fprofile-update=">, Values<"atomic,prefer-atomic,single">, MetaVarName<"">, HelpText<"Set update method of profile counters">, MarshallingInfoFlag>; +def fprofile_continuous : Flag<["-"], "fprofile-continuous">, + Group, Visibility<[ClangOption, CC1Option]>, + HelpText<"Enable continuous instrumentation profiling mode">, + MarshallingInfoFlag>; + defm pseudo_probe_for_profiling : BoolFOption<"pseudo-probe-for-profiling", CodeGenOpts<"PseudoProbeForProfiling">, DefaultFalse, PosFlag, @@ -4069,6 +4074,10 @@ def ftrap_function_EQ : Joined<["-"], "ftrap-function=">, Group, Visibility<[ClangOption, CC1Option]>, HelpText<"Issue call to specified function rather than a trap instruction">, MarshallingInfoString>; +def floop_interchange : Flag<["-"], "floop-interchange">, Group, + HelpText<"Enable the loop interchange pass">, Visibility<[ClangOption, CC1Option]>; +def fno_loop_interchange: Flag<["-"], "fno-loop-interchange">, Group, + HelpText<"Disable the loop interchange pass">, Visibility<[ClangOption, CC1Option]>; def funroll_loops : Flag<["-"], "funroll-loops">, Group, HelpText<"Turn on loop unroller">, Visibility<[ClangOption, CC1Option, FlangOption, FC1Option]>; def fno_unroll_loops : Flag<["-"], "fno-unroll-loops">, Group, diff --git a/clang/include/clang/ExtractAPI/ExtractAPIVisitor.h b/clang/include/clang/ExtractAPI/ExtractAPIVisitor.h index aa86e41806711..e60440e14a9fe 100644 --- a/clang/include/clang/ExtractAPI/ExtractAPIVisitor.h +++ b/clang/include/clang/ExtractAPI/ExtractAPIVisitor.h @@ -1146,11 +1146,29 @@ bool ExtractAPIVisitorBase::VisitTypedefNameDecl( StringRef Name = Decl->getName(); + auto nameMatches = [&Name](TagDecl *TagDecl) { + StringRef TagName = TagDecl->getName(); + + if (TagName == Name) + return true; + + // Also check whether the tag decl's name is the same as the typedef name + // with prefixed underscores + if (TagName.starts_with('_')) { + StringRef StrippedName = TagName.ltrim('_'); + + if (StrippedName == Name) + return true; + } + + return false; + }; + // If the underlying type was defined as part of the typedef modify it's // fragments directly and pretend the typedef doesn't exist. 
if (auto *TagDecl = Decl->getUnderlyingType()->getAsTagDecl()) { if (TagDecl->isEmbeddedInDeclarator() && TagDecl->isCompleteDefinition() && - Decl->getName() == TagDecl->getName()) { + nameMatches(TagDecl)) { SmallString<128> TagUSR; index::generateUSRForDecl(TagDecl, TagUSR); if (auto *Record = API.findRecordForUSR(TagUSR)) { @@ -1164,6 +1182,11 @@ bool ExtractAPIVisitorBase::VisitTypedefNameDecl( .append(Name, DeclarationFragments::FragmentKind::Identifier) .appendSemicolon(); + // Replace the name and subheading in case it's underscored so we can + // use the non-underscored version + Record->Name = Name; + Record->SubHeading = DeclarationFragmentsBuilder::getSubHeading(Decl); + return true; } } diff --git a/clang/include/clang/Format/Format.h b/clang/include/clang/Format/Format.h index fbc9291ae950d..16956b4e0fbd4 100644 --- a/clang/include/clang/Format/Format.h +++ b/clang/include/clang/Format/Format.h @@ -1212,6 +1212,22 @@ struct FormatStyle { /// \version 3.7 bool BinPackArguments; + /// If ``BinPackLongBracedList`` is ``true`` it overrides + /// ``BinPackArguments`` if there are 20 or more items in a braced + /// initializer list. + /// \code + /// BinPackLongBracedList: false vs. BinPackLongBracedList: true + /// vector x{ vector x{1, 2, ..., + /// 20, 21}; + /// 1, + /// 2, + /// ..., + /// 20, + /// 21}; + /// \endcode + /// \version 21 + bool BinPackLongBracedList; + /// Different way to try to fit all parameters on a line. enum BinPackParametersStyle : int8_t { /// Bin-pack parameters. @@ -5266,6 +5282,7 @@ struct FormatStyle { R.AlwaysBreakBeforeMultilineStrings && AttributeMacros == R.AttributeMacros && BinPackArguments == R.BinPackArguments && + BinPackLongBracedList == R.BinPackLongBracedList && BinPackParameters == R.BinPackParameters && BitFieldColonSpacing == R.BitFieldColonSpacing && BracedInitializerIndentWidth == R.BracedInitializerIndentWidth && diff --git a/clang/include/clang/Sema/SemaHLSL.h b/clang/include/clang/Sema/SemaHLSL.h index 20376e980ab35..6e8ca2e4710de 100644 --- a/clang/include/clang/Sema/SemaHLSL.h +++ b/clang/include/clang/Sema/SemaHLSL.h @@ -141,6 +141,9 @@ class SemaHLSL : public SemaBase { // Diagnose whether the input ID is uint/unit2/uint3 type. bool diagnoseInputIDType(QualType T, const ParsedAttr &AL); + bool CanPerformScalarCast(QualType SrcTy, QualType DestTy); + bool ContainsBitField(QualType BaseTy); + bool CanPerformElementwiseCast(Expr *Src, QualType DestType); ExprResult ActOnOutParamExpr(ParmVarDecl *Param, Expr *Arg); QualType getInoutParameterType(QualType Ty); diff --git a/clang/lib/AST/ByteCode/Compiler.cpp b/clang/lib/AST/ByteCode/Compiler.cpp index 1e1e96a1c4782..1f0e022edcd76 100644 --- a/clang/lib/AST/ByteCode/Compiler.cpp +++ b/clang/lib/AST/ByteCode/Compiler.cpp @@ -6234,9 +6234,6 @@ bool Compiler::visitDeclRef(const ValueDecl *D, const Expr *E) { return this->emitGetPtrParam(It->second.Offset, E); } - - if (D->getType()->isReferenceType()) - return this->emitDummyPtr(D, E); } // In case we need to re-visit a declaration. 
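The ``ExtractAPIVisitor`` change above treats a tag declaration whose name differs from the typedef name only by leading underscores as a match, so the record is emitted under the public, non-underscored name. A typical header pattern this targets looks like the following; the names are illustrative only.

.. code-block:: c++

    // The tag is conventionally underscored and the typedef exposes the public
    // name. With the change above, ExtractAPI reports a single record named
    // `MyObject` instead of `_MyObject`.
    typedef struct _MyObject {
      int RefCount;
      void *Payload;
    } MyObject;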
diff --git a/clang/lib/AST/ByteCode/Program.cpp b/clang/lib/AST/ByteCode/Program.cpp index e0b86d46428a2..833c9ef88d770 100644 --- a/clang/lib/AST/ByteCode/Program.cpp +++ b/clang/lib/AST/ByteCode/Program.cpp @@ -18,14 +18,12 @@ using namespace clang; using namespace clang::interp; unsigned Program::getOrCreateNativePointer(const void *Ptr) { - auto It = NativePointerIndices.find(Ptr); - if (It != NativePointerIndices.end()) - return It->second; + auto [It, Inserted] = + NativePointerIndices.try_emplace(Ptr, NativePointers.size()); + if (Inserted) + NativePointers.push_back(Ptr); - unsigned Idx = NativePointers.size(); - NativePointers.push_back(Ptr); - NativePointerIndices[Ptr] = Idx; - return Idx; + return It->second; } const void *Program::getNativePointer(unsigned Idx) { diff --git a/clang/lib/AST/Expr.cpp b/clang/lib/AST/Expr.cpp index 4fc62919fde94..c22aa66ba2cfb 100644 --- a/clang/lib/AST/Expr.cpp +++ b/clang/lib/AST/Expr.cpp @@ -1956,6 +1956,7 @@ bool CastExpr::CastConsistency() const { case CK_FixedPointToBoolean: case CK_HLSLArrayRValue: case CK_HLSLVectorTruncation: + case CK_HLSLElementwiseCast: CheckNoBasePath: assert(path_empty() && "Cast kind should not have a base path!"); break; diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp index 37019b5235f56..192b679b4c995 100644 --- a/clang/lib/AST/ExprConstant.cpp +++ b/clang/lib/AST/ExprConstant.cpp @@ -15047,6 +15047,7 @@ bool IntExprEvaluator::VisitCastExpr(const CastExpr *E) { case CK_NoOp: case CK_LValueToRValueBitCast: case CK_HLSLArrayRValue: + case CK_HLSLElementwiseCast: return ExprEvaluatorBaseTy::VisitCastExpr(E); case CK_MemberPointerToBoolean: @@ -15905,6 +15906,7 @@ bool ComplexExprEvaluator::VisitCastExpr(const CastExpr *E) { case CK_IntegralToFixedPoint: case CK_MatrixCast: case CK_HLSLVectorTruncation: + case CK_HLSLElementwiseCast: llvm_unreachable("invalid cast kind for complex value"); case CK_LValueToRValue: diff --git a/clang/lib/Analysis/CFG.cpp b/clang/lib/Analysis/CFG.cpp index 304bbb2b422c6..3e144395cffc6 100644 --- a/clang/lib/Analysis/CFG.cpp +++ b/clang/lib/Analysis/CFG.cpp @@ -2041,6 +2041,8 @@ void CFGBuilder::addImplicitDtorsForDestructor(const CXXDestructorDecl *DD) { } // First destroy member objects. + if (RD->isUnion()) + return; for (auto *FI : RD->fields()) { // Check for constant size array. Set type to array element type. QualType QT = FI->getType(); diff --git a/clang/lib/Analysis/UninitializedValues.cpp b/clang/lib/Analysis/UninitializedValues.cpp index bf2f730618650..3a052eb27a444 100644 --- a/clang/lib/Analysis/UninitializedValues.cpp +++ b/clang/lib/Analysis/UninitializedValues.cpp @@ -379,8 +379,10 @@ void ClassifyRefs::classify(const Expr *E, Class C) { } FindVarResult Var = findVar(E, DC); - if (const DeclRefExpr *DRE = Var.getDeclRefExpr()) - Classification[DRE] = std::max(Classification[DRE], C); + if (const DeclRefExpr *DRE = Var.getDeclRefExpr()) { + auto &Class = Classification[DRE]; + Class = std::max(Class, C); + } } void ClassifyRefs::VisitDeclStmt(DeclStmt *DS) { diff --git a/clang/lib/CodeGen/Address.h b/clang/lib/CodeGen/Address.h index a18c7169af1eb..a748ddaa110a5 100644 --- a/clang/lib/CodeGen/Address.h +++ b/clang/lib/CodeGen/Address.h @@ -197,10 +197,7 @@ class Address { /// Return the type of the pointer value. 
llvm::PointerType *getType() const { - return llvm::PointerType::get( - ElementType, - llvm::cast(Pointer.getPointer()->getType()) - ->getAddressSpace()); + return llvm::cast(Pointer.getPointer()->getType()); } /// Return the type of the values stored in this address. diff --git a/clang/lib/CodeGen/BackendUtil.cpp b/clang/lib/CodeGen/BackendUtil.cpp index 97e9bbccd61ef..1750719e17670 100644 --- a/clang/lib/CodeGen/BackendUtil.cpp +++ b/clang/lib/CodeGen/BackendUtil.cpp @@ -124,15 +124,25 @@ namespace clang { extern llvm::cl::opt ClSanitizeGuardChecks; } -namespace { - // Default filename used for profile generation. -std::string getDefaultProfileGenName() { +static std::string getDefaultProfileGenName() { return DebugInfoCorrelate || ProfileCorrelate != InstrProfCorrelator::NONE ? "default_%m.proflite" : "default_%m.profraw"; } +// Path and name of file used for profile generation +static std::string getProfileGenName(const CodeGenOptions &CodeGenOpts) { + std::string FileName = CodeGenOpts.InstrProfileOutput.empty() + ? getDefaultProfileGenName() + : CodeGenOpts.InstrProfileOutput; + if (CodeGenOpts.ContinuousProfileSync) + FileName = "%c" + FileName; + return FileName; +} + +namespace { + class EmitAssemblyHelper { CompilerInstance &CI; DiagnosticsEngine &Diags; @@ -551,7 +561,9 @@ getInstrProfOptions(const CodeGenOptions &CodeGenOpts, return std::nullopt; InstrProfOptions Options; Options.NoRedZone = CodeGenOpts.DisableRedZone; - Options.InstrProfileOutput = CodeGenOpts.InstrProfileOutput; + Options.InstrProfileOutput = CodeGenOpts.ContinuousProfileSync + ? ("%c" + CodeGenOpts.InstrProfileOutput) + : CodeGenOpts.InstrProfileOutput; Options.Atomic = CodeGenOpts.AtomicProfileUpdate; return Options; } @@ -822,13 +834,12 @@ void EmitAssemblyHelper::RunOptimizationPipeline( if (CodeGenOpts.hasProfileIRInstr()) // -fprofile-generate. - PGOOpt = PGOOptions( - CodeGenOpts.InstrProfileOutput.empty() ? getDefaultProfileGenName() - : CodeGenOpts.InstrProfileOutput, - "", "", CodeGenOpts.MemoryProfileUsePath, nullptr, PGOOptions::IRInstr, - PGOOptions::NoCSAction, ClPGOColdFuncAttr, - CodeGenOpts.DebugInfoForProfiling, - /*PseudoProbeForProfiling=*/false, CodeGenOpts.AtomicProfileUpdate); + PGOOpt = PGOOptions(getProfileGenName(CodeGenOpts), "", "", + CodeGenOpts.MemoryProfileUsePath, nullptr, + PGOOptions::IRInstr, PGOOptions::NoCSAction, + ClPGOColdFuncAttr, CodeGenOpts.DebugInfoForProfiling, + /*PseudoProbeForProfiling=*/false, + CodeGenOpts.AtomicProfileUpdate); else if (CodeGenOpts.hasProfileIRUse()) { // -fprofile-use. auto CSAction = CodeGenOpts.hasProfileCSIRUse() ? PGOOptions::CSIRUse @@ -872,24 +883,20 @@ void EmitAssemblyHelper::RunOptimizationPipeline( PGOOpt->Action != PGOOptions::SampleUse && "Cannot run CSProfileGen pass with ProfileGen or SampleUse " " pass"); - PGOOpt->CSProfileGenFile = CodeGenOpts.InstrProfileOutput.empty() - ? getDefaultProfileGenName() - : CodeGenOpts.InstrProfileOutput; + PGOOpt->CSProfileGenFile = getProfileGenName(CodeGenOpts); PGOOpt->CSAction = PGOOptions::CSIRInstr; } else - PGOOpt = PGOOptions("", - CodeGenOpts.InstrProfileOutput.empty() - ? 
getDefaultProfileGenName() - : CodeGenOpts.InstrProfileOutput, - "", /*MemoryProfile=*/"", nullptr, - PGOOptions::NoAction, PGOOptions::CSIRInstr, - ClPGOColdFuncAttr, CodeGenOpts.DebugInfoForProfiling); + PGOOpt = PGOOptions("", getProfileGenName(CodeGenOpts), "", + /*MemoryProfile=*/"", nullptr, PGOOptions::NoAction, + PGOOptions::CSIRInstr, ClPGOColdFuncAttr, + CodeGenOpts.DebugInfoForProfiling); } if (TM) TM->setPGOOption(PGOOpt); PipelineTuningOptions PTO; PTO.LoopUnrolling = CodeGenOpts.UnrollLoops; + PTO.LoopInterchange = CodeGenOpts.InterchangeLoops; // For historical reasons, loop interleaving is set to mirror setting for loop // unrolling. PTO.LoopInterleaving = CodeGenOpts.UnrollLoops; @@ -1314,6 +1321,7 @@ runThinLTOBackend(CompilerInstance &CI, ModuleSummaryIndex *CombinedIndex, initTargetOptions(CI, Diags, Conf.Options); Conf.SampleProfile = std::move(SampleProfile); Conf.PTO.LoopUnrolling = CGOpts.UnrollLoops; + Conf.PTO.LoopInterchange = CGOpts.InterchangeLoops; // For historical reasons, loop interleaving is set to mirror setting for loop // unrolling. Conf.PTO.LoopInterleaving = CGOpts.UnrollLoops; diff --git a/clang/lib/CodeGen/CGBlocks.cpp b/clang/lib/CodeGen/CGBlocks.cpp index aaba354c08547..faef6a5fbe1f5 100644 --- a/clang/lib/CodeGen/CGBlocks.cpp +++ b/clang/lib/CodeGen/CGBlocks.cpp @@ -1097,31 +1097,10 @@ llvm::Type *CodeGenModule::getBlockDescriptorType() { if (BlockDescriptorType) return BlockDescriptorType; - llvm::Type *UnsignedLongTy = - getTypes().ConvertType(getContext().UnsignedLongTy); - - // struct __block_descriptor { - // unsigned long reserved; - // unsigned long block_size; - // - // // later, the following will be added - // - // struct { - // void (*copyHelper)(); - // void (*copyHelper)(); - // } helpers; // !!! optional - // - // const char *signature; // the block signature - // const char *layout; // reserved - // }; - BlockDescriptorType = llvm::StructType::create( - "struct.__block_descriptor", UnsignedLongTy, UnsignedLongTy); - - // Now form a pointer to that. unsigned AddrSpace = 0; if (getLangOpts().OpenCL) AddrSpace = getContext().getTargetAddressSpace(LangAS::opencl_constant); - BlockDescriptorType = llvm::PointerType::get(BlockDescriptorType, AddrSpace); + BlockDescriptorType = llvm::PointerType::get(getLLVMContext(), AddrSpace); return BlockDescriptorType; } diff --git a/clang/lib/CodeGen/CGDebugInfo.cpp b/clang/lib/CodeGen/CGDebugInfo.cpp index db595796c067e..d5b584ec0f2e9 100644 --- a/clang/lib/CodeGen/CGDebugInfo.cpp +++ b/clang/lib/CodeGen/CGDebugInfo.cpp @@ -3567,6 +3567,10 @@ llvm::DIType *CGDebugInfo::CreateTypeDefinition(const EnumType *Ty) { DBuilder.createEnumerator(Enum->getName(), Enum->getInitVal())); } + std::optional EnumKind; + if (auto *Attr = ED->getAttr()) + EnumKind = Attr->getExtensibility(); + // Return a CompositeType for the enum itself. 
llvm::DINodeArray EltArray = DBuilder.getOrCreateArray(Enumerators); @@ -3576,7 +3580,7 @@ llvm::DIType *CGDebugInfo::CreateTypeDefinition(const EnumType *Ty) { llvm::DIType *ClassTy = getOrCreateType(ED->getIntegerType(), DefUnit); return DBuilder.createEnumerationType( EnumContext, ED->getName(), DefUnit, Line, Size, Align, EltArray, ClassTy, - /*RunTimeLang=*/0, Identifier, ED->isScoped()); + /*RunTimeLang=*/0, Identifier, ED->isScoped(), EnumKind); } llvm::DIMacro *CGDebugInfo::CreateMacro(llvm::DIMacroFile *Parent, diff --git a/clang/lib/CodeGen/CGDecl.cpp b/clang/lib/CodeGen/CGDecl.cpp index cc6815db4d20f..668282a6ab1a8 100644 --- a/clang/lib/CodeGen/CGDecl.cpp +++ b/clang/lib/CodeGen/CGDecl.cpp @@ -2870,15 +2870,12 @@ void CodeGenModule::EmitOMPAllocateDecl(const OMPAllocateDecl *D) { // We can also keep the existing global if the address space is what we // expect it to be, if not, it is replaced. - QualType ASTTy = VD->getType(); clang::LangAS GVAS = GetGlobalVarAddressSpace(VD); auto TargetAS = getContext().getTargetAddressSpace(GVAS); if (Entry->getType()->getAddressSpace() == TargetAS) continue; - // Make a new global with the correct type / address space. - llvm::Type *Ty = getTypes().ConvertTypeForMem(ASTTy); - llvm::PointerType *PTy = llvm::PointerType::get(Ty, TargetAS); + llvm::PointerType *PTy = llvm::PointerType::get(getLLVMContext(), TargetAS); // Replace all uses of the old global with a cast. Since we mutate the type // in place we neeed an intermediate that takes the spot of the old entry @@ -2891,8 +2888,7 @@ void CodeGenModule::EmitOMPAllocateDecl(const OMPAllocateDecl *D) { Entry->mutateType(PTy); llvm::Constant *NewPtrForOldDecl = - llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast( - Entry, DummyGV->getType()); + llvm::ConstantExpr::getAddrSpaceCast(Entry, DummyGV->getType()); // Now we have a casted version of the changed global, the dummy can be // replaced and deleted. diff --git a/clang/lib/CodeGen/CGDeclCXX.cpp b/clang/lib/CodeGen/CGDeclCXX.cpp index 1c2fecea1a6ac..f5950f03673a1 100644 --- a/clang/lib/CodeGen/CGDeclCXX.cpp +++ b/clang/lib/CodeGen/CGDeclCXX.cpp @@ -345,10 +345,7 @@ void CodeGenFunction::registerGlobalDtorWithLLVM(const VarDecl &VD, void CodeGenFunction::registerGlobalDtorWithAtExit(llvm::Constant *dtorStub) { // extern "C" int atexit(void (*f)(void)); - assert(dtorStub->getType() == - llvm::PointerType::get( - llvm::FunctionType::get(CGM.VoidTy, false), - dtorStub->getType()->getPointerAddressSpace()) && + assert(dtorStub->getType()->isPointerTy() && "Argument to atexit has a wrong type."); llvm::FunctionType *atexitTy = @@ -372,10 +369,7 @@ CodeGenFunction::unregisterGlobalDtorWithUnAtExit(llvm::Constant *dtorStub) { // value is returned. 
// // extern "C" int unatexit(void (*f)(void)); - assert(dtorStub->getType() == - llvm::PointerType::get( - llvm::FunctionType::get(CGM.VoidTy, false), - dtorStub->getType()->getPointerAddressSpace()) && + assert(dtorStub->getType()->isPointerTy() && "Argument to unatexit has a wrong type."); llvm::FunctionType *unatexitTy = diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp index bf8df2789f58d..2bbc0791c6587 100644 --- a/clang/lib/CodeGen/CGExpr.cpp +++ b/clang/lib/CodeGen/CGExpr.cpp @@ -872,7 +872,7 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, llvm::Value *TypeHash = llvm::ConstantInt::get(Int64Ty, xxh3_64bits(Out.str())); - llvm::Type *VPtrTy = llvm::PointerType::get(IntPtrTy, 0); + llvm::Type *VPtrTy = llvm::PointerType::get(getLLVMContext(), 0); Address VPtrAddr(Ptr, IntPtrTy, getPointerAlign()); llvm::Value *VPtrVal = GetVTablePtr(VPtrAddr, VPtrTy, Ty->getAsCXXRecordDecl(), @@ -3054,7 +3054,7 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) { getContext().getDeclAlign(VD)); llvm::Type *VarTy = getTypes().ConvertTypeForMem(VD->getType()); auto *PTy = llvm::PointerType::get( - VarTy, getTypes().getTargetAddressSpace(VD->getType())); + getLLVMContext(), getTypes().getTargetAddressSpace(VD->getType())); Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, PTy, VarTy); } else { // Should we be using the alignment of the constant pointer we emitted? @@ -5338,6 +5338,7 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) { case CK_MatrixCast: case CK_HLSLVectorTruncation: case CK_HLSLArrayRValue: + case CK_HLSLElementwiseCast: return EmitUnsupportedLValue(E, "unexpected cast lvalue"); case CK_Dependent: @@ -6376,3 +6377,75 @@ RValue CodeGenFunction::EmitPseudoObjectRValue(const PseudoObjectExpr *E, LValue CodeGenFunction::EmitPseudoObjectLValue(const PseudoObjectExpr *E) { return emitPseudoObjectExpr(*this, E, true, AggValueSlot::ignored()).LV; } + +void CodeGenFunction::FlattenAccessAndType( + Address Addr, QualType AddrType, + SmallVectorImpl> &AccessList, + SmallVectorImpl &FlatTypes) { + // WorkList is list of type we are processing + the Index List to access + // the field of that type in Addr for use in a GEP + llvm::SmallVector>, + 16> + WorkList; + llvm::IntegerType *IdxTy = llvm::IntegerType::get(getLLVMContext(), 32); + // Addr should be a pointer so we need to 'dereference' it + WorkList.push_back({AddrType, {llvm::ConstantInt::get(IdxTy, 0)}}); + + while (!WorkList.empty()) { + auto [T, IdxList] = WorkList.pop_back_val(); + T = T.getCanonicalType().getUnqualifiedType(); + assert(!isa(T) && "Matrix types not yet supported in HLSL"); + if (const auto *CAT = dyn_cast(T)) { + uint64_t Size = CAT->getZExtSize(); + for (int64_t I = Size - 1; I > -1; I--) { + llvm::SmallVector IdxListCopy = IdxList; + IdxListCopy.push_back(llvm::ConstantInt::get(IdxTy, I)); + WorkList.emplace_back(CAT->getElementType(), IdxListCopy); + } + } else if (const auto *RT = dyn_cast(T)) { + const RecordDecl *Record = RT->getDecl(); + assert(!Record->isUnion() && "Union types not supported in flat cast."); + + const CXXRecordDecl *CXXD = dyn_cast(Record); + + llvm::SmallVector FieldTypes; + if (CXXD && CXXD->isStandardLayout()) + Record = CXXD->getStandardLayoutBaseWithFields(); + + // deal with potential base classes + if (CXXD && !CXXD->isStandardLayout()) { + for (auto &Base : CXXD->bases()) + FieldTypes.push_back(Base.getType()); + } + + for (auto *FD : Record->fields()) + FieldTypes.push_back(FD->getType()); + + for 
(int64_t I = FieldTypes.size() - 1; I > -1; I--) { + llvm::SmallVector IdxListCopy = IdxList; + IdxListCopy.push_back(llvm::ConstantInt::get(IdxTy, I)); + WorkList.insert(WorkList.end(), {FieldTypes[I], IdxListCopy}); + } + } else if (const auto *VT = dyn_cast(T)) { + llvm::Type *LLVMT = ConvertTypeForMem(T); + CharUnits Align = getContext().getTypeAlignInChars(T); + Address GEP = + Builder.CreateInBoundsGEP(Addr, IdxList, LLVMT, Align, "vector.gep"); + for (unsigned I = 0, E = VT->getNumElements(); I < E; I++) { + llvm::Value *Idx = llvm::ConstantInt::get(IdxTy, I); + // gep on vector fields is not recommended so combine gep with + // extract/insert + AccessList.emplace_back(GEP, Idx); + FlatTypes.push_back(VT->getElementType()); + } + } else { + // a scalar/builtin type + llvm::Type *LLVMT = ConvertTypeForMem(T); + CharUnits Align = getContext().getTypeAlignInChars(T); + Address GEP = + Builder.CreateInBoundsGEP(Addr, IdxList, LLVMT, Align, "gep"); + AccessList.emplace_back(GEP, nullptr); + FlatTypes.push_back(T); + } + } +} diff --git a/clang/lib/CodeGen/CGExprAgg.cpp b/clang/lib/CodeGen/CGExprAgg.cpp index 2ad6587089f10..c3f1cbed6b39f 100644 --- a/clang/lib/CodeGen/CGExprAgg.cpp +++ b/clang/lib/CodeGen/CGExprAgg.cpp @@ -491,6 +491,79 @@ static bool isTrivialFiller(Expr *E) { return false; } +// emit a flat cast where the RHS is a scalar, including vector +static void EmitHLSLScalarFlatCast(CodeGenFunction &CGF, Address DestVal, + QualType DestTy, llvm::Value *SrcVal, + QualType SrcTy, SourceLocation Loc) { + // Flatten our destination + SmallVector DestTypes; // Flattened type + SmallVector, 16> StoreGEPList; + // ^^ Flattened accesses to DestVal we want to store into + CGF.FlattenAccessAndType(DestVal, DestTy, StoreGEPList, DestTypes); + + assert(SrcTy->isVectorType() && "HLSL Flat cast doesn't handle splatting."); + const VectorType *VT = SrcTy->getAs(); + SrcTy = VT->getElementType(); + assert(StoreGEPList.size() <= VT->getNumElements() && + "Cannot perform HLSL flat cast when vector source \ + object has less elements than flattened destination \ + object."); + for (unsigned I = 0, Size = StoreGEPList.size(); I < Size; I++) { + llvm::Value *Load = CGF.Builder.CreateExtractElement(SrcVal, I, "vec.load"); + llvm::Value *Cast = + CGF.EmitScalarConversion(Load, SrcTy, DestTypes[I], Loc); + + // store back + llvm::Value *Idx = StoreGEPList[I].second; + if (Idx) { + llvm::Value *V = + CGF.Builder.CreateLoad(StoreGEPList[I].first, "load.for.insert"); + Cast = CGF.Builder.CreateInsertElement(V, Cast, Idx); + } + CGF.Builder.CreateStore(Cast, StoreGEPList[I].first); + } + return; +} + +// emit a flat cast where the RHS is an aggregate +static void EmitHLSLElementwiseCast(CodeGenFunction &CGF, Address DestVal, + QualType DestTy, Address SrcVal, + QualType SrcTy, SourceLocation Loc) { + // Flatten our destination + SmallVector DestTypes; // Flattened type + SmallVector, 16> StoreGEPList; + // ^^ Flattened accesses to DestVal we want to store into + CGF.FlattenAccessAndType(DestVal, DestTy, StoreGEPList, DestTypes); + // Flatten our src + SmallVector SrcTypes; // Flattened type + SmallVector, 16> LoadGEPList; + // ^^ Flattened accesses to SrcVal we want to load from + CGF.FlattenAccessAndType(SrcVal, SrcTy, LoadGEPList, SrcTypes); + + assert(StoreGEPList.size() <= LoadGEPList.size() && + "Cannot perform HLSL flat cast when flattened source object \ + has less elements than flattened destination object."); + // apply casts to what we load from LoadGEPList + // and store result in Dest + for 
(unsigned I = 0, E = StoreGEPList.size(); I < E; I++) { + llvm::Value *Idx = LoadGEPList[I].second; + llvm::Value *Load = CGF.Builder.CreateLoad(LoadGEPList[I].first, "load"); + Load = + Idx ? CGF.Builder.CreateExtractElement(Load, Idx, "vec.extract") : Load; + llvm::Value *Cast = + CGF.EmitScalarConversion(Load, SrcTypes[I], DestTypes[I], Loc); + + // store back + Idx = StoreGEPList[I].second; + if (Idx) { + llvm::Value *V = + CGF.Builder.CreateLoad(StoreGEPList[I].first, "load.for.insert"); + Cast = CGF.Builder.CreateInsertElement(V, Cast, Idx); + } + CGF.Builder.CreateStore(Cast, StoreGEPList[I].first); + } +} + /// Emit initialization of an array from an initializer list. ExprToVisit must /// be either an InitListEpxr a CXXParenInitListExpr. void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType, @@ -890,7 +963,25 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) { case CK_HLSLArrayRValue: Visit(E->getSubExpr()); break; - + case CK_HLSLElementwiseCast: { + Expr *Src = E->getSubExpr(); + QualType SrcTy = Src->getType(); + RValue RV = CGF.EmitAnyExpr(Src); + QualType DestTy = E->getType(); + Address DestVal = Dest.getAddress(); + SourceLocation Loc = E->getExprLoc(); + + if (RV.isScalar()) { + llvm::Value *SrcVal = RV.getScalarVal(); + EmitHLSLScalarFlatCast(CGF, DestVal, DestTy, SrcVal, SrcTy, Loc); + } else { + assert(RV.isAggregate() && + "Can't perform HLSL Aggregate cast on a complex type."); + Address SrcVal = RV.getAggregateAddress(); + EmitHLSLElementwiseCast(CGF, DestVal, DestTy, SrcVal, SrcTy, Loc); + } + break; + } case CK_NoOp: case CK_UserDefinedConversion: case CK_ConstructorConversion: @@ -1461,6 +1552,7 @@ static bool castPreservesZero(const CastExpr *CE) { case CK_NonAtomicToAtomic: case CK_AtomicToNonAtomic: case CK_HLSLVectorTruncation: + case CK_HLSLElementwiseCast: return true; case CK_BaseToDerivedMemberPointer: diff --git a/clang/lib/CodeGen/CGExprComplex.cpp b/clang/lib/CodeGen/CGExprComplex.cpp index ac31dff11b585..c2679ea92dc97 100644 --- a/clang/lib/CodeGen/CGExprComplex.cpp +++ b/clang/lib/CodeGen/CGExprComplex.cpp @@ -610,6 +610,7 @@ ComplexPairTy ComplexExprEmitter::EmitCast(CastKind CK, Expr *Op, case CK_MatrixCast: case CK_HLSLVectorTruncation: case CK_HLSLArrayRValue: + case CK_HLSLElementwiseCast: llvm_unreachable("invalid cast kind for complex value"); case CK_FloatingRealToComplex: diff --git a/clang/lib/CodeGen/CGExprConstant.cpp b/clang/lib/CodeGen/CGExprConstant.cpp index 655fc3dc954c8..ef11798869d3b 100644 --- a/clang/lib/CodeGen/CGExprConstant.cpp +++ b/clang/lib/CodeGen/CGExprConstant.cpp @@ -1335,6 +1335,7 @@ class ConstExprEmitter case CK_MatrixCast: case CK_HLSLVectorTruncation: case CK_HLSLArrayRValue: + case CK_HLSLElementwiseCast: return nullptr; } llvm_unreachable("Invalid CastKind"); diff --git a/clang/lib/CodeGen/CGExprScalar.cpp b/clang/lib/CodeGen/CGExprScalar.cpp index df850421c72c6..80daed7e53951 100644 --- a/clang/lib/CodeGen/CGExprScalar.cpp +++ b/clang/lib/CodeGen/CGExprScalar.cpp @@ -2269,6 +2269,42 @@ bool CodeGenFunction::ShouldNullCheckClassCastValue(const CastExpr *CE) { return true; } +// RHS is an aggregate type +static Value *EmitHLSLElementwiseCast(CodeGenFunction &CGF, Address RHSVal, + QualType RHSTy, QualType LHSTy, + SourceLocation Loc) { + SmallVector, 16> LoadGEPList; + SmallVector SrcTypes; // Flattened type + CGF.FlattenAccessAndType(RHSVal, RHSTy, LoadGEPList, SrcTypes); + // LHS is either a vector or a builtin? 
+ // if its a vector create a temp alloca to store into and return that + if (auto *VecTy = LHSTy->getAs()) { + assert(SrcTypes.size() >= VecTy->getNumElements() && + "Flattened type on RHS must have more elements than vector on LHS."); + llvm::Value *V = + CGF.Builder.CreateLoad(CGF.CreateIRTemp(LHSTy, "flatcast.tmp")); + // write to V. + for (unsigned I = 0, E = VecTy->getNumElements(); I < E; I++) { + llvm::Value *Load = CGF.Builder.CreateLoad(LoadGEPList[I].first, "load"); + llvm::Value *Idx = LoadGEPList[I].second; + Load = Idx ? CGF.Builder.CreateExtractElement(Load, Idx, "vec.extract") + : Load; + llvm::Value *Cast = CGF.EmitScalarConversion( + Load, SrcTypes[I], VecTy->getElementType(), Loc); + V = CGF.Builder.CreateInsertElement(V, Cast, I); + } + return V; + } + // i its a builtin just do an extract element or load. + assert(LHSTy->isBuiltinType() && + "Destination type must be a vector or builtin type."); + llvm::Value *Load = CGF.Builder.CreateLoad(LoadGEPList[0].first, "load"); + llvm::Value *Idx = LoadGEPList[0].second; + Load = + Idx ? CGF.Builder.CreateExtractElement(Load, Idx, "vec.extract") : Load; + return CGF.EmitScalarConversion(Load, LHSTy, SrcTypes[0], Loc); +} + // VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts // have to handle a more broad range of conversions than explicit casts, as they // handle things like function to ptr-to-function decay etc. @@ -2759,7 +2795,16 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { llvm::Value *Zero = llvm::Constant::getNullValue(CGF.SizeTy); return Builder.CreateExtractElement(Vec, Zero, "cast.vtrunc"); } + case CK_HLSLElementwiseCast: { + RValue RV = CGF.EmitAnyExpr(E); + SourceLocation Loc = CE->getExprLoc(); + QualType SrcTy = E->getType(); + assert(RV.isAggregate() && "Not a valid HLSL Flat Cast."); + // RHS is an aggregate + Address SrcVal = RV.getAggregateAddress(); + return EmitHLSLElementwiseCast(CGF, SrcVal, SrcTy, DestTy, Loc); + } } // end of switch llvm_unreachable("unknown scalar cast"); diff --git a/clang/lib/CodeGen/CGObjCMac.cpp b/clang/lib/CodeGen/CGObjCMac.cpp index dd900f9b32fb7..6c929a6431c0f 100644 --- a/clang/lib/CodeGen/CGObjCMac.cpp +++ b/clang/lib/CodeGen/CGObjCMac.cpp @@ -5717,7 +5717,7 @@ ObjCCommonTypesHelper::ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm) IntTy = CGM.IntTy; LongTy = cast(Types.ConvertType(Ctx.LongTy)); Int8PtrTy = CGM.Int8PtrTy; - Int8PtrProgramASTy = llvm::PointerType::get(CGM.Int8Ty, ProgramAS); + Int8PtrProgramASTy = llvm::PointerType::get(CGM.getLLVMContext(), ProgramAS); Int8PtrPtrTy = CGM.Int8PtrPtrTy; // arm64 targets use "int" ivar offset variables. 
All others, diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h index ced3484fbd2b6..e7a5100a9fa29 100644 --- a/clang/lib/CodeGen/CodeGenFunction.h +++ b/clang/lib/CodeGen/CodeGenFunction.h @@ -4439,6 +4439,11 @@ class CodeGenFunction : public CodeGenTypeCache { AggValueSlot slot = AggValueSlot::ignored()); LValue EmitPseudoObjectLValue(const PseudoObjectExpr *e); + void FlattenAccessAndType( + Address Addr, QualType AddrTy, + SmallVectorImpl> &AccessList, + SmallVectorImpl &FlatTypes); + llvm::Value *EmitIvarOffset(const ObjCInterfaceDecl *Interface, const ObjCIvarDecl *Ivar); llvm::Value *EmitIvarOffsetAsPointerDiff(const ObjCInterfaceDecl *Interface, diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp index 47c03ea5e72cb..c056d103a7fe4 100644 --- a/clang/lib/CodeGen/CodeGenModule.cpp +++ b/clang/lib/CodeGen/CodeGenModule.cpp @@ -4432,7 +4432,7 @@ void CodeGenModule::emitCPUDispatchDefinition(GlobalDecl GD) { GlobalDecl ResolverGD; if (getTarget().supportsIFunc()) { ResolverType = llvm::FunctionType::get( - llvm::PointerType::get(DeclTy, + llvm::PointerType::get(getLLVMContext(), getTypes().getTargetAddressSpace(FD->getType())), false); } @@ -4604,8 +4604,8 @@ llvm::Constant *CodeGenModule::GetOrCreateMultiVersionResolver(GlobalDecl GD) { // cpu_dispatch will be emitted in this translation unit. if (ShouldReturnIFunc) { unsigned AS = getTypes().getTargetAddressSpace(FD->getType()); - llvm::Type *ResolverType = - llvm::FunctionType::get(llvm::PointerType::get(DeclTy, AS), false); + llvm::Type *ResolverType = llvm::FunctionType::get( + llvm::PointerType::get(getLLVMContext(), AS), false); llvm::Constant *Resolver = GetOrCreateLLVMFunction( MangledName + ".resolver", ResolverType, GlobalDecl{}, /*ForVTable=*/false); diff --git a/clang/lib/CodeGen/Targets/AArch64.cpp b/clang/lib/CodeGen/Targets/AArch64.cpp index 4922b082cf09c..dc3a1d4287be1 100644 --- a/clang/lib/CodeGen/Targets/AArch64.cpp +++ b/clang/lib/CodeGen/Targets/AArch64.cpp @@ -843,7 +843,7 @@ RValue AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr, QualType Ty, llvm::Type *BaseTy = CGF.ConvertType(Ty); if (IsIndirect) - BaseTy = llvm::PointerType::getUnqual(BaseTy); + BaseTy = llvm::PointerType::getUnqual(BaseTy->getContext()); else if (AI.getCoerceToType()) BaseTy = AI.getCoerceToType(); @@ -961,7 +961,7 @@ RValue AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr, QualType Ty, if (IsIndirect) { // If it's been passed indirectly (actually a struct), whatever we find from // stored registers or on the stack will actually be a struct **. 
- MemTy = llvm::PointerType::getUnqual(MemTy); + MemTy = llvm::PointerType::getUnqual(MemTy->getContext()); } const Type *Base = nullptr; diff --git a/clang/lib/CodeGen/Targets/Hexagon.cpp b/clang/lib/CodeGen/Targets/Hexagon.cpp index 8fd2a81494d99..aada8d0d61303 100644 --- a/clang/lib/CodeGen/Targets/Hexagon.cpp +++ b/clang/lib/CodeGen/Targets/Hexagon.cpp @@ -336,10 +336,6 @@ Address HexagonABIInfo::EmitVAArgForHexagonLinux(CodeGenFunction &CGF, // Implement the block where argument is in register saved area CGF.EmitBlock(InRegBlock); - llvm::Type *PTy = CGF.ConvertType(Ty); - llvm::Value *__saved_reg_area_p = CGF.Builder.CreateBitCast( - __current_saved_reg_area_pointer, llvm::PointerType::getUnqual(PTy)); - CGF.Builder.CreateStore(__new_saved_reg_area_pointer, __current_saved_reg_area_pointer_p); @@ -388,22 +384,16 @@ Address HexagonABIInfo::EmitVAArgForHexagonLinux(CodeGenFunction &CGF, CGF.Builder.CreateStore(__new_overflow_area_pointer, __current_saved_reg_area_pointer_p); - // Bitcast the overflow area pointer to the type of argument. - llvm::Type *OverflowPTy = CGF.ConvertTypeForMem(Ty); - llvm::Value *__overflow_area_p = CGF.Builder.CreateBitCast( - __overflow_area_pointer, llvm::PointerType::getUnqual(OverflowPTy)); - CGF.EmitBranch(ContBlock); - // Get the correct pointer to load the variable argument // Implement the ContBlock CGF.EmitBlock(ContBlock); llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty); - llvm::Type *MemPTy = llvm::PointerType::getUnqual(MemTy); - llvm::PHINode *ArgAddr = CGF.Builder.CreatePHI(MemPTy, 2, "vaarg.addr"); - ArgAddr->addIncoming(__saved_reg_area_p, InRegBlock); - ArgAddr->addIncoming(__overflow_area_p, OnStackBlock); + llvm::PHINode *ArgAddr = CGF.Builder.CreatePHI( + llvm::PointerType::getUnqual(MemTy->getContext()), 2, "vaarg.addr"); + ArgAddr->addIncoming(__current_saved_reg_area_pointer, InRegBlock); + ArgAddr->addIncoming(__overflow_area_pointer, OnStackBlock); return Address(ArgAddr, MemTy, CharUnits::fromQuantity(ArgAlign)); } diff --git a/clang/lib/CodeGen/Targets/SystemZ.cpp b/clang/lib/CodeGen/Targets/SystemZ.cpp index 23c96fa5cf98c..9bb8ddbc548d2 100644 --- a/clang/lib/CodeGen/Targets/SystemZ.cpp +++ b/clang/lib/CodeGen/Targets/SystemZ.cpp @@ -272,7 +272,7 @@ RValue SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, SZCGI.handleExternallyVisibleObjABI(Ty.getTypePtr(), CGT.getCGM(), /*IsParam*/true); if (IsIndirect) { - DirectTy = llvm::PointerType::getUnqual(DirectTy); + DirectTy = llvm::PointerType::getUnqual(DirectTy->getContext()); UnpaddedSize = DirectAlign = CharUnits::fromQuantity(8); } else { if (AI.getCoerceToType()) diff --git a/clang/lib/CodeGen/Targets/XCore.cpp b/clang/lib/CodeGen/Targets/XCore.cpp index ced4981fd124f..b7824bde5f55a 100644 --- a/clang/lib/CodeGen/Targets/XCore.cpp +++ b/clang/lib/CodeGen/Targets/XCore.cpp @@ -149,7 +149,7 @@ RValue XCoreABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, llvm::Type *ArgTy = CGT.ConvertType(Ty); if (AI.canHaveCoerceToType() && !AI.getCoerceToType()) AI.setCoerceToType(ArgTy); - llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy); + llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy->getContext()); Address Val = Address::invalid(); CharUnits ArgSize = CharUnits::Zero(); diff --git a/clang/lib/CrossTU/CrossTranslationUnit.cpp b/clang/lib/CrossTU/CrossTranslationUnit.cpp index 9faf2a8a17341..ad2ebb6cd6e6c 100644 --- a/clang/lib/CrossTU/CrossTranslationUnit.cpp +++ b/clang/lib/CrossTU/CrossTranslationUnit.cpp @@ -453,7 +453,8 @@ 
CrossTranslationUnitContext::ASTUnitStorage::getASTUnitForFunction( return std::move(IndexLoadError); // Check if there is an entry in the index for the function. - if (!NameFileMap.count(FunctionName)) { + auto It = NameFileMap.find(FunctionName); + if (It == NameFileMap.end()) { ++NumNotInOtherTU; return llvm::make_error(index_error_code::missing_definition); } @@ -461,7 +462,7 @@ CrossTranslationUnitContext::ASTUnitStorage::getASTUnitForFunction( // Search in the index for the filename where the definition of FunctionName // resides. if (llvm::Expected FoundForFile = - getASTUnitForFile(NameFileMap[FunctionName], DisplayCTUProgress)) { + getASTUnitForFile(It->second, DisplayCTUProgress)) { // Update the cache. NameASTUnitMap[FunctionName] = *FoundForFile; diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp index c0891d46b0a62..821407687ffa1 100644 --- a/clang/lib/Driver/ToolChains/Clang.cpp +++ b/clang/lib/Driver/ToolChains/Clang.cpp @@ -580,6 +580,7 @@ static void addPGOAndCoverageFlags(const ToolChain &TC, Compilation &C, const ArgList &Args, SanitizerArgs &SanArgs, ArgStringList &CmdArgs) { const Driver &D = TC.getDriver(); + const llvm::Triple &T = TC.getTriple(); auto *PGOGenerateArg = Args.getLastArg(options::OPT_fprofile_generate, options::OPT_fprofile_generate_EQ, options::OPT_fno_profile_generate); @@ -785,6 +786,34 @@ static void addPGOAndCoverageFlags(const ToolChain &TC, Compilation &C, D.Diag(diag::err_drv_unsupported_option_argument) << A->getSpelling() << Val; } + if (const auto *A = Args.getLastArg(options::OPT_fprofile_continuous)) { + if (!PGOGenerateArg && !CSPGOGenerateArg && !ProfileGenerateArg) + D.Diag(clang::diag::err_drv_argument_only_allowed_with) + << A->getSpelling() + << "-fprofile-generate, -fprofile-instr-generate, or " + "-fcs-profile-generate"; + else { + CmdArgs.push_back("-fprofile-continuous"); + // Platforms that require a bias variable: + if (T.isOSBinFormatELF() || T.isOSAIX()) { + CmdArgs.push_back("-mllvm"); + CmdArgs.push_back("-runtime-counter-relocation"); + } + // -fprofile-instr-generate does not decide the profile file name in the + // FE, and so it does not define the filename symbol + // (__llvm_profile_filename). Instead, the runtime uses the name + // "default.profraw" for the profile file. When continuous mode is ON, we + // will create the filename symbol so that we can insert the "%c" + // modifier. 
+ if (ProfileGenerateArg && + (ProfileGenerateArg->getOption().matches( + options::OPT_fprofile_instr_generate) || + (ProfileGenerateArg->getOption().matches( + options::OPT_fprofile_instr_generate_EQ) && + strlen(ProfileGenerateArg->getValue()) == 0))) + CmdArgs.push_back("-fprofile-instrument-path=default.profraw"); + } + } int FunctionGroups = 1; int SelectedFunctionGroup = 0; @@ -6974,6 +7003,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, Args.AddLastArg(CmdArgs, options::OPT_fwritable_strings); Args.AddLastArg(CmdArgs, options::OPT_funroll_loops, options::OPT_fno_unroll_loops); + Args.AddLastArg(CmdArgs, options::OPT_floop_interchange, + options::OPT_fno_loop_interchange); Args.AddLastArg(CmdArgs, options::OPT_fstrict_flex_arrays_EQ); @@ -9185,7 +9216,7 @@ void LinkerWrapper::ConstructJob(Compilation &C, const JobAction &JA, OPT_fno_lto, OPT_flto, OPT_flto_EQ}; - const llvm::DenseSet LinkerOptions{OPT_mllvm}; + const llvm::DenseSet LinkerOptions{OPT_mllvm, OPT_Zlinker_input}; auto ShouldForward = [&](const llvm::DenseSet &Set, Arg *A) { return Set.contains(A->getOption().getID()) || (A->getOption().getGroup().isValid() && @@ -9203,7 +9234,9 @@ void LinkerWrapper::ConstructJob(Compilation &C, const JobAction &JA, ArgStringList CompilerArgs; ArgStringList LinkerArgs; for (Arg *A : C.getArgsForToolChain(TC, /*BoundArch=*/"", Kind)) { - if (ShouldForward(CompilerOptions, A)) + if (A->getOption().matches(OPT_Zlinker_input)) + LinkerArgs.emplace_back(A->getValue()); + else if (ShouldForward(CompilerOptions, A)) A->render(Args, CompilerArgs); else if (ShouldForward(LinkerOptions, A)) A->render(Args, LinkerArgs); @@ -9216,6 +9249,14 @@ void LinkerWrapper::ConstructJob(Compilation &C, const JobAction &JA, for (StringRef Arg : LinkerArgs) CmdArgs.push_back(Args.MakeArgString( "--device-linker=" + TC->getTripleString() + "=" + Arg)); + + // Forward the LTO mode relying on the Driver's parsing. + if (C.getDriver().getOffloadLTOMode() == LTOK_Full) + CmdArgs.push_back(Args.MakeArgString( + "--device-compiler=" + TC->getTripleString() + "=-flto=full")); + else if (C.getDriver().getOffloadLTOMode() == LTOK_Thin) + CmdArgs.push_back(Args.MakeArgString( + "--device-compiler=" + TC->getTripleString() + "=-flto=thin")); } } @@ -9223,6 +9264,9 @@ void LinkerWrapper::ConstructJob(Compilation &C, const JobAction &JA, Args.MakeArgString("--host-triple=" + getToolChain().getTripleString())); if (Args.hasArg(options::OPT_v)) CmdArgs.push_back("--wrapper-verbose"); + if (Arg *A = Args.getLastArg(options::OPT_cuda_path_EQ)) + CmdArgs.push_back( + Args.MakeArgString(Twine("--cuda-path=") + A->getValue())); // Construct the link job so we can wrap around it. Linker->ConstructJob(C, JA, Output, Inputs, Args, LinkingOutput); diff --git a/clang/lib/Driver/ToolChains/CommonArgs.cpp b/clang/lib/Driver/ToolChains/CommonArgs.cpp index 699aadec86dcb..61917db4d780d 100644 --- a/clang/lib/Driver/ToolChains/CommonArgs.cpp +++ b/clang/lib/Driver/ToolChains/CommonArgs.cpp @@ -1321,7 +1321,7 @@ void tools::addOpenMPHostOffloadingArgs(const Compilation &C, /// Add Fortran runtime libs void tools::addFortranRuntimeLibs(const ToolChain &TC, const ArgList &Args, llvm::opt::ArgStringList &CmdArgs) { - // Link FortranRuntime and FortranDecimal + // Link flang_rt.runtime // These are handled earlier on Windows by telling the frontend driver to // add the correct libraries to link against as dependents in the object // file. 
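The ``addPGOAndCoverageFlags`` hunk further above gates ``-fprofile-continuous`` on one of the profile-generation modes, adds ``-runtime-counter-relocation`` on targets that need a counter-bias variable, and pins the instrumentation path for plain ``-fprofile-instr-generate``. A condensed restatement of that logic, using illustrative names rather than the driver's actual helpers, is:

.. code-block:: c++

    #include <string>
    #include <vector>

    struct ProfileRequest {
      bool ProfileGenerate;      // -fprofile-generate / -fprofile-generate=
      bool ProfileInstrGenerate; // -fprofile-instr-generate with no path given
      bool CSProfileGenerate;    // -fcs-profile-generate
      bool TargetIsELFOrAIX;     // binary formats that need the bias variable
    };

    // Hypothetical helper mirroring the gating performed by the driver above.
    static bool appendContinuousFlags(const ProfileRequest &R,
                                      std::vector<std::string> &CC1Args) {
      // -fprofile-continuous is only accepted together with a generate mode;
      // otherwise the real driver emits err_drv_argument_only_allowed_with.
      if (!R.ProfileGenerate && !R.ProfileInstrGenerate && !R.CSProfileGenerate)
        return false;
      CC1Args.push_back("-fprofile-continuous");
      if (R.TargetIsELFOrAIX) {
        CC1Args.push_back("-mllvm");
        CC1Args.push_back("-runtime-counter-relocation");
      }
      // Plain -fprofile-instr-generate does not fix the output name in the
      // frontend, so pin it to default.profraw so that the "%c" modifier has a
      // filename symbol to attach to.
      if (R.ProfileInstrGenerate)
        CC1Args.push_back("-fprofile-instrument-path=default.profraw");
      return true;
    }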
@@ -1330,15 +1330,14 @@ void tools::addFortranRuntimeLibs(const ToolChain &TC, const ArgList &Args, F128LibName.consume_front_insensitive("lib"); if (!F128LibName.empty()) { bool AsNeeded = !TC.getTriple().isOSAIX(); - CmdArgs.push_back("-lFortranFloat128Math"); + CmdArgs.push_back("-lflang_rt.quadmath"); if (AsNeeded) addAsNeededOption(TC, Args, CmdArgs, /*as_needed=*/true); CmdArgs.push_back(Args.MakeArgString("-l" + F128LibName)); if (AsNeeded) addAsNeededOption(TC, Args, CmdArgs, /*as_needed=*/false); } - CmdArgs.push_back("-lFortranRuntime"); - CmdArgs.push_back("-lFortranDecimal"); + CmdArgs.push_back("-lflang_rt.runtime"); addArchSpecificRPath(TC, Args, CmdArgs); // needs libexecinfo for backtrace functions diff --git a/clang/lib/Driver/ToolChains/Flang.cpp b/clang/lib/Driver/ToolChains/Flang.cpp index e4019c4349687..591003f56e8bb 100644 --- a/clang/lib/Driver/ToolChains/Flang.cpp +++ b/clang/lib/Driver/ToolChains/Flang.cpp @@ -346,11 +346,15 @@ static void processVSRuntimeLibrary(const ToolChain &TC, const ArgList &Args, ArgStringList &CmdArgs) { assert(TC.getTriple().isKnownWindowsMSVCEnvironment() && "can only add VS runtime library on Windows!"); - // if -fno-fortran-main has been passed, skip linking Fortran_main.a - if (TC.getTriple().isKnownWindowsMSVCEnvironment()) { - CmdArgs.push_back(Args.MakeArgString( - "--dependent-lib=" + TC.getCompilerRTBasename(Args, "builtins"))); - } + + // Flang/Clang (including clang-cl)-compiled programs targeting the MSVC ABI + // should only depend on msv(u)crt. LLVM still emits libgcc/compiler-rt + // functions in some cases like 128-bit integer math (__udivti3, __modti3, + // __fixsfti, __floattidf, ...) that MSVC does not support. We are injecting a + // dependency on Compiler-RT's builtin library where these are implemented.
+ CmdArgs.push_back(Args.MakeArgString( + "--dependent-lib=" + TC.getCompilerRTBasename(Args, "builtins"))); + unsigned RTOptionID = options::OPT__SLASH_MT; if (auto *rtl = Args.getLastArg(options::OPT_fms_runtime_lib_EQ)) { RTOptionID = llvm::StringSwitch(rtl->getValue()) @@ -364,30 +368,26 @@ static void processVSRuntimeLibrary(const ToolChain &TC, const ArgList &Args, case options::OPT__SLASH_MT: CmdArgs.push_back("-D_MT"); CmdArgs.push_back("--dependent-lib=libcmt"); - CmdArgs.push_back("--dependent-lib=FortranRuntime.static.lib"); - CmdArgs.push_back("--dependent-lib=FortranDecimal.static.lib"); + CmdArgs.push_back("--dependent-lib=flang_rt.runtime.static.lib"); break; case options::OPT__SLASH_MTd: CmdArgs.push_back("-D_MT"); CmdArgs.push_back("-D_DEBUG"); CmdArgs.push_back("--dependent-lib=libcmtd"); - CmdArgs.push_back("--dependent-lib=FortranRuntime.static_dbg.lib"); - CmdArgs.push_back("--dependent-lib=FortranDecimal.static_dbg.lib"); + CmdArgs.push_back("--dependent-lib=flang_rt.runtime.static_dbg.lib"); break; case options::OPT__SLASH_MD: CmdArgs.push_back("-D_MT"); CmdArgs.push_back("-D_DLL"); CmdArgs.push_back("--dependent-lib=msvcrt"); - CmdArgs.push_back("--dependent-lib=FortranRuntime.dynamic.lib"); - CmdArgs.push_back("--dependent-lib=FortranDecimal.dynamic.lib"); + CmdArgs.push_back("--dependent-lib=flang_rt.runtime.dynamic.lib"); break; case options::OPT__SLASH_MDd: CmdArgs.push_back("-D_MT"); CmdArgs.push_back("-D_DEBUG"); CmdArgs.push_back("-D_DLL"); CmdArgs.push_back("--dependent-lib=msvcrtd"); - CmdArgs.push_back("--dependent-lib=FortranRuntime.dynamic_dbg.lib"); - CmdArgs.push_back("--dependent-lib=FortranDecimal.dynamic_dbg.lib"); + CmdArgs.push_back("--dependent-lib=flang_rt.runtime.dynamic_dbg.lib"); break; } } diff --git a/clang/lib/Edit/RewriteObjCFoundationAPI.cpp b/clang/lib/Edit/RewriteObjCFoundationAPI.cpp index 81797c8c4dc75..32f5ebb55155e 100644 --- a/clang/lib/Edit/RewriteObjCFoundationAPI.cpp +++ b/clang/lib/Edit/RewriteObjCFoundationAPI.cpp @@ -1085,6 +1085,7 @@ static bool rewriteToNumericBoxedExpression(const ObjCMessageExpr *Msg, llvm_unreachable("OpenCL-specific cast in Objective-C?"); case CK_HLSLVectorTruncation: + case CK_HLSLElementwiseCast: llvm_unreachable("HLSL-specific cast in Objective-C?"); break; diff --git a/clang/lib/Format/Format.cpp b/clang/lib/Format/Format.cpp index 387daad934f67..0898b69528ebc 100644 --- a/clang/lib/Format/Format.cpp +++ b/clang/lib/Format/Format.cpp @@ -995,6 +995,7 @@ template <> struct MappingTraits { Style.AlwaysBreakBeforeMultilineStrings); IO.mapOptional("AttributeMacros", Style.AttributeMacros); IO.mapOptional("BinPackArguments", Style.BinPackArguments); + IO.mapOptional("BinPackLongBracedList", Style.BinPackLongBracedList); IO.mapOptional("BinPackParameters", Style.BinPackParameters); IO.mapOptional("BitFieldColonSpacing", Style.BitFieldColonSpacing); IO.mapOptional("BracedInitializerIndentWidth", @@ -1507,6 +1508,7 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) { LLVMStyle.AlwaysBreakBeforeMultilineStrings = false; LLVMStyle.AttributeMacros.push_back("__capability"); LLVMStyle.BinPackArguments = true; + LLVMStyle.BinPackLongBracedList = true; LLVMStyle.BinPackParameters = FormatStyle::BPPS_BinPack; LLVMStyle.BitFieldColonSpacing = FormatStyle::BFCS_Both; LLVMStyle.BracedInitializerIndentWidth = std::nullopt; diff --git a/clang/lib/Format/FormatToken.cpp b/clang/lib/Format/FormatToken.cpp index 99bce1f5f0985..fb040a0043602 100644 --- a/clang/lib/Format/FormatToken.cpp +++ 
b/clang/lib/Format/FormatToken.cpp @@ -175,7 +175,7 @@ void CommaSeparatedList::precomputeFormattingInfos(const FormatToken *Token) { // have many items (20 or more) or we allow bin-packing of function call // arguments. if (Style.Cpp11BracedListStyle && !Style.BinPackArguments && - Commas.size() < 19) { + (Commas.size() < 19 || !Style.BinPackLongBracedList)) { return; } diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp index 8b1bbf104ce39..014e629c959e2 100644 --- a/clang/lib/Frontend/CompilerInvocation.cpp +++ b/clang/lib/Frontend/CompilerInvocation.cpp @@ -1665,6 +1665,11 @@ void CompilerInvocationBase::GenerateCodeGenArgs(const CodeGenOptions &Opts, else if (!Opts.UnrollLoops && Opts.OptimizationLevel > 1) GenerateArg(Consumer, OPT_fno_unroll_loops); + if (Opts.InterchangeLoops) + GenerateArg(Consumer, OPT_floop_interchange); + else + GenerateArg(Consumer, OPT_fno_loop_interchange); + if (!Opts.BinutilsVersion.empty()) GenerateArg(Consumer, OPT_fbinutils_version_EQ, Opts.BinutilsVersion); @@ -1971,6 +1976,8 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, Opts.UnrollLoops = Args.hasFlag(OPT_funroll_loops, OPT_fno_unroll_loops, (Opts.OptimizationLevel > 1)); + Opts.InterchangeLoops = + Args.hasFlag(OPT_floop_interchange, OPT_fno_loop_interchange, false); Opts.BinutilsVersion = std::string(Args.getLastArgValue(OPT_fbinutils_version_EQ)); diff --git a/clang/lib/Index/IndexBody.cpp b/clang/lib/Index/IndexBody.cpp index c18daf7faa749..f1dc4d5831ce7 100644 --- a/clang/lib/Index/IndexBody.cpp +++ b/clang/lib/Index/IndexBody.cpp @@ -130,6 +130,9 @@ class BodyIndexer : public RecursiveASTVisitor { void addCallRole(SymbolRoleSet &Roles, SmallVectorImpl &Relations) { + if (isa(ParentDC)) + return; + Roles |= (unsigned)SymbolRole::Call; if (auto *FD = dyn_cast(ParentDC)) Relations.emplace_back((unsigned)SymbolRole::RelationCalledBy, FD); diff --git a/clang/lib/Sema/SemaCast.cpp b/clang/lib/Sema/SemaCast.cpp index 54bc52fa2ac40..23be71ad8e2ae 100644 --- a/clang/lib/Sema/SemaCast.cpp +++ b/clang/lib/Sema/SemaCast.cpp @@ -23,6 +23,7 @@ #include "clang/Basic/TargetInfo.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/Initialization.h" +#include "clang/Sema/SemaHLSL.h" #include "clang/Sema/SemaObjC.h" #include "clang/Sema/SemaRISCV.h" #include "llvm/ADT/SmallVector.h" @@ -2772,6 +2773,22 @@ void CastOperation::CheckCXXCStyleCast(bool FunctionalStyle, return; } + CheckedConversionKind CCK = FunctionalStyle + ? CheckedConversionKind::FunctionalCast + : CheckedConversionKind::CStyleCast; + // This case should not trigger on regular vector splat + // vector cast, vector truncation, or special hlsl splat cases + QualType SrcTy = SrcExpr.get()->getType(); + if (Self.getLangOpts().HLSL && + Self.HLSL().CanPerformElementwiseCast(SrcExpr.get(), DestType)) { + if (SrcTy->isConstantArrayType()) + SrcExpr = Self.ImpCastExprToType( + SrcExpr.get(), Self.Context.getArrayParameterType(SrcTy), + CK_HLSLArrayRValue, VK_PRValue, nullptr, CCK); + Kind = CK_HLSLElementwiseCast; + return; + } + if (ValueKind == VK_PRValue && !DestType->isRecordType() && !isPlaceholder(BuiltinType::Overload)) { SrcExpr = Self.DefaultFunctionArrayLvalueConversion(SrcExpr.get()); @@ -2824,9 +2841,6 @@ void CastOperation::CheckCXXCStyleCast(bool FunctionalStyle, if (isValidCast(tcr)) Kind = CK_NoOp; - CheckedConversionKind CCK = FunctionalStyle - ? 
CheckedConversionKind::FunctionalCast - : CheckedConversionKind::CStyleCast; if (tcr == TC_NotApplicable) { tcr = TryAddressSpaceCast(Self, SrcExpr, DestType, /*CStyle*/ true, msg, Kind); diff --git a/clang/lib/Sema/SemaHLSL.cpp b/clang/lib/Sema/SemaHLSL.cpp index 2cb389eefaac2..ec6b5b45de42b 100644 --- a/clang/lib/Sema/SemaHLSL.cpp +++ b/clang/lib/Sema/SemaHLSL.cpp @@ -2706,6 +2706,150 @@ bool SemaHLSL::CheckCompatibleParameterABI(FunctionDecl *New, return HadError; } +// Generally follows PerformScalarCast, with cases reordered for +// clarity of what types are supported +bool SemaHLSL::CanPerformScalarCast(QualType SrcTy, QualType DestTy) { + + if (SemaRef.getASTContext().hasSameUnqualifiedType(SrcTy, DestTy)) + return true; + + switch (SrcTy->getScalarTypeKind()) { + case Type::STK_Bool: // casting from bool is like casting from an integer + case Type::STK_Integral: + switch (DestTy->getScalarTypeKind()) { + case Type::STK_Bool: + case Type::STK_Integral: + case Type::STK_Floating: + return true; + case Type::STK_CPointer: + case Type::STK_ObjCObjectPointer: + case Type::STK_BlockPointer: + case Type::STK_MemberPointer: + llvm_unreachable("HLSL doesn't support pointers."); + case Type::STK_IntegralComplex: + case Type::STK_FloatingComplex: + llvm_unreachable("HLSL doesn't support complex types."); + case Type::STK_FixedPoint: + llvm_unreachable("HLSL doesn't support fixed point types."); + } + llvm_unreachable("Should have returned before this"); + + case Type::STK_Floating: + switch (DestTy->getScalarTypeKind()) { + case Type::STK_Floating: + case Type::STK_Bool: + case Type::STK_Integral: + return true; + case Type::STK_FloatingComplex: + case Type::STK_IntegralComplex: + llvm_unreachable("HLSL doesn't support complex types."); + case Type::STK_FixedPoint: + llvm_unreachable("HLSL doesn't support fixed point types."); + case Type::STK_CPointer: + case Type::STK_ObjCObjectPointer: + case Type::STK_BlockPointer: + case Type::STK_MemberPointer: + llvm_unreachable("HLSL doesn't support pointers."); + } + llvm_unreachable("Should have returned before this"); + + case Type::STK_MemberPointer: + case Type::STK_CPointer: + case Type::STK_BlockPointer: + case Type::STK_ObjCObjectPointer: + llvm_unreachable("HLSL doesn't support pointers."); + + case Type::STK_FixedPoint: + llvm_unreachable("HLSL doesn't support fixed point types."); + + case Type::STK_FloatingComplex: + case Type::STK_IntegralComplex: + llvm_unreachable("HLSL doesn't support complex types."); + } + + llvm_unreachable("Unhandled scalar cast"); +} + +// Detect if a type contains a bitfield. Will be removed when +// bitfield support is added to HLSLElementwiseCast +bool SemaHLSL::ContainsBitField(QualType BaseTy) { + llvm::SmallVector WorkList; + WorkList.push_back(BaseTy); + while (!WorkList.empty()) { + QualType T = WorkList.pop_back_val(); + T = T.getCanonicalType().getUnqualifiedType(); + // only check aggregate types + if (const auto *AT = dyn_cast(T)) { + WorkList.push_back(AT->getElementType()); + continue; + } + if (const auto *RT = dyn_cast(T)) { + const RecordDecl *RD = RT->getDecl(); + if (RD->isUnion()) + continue; + + const CXXRecordDecl *CXXD = dyn_cast(RD); + + if (CXXD && CXXD->isStandardLayout()) + RD = CXXD->getStandardLayoutBaseWithFields(); + + for (const auto *FD : RD->fields()) { + if (FD->isBitField()) + return true; + WorkList.push_back(FD->getType()); + } + continue; + } + } + return false; +} + +// Can we perform an HLSL Elementwise cast? 
+// TODO: update this code when matrices are added; see issue #88060 +bool SemaHLSL::CanPerformElementwiseCast(Expr *Src, QualType DestTy) { + + // Don't handle casts where LHS and RHS are any combination of scalar/vector + // There must be an aggregate somewhere + QualType SrcTy = Src->getType(); + if (SrcTy->isScalarType()) // always a splat and this cast doesn't handle that + return false; + + if (SrcTy->isVectorType() && + (DestTy->isScalarType() || DestTy->isVectorType())) + return false; + + if (ContainsBitField(DestTy) || ContainsBitField(SrcTy)) + return false; + + llvm::SmallVector DestTypes; + BuildFlattenedTypeList(DestTy, DestTypes); + llvm::SmallVector SrcTypes; + BuildFlattenedTypeList(SrcTy, SrcTypes); + + // Usually the size of SrcTypes must be greater than or equal to the size of + // DestTypes. + if (SrcTypes.size() < DestTypes.size()) + return false; + + unsigned SrcSize = SrcTypes.size(); + unsigned DstSize = DestTypes.size(); + unsigned I; + for (I = 0; I < DstSize && I < SrcSize; I++) { + if (SrcTypes[I]->isUnionType() || DestTypes[I]->isUnionType()) + return false; + if (!CanPerformScalarCast(SrcTypes[I], DestTypes[I])) { + return false; + } + } + + // check the rest of the source type for unions. + for (; I < SrcSize; I++) { + if (SrcTypes[I]->isUnionType()) + return false; + } + return true; +} + ExprResult SemaHLSL::ActOnOutParamExpr(ParmVarDecl *Param, Expr *Arg) { assert(Param->hasAttr() && "We should not get here without a parameter modifier expression"); diff --git a/clang/lib/Sema/SemaInit.cpp b/clang/lib/Sema/SemaInit.cpp index f206cd57eca89..308222a79d920 100644 --- a/clang/lib/Sema/SemaInit.cpp +++ b/clang/lib/Sema/SemaInit.cpp @@ -4862,9 +4862,13 @@ static void TryListInitialization(Sema &S, assert( S.Context.hasSameUnqualifiedType(SubInit[0]->getType(), DestType) && "Deduced to other type?"); + assert(Kind.getKind() == clang::InitializationKind::IK_DirectList && + "List-initialize structured bindings but not " + "direct-list-initialization?"); TryArrayCopy(S, - InitializationKind::CreateCopy(Kind.getLocation(), - InitList->getLBraceLoc()), + InitializationKind::CreateDirect(Kind.getLocation(), + InitList->getLBraceLoc(), + InitList->getRBraceLoc()), Entity, SubInit[0], DestType, Sequence, TreatUnavailableAsInvalid); if (Sequence) diff --git a/clang/lib/Sema/SemaOpenMP.cpp b/clang/lib/Sema/SemaOpenMP.cpp index b060039d188a1..376995d624e28 100644 --- a/clang/lib/Sema/SemaOpenMP.cpp +++ b/clang/lib/Sema/SemaOpenMP.cpp @@ -22819,8 +22819,12 @@ class GlobalDeclRefChecker final : public StmtVisitor { void declareTargetInitializer(Decl *TD) { A = TD->getAttr(); DeclVector.push_back(cast(TD)); + llvm::SmallDenseSet Visited; while (!DeclVector.empty()) { VarDecl *TargetVarDecl = DeclVector.pop_back_val(); + if (!Visited.insert(TargetVarDecl).second) + continue; + if (TargetVarDecl->hasAttr() && TargetVarDecl->hasInit() && TargetVarDecl->hasGlobalStorage()) { if (Expr *Ex = TargetVarDecl->getInit()) diff --git a/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedLambdaCapturesChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedLambdaCapturesChecker.cpp index 53ef423bd82e7..a56f48c83c660 100644 --- a/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedLambdaCapturesChecker.cpp +++ b/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedLambdaCapturesChecker.cpp @@ -109,9 +109,7 @@ class UncountedLambdaCapturesChecker bool VisitCallExpr(CallExpr *CE) override { checkCalleeLambda(CE); if (auto *Callee = CE->getDirectCallee()) { - unsigned ArgIndex = 0; - if 
(auto *CXXCallee = dyn_cast(Callee)) - ArgIndex = CXXCallee->isInstance(); + unsigned ArgIndex = isa(CE); bool TreatAllArgsAsNoEscape = shouldTreatAllArgAsNoEscape(Callee); for (auto *Param : Callee->parameters()) { if (ArgIndex >= CE->getNumArgs()) diff --git a/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp b/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp index 7a900780384a9..3a983421358c7 100644 --- a/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp +++ b/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp @@ -522,6 +522,7 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex, case CK_ToUnion: case CK_MatrixCast: case CK_VectorSplat: + case CK_HLSLElementwiseCast: case CK_HLSLVectorTruncation: { QualType resultType = CastE->getType(); if (CastE->isGLValue()) diff --git a/clang/test/Analysis/Checkers/WebKit/uncounted-lambda-captures.cpp b/clang/test/Analysis/Checkers/WebKit/uncounted-lambda-captures.cpp index 2a1a164557cdb..4f4a960282253 100644 --- a/clang/test/Analysis/Checkers/WebKit/uncounted-lambda-captures.cpp +++ b/clang/test/Analysis/Checkers/WebKit/uncounted-lambda-captures.cpp @@ -63,6 +63,19 @@ template Function adopt(Detail::Callab return Function(impl, Function::Adopt); } +template +class HashMap { +public: + HashMap(); + HashMap([[clang::noescape]] const Function&); + void ensure(const KeyType&, [[clang::noescape]] const Function&); + bool operator+([[clang::noescape]] const Function&) const; + static void ifAny(HashMap, [[clang::noescape]] const Function&); + +private: + ValueType* m_table { nullptr }; +}; + } // namespace WTF struct A { @@ -268,6 +281,24 @@ struct RefCountableWithLambdaCapturingThis { nonTrivial(); }); } + + static void callLambda([[clang::noescape]] const WTF::Function()>&); + void method_captures_this_in_template_method() { + RefCountable* obj = make_obj(); + WTF::HashMap> nextMap; + nextMap.ensure(3, [&] { + return obj->next(); + }); + nextMap+[&] { + return obj->next(); + }; + WTF::HashMap>::ifAny(nextMap, [&](auto& item) -> bool { + return item->next() && obj->next(); + }); + callLambda([&]() -> RefPtr { + return obj->next(); + }); + } }; struct NonRefCountableWithLambdaCapturingThis { diff --git a/clang/test/Analysis/NewDelete-checker-test.cpp b/clang/test/Analysis/NewDelete-checker-test.cpp index 21b4cf817b5df..06754f669b1e6 100644 --- a/clang/test/Analysis/NewDelete-checker-test.cpp +++ b/clang/test/Analysis/NewDelete-checker-test.cpp @@ -441,3 +441,31 @@ void testLeakBecauseNTTPIsNotDeallocation() { void* p = ::operator new(10); deallocate_via_nttp(p); } // leak-warning{{Potential leak of memory pointed to by 'p'}} + +namespace optional_union { + template + class unique_ptr { + T *q; + public: + unique_ptr() : q(new T) {} + ~unique_ptr() { + delete q; + } + }; + + union custom_union_t { + unique_ptr present; + char notpresent; + custom_union_t() : present(unique_ptr()) {} + ~custom_union_t() {} + }; + + void testUnionCorrect() { + custom_union_t a; + a.present.~unique_ptr(); + } + + void testUnionLeak() { + custom_union_t a; + } // leak-warning{{Potential leak of memory pointed to by 'a.present.q'}} +} diff --git a/clang/test/Analysis/dtor-union.cpp b/clang/test/Analysis/dtor-union.cpp new file mode 100644 index 0000000000000..dac366e6f9df8 --- /dev/null +++ b/clang/test/Analysis/dtor-union.cpp @@ -0,0 +1,38 @@ +// RUN: %clang_analyze_cc1 -analyzer-checker=core,debug.ExprInspection -analyzer-config c++-inlining=destructors -verify -std=c++11 %s +// RUN: %clang_analyze_cc1 -analyzer-checker=core,debug.ExprInspection -analyzer-config 
c++-inlining=destructors -verify -std=c++17 %s + +void clang_analyzer_eval(bool); + +struct InlineDtor { + static int cnt; + static int dtorCalled; + ~InlineDtor() { + ++dtorCalled; + } +}; + +int InlineDtor::cnt = 0; +int InlineDtor::dtorCalled = 0; + +void testUnionDtor() { + static int unionDtorCalled; + InlineDtor::cnt = 0; + InlineDtor::dtorCalled = 0; + unionDtorCalled = 0; + { + union UnionDtor { + InlineDtor kind1; + char kind2; + ~UnionDtor() { unionDtorCalled++; } + }; + UnionDtor u1{.kind1{}}; + UnionDtor u2{.kind2{}}; + auto u3 = new UnionDtor{.kind1{}}; + auto u4 = new UnionDtor{.kind2{}}; + delete u3; + delete u4; + } + + clang_analyzer_eval(unionDtorCalled == 4); // expected-warning {{TRUE}} + clang_analyzer_eval(InlineDtor::dtorCalled == 0); // expected-warning {{TRUE}} +} diff --git a/clang/test/CodeGen/debug-info-enum-extensibility.c b/clang/test/CodeGen/debug-info-enum-extensibility.c new file mode 100644 index 0000000000000..4f8a42bff3f01 --- /dev/null +++ b/clang/test/CodeGen/debug-info-enum-extensibility.c @@ -0,0 +1,49 @@ +// RUN: %clang_cc1 -emit-llvm -debug-info-kind=limited %s -o - | FileCheck %s + +// CHECK-NOT: enumKind +// CHECK: !DICompositeType(tag: DW_TAG_enumeration_type, name: "ClosedEnum" +// CHECK-SAME: enumKind: DW_APPLE_ENUM_KIND_Closed) +// CHECK: !DICompositeType(tag: DW_TAG_enumeration_type, name: "OpenEnum" +// CHECK-SAME: enumKind: DW_APPLE_ENUM_KIND_Open) +// CHECK: !DICompositeType(tag: DW_TAG_enumeration_type, name: "ClosedFlagEnum" +// CHECK-SAME: enumKind: DW_APPLE_ENUM_KIND_Closed) +// CHECK: !DICompositeType(tag: DW_TAG_enumeration_type, name: "OpenFlagEnum" +// CHECK-SAME: enumKind: DW_APPLE_ENUM_KIND_Open) +// CHECK: !DICompositeType(tag: DW_TAG_enumeration_type, name: "MixedEnum" +// CHECK-SAME: enumKind: DW_APPLE_ENUM_KIND_Open) + +enum Enum { + E0, E1 +}; + +enum FlagEnum { + FE0 = 1 << 0, FE1 = 1 << 1 +}; + +enum __attribute__((enum_extensibility(closed))) ClosedEnum { + A0, A1 +}; + +enum __attribute__((enum_extensibility(open))) OpenEnum { + B0, B1 +}; + +enum __attribute__((enum_extensibility(closed),flag_enum)) ClosedFlagEnum { + C0 = 1 << 0, C1 = 1 << 1 +}; + +enum __attribute__((enum_extensibility(open),flag_enum)) OpenFlagEnum { + D0 = 1 << 0, D1 = 1 << 1 +}; + +enum __attribute__((enum_extensibility(open), enum_extensibility(closed))) MixedEnum { + M0, M1 +}; + +enum Enum e; +enum FlagEnum fe; +enum ClosedEnum ce; +enum OpenEnum oe; +enum ClosedFlagEnum cfe; +enum OpenFlagEnum ofe; +enum MixedEnum me; diff --git a/clang/test/CodeGen/profile-continuous.c b/clang/test/CodeGen/profile-continuous.c new file mode 100644 index 0000000000000..86fa1d149b971 --- /dev/null +++ b/clang/test/CodeGen/profile-continuous.c @@ -0,0 +1,11 @@ +// RUN: %clang_cc1 -emit-llvm -fprofile-instrument=llvm -fprofile-continuous %s -o - | FileCheck %s --check-prefix=IRPGO +// RUN: %clang_cc1 -emit-llvm -fprofile-instrument=llvm -fprofile-continuous -fprofile-instrument-path=mydir/default_%m.profraw -mllvm -runtime-counter-relocation %s -o - \ +// RUN: | FileCheck %s --check-prefix=IRPGO_EQ +// RUN: %clang_cc1 -emit-llvm -O2 -fprofile-instrument=csllvm -fprofile-continuous %s -o - | FileCheck %s --check-prefix=CSIRPGO +// RUN: %clang_cc1 -emit-llvm -fprofile-instrument=clang -fprofile-continuous -fprofile-instrument-path=default.profraw %s -o - | FileCheck %s --check-prefix=CLANG_PGO + +// IRPGO: @__llvm_profile_filename = {{.*}} c"%cdefault_%m.profraw\00" +// IRPGO_EQ: @__llvm_profile_filename = {{.*}} c"%cmydir/default_%m.profraw\00" +// CSIRPGO: 
@__llvm_profile_filename = {{.*}} c"%cdefault_%m.profraw\00" +// CLANG_PGO: @__llvm_profile_filename = {{.*}} c"%cdefault.profraw\00" +void foo(){} diff --git a/clang/test/CodeGenHLSL/BasicFeatures/ArrayElementwiseCast.hlsl b/clang/test/CodeGenHLSL/BasicFeatures/ArrayElementwiseCast.hlsl new file mode 100644 index 0000000000000..18f82bff3b308 --- /dev/null +++ b/clang/test/CodeGenHLSL/BasicFeatures/ArrayElementwiseCast.hlsl @@ -0,0 +1,144 @@ +// RUN: %clang_cc1 -triple dxil-pc-shadermodel6.0-library -disable-llvm-passes -emit-llvm -finclude-default-header -o - %s | FileCheck %s + +// array truncation to a scalar +// CHECK-LABEL: define void {{.*}}call0 +// CHECK: [[A:%.*]] = alloca [2 x i32], align 4 +// CHECK-NEXT: [[B:%.*]] = alloca float, align 4 +// CHECK-NEXT: [[Tmp:%.*]] = alloca [2 x i32], align 4 +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[A]], ptr align 4 {{.*}}, i32 8, i1 false) +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[Tmp]], ptr align 4 [[A]], i32 8, i1 false) +// CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds [2 x i32], ptr [[Tmp]], i32 0, i32 0 +// CHECK-NEXT: [[G2:%.*]] = getelementptr inbounds [2 x i32], ptr [[Tmp]], i32 0, i32 1 +// CHECK-NEXT: [[L:%.*]] = load i32, ptr [[G1]], align 4 +// CHECK-NEXT: store i32 [[L]], ptr [[B]], align 4 +export void call0() { + int A[2] = {0,1}; + float B = (float)A; +} + +// array truncation +// CHECK-LABEL: define void {{.*}}call1 +// CHECK: [[A:%.*]] = alloca [2 x i32], align 4 +// CHECK-NEXT: [[B:%.*]] = alloca [1 x i32], align 4 +// CHECK-NEXT: [[Tmp:%.*]] = alloca [2 x i32], align 4 +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[A]], ptr align 4 {{.*}}, i32 8, i1 false) +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[B]], ptr align 4 {{.*}}, i32 4, i1 false) +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[Tmp]], ptr align 4 [[A]], i32 8, i1 false) +// CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds [1 x i32], ptr [[B]], i32 0, i32 0 +// CHECK-NEXT: [[G2:%.*]] = getelementptr inbounds [2 x i32], ptr [[Tmp]], i32 0, i32 0 +// CHECK-NEXT: [[G3:%.*]] = getelementptr inbounds [2 x i32], ptr [[Tmp]], i32 0, i32 1 +// CHECK-NEXT: [[L:%.*]] = load i32, ptr [[G2]], align 4 +// CHECK-NEXT: store i32 [[L]], ptr [[G1]], align 4 +export void call1() { + int A[2] = {0,1}; + int B[1] = {4}; + B = (int[1])A; +} + +// just a cast +// CHECK-LABEL: define void {{.*}}call2 +// CHECK: [[A:%.*]] = alloca [1 x i32], align 4 +// CHECK-NEXT: [[B:%.*]] = alloca [1 x float], align 4 +// CHECK-NEXT: [[Tmp:%.*]] = alloca [1 x i32], align 4 +// CHECK-NEXT: call void @llvm.memset.p0.i32(ptr align 4 [[A]], i8 0, i32 4, i1 false) +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[B]], ptr align 4 {{.*}}, i32 4, i1 false) +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[Tmp]], ptr align 4 [[A]], i32 4, i1 false) +// CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds [1 x float], ptr [[B]], i32 0, i32 0 +// CHECK-NEXT: [[G2:%.*]] = getelementptr inbounds [1 x i32], ptr [[Tmp]], i32 0, i32 0 +// CHECK-NEXT: [[L:%.*]] = load i32, ptr [[G2]], align 4 +// CHECK-NEXT: [[C:%.*]] = sitofp i32 [[L]] to float +// CHECK-NEXT: store float [[C]], ptr [[G1]], align 4 +export void call2() { + int A[1] = {0}; + float B[1] = {1.0}; + B = (float[1])A; +} + +// vector to array +// CHECK-LABEL: define void {{.*}}call3 +// CHECK: [[A:%.*]] = alloca <1 x float>, align 4 +// CHECK-NEXT: [[B:%.*]] = alloca [1 x i32], align 4 +// CHECK-NEXT: store <1 x float> splat (float 
0x3FF3333340000000), ptr [[A]], align 4 +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[B]], ptr align 4 {{.*}}, i32 4, i1 false) +// CHECK-NEXT: [[C:%.*]] = load <1 x float>, ptr [[A]], align 4 +// CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds [1 x i32], ptr [[B]], i32 0, i32 0 +// CHECK-NEXT: [[V:%.*]] = extractelement <1 x float> [[C]], i64 0 +// CHECK-NEXT: [[C:%.*]] = fptosi float [[V]] to i32 +// CHECK-NEXT: store i32 [[C]], ptr [[G1]], align 4 +export void call3() { + float1 A = {1.2}; + int B[1] = {1}; + B = (int[1])A; +} + +// flatten array of vector to array with cast +// CHECK-LABEL: define void {{.*}}call5 +// CHECK: [[A:%.*]] = alloca [1 x <2 x float>], align 8 +// CHECK-NEXT: [[B:%.*]] = alloca [2 x i32], align 4 +// CHECK-NEXT: [[Tmp:%.*]] = alloca [1 x <2 x float>], align 8 +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 8 [[A]], ptr align 8 {{.*}}, i32 8, i1 false) +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[B]], ptr align 4 {{.*}}, i32 8, i1 false) +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 8 [[Tmp]], ptr align 8 [[A]], i32 8, i1 false) +// CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds [2 x i32], ptr [[B]], i32 0, i32 0 +// CHECK-NEXT: [[G2:%.*]] = getelementptr inbounds [2 x i32], ptr [[B]], i32 0, i32 1 +// CHECK-NEXT: [[VG:%.*]] = getelementptr inbounds [1 x <2 x float>], ptr [[Tmp]], i32 0, i32 0 +// CHECK-NEXT: [[L:%.*]] = load <2 x float>, ptr [[VG]], align 8 +// CHECK-NEXT: [[VL:%.*]] = extractelement <2 x float> [[L]], i32 0 +// CHECK-NEXT: [[C:%.*]] = fptosi float [[VL]] to i32 +// CHECK-NEXT: store i32 [[C]], ptr [[G1]], align 4 +// CHECK-NEXT: [[L4:%.*]] = load <2 x float>, ptr [[VG]], align 8 +// CHECK-NEXT: [[VL5:%.*]] = extractelement <2 x float> [[L4]], i32 1 +// CHECK-NEXT: [[C6:%.*]] = fptosi float [[VL5]] to i32 +// CHECK-NEXT: store i32 [[C6]], ptr [[G2]], align 4 +export void call5() { + float2 A[1] = {{1.2,3.4}}; + int B[2] = {1,2}; + B = (int[2])A; +} + +// flatten 2d array +// CHECK-LABEL: define void {{.*}}call6 +// CHECK: [[A:%.*]] = alloca [2 x [1 x i32]], align 4 +// CHECK-NEXT: [[B:%.*]] = alloca [2 x i32], align 4 +// CHECK-NEXT: [[Tmp:%.*]] = alloca [2 x [1 x i32]], align 4 +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[A]], ptr align 4 {{.*}}, i32 8, i1 false) +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[B]], ptr align 4 {{.*}}, i32 8, i1 false) +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[Tmp]], ptr align 4 [[A]], i32 8, i1 false) +// CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds [2 x i32], ptr [[B]], i32 0, i32 0 +// CHECK-NEXT: [[G2:%.*]] = getelementptr inbounds [2 x i32], ptr [[B]], i32 0, i32 1 +// CHECK-NEXT: [[G3:%.*]] = getelementptr inbounds [2 x [1 x i32]], ptr [[Tmp]], i32 0, i32 0, i32 0 +// CHECK-NEXT: [[G4:%.*]] = getelementptr inbounds [2 x [1 x i32]], ptr [[Tmp]], i32 0, i32 1, i32 0 +// CHECK-NEXT: [[L:%.*]] = load i32, ptr [[G3]], align 4 +// CHECK-NEXT: store i32 [[L]], ptr [[G1]], align 4 +// CHECK-NEXT: [[L4:%.*]] = load i32, ptr [[G4]], align 4 +// CHECK-NEXT: store i32 [[L4]], ptr [[G2]], align 4 +export void call6() { + int A[2][1] = {{1},{3}}; + int B[2] = {1,2}; + B = (int[2])A; +} + +struct S { + int X; + float Y; +}; + +// flatten and truncate from a struct +// CHECK-LABEL: define void {{.*}}call7 +// CHECK: [[s:%.*]] = alloca %struct.S, align 4 +// CHECK-NEXT: [[A:%.*]] = alloca [1 x i32], align 4 +// CHECK-NEXT: [[Tmp:%.*]] = alloca %struct.S, align 4 +// CHECK-NEXT: call void 
@llvm.memcpy.p0.p0.i32(ptr align 4 [[s]], ptr align 4 {{.*}}, i32 8, i1 false) +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[A]], ptr align 4 {{.*}}, i32 4, i1 false) +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[Tmp]], ptr align 4 [[s]], i32 8, i1 false) +// CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds [1 x i32], ptr [[A]], i32 0, i32 0 +// CHECK-NEXT: [[G2:%.*]] = getelementptr inbounds %struct.S, ptr [[Tmp]], i32 0, i32 0 +// CHECK-NEXT: [[G3:%.*]] = getelementptr inbounds %struct.S, ptr [[Tmp]], i32 0, i32 1 +// CHECK-NEXT: [[L:%.*]] = load i32, ptr [[G2]], align 4 +// CHECK-NEXT: store i32 [[L]], ptr [[G1]], align 4 +export void call7() { + S s = {1, 2.9}; + int A[1] = {1}; + A = (int[1])s; +} + diff --git a/clang/test/CodeGenHLSL/BasicFeatures/StructElementwiseCast.hlsl b/clang/test/CodeGenHLSL/BasicFeatures/StructElementwiseCast.hlsl new file mode 100644 index 0000000000000..26fde37c901dd --- /dev/null +++ b/clang/test/CodeGenHLSL/BasicFeatures/StructElementwiseCast.hlsl @@ -0,0 +1,140 @@ +// RUN: %clang_cc1 -finclude-default-header -triple dxil-pc-shadermodel6.3-library -x hlsl -emit-llvm -disable-llvm-passes -o - %s | FileCheck %s + +struct S { + int X; + float Y; +}; + +// struct truncation to a scalar +// CHECK-LABEL: define void {{.*}}call0 +// CHECK: [[s:%.*]] = alloca %struct.S, align 4 +// CHECK-NEXT: [[A:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[Tmp:%.*]] = alloca %struct.S, align 4 +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[s]], ptr align 4 {{.*}}, i32 8, i1 false) +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[Tmp]], ptr align 4 [[s]], i32 8, i1 false) +// CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds %struct.S, ptr [[Tmp]], i32 0, i32 0 +// CHECK-NEXT: [[G2:%.*]] = getelementptr inbounds %struct.S, ptr [[Tmp]], i32 0, i32 1 +// CHECK-NEXT: [[L:%.*]] = load i32, ptr [[G1]], align 4 +// CHECK-NEXT: store i32 [[L]], ptr [[A]], align 4 +export void call0() { + S s = {1,2}; + int A = (int)s; +} + +// struct from vector +// CHECK-LABEL: define void {{.*}}call1 +// CHECK: [[A:%.*]] = alloca <2 x i32>, align 8 +// CHECK-NEXT: [[s:%.*]] = alloca %struct.S, align 4 +// CHECK-NEXT: store <2 x i32> , ptr [[A]], align 8 +// CHECK-NEXT: [[L:%.*]] = load <2 x i32>, ptr [[A]], align 8 +// CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds %struct.S, ptr [[s]], i32 0, i32 0 +// CHECK-NEXT: [[G2:%.*]] = getelementptr inbounds %struct.S, ptr [[s]], i32 0, i32 1 +// CHECK-NEXT: [[VL:%.*]] = extractelement <2 x i32> [[L]], i64 0 +// CHECK-NEXT: store i32 [[VL]], ptr [[G1]], align 4 +// CHECK-NEXT: [[VL2:%.*]] = extractelement <2 x i32> [[L]], i64 1 +// CHECK-NEXT: [[C:%.*]] = sitofp i32 [[VL2]] to float +// CHECK-NEXT: store float [[C]], ptr [[G2]], align 4 +export void call1() { + int2 A = {1,2}; + S s = (S)A; +} + + +// struct from array +// CHECK-LABEL: define void {{.*}}call2 +// CHECK: [[A:%.*]] = alloca [2 x i32], align 4 +// CHECK-NEXT: [[s:%.*]] = alloca %struct.S, align 4 +// CHECK-NEXT: [[Tmp:%.*]] = alloca [2 x i32], align 4 +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[A]], ptr align 4 {{.*}}, i32 8, i1 false) +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[Tmp]], ptr align 4 [[A]], i32 8, i1 false) +// CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds %struct.S, ptr [[s]], i32 0, i32 0 +// CHECK-NEXT: [[G2:%.*]] = getelementptr inbounds %struct.S, ptr [[s]], i32 0, i32 1 +// CHECK-NEXT: [[G3:%.*]] = getelementptr inbounds [2 x i32], ptr [[Tmp]], i32 0, i32 0 +// CHECK-NEXT: 
[[G4:%.*]] = getelementptr inbounds [2 x i32], ptr [[Tmp]], i32 0, i32 1 +// CHECK-NEXT: [[L:%.*]] = load i32, ptr [[G3]], align 4 +// CHECK-NEXT: store i32 [[L]], ptr [[G1]], align 4 +// CHECK-NEXT: [[L4:%.*]] = load i32, ptr [[G4]], align 4 +// CHECK-NEXT: [[C:%.*]] = sitofp i32 [[L4]] to float +// CHECK-NEXT: store float [[C]], ptr [[G2]], align 4 +export void call2() { + int A[2] = {1,2}; + S s = (S)A; +} + +struct Q { + int Z; +}; + +struct R { + Q q; + float F; +}; + +// struct from nested struct? +// CHECK-LABEL: define void {{.*}}call6 +// CHECK: [[r:%.*]] = alloca %struct.R, align 4 +// CHECK-NEXT: [[s:%.*]] = alloca %struct.S, align 4 +// CHECK-NEXT: [[Tmp:%.*]] = alloca %struct.R, align 4 +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[r]], ptr align 4 {{.*}}, i32 8, i1 false) +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[Tmp]], ptr align 4 [[r]], i32 8, i1 false) +// CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds %struct.S, ptr [[s]], i32 0, i32 0 +// CHECK-NEXT: [[G2:%.*]] = getelementptr inbounds %struct.S, ptr [[s]], i32 0, i32 1 +// CHECK-NEXT: [[G3:%.*]] = getelementptr inbounds %struct.R, ptr [[Tmp]], i32 0, i32 0 +// CHECK-NEXT: [[G4:%.*]] = getelementptr inbounds %struct.R, ptr [[Tmp]], i32 0, i32 1 +// CHECK-NEXT: [[L:%.*]] = load i32, ptr [[G3]], align 4 +// CHECK-NEXT: store i32 [[L]], ptr [[G1]], align 4 +// CHECK-NEXT: [[L4:%.*]] = load float, ptr [[G4]], align 4 +// CHECK-NEXT: store float [[L4]], ptr [[G2]], align 4 +export void call6() { + R r = {{1}, 2.0}; + S s = (S)r; +} + +// nested struct from array? +// CHECK-LABEL: define void {{.*}}call7 +// CHECK: [[A:%.*]] = alloca [2 x i32], align 4 +// CHECK-NEXT: [[r:%.*]] = alloca %struct.R, align 4 +// CHECK-NEXT: [[Tmp:%.*]] = alloca [2 x i32], align 4 +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[A]], ptr align 4 {{.*}}, i32 8, i1 false) +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[Tmp]], ptr align 4 [[A]], i32 8, i1 false) +// CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds %struct.R, ptr [[r]], i32 0, i32 0 +// CHECK-NEXT: [[G2:%.*]] = getelementptr inbounds %struct.R, ptr [[r]], i32 0, i32 1 +// CHECK-NEXT: [[G3:%.*]] = getelementptr inbounds [2 x i32], ptr [[Tmp]], i32 0, i32 0 +// CHECK-NEXT: [[G4:%.*]] = getelementptr inbounds [2 x i32], ptr [[Tmp]], i32 0, i32 1 +// CHECK-NEXT: [[L:%.*]] = load i32, ptr [[G3]], align 4 +// CHECK-NEXT: store i32 [[L]], ptr [[G1]], align 4 +// CHECK-NEXT: [[L4:%.*]] = load i32, ptr [[G4]], align 4 +// CHECK-NEXT: [[C:%.*]] = sitofp i32 [[L4]] to float +// CHECK-NEXT: store float [[C]], ptr [[G2]], align 4 +export void call7() { + int A[2] = {1,2}; + R r = (R)A; +} + +struct T { + int A; + int B; + int C; +}; + +// struct truncation +// CHECK-LABEL: define void {{.*}}call8 +// CHECK: [[t:%.*]] = alloca %struct.T, align 4 +// CHECK-NEXT: [[s:%.*]] = alloca %struct.S, align 4 +// CHECK-NEXT: [[Tmp:%.*]] = alloca %struct.T, align 4 +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[t]], ptr align 4 {{.*}}, i32 12, i1 false) +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[Tmp]], ptr align 4 [[t]], i32 12, i1 false) +// CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds %struct.S, ptr [[s]], i32 0, i32 0 +// CHECK-NEXT: [[G2:%.*]] = getelementptr inbounds %struct.S, ptr [[s]], i32 0, i32 1 +// CHECK-NEXT: [[G3:%.*]] = getelementptr inbounds %struct.T, ptr [[Tmp]], i32 0, i32 0 +// CHECK-NEXT: %gep3 = getelementptr inbounds %struct.T, ptr %agg-temp, i32 0, i32 1 +// CHECK-NEXT: %gep4 = 
getelementptr inbounds %struct.T, ptr %agg-temp, i32 0, i32 2 +// CHECK-NEXT: %load = load i32, ptr %gep2, align 4 +// CHECK-NEXT: store i32 %load, ptr %gep, align 4 +// CHECK-NEXT: %load5 = load i32, ptr %gep3, align 4 +// CHECK-NEXT: %conv = sitofp i32 %load5 to float +// CHECK-NEXT: store float %conv, ptr %gep1, align 4 +export void call8() { + T t = {1,2,3}; + S s = (S)t; +} diff --git a/clang/test/CodeGenHLSL/BasicFeatures/VectorElementwiseCast.hlsl b/clang/test/CodeGenHLSL/BasicFeatures/VectorElementwiseCast.hlsl new file mode 100644 index 0000000000000..f579dfb377de5 --- /dev/null +++ b/clang/test/CodeGenHLSL/BasicFeatures/VectorElementwiseCast.hlsl @@ -0,0 +1,81 @@ +// RUN: %clang_cc1 -finclude-default-header -triple dxil-pc-shadermodel6.3-library -x hlsl -emit-llvm -disable-llvm-passes -o - %s | FileCheck %s + +// vector flat cast from array +// CHECK-LABEL: define void {{.*}}call2 +// CHECK: [[A:%.*]] = alloca [2 x [1 x i32]], align 4 +// CHECK-NEXT: [[B:%.*]] = alloca <2 x i32>, align 8 +// CHECK-NEXT: [[Tmp:%.*]] = alloca [2 x [1 x i32]], align 4 +// CHECK-NEXT: [[Tmp2:%.*]] = alloca <2 x i32>, align 8 +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[A]], ptr align 4 {{.*}}, i32 8, i1 false) +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[Tmp]], ptr align 4 [[A]], i32 8, i1 false) +// CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds [2 x [1 x i32]], ptr [[Tmp]], i32 0, i32 0, i32 0 +// CHECK-NEXT: [[G2:%.*]] = getelementptr inbounds [2 x [1 x i32]], ptr [[Tmp]], i32 0, i32 1, i32 0 +// CHECK-NEXT: [[C:%.*]] = load <2 x i32>, ptr [[Tmp2]], align 8 +// CHECK-NEXT: [[L:%.*]] = load i32, ptr [[G1]], align 4 +// CHECK-NEXT: [[D:%.*]] = insertelement <2 x i32> [[C]], i32 [[L]], i64 0 +// CHECK-NEXT: [[L2:%.*]] = load i32, ptr [[G2]], align 4 +// CHECK-NEXT: [[E:%.*]] = insertelement <2 x i32> [[D]], i32 [[L2]], i64 1 +// CHECK-NEXT: store <2 x i32> [[E]], ptr [[B]], align 8 +export void call2() { + int A[2][1] = {{1},{2}}; + int2 B = (int2)A; +} + +struct S { + int X; + float Y; +}; + +// vector flat cast from struct +// CHECK-LABEL: define void {{.*}}call3 +// CHECK: [[s:%.*]] = alloca %struct.S, align 4 +// CHECK-NEXT: [[A:%.*]] = alloca <2 x i32>, align 8 +// CHECK-NEXT: [[Tmp:%.*]] = alloca %struct.S, align 4 +// CHECK-NEXT: [[Tmp2:%.*]] = alloca <2 x i32>, align 8 +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[s]], ptr align 4 {{.*}}, i32 8, i1 false) +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[Tmp]], ptr align 4 [[s]], i32 8, i1 false) +// CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds %struct.S, ptr [[Tmp]], i32 0, i32 0 +// CHECK-NEXT: [[G2:%.*]] = getelementptr inbounds %struct.S, ptr [[Tmp]], i32 0, i32 1 +// CHECK-NEXT: [[B:%.*]] = load <2 x i32>, ptr [[Tmp2]], align 8 +// CHECK-NEXT: [[L:%.*]] = load i32, ptr [[G1]], align 4 +// CHECK-NEXT: [[C:%.*]] = insertelement <2 x i32> [[B]], i32 [[L]], i64 0 +// CHECK-NEXT: [[L2:%.*]] = load float, ptr [[G2]], align 4 +// CHECK-NEXT: [[D:%.*]] = fptosi float [[L2]] to i32 +// CHECK-NEXT: [[E:%.*]] = insertelement <2 x i32> [[C]], i32 [[D]], i64 1 +// CHECK-NEXT: store <2 x i32> [[E]], ptr [[A]], align 8 +export void call3() { + S s = {1, 2.0}; + int2 A = (int2)s; +} + +// truncate array to scalar +// CHECK-LABEL: define void {{.*}}call4 +// CHECK: [[A:%.*]] = alloca [2 x i32], align 4 +// CHECK-NEXT: [[B:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[Tmp:%.*]] = alloca [2 x i32], align 4 +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[A]], ptr align 4 
{{.*}}, i32 8, i1 false) +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[Tmp]], ptr align 4 [[A]], i32 8, i1 false) +// CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds [2 x i32], ptr [[Tmp]], i32 0, i32 0 +// CHECK-NEXT: [[G2:%.*]] = getelementptr inbounds [2 x i32], ptr [[Tmp]], i32 0, i32 1 +// CHECK-NEXT: [[L:%.*]] = load i32, ptr [[G1]], align 4 +// CHECK-NEXT: store i32 [[L]], ptr [[B]], align 4 +export void call4() { + int A[2] = {1,2}; + int B = (int)A; +} + +// truncate struct to scalar +// CHECK-LABEL: define void {{.*}}call5 +// CHECK: [[s:%.*]] = alloca %struct.S, align 4 +// CHECK-NEXT: [[A:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[Tmp:%.*]] = alloca %struct.S, align 4 +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[s]], ptr align 4 {{.*}}, i32 8, i1 false) +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[Tmp]], ptr align 4 [[s]], i32 8, i1 false) +// CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds %struct.S, ptr [[Tmp]], i32 0, i32 0 +// CHECK-NEXT: [[G2:%.*]] = getelementptr inbounds %struct.S, ptr [[Tmp]], i32 0, i32 1 +// CHECK-NEXT: [[L:%.*]] = load i32, ptr [[G1]], align 4 +// CHECK-NEXT: store i32 [[L]], ptr [[A]], align 4 +export void call5() { + S s = {1, 2.0}; + int A = (int)s; +} diff --git a/clang/test/CodeGenHLSL/resource-bindings.hlsl b/clang/test/CodeGenHLSL/resource-bindings.hlsl index bfa7896bd9811..57e8cc29572b1 100644 --- a/clang/test/CodeGenHLSL/resource-bindings.hlsl +++ b/clang/test/CodeGenHLSL/resource-bindings.hlsl @@ -2,14 +2,17 @@ // CHECK: define internal void @_init_resource_U0S0() // CHECK: %U0S0_h = call target("dx.TypedBuffer", <4 x float>, 1, 0, 0) @llvm.dx.resource.handlefrombinding.tdx.TypedBuffer_v4f32_1_0_0t(i32 0, i32 0, i32 1, i32 0, i1 false) +// CHECK: store target("dx.TypedBuffer", <4 x float>, 1, 0, 0) %U0S0_h, ptr @U0S0, align 4 RWBuffer U0S0 : register(u0); // CHECK: define internal void @_init_resource_U5S3() // CHECK: %U5S3_h = call target("dx.TypedBuffer", float, 1, 0, 0) @llvm.dx.resource.handlefrombinding.tdx.TypedBuffer_f32_1_0_0t(i32 3, i32 5, i32 1, i32 0, i1 false) +// CHECK: store target("dx.TypedBuffer", float, 1, 0, 0) %U5S3_h, ptr @U5S3, align 4 RWBuffer U5S3 : register(u5, space3); // CHECK: define internal void @_init_resource_T2S2() // CHECK: %T2S2_h = call target("dx.RawBuffer", i32, 0, 0) @llvm.dx.resource.handlefrombinding.tdx.RawBuffer_i32_0_0t(i32 2, i32 2, i32 1, i32 0, i1 false) +// CHECK: store target("dx.RawBuffer", i32, 0, 0) %T2S2_h, ptr @T2S2, align 4 StructuredBuffer T2S2 : register(t2, space2); struct S { float4 f; @@ -18,6 +21,7 @@ struct S { // CHECK: define internal void @_init_resource_T3S0() // CHECK: %T3S0_h = call target("dx.RawBuffer", %struct.S, 0, 0) @llvm.dx.resource.handlefrombinding.tdx.RawBuffer_s_struct.Ss_0_0t(i32 0, i32 3, i32 1, i32 0, i1 false) +// CHECK: store target("dx.RawBuffer", %struct.S, 0, 0) %T3S0_h, ptr @T3S0, align 4 StructuredBuffer T3S0 : register(t3); // CHECK: define void @main() diff --git a/clang/test/Driver/clang_f_opts.c b/clang/test/Driver/clang_f_opts.c index 38f25898c9556..7454ce3d30f5f 100644 --- a/clang/test/Driver/clang_f_opts.c +++ b/clang/test/Driver/clang_f_opts.c @@ -45,6 +45,13 @@ // CHECK-UNROLL-LOOPS: "-funroll-loops" // CHECK-NO-UNROLL-LOOPS: "-fno-unroll-loops" +// RUN: %clang -### -S -floop-interchange %s 2>&1 | FileCheck -check-prefix=CHECK-INTERCHANGE-LOOPS %s +// RUN: %clang -### -S -fno-loop-interchange %s 2>&1 | FileCheck -check-prefix=CHECK-NO-INTERCHANGE-LOOPS %s +// RUN: %clang -### -S 
-fno-loop-interchange -floop-interchange %s 2>&1 | FileCheck -check-prefix=CHECK-INTERCHANGE-LOOPS %s +// RUN: %clang -### -S -floop-interchange -fno-loop-interchange %s 2>&1 | FileCheck -check-prefix=CHECK-NO-INTERCHANGE-LOOPS %s +// CHECK-INTERCHANGE-LOOPS: "-floop-interchange" +// CHECK-NO-INTERCHANGE-LOOPS: "-fno-loop-interchange" + // RUN: %clang -### -S -fprofile-sample-accurate %s 2>&1 | FileCheck -check-prefix=CHECK-PROFILE-SAMPLE-ACCURATE %s // CHECK-PROFILE-SAMPLE-ACCURATE: "-fprofile-sample-accurate" diff --git a/clang/test/Driver/fprofile-continuous.c b/clang/test/Driver/fprofile-continuous.c new file mode 100644 index 0000000000000..81719fb70cb1e --- /dev/null +++ b/clang/test/Driver/fprofile-continuous.c @@ -0,0 +1,21 @@ +// 1) test on platforms that (do or do not) require runtime relocation + +// RUN: %clang --target=x86_64-darwin -fprofile-generate -fprofile-continuous -### -c %s 2>&1 | FileCheck %s --check-prefix=NO_RELOC +// NO_RELOC: "-cc1" {{.*}} "-fprofile-continuous" +// NO_RELOC-NOT: "-mllvm" "-runtime-counter-relocation" + +// RUN: %clang --target=powerpc64-ibm-aix -fprofile-generate -fprofile-continuous -### -c %s 2>&1 | FileCheck %s --check-prefix=RELOC +// RUN: %clang --target=x86_64-unknown-fuchsia -fprofile-generate -fprofile-continuous -### -c %s 2>&1 | FileCheck %s --check-prefix=RELOC +// RELOC: "-cc1" {{.*}} "-fprofile-continuous" "-mllvm" "-runtime-counter-relocation" + +// 2) test -fprofile-continuous with cs-profile-generate and -fprofile-instr-generate + +// RUN: %clang --target=powerpc-ibm-aix -fprofile-instr-generate -fprofile-continuous -### -c %s 2>&1 | FileCheck %s --check-prefix=CLANG_PGO +// RUN: %clang --target=powerpc64le-unknown-linux -fprofile-instr-generate= -fprofile-continuous -### -c %s 2>&1 | FileCheck %s --check-prefix=CLANG_PGO +// CLANG_PGO: "-cc1" {{.*}} "-fprofile-continuous" "-mllvm" "-runtime-counter-relocation" "-fprofile-instrument-path=default.profraw" + +// RUN: %clang --target=x86_64-unknown-fuchsia -fcs-profile-generate -fprofile-continuous -### -c %s 2>&1 | FileCheck %s --check-prefix=RELOC + +// RUN: not %clang -fprofile-continuous -### -c %s 2>&1 | FileCheck %s --check-prefix=ERROR +// ERROR: error: invalid argument '-fprofile-continuous' only allowed with '-fprofile-generate, -fprofile-instr-generate, or -fcs-profile-generate' +void foo(){} diff --git a/clang/test/Driver/linker-wrapper.c b/clang/test/Driver/linker-wrapper.c index f416ee5f4463b..e7b7af7bdfbf3 100644 --- a/clang/test/Driver/linker-wrapper.c +++ b/clang/test/Driver/linker-wrapper.c @@ -21,16 +21,16 @@ __attribute__((visibility("protected"), used)) int x; // RUN: clang-linker-wrapper --host-triple=x86_64-unknown-linux-gnu --dry-run \ // RUN: --linker-path=/usr/bin/ld %t.o -o a.out 2>&1 | FileCheck %s --check-prefix=NVPTX-LINK -// NVPTX-LINK: clang{{.*}} -o {{.*}}.img --target=nvptx64-nvidia-cuda -march=sm_70 -O2 -flto {{.*}}.o {{.*}}.o +// NVPTX-LINK: clang{{.*}} -o {{.*}}.img --target=nvptx64-nvidia-cuda -march=sm_70 {{.*}}.o {{.*}}.o // RUN: clang-offload-packager -o %t.out \ // RUN: --image=file=%t.elf.o,kind=openmp,triple=nvptx64-nvidia-cuda,arch=sm_70 \ // RUN: --image=file=%t.elf.o,kind=openmp,triple=nvptx64-nvidia-cuda,arch=sm_70 // RUN: %clang -cc1 %s -triple x86_64-unknown-linux-gnu -emit-obj -o %t.o -fembed-offload-object=%t.out -// RUN: clang-linker-wrapper --host-triple=x86_64-unknown-linux-gnu --dry-run --device-debug -O0 \ +// RUN: clang-linker-wrapper --host-triple=x86_64-unknown-linux-gnu --dry-run --device-compiler=-g \ // RUN: 
--linker-path=/usr/bin/ld %t.o -o a.out 2>&1 | FileCheck %s --check-prefix=NVPTX-LINK-DEBUG -// NVPTX-LINK-DEBUG: clang{{.*}} -o {{.*}}.img --target=nvptx64-nvidia-cuda -march=sm_70 -O2 -flto {{.*}}.o {{.*}}.o -g +// NVPTX-LINK-DEBUG: clang{{.*}} --target=nvptx64-nvidia-cuda -march=sm_70 {{.*}}-g // RUN: clang-offload-packager -o %t.out \ // RUN: --image=file=%t.elf.o,kind=openmp,triple=amdgcn-amd-amdhsa,arch=gfx908 \ @@ -39,16 +39,16 @@ __attribute__((visibility("protected"), used)) int x; // RUN: clang-linker-wrapper --host-triple=x86_64-unknown-linux-gnu --dry-run \ // RUN: --linker-path=/usr/bin/ld %t.o -o a.out 2>&1 | FileCheck %s --check-prefix=AMDGPU-LINK -// AMDGPU-LINK: clang{{.*}} -o {{.*}}.img --target=amdgcn-amd-amdhsa -mcpu=gfx908 -O2 -flto -Wl,--no-undefined {{.*}}.o {{.*}}.o +// AMDGPU-LINK: clang{{.*}} -o {{.*}}.img --target=amdgcn-amd-amdhsa -mcpu=gfx908 -Wl,--no-undefined {{.*}}.o {{.*}}.o // RUN: clang-offload-packager -o %t.out \ // RUN: --image=file=%t.amdgpu.bc,kind=openmp,triple=amdgcn-amd-amdhsa,arch=gfx1030 \ // RUN: --image=file=%t.amdgpu.bc,kind=openmp,triple=amdgcn-amd-amdhsa,arch=gfx1030 // RUN: %clang -cc1 %s -triple x86_64-unknown-linux-gnu -emit-obj -o %t.o -fembed-offload-object=%t.out -// RUN: clang-linker-wrapper --host-triple=x86_64-unknown-linux-gnu --dry-run --save-temps -O2 \ +// RUN: clang-linker-wrapper --host-triple=x86_64-unknown-linux-gnu --dry-run --device-compiler=--save-temps \ // RUN: --linker-path=/usr/bin/ld %t.o -o a.out 2>&1 | FileCheck %s --check-prefix=AMDGPU-LTO-TEMPS -// AMDGPU-LTO-TEMPS: clang{{.*}} -o {{.*}}.img --target=amdgcn-amd-amdhsa -mcpu=gfx1030 -O2 -flto -Wl,--no-undefined {{.*}}.o -save-temps +// AMDGPU-LTO-TEMPS: clang{{.*}} --target=amdgcn-amd-amdhsa -mcpu=gfx1030 {{.*}}-save-temps // RUN: clang-offload-packager -o %t.out \ // RUN: --image=file=%t.elf.o,kind=openmp,triple=x86_64-unknown-linux-gnu \ @@ -59,7 +59,7 @@ __attribute__((visibility("protected"), used)) int x; // RUN: --linker-path=/usr/bin/ld.lld --whole-archive %t.a --no-whole-archive \ // RUN: %t.o -o a.out 2>&1 | FileCheck %s --check-prefix=CPU-LINK -// CPU-LINK: clang{{.*}} -o {{.*}}.img --target=x86_64-unknown-linux-gnu -march=native -O2 -flto -Wl,--no-undefined {{.*}}.o {{.*}}.o -Wl,-Bsymbolic -shared -Wl,--whole-archive {{.*}}.a -Wl,--no-whole-archive +// CPU-LINK: clang{{.*}} -o {{.*}}.img --target=x86_64-unknown-linux-gnu -march=native -Wl,--no-undefined {{.*}}.o {{.*}}.o -Wl,-Bsymbolic -shared -Wl,--whole-archive {{.*}}.a -Wl,--no-whole-archive // RUN: %clang -cc1 %s -triple x86_64-unknown-linux-gnu -emit-obj -o %t.o // RUN: clang-linker-wrapper --dry-run --host-triple=x86_64-unknown-linux-gnu -mllvm -openmp-opt-disable \ @@ -148,7 +148,7 @@ __attribute__((visibility("protected"), used)) int x; // RUN: clang-linker-wrapper --host-triple=x86_64-unknown-linux-gnu --dry-run --clang-backend \ // RUN: --linker-path=/usr/bin/ld %t.o -o a.out 2>&1 | FileCheck %s --check-prefix=CLANG-BACKEND -// CLANG-BACKEND: clang{{.*}} -o {{.*}}.img --target=amdgcn-amd-amdhsa -mcpu=gfx908 -O2 -flto -Wl,--no-undefined {{.*}}.o +// CLANG-BACKEND: clang{{.*}} -o {{.*}}.img --target=amdgcn-amd-amdhsa -mcpu=gfx908 -Wl,--no-undefined {{.*}}.o // RUN: clang-offload-packager -o %t.out \ // RUN: --image=file=%t.elf.o,kind=openmp,triple=nvptx64-nvidia-cuda,arch=sm_70 @@ -171,8 +171,8 @@ __attribute__((visibility("protected"), used)) int x; // RUN: clang-linker-wrapper --host-triple=x86_64-unknown-linux-gnu --dry-run \ // RUN: --linker-path=/usr/bin/ld %t-on.o %t-off.o %t.a -o a.out 
2>&1 | FileCheck %s --check-prefix=AMD-TARGET-ID -// AMD-TARGET-ID: clang{{.*}} -o {{.*}}.img --target=amdgcn-amd-amdhsa -mcpu=gfx90a:xnack+ -O2 -flto -Wl,--no-undefined {{.*}}.o {{.*}}.o -// AMD-TARGET-ID: clang{{.*}} -o {{.*}}.img --target=amdgcn-amd-amdhsa -mcpu=gfx90a:xnack- -O2 -flto -Wl,--no-undefined {{.*}}.o {{.*}}.o +// AMD-TARGET-ID: clang{{.*}} -o {{.*}}.img --target=amdgcn-amd-amdhsa -mcpu=gfx90a:xnack+ -Wl,--no-undefined {{.*}}.o {{.*}}.o +// AMD-TARGET-ID: clang{{.*}} -o {{.*}}.img --target=amdgcn-amd-amdhsa -mcpu=gfx90a:xnack- -Wl,--no-undefined {{.*}}.o {{.*}}.o // RUN: clang-offload-packager -o %t-lib.out \ // RUN: --image=file=%t.elf.o,kind=openmp,triple=amdgcn-amd-amdhsa,arch=generic @@ -187,8 +187,8 @@ __attribute__((visibility("protected"), used)) int x; // RUN: clang-linker-wrapper --host-triple=x86_64-unknown-linux-gnu --dry-run \ // RUN: --linker-path=/usr/bin/ld %t1.o %t2.o %t.a -o a.out 2>&1 | FileCheck %s --check-prefix=ARCH-ALL -// ARCH-ALL: clang{{.*}} -o {{.*}}.img --target=amdgcn-amd-amdhsa -mcpu=gfx90a -O2 -flto -Wl,--no-undefined {{.*}}.o {{.*}}.o -// ARCH-ALL: clang{{.*}} -o {{.*}}.img --target=amdgcn-amd-amdhsa -mcpu=gfx908 -O2 -flto -Wl,--no-undefined {{.*}}.o {{.*}}.o +// ARCH-ALL: clang{{.*}} -o {{.*}}.img --target=amdgcn-amd-amdhsa -mcpu=gfx90a -Wl,--no-undefined {{.*}}.o {{.*}}.o +// ARCH-ALL: clang{{.*}} -o {{.*}}.img --target=amdgcn-amd-amdhsa -mcpu=gfx908 -Wl,--no-undefined {{.*}}.o {{.*}}.o // RUN: clang-offload-packager -o %t.out \ // RUN: --image=file=%t.elf.o,kind=openmp,triple=x86_64-unknown-linux-gnu \ diff --git a/clang/test/Driver/offload-Xarch.c b/clang/test/Driver/offload-Xarch.c index 17db891b06834..8856dac198465 100644 --- a/clang/test/Driver/offload-Xarch.c +++ b/clang/test/Driver/offload-Xarch.c @@ -14,7 +14,7 @@ // RUN: --target=x86_64-unknown-linux-gnu -Xopenmp-target=nvptx64-nvidia-cuda --offload-arch=sm_52,sm_60 -nogpuinc \ // RUN: -Xopenmp-target=amdgcn-amd-amdhsa --offload-arch=gfx90a,gfx1030 -ccc-print-bindings -### %s 2>&1 \ // RUN: | FileCheck -check-prefix=OPENMP %s -// + // OPENMP: # "x86_64-unknown-linux-gnu" - "clang", inputs: ["[[INPUT:.+]]"], output: "[[HOST_BC:.+]]" // OPENMP: # "amdgcn-amd-amdhsa" - "clang", inputs: ["[[INPUT]]", "[[HOST_BC]]"], output: "[[GFX1030_BC:.+]]" // OPENMP: # "amdgcn-amd-amdhsa" - "clang", inputs: ["[[INPUT]]", "[[HOST_BC]]"], output: "[[GFX90A_BC:.+]]" @@ -32,3 +32,12 @@ // CUDA: "-cc1" "-triple" "nvptx64-nvidia-cuda" {{.*}}"-target-cpu" "sm_52" {{.*}}"-O3" // CUDA: "-cc1" "-triple" "nvptx64-nvidia-cuda" {{.*}}"-target-cpu" "sm_60" {{.*}}"-O0" // CUDA: "-cc1" "-triple" "x86_64-unknown-linux-gnu" {{.*}}"-O3" + +// Make sure that `-Xarch_amdgcn` forwards libraries to the device linker. 
+// RUN: %clang -fopenmp=libomp --offload-arch=gfx90a -nogpulib -nogpuinc \ +// RUN: --target=x86_64-unknown-linux-gnu -Xarch_amdgcn -Wl,-lfoo -### %s 2>&1 \ +// RUN: | FileCheck -check-prefix=LIBS %s +// RUN: %clang -fopenmp=libomp --offload-arch=gfx90a -nogpulib -nogpuinc \ +// RUN: -Xoffload-linker-amdgcn-amd-amdhsa -lfoo -### %s 2>&1 \ +// RUN: | FileCheck -check-prefix=LIBS %s +// LIBS: "--device-linker=amdgcn-amd-amdhsa=-lfoo" diff --git a/clang/test/Driver/openmp-offload.c b/clang/test/Driver/openmp-offload.c index 6f56ae00ba065..2cf2643af6c15 100644 --- a/clang/test/Driver/openmp-offload.c +++ b/clang/test/Driver/openmp-offload.c @@ -208,3 +208,13 @@ // RUN: -fsyntax-only %s 2>&1 | FileCheck -check-prefix=CHK-SYNTAX-ONLY-ARGS %s // CHK-SYNTAX-ONLY-ARGS: "-cc1" "-triple" "powerpc64le-ibm-linux-gnu"{{.*}}"-fsyntax-only" // CHK-SYNTAX-ONLY-ARGS: "-cc1" "-triple" "powerpc64le-unknown-linux"{{.*}}"-fsyntax-only" + +// +// Ensure `-foffload-lto` is forwarded properly. +// +// RUN: %clang -### --target=powerpc64le-linux -fopenmp=libomp -fopenmp-targets=powerpc64le-ibm-linux-gnu \ +// RUN: -foffload-lto %s 2>&1 | FileCheck -check-prefix=CHK-DEVICE-LTO-FULL %s +// CHK-DEVICE-LTO-FULL: clang-linker-wrapper{{.*}} "--device-compiler=powerpc64le-ibm-linux-gnu=-flto=full" +// RUN: %clang -### --target=powerpc64le-linux -fopenmp=libomp -fopenmp-targets=powerpc64le-ibm-linux-gnu \ +// RUN: -foffload-lto=thin %s 2>&1 | FileCheck -check-prefix=CHK-DEVICE-LTO-THIN %s +// CHK-DEVICE-LTO-THIN: clang-linker-wrapper{{.*}} "--device-compiler=powerpc64le-ibm-linux-gnu=-flto=thin" diff --git a/clang/test/ExtractAPI/typedef_underscore.c b/clang/test/ExtractAPI/typedef_underscore.c new file mode 100644 index 0000000000000..a42046907b46d --- /dev/null +++ b/clang/test/ExtractAPI/typedef_underscore.c @@ -0,0 +1,69 @@ +// RUN: rm -rf %t +// RUN: %clang_cc1 -extract-api --pretty-sgf --emit-sgf-symbol-labels-for-testing \ +// RUN: --product-name=TypedefChain -triple arm64-apple-macosx -x c-header %s -o %t/typedefchain-c.symbols.json -verify +// RUN: %clang_cc1 -extract-api --pretty-sgf --emit-sgf-symbol-labels-for-testing \ +// RUN: --product-name=TypedefChain -triple arm64-apple-macosx -x c++-header %s -o %t/typedefchain-cxx.symbols.json -verify + +// RUN: FileCheck %s --input-file %t/typedefchain-c.symbols.json --check-prefix MYSTRUCT +// RUN: FileCheck %s --input-file %t/typedefchain-cxx.symbols.json --check-prefix MYSTRUCT +typedef struct _MyStruct { } MyStruct; + +// MYSTRUCT-LABEL: "!testLabel": "c:@S@_MyStruct" +// MYSTRUCT: "accessLevel": "public", +// MYSTRUCT: "declarationFragments": [ +// MYSTRUCT-NEXT: { +// MYSTRUCT-NEXT: "kind": "keyword", +// MYSTRUCT-NEXT: "spelling": "typedef" +// MYSTRUCT-NEXT: }, +// MYSTRUCT-NEXT: { +// MYSTRUCT-NEXT: "kind": "text", +// MYSTRUCT-NEXT: "spelling": " " +// MYSTRUCT-NEXT: }, +// MYSTRUCT-NEXT: { +// MYSTRUCT-NEXT: "kind": "keyword", +// MYSTRUCT-NEXT: "spelling": "struct" +// MYSTRUCT-NEXT: }, +// MYSTRUCT-NEXT: { +// MYSTRUCT-NEXT: "kind": "text", +// MYSTRUCT-NEXT: "spelling": " " +// MYSTRUCT-NEXT: }, +// MYSTRUCT-NEXT: { +// MYSTRUCT-NEXT: "kind": "identifier", +// MYSTRUCT-NEXT: "spelling": "_MyStruct" +// MYSTRUCT-NEXT: }, +// MYSTRUCT-NEXT: { +// MYSTRUCT-NEXT: "kind": "text", +// MYSTRUCT-NEXT: "spelling": " { ... 
} " +// MYSTRUCT-NEXT: }, +// MYSTRUCT-NEXT: { +// MYSTRUCT-NEXT: "kind": "identifier", +// MYSTRUCT-NEXT: "spelling": "MyStruct" +// MYSTRUCT-NEXT: }, +// MYSTRUCT-NEXT: { +// MYSTRUCT-NEXT: "kind": "text", +// MYSTRUCT-NEXT: "spelling": ";" +// MYSTRUCT-NEXT: } +// MYSTRUCT-NEXT: ], +// MYSTRUCT: "kind": { +// MYSTRUCT-NEXT: "displayName": "Structure", +// MYSTRUCT-NEXT: "identifier": "c{{(\+\+)?}}.struct" +// MYSTRUCT: "names": { +// MYSTRUCT-NEXT: "navigator": [ +// MYSTRUCT-NEXT: { +// MYSTRUCT-NEXT: "kind": "identifier", +// MYSTRUCT-NEXT: "spelling": "MyStruct" +// MYSTRUCT-NEXT: } +// MYSTRUCT-NEXT: ], +// MYSTRUCT-NEXT: "subHeading": [ +// MYSTRUCT-NEXT: { +// MYSTRUCT-NEXT: "kind": "identifier", +// MYSTRUCT-NEXT: "spelling": "MyStruct" +// MYSTRUCT-NEXT: } +// MYSTRUCT-NEXT: ], +// MYSTRUCT-NEXT: "title": "MyStruct" +// MYSTRUCT-NEXT: }, +// MYSTRUCT: "pathComponents": [ +// MYSTRUCT-NEXT: "MyStruct" +// MYSTRUCT-NEXT: ] + +// expected-no-diagnostics diff --git a/clang/test/Index/index-deduction-guide.cpp b/clang/test/Index/index-deduction-guide.cpp new file mode 100644 index 0000000000000..a29162e8588e8 --- /dev/null +++ b/clang/test/Index/index-deduction-guide.cpp @@ -0,0 +1,10 @@ +// RUN: c-index-test core -print-source-symbols -- %s -std=gnu++17 | FileCheck %s + +template +typename T::type declval() {} +template struct Test; +template ().d())> Test(C &) -> Test; +// CHECK: [[@LINE-1]]:45 | function/C | declval +// CHECK-NOT: RelCall +// CHECK: [[@LINE-3]]:77 | struct(Gen)/C++ | Test +// CHECK: [[@LINE-4]]:64 | struct(Gen)/C++ | Test diff --git a/clang/test/OpenMP/declare_target_messages.cpp b/clang/test/OpenMP/declare_target_messages.cpp index ce5a833b3866a..3c0e766cf72ca 100644 --- a/clang/test/OpenMP/declare_target_messages.cpp +++ b/clang/test/OpenMP/declare_target_messages.cpp @@ -33,6 +33,11 @@ // RUN: %clang_cc1 %{common_opts_mac} -verify=expected,omp51,ompvar,omp45-to-51,omp5-and-51,omp5-or-later,omp5-or-later-var,omp45-to-51-var,omp45-to-51-clause,host-5-and-51,no-host5-and-51 -fopenmp %{limit} -o - %s // RUN: %clang_cc1 %{common_opts_mac} -verify=expected,omp52,ompvar,omp5-or-later,omp5-or-later-var %{openmp60} %{limit} -o - %s +#pragma omp begin declare target +static int gg; +// expected-warning@+1 {{variable 'recursive' is uninitialized when used within its own initialization}} +int recursive = recursive ^ 3 + gg; +#pragma omp end declare target // expected-error@+1 {{unexpected OpenMP directive '#pragma omp end declare target'}} #pragma omp end declare target diff --git a/clang/test/SemaCXX/cxx1z-decomposition.cpp b/clang/test/SemaCXX/cxx1z-decomposition.cpp index a8914fe4e9cd8..95c64bc3b8bff 100644 --- a/clang/test/SemaCXX/cxx1z-decomposition.cpp +++ b/clang/test/SemaCXX/cxx1z-decomposition.cpp @@ -200,38 +200,32 @@ namespace lambdas { namespace by_value_array_copy { struct explicit_copy { - explicit_copy() = default; // expected-note 2{{candidate constructor not viable: requires 0 arguments, but 1 was provided}} - explicit explicit_copy(const explicit_copy&) = default; // expected-note 2{{explicit constructor is not a candidate}} + explicit_copy() = default; // expected-note {{candidate constructor not viable: requires 0 arguments, but 1 was provided}} + explicit explicit_copy(const explicit_copy&) = default; // expected-note {{explicit constructor is not a candidate}} }; - constexpr int direct_initialization_for_elements() { - explicit_copy ec_arr[2]; - auto [a1, b1](ec_arr); + constexpr int simple_array_elements() { + int arr[2]{1, 2}; - int arr[3]{1, 2, 3}; 
- auto [a2, b2, c2](arr); - arr[0]--; - return a2 + b2 + c2 + arr[0]; - } - static_assert(direct_initialization_for_elements() == 6); + auto [a1, a2] = arr; + auto [b1, b2](arr); + auto [c1, c2]{arr}; // GH31813 - constexpr int copy_initialization_for_elements() { - int arr[2]{4, 5}; - auto [a1, b1] = arr; - auto [a2, b2]{arr}; // GH31813 arr[0] = 0; - return a1 + b1 + a2 + b2 + arr[0]; + return arr[0] + a1 + a2 + b1 + b2 + c1 + c2; } - static_assert(copy_initialization_for_elements() == 18); + static_assert(simple_array_elements() == 9); + + void explicit_copy_ctor_array_elements() { + explicit_copy ec_arr[1]; - void copy_initialization_for_elements_with_explicit_copy_ctor() { - explicit_copy ec_arr[2]; - auto [a1, b1] = ec_arr; // expected-error {{no matching constructor for initialization of 'explicit_copy[2]'}} - auto [a2, b2]{ec_arr}; // expected-error {{no matching constructor for initialization of 'explicit_copy[2]'}} + auto [a] = ec_arr; // expected-error {{no matching constructor for initialization of 'explicit_copy[1]'}} + auto [b](ec_arr); + auto [c]{ec_arr}; // Test prvalue - using T = explicit_copy[2]; - auto [a3, b3] = T{}; - auto [a4, b4]{T{}}; + using T = explicit_copy[1]; + auto [d] = T{}; } + } // namespace by_value_array_copy diff --git a/clang/test/SemaCXX/unique_object_duplication.cpp b/clang/test/SemaCXX/unique_object_duplication.cpp index 8a19fb7b81187..4b41bfbfdc2f7 100644 --- a/clang/test/SemaCXX/unique_object_duplication.cpp +++ b/clang/test/SemaCXX/unique_object_duplication.cpp @@ -1,7 +1,7 @@ // RUN: %clang_cc1 -fsyntax-only -verify=hidden -Wunique-object-duplication -fvisibility=hidden -Wno-unused-value %s // RUN: %clang_cc1 -fsyntax-only -verify -Wunique-object-duplication -Wno-unused-value %s -// The check is currently disabled on windows. The test should fail because we're not getting the expected warnings. -// XFAIL: target={{.*}}-windows{{.*}}, {{.*}}-ps{{(4|5)(-.+)?}} +// The check is currently disabled on windows in MSVC-like environments. The test should fail because we're not getting the expected warnings. +// XFAIL: target={{.*}}-windows-msvc, {{.*}}-ps{{(4|5)(-.+)?}} #include "unique_object_duplication.h" diff --git a/clang/test/SemaCXX/warn-unsafe-buffer-usage-array.cpp b/clang/test/SemaCXX/warn-unsafe-buffer-usage-array.cpp index e80b54b7c6967..9bfc31bd07b0e 100644 --- a/clang/test/SemaCXX/warn-unsafe-buffer-usage-array.cpp +++ b/clang/test/SemaCXX/warn-unsafe-buffer-usage-array.cpp @@ -124,3 +124,38 @@ void array_indexed_const_expr(unsigned idx) { k = arr[get_const(5)]; // expected-note {{used in buffer access here}} k = arr[get_const(4)]; } + +template +consteval bool isNullTerminated(const char (&literal)[length]) +{ + return literal[length - 1] == '\0'; +} + +template +T access2DArray(const T (&arr)[M][N]) { + return arr[M-1][N-1]; +} + +template +constexpr int access_elements() { + int arr[idx + 20]; + return arr[idx + 1]; +} + +// Test array accesses where const sized arrays are accessed safely with indices +// that evaluate to a const values and depend on template arguments. 
+void test_template_methods() +{ + constexpr char arr[] = "Good Morning!"; // = {'a', 'b', 'c', 'd', 'e'}; + isNullTerminated(arr); + isNullTerminated(""); + auto _ = isNullTerminated("hello world\n"); + access_elements<5>(); + + int arr1[3][4] = { + {1, 2, 3, 4}, + {5, 6, 7, 8}, + {9, 10, 11, 12} + }; + access2DArray(arr1); +} diff --git a/clang/test/SemaHLSL/BuiltIns/vector-constructors-erros.hlsl b/clang/test/SemaHLSL/BuiltIns/vector-constructors-erros.hlsl index 7f6bdc7e67836..b004acdc7c502 100644 --- a/clang/test/SemaHLSL/BuiltIns/vector-constructors-erros.hlsl +++ b/clang/test/SemaHLSL/BuiltIns/vector-constructors-erros.hlsl @@ -17,6 +17,4 @@ void entry() { // These _should_ work in HLSL but aren't yet supported. S s; float2 GettingStrange = float2(s, s); // expected-error{{no viable conversion from 'S' to 'float'}} expected-error{{no viable conversion from 'S' to 'float'}} - S2 s2; - float2 EvenStranger = float2(s2); // expected-error{{cannot convert 'S2' to 'float2' (vector of 2 'float' values) without a conversion operator}} } diff --git a/clang/test/SemaHLSL/Language/ElementwiseCast-errors.hlsl b/clang/test/SemaHLSL/Language/ElementwiseCast-errors.hlsl new file mode 100644 index 0000000000000..c900c83a063a0 --- /dev/null +++ b/clang/test/SemaHLSL/Language/ElementwiseCast-errors.hlsl @@ -0,0 +1,29 @@ +// RUN: %clang_cc1 -finclude-default-header -triple dxil-pc-shadermodel6.6-library %s -verify + +export void cantCast() { + int A[3] = {1,2,3}; + int B[4] = {1,2,3,4}; + B = (int[4])A; + // expected-error@-1 {{C-style cast from 'int *' to 'int[4]' is not allowed}} +} + +struct S { +// expected-note@-1 {{candidate constructor (the implicit copy constructor) not viable: no known conversion from 'int2' (aka 'vector') to 'const S' for 1st argument}} +// expected-note@-2 {{candidate constructor (the implicit move constructor) not viable: no known conversion from 'int2' (aka 'vector') to 'S' for 1st argument}} +// expected-note@-3 {{candidate constructor (the implicit default constructor) not viable: requires 0 arguments, but 1 was provided}} + int A : 8; + int B; +}; + +// casting types which contain bitfields is not yet supported. 
+export void cantCast2() { + S s = {1,2}; + int2 C = (int2)s; + // expected-error@-1 {{cannot convert 'S' to 'int2' (aka 'vector') without a conversion operator}} +} + +export void cantCast3() { + int2 C = {1,2}; + S s = (S)C; + // expected-error@-1 {{no matching conversion for C-style cast from 'int2' (aka 'vector') to 'S'}} +} diff --git a/clang/test/SemaHLSL/Language/ElementwiseCasts.hlsl b/clang/test/SemaHLSL/Language/ElementwiseCasts.hlsl new file mode 100644 index 0000000000000..563d3f02a1485 --- /dev/null +++ b/clang/test/SemaHLSL/Language/ElementwiseCasts.hlsl @@ -0,0 +1,23 @@ +// RUN: %clang_cc1 -triple dxil-pc-shadermodel6.3-library -finclude-default-header -fnative-half-type %s -ast-dump | FileCheck %s + +// truncation +// CHECK-LABEL: call1 +// CHECK: CStyleCastExpr {{.*}} 'int[1]' +// CHECK-NEXT: ImplicitCastExpr {{.*}} 'int[2]' part_of_explicit_cast +// CHECK-NEXT: DeclRefExpr {{.*}} 'int[2]' lvalue Var {{.*}} 'A' 'int[2]' +export void call1() { + int A[2] = {0,1}; + int B[1] = {4}; + B = (int[1])A; +} + +// flat cast of equal size +// CHECK-LABEL: call2 +// CHECK: CStyleCastExpr {{.*}} 'float[1]' +// CHECK-NEXT: ImplicitCastExpr {{.*}} 'int[1]' part_of_explicit_cast +// CHECK-NEXT: DeclRefExpr {{.*}} 'int[1]' lvalue Var {{.*}} 'A' 'int[1]' +export void call2() { + int A[1] = {0}; + float B[1] = {1.0}; + B = (float[1])A; +} diff --git a/clang/test/Tooling/clang-linker-wrapper-spirv-elf.cpp b/clang/test/Tooling/clang-linker-wrapper-spirv-elf.cpp index 50457f47868a0..4f8658064e857 100644 --- a/clang/test/Tooling/clang-linker-wrapper-spirv-elf.cpp +++ b/clang/test/Tooling/clang-linker-wrapper-spirv-elf.cpp @@ -1,6 +1,4 @@ // Verify the ELF packaging of OpenMP SPIR-V device images. -// FIXME: Re-enable when spirv-tools feature detection fixed -// UNSUPPORTED: system-linux // REQUIRES: system-linux // REQUIRES: spirv-tools // RUN: mkdir -p %t_tmp diff --git a/clang/test/lit.site.cfg.py.in b/clang/test/lit.site.cfg.py.in index ce10e9128a1df..6890da5327cb9 100644 --- a/clang/test/lit.site.cfg.py.in +++ b/clang/test/lit.site.cfg.py.in @@ -43,7 +43,7 @@ config.llvm_external_lit = path(r"@LLVM_EXTERNAL_LIT@") config.standalone_build = @CLANG_BUILT_STANDALONE@ config.ppc_linux_default_ieeelongdouble = @PPC_LINUX_DEFAULT_IEEELONGDOUBLE@ config.have_llvm_driver = @LLVM_TOOL_LLVM_DRIVER_BUILD@ -config.spirv_tools_tests = "@LLVM_INCLUDE_SPIRV_TOOLS_TESTS@" +config.spirv_tools_tests = @LLVM_INCLUDE_SPIRV_TOOLS_TESTS@ config.substitutions.append(("%llvm-version-major", "@LLVM_VERSION_MAJOR@")) import lit.llvm diff --git a/clang/tools/clang-format/clang-format.el b/clang/tools/clang-format/clang-format.el index 54ab2e1fd4c61..b356e1bfa1436 100644 --- a/clang/tools/clang-format/clang-format.el +++ b/clang/tools/clang-format/clang-format.el @@ -32,6 +32,7 @@ (require 'cl-lib) (require 'xml) +(require 'vc-git) (defgroup clang-format nil "Format code using clang-format." 
diff --git a/clang/tools/clang-linker-wrapper/ClangLinkerWrapper.cpp b/clang/tools/clang-linker-wrapper/ClangLinkerWrapper.cpp index b189cfee674dd..1a82a1c59b721 100644 --- a/clang/tools/clang-linker-wrapper/ClangLinkerWrapper.cpp +++ b/clang/tools/clang-linker-wrapper/ClangLinkerWrapper.cpp @@ -485,7 +485,6 @@ Expected clang(ArrayRef InputFiles, const ArgList &Args) { if (!TempFileOrErr) return TempFileOrErr.takeError(); - StringRef OptLevel = Args.getLastArgValue(OPT_opt_level, "O2"); SmallVector CmdArgs{ *ClangPath, "--no-default-config", @@ -493,12 +492,9 @@ Expected clang(ArrayRef InputFiles, const ArgList &Args) { *TempFileOrErr, Args.MakeArgString("--target=" + Triple.getTriple()), Triple.isAMDGPU() ? Args.MakeArgString("-mcpu=" + Arch) - : Args.MakeArgString("-march=" + Arch), - Args.MakeArgString("-" + OptLevel), - }; + : Args.MakeArgString("-march=" + Arch)}; // Forward all of the `--offload-opt` and similar options to the device. - CmdArgs.push_back("-flto"); for (auto &Arg : Args.filtered(OPT_offload_opt_eq_minus, OPT_mllvm)) CmdArgs.append( {"-Xlinker", @@ -547,29 +543,12 @@ Expected clang(ArrayRef InputFiles, const ArgList &Args) { CmdArgs.append({"-Xlinker", Args.MakeArgString( "-mllvm=" + StringRef(Arg->getValue()))}); - if (Args.hasArg(OPT_debug)) - CmdArgs.push_back("-g"); - - if (SaveTemps) - CmdArgs.push_back("-save-temps"); - if (SaveTemps && linkerSupportsLTO(Args)) CmdArgs.push_back("-Wl,--save-temps"); if (Args.hasArg(OPT_embed_bitcode)) CmdArgs.push_back("-Wl,--lto-emit-llvm"); - if (Verbose) - CmdArgs.push_back("-v"); - - if (!CudaBinaryPath.empty()) - CmdArgs.push_back(Args.MakeArgString("--cuda-path=" + CudaBinaryPath)); - - for (StringRef Arg : Args.getAllArgValues(OPT_ptxas_arg)) - llvm::copy( - SmallVector({"-Xcuda-ptxas", Args.MakeArgString(Arg)}), - std::back_inserter(CmdArgs)); - for (StringRef Arg : Args.getAllArgValues(OPT_linker_arg_EQ)) CmdArgs.append({"-Xlinker", Args.MakeArgString(Arg)}); for (StringRef Arg : Args.getAllArgValues(OPT_compiler_arg_EQ)) diff --git a/clang/tools/clang-linker-wrapper/LinkerWrapperOpts.td b/clang/tools/clang-linker-wrapper/LinkerWrapperOpts.td index 57d918db0a73c..17fb9db35fe39 100644 --- a/clang/tools/clang-linker-wrapper/LinkerWrapperOpts.td +++ b/clang/tools/clang-linker-wrapper/LinkerWrapperOpts.td @@ -17,11 +17,9 @@ def cuda_path_EQ : Joined<["--"], "cuda-path=">, Flags<[WrapperOnlyOption]>, MetaVarName<"">, HelpText<"Set the system CUDA path">; def host_triple_EQ : Joined<["--"], "host-triple=">, - Flags<[WrapperOnlyOption]>, MetaVarName<"">, - HelpText<"Triple to use for the host compilation">; -def opt_level : Joined<["--"], "opt-level=">, - Flags<[WrapperOnlyOption]>, MetaVarName<"">, - HelpText<"Optimization level for LTO">; + Flags<[WrapperOnlyOption]>, + MetaVarName<"">, + HelpText<"Triple to use for the host compilation">; def device_linker_args_EQ : Joined<["--"], "device-linker=">, Flags<[WrapperOnlyOption]>, MetaVarName<" or =">, HelpText<"Arguments to pass to the device linker invocation">; @@ -34,18 +32,8 @@ def dry_run : Flag<["--"], "dry-run">, def verbose : Flag<["--"], "wrapper-verbose">, Flags<[WrapperOnlyOption]>, HelpText<"Verbose output from tools">; def embed_bitcode : Flag<["--"], "embed-bitcode">, - Flags<[WrapperOnlyOption]>, HelpText<"Embed linked bitcode in the module">; -def debug : Flag<["--"], "device-debug">, Flags<[WrapperOnlyOption]>, - HelpText<"Use debugging">; -def ptxas_arg : Joined<["--"], "ptxas-arg=">, - Flags<[WrapperOnlyOption]>, - HelpText<"Argument to pass to the 'ptxas' 
invocation">; -def pass_remarks_EQ : Joined<["--"], "pass-remarks=">, - Flags<[WrapperOnlyOption]>, HelpText<"Pass remarks for LTO">; -def pass_remarks_missed_EQ : Joined<["--"], "pass-remarks-missed=">, - Flags<[WrapperOnlyOption]>, HelpText<"Pass remarks for LTO">; -def pass_remarks_analysis_EQ : Joined<["--"], "pass-remarks-analysis=">, - Flags<[WrapperOnlyOption]>, HelpText<"Pass remarks for LTO">; + Flags<[WrapperOnlyOption]>, + HelpText<"Embed linked bitcode in the module">; def print_wrapped_module : Flag<["--"], "print-wrapped-module">, Flags<[WrapperOnlyOption]>, HelpText<"Print the wrapped module's IR for testing">; diff --git a/clang/unittests/Format/ConfigParseTest.cpp b/clang/unittests/Format/ConfigParseTest.cpp index 0cb2a1288bfd7..9cd262960b724 100644 --- a/clang/unittests/Format/ConfigParseTest.cpp +++ b/clang/unittests/Format/ConfigParseTest.cpp @@ -168,6 +168,7 @@ TEST(ConfigParseTest, ParsesConfigurationBools) { CHECK_PARSE_BOOL(AllowShortLoopsOnASingleLine); CHECK_PARSE_BOOL(AllowShortNamespacesOnASingleLine); CHECK_PARSE_BOOL(BinPackArguments); + CHECK_PARSE_BOOL(BinPackLongBracedList); CHECK_PARSE_BOOL(BreakAdjacentStringLiterals); CHECK_PARSE_BOOL(BreakAfterJavaFieldAnnotations); CHECK_PARSE_BOOL(BreakBeforeTemplateCloser); diff --git a/clang/unittests/Format/FormatTest.cpp b/clang/unittests/Format/FormatTest.cpp index a9fddc3275aed..9b9ce35f83bc5 100644 --- a/clang/unittests/Format/FormatTest.cpp +++ b/clang/unittests/Format/FormatTest.cpp @@ -14420,6 +14420,51 @@ TEST_F(FormatTest, LayoutCxx11BraceInitializers) { "};", NoBinPacking); + NoBinPacking.BinPackLongBracedList = false; + verifyFormat("const Aaaaaa aaaaa = {aaaaa,\n" + " bbbbb,\n" + " ccccc,\n" + " ddddd,\n" + " eeeee,\n" + " ffffff,\n" + " ggggg,\n" + " hhhhhh,\n" + " iiiiii,\n" + " jjjjjj,\n" + " kkkkkk,\n" + " aaaaa,\n" + " bbbbb,\n" + " ccccc,\n" + " ddddd,\n" + " eeeee,\n" + " ffffff,\n" + " ggggg,\n" + " hhhhhh,\n" + " iiiiii};", + NoBinPacking); + verifyFormat("const Aaaaaa aaaaa = {\n" + " aaaaa,\n" + " bbbbb,\n" + " ccccc,\n" + " ddddd,\n" + " eeeee,\n" + " ffffff,\n" + " ggggg,\n" + " hhhhhh,\n" + " iiiiii,\n" + " jjjjjj,\n" + " kkkkkk,\n" + " aaaaa,\n" + " bbbbb,\n" + " ccccc,\n" + " ddddd,\n" + " eeeee,\n" + " ffffff,\n" + " ggggg,\n" + " hhhhhh,\n" + "};", + NoBinPacking); + NoBinPacking.AlignAfterOpenBracket = FormatStyle::BAS_AlwaysBreak; verifyFormat("static uint8 CddDp83848Reg[] = {\n" " CDDDP83848_BMCR_REGISTER,\n" diff --git a/clang/utils/TableGen/ClangOptionDocEmitter.cpp b/clang/utils/TableGen/ClangOptionDocEmitter.cpp index e08fb11df3100..b6c1aad90b5cb 100644 --- a/clang/utils/TableGen/ClangOptionDocEmitter.cpp +++ b/clang/utils/TableGen/ClangOptionDocEmitter.cpp @@ -109,13 +109,17 @@ Documentation extractDocumentation(const RecordKeeper &Records, // Pretend no-X and Xno-Y options are aliases of X and XY. 
std::string Name = std::string(R->getValueAsString("Name")); if (Name.size() >= 4) { - if (Name.substr(0, 3) == "no-" && OptionsByName[Name.substr(3)]) { - Aliases[OptionsByName[Name.substr(3)]].push_back(R); - continue; + if (Name.substr(0, 3) == "no-") { + if (const Record *Opt = OptionsByName[Name.substr(3)]) { + Aliases[Opt].push_back(R); + continue; + } } - if (Name.substr(1, 3) == "no-" && OptionsByName[Name[0] + Name.substr(4)]) { - Aliases[OptionsByName[Name[0] + Name.substr(4)]].push_back(R); - continue; + if (Name.substr(1, 3) == "no-") { + if (const Record *Opt = OptionsByName[Name[0] + Name.substr(4)]) { + Aliases[Opt].push_back(R); + continue; + } } } diff --git a/clang/utils/TableGen/MveEmitter.cpp b/clang/utils/TableGen/MveEmitter.cpp index e77679876a3af..014b20667e03e 100644 --- a/clang/utils/TableGen/MveEmitter.cpp +++ b/clang/utils/TableGen/MveEmitter.cpp @@ -209,9 +209,7 @@ class PointerType : public Type { Name = "const " + Name; return Name + " *"; } - std::string llvmName() const override { - return "llvm::PointerType::getUnqual(" + Pointee->llvmName() + ")"; - } + std::string llvmName() const override { return "Builder.getPtrTy()"; } const Type *getPointeeType() const { return Pointee; } static bool classof(const Type *T) { diff --git a/compiler-rt/lib/rtsan/rtsan_interceptors_posix.cpp b/compiler-rt/lib/rtsan/rtsan_interceptors_posix.cpp index 83e6cdd4a0094..410da0748b433 100644 --- a/compiler-rt/lib/rtsan/rtsan_interceptors_posix.cpp +++ b/compiler-rt/lib/rtsan/rtsan_interceptors_posix.cpp @@ -254,6 +254,27 @@ INTERCEPTOR(int, fchdir, int fd) { return REAL(fchdir)(fd); } +#if SANITIZER_INTERCEPT_READLINK +INTERCEPTOR(ssize_t, readlink, const char *pathname, char *buf, size_t size) { + __rtsan_notify_intercepted_call("readlink"); + return REAL(readlink)(pathname, buf, size); +} +#define RTSAN_MAYBE_INTERCEPT_READLINK INTERCEPT_FUNCTION(readlink) +#else +#define RTSAN_MAYBE_INTERCEPT_READLINK +#endif + +#if SANITIZER_INTERCEPT_READLINKAT +INTERCEPTOR(ssize_t, readlinkat, int dirfd, const char *pathname, char *buf, + size_t size) { + __rtsan_notify_intercepted_call("readlinkat"); + return REAL(readlinkat)(dirfd, pathname, buf, size); +} +#define RTSAN_MAYBE_INTERCEPT_READLINKAT INTERCEPT_FUNCTION(readlinkat) +#else +#define RTSAN_MAYBE_INTERCEPT_READLINKAT +#endif + // Streams INTERCEPTOR(FILE *, fopen, const char *path, const char *mode) { @@ -1402,6 +1423,8 @@ void __rtsan::InitializeInterceptors() { INTERCEPT_FUNCTION(close); INTERCEPT_FUNCTION(chdir); INTERCEPT_FUNCTION(fchdir); + RTSAN_MAYBE_INTERCEPT_READLINK; + RTSAN_MAYBE_INTERCEPT_READLINKAT; INTERCEPT_FUNCTION(fopen); RTSAN_MAYBE_INTERCEPT_FOPEN64; RTSAN_MAYBE_INTERCEPT_FREOPEN64; diff --git a/compiler-rt/lib/rtsan/tests/rtsan_test_interceptors_posix.cpp b/compiler-rt/lib/rtsan/tests/rtsan_test_interceptors_posix.cpp index 075f5974b7562..98d27caae94b8 100644 --- a/compiler-rt/lib/rtsan/tests/rtsan_test_interceptors_posix.cpp +++ b/compiler-rt/lib/rtsan/tests/rtsan_test_interceptors_posix.cpp @@ -457,6 +457,24 @@ TEST(TestRtsanInterceptors, FchdirDiesWhenRealtime) { ExpectNonRealtimeSurvival(Func); } +#if SANITIZER_INTERCEPT_READLINK +TEST(TestRtsanInterceptors, ReadlinkDiesWhenRealtime) { + char buf[1024]; + auto Func = [&buf]() { readlink("/proc/self", buf, sizeof(buf)); }; + ExpectRealtimeDeath(Func, "readlink"); + ExpectNonRealtimeSurvival(Func); +} +#endif + +#if SANITIZER_INTERCEPT_READLINKAT +TEST(TestRtsanInterceptors, ReadlinkatDiesWhenRealtime) { + char buf[1024]; + auto Func = [&buf]() { 
readlinkat(0, "/proc/self", buf, sizeof(buf)); }; + ExpectRealtimeDeath(Func, "readlinkat"); + ExpectNonRealtimeSurvival(Func); +} +#endif + TEST_F(RtsanFileTest, FopenDiesWhenRealtime) { auto Func = [this]() { FILE *f = fopen(GetTemporaryFilePath(), "w"); diff --git a/flang/CMakeLists.txt b/flang/CMakeLists.txt index 2e27bc2279ac4..c6271f1856eb9 100644 --- a/flang/CMakeLists.txt +++ b/flang/CMakeLists.txt @@ -247,6 +247,8 @@ else() include_directories(SYSTEM ${MLIR_TABLEGEN_OUTPUT_DIR}) endif() +option(FLANG_INCLUDE_RUNTIME "Build the runtime in-tree (deprecated; to be replaced with LLVM_ENABLE_RUNTIMES=flang-rt)" ON) + set(FLANG_TOOLS_INSTALL_DIR "${CMAKE_INSTALL_BINDIR}" CACHE PATH "Path for binary subdirectory (defaults to '${CMAKE_INSTALL_BINDIR}')") mark_as_advanced(FLANG_TOOLS_INSTALL_DIR) @@ -299,7 +301,7 @@ set(FLANG_DEFAULT_LINKER "" CACHE STRING "Default linker to use (linker name or absolute path, empty for platform default)") set(FLANG_DEFAULT_RTLIB "" CACHE STRING - "Default Fortran runtime library to use (\"libFortranRuntime\"), leave empty for platform default.") + "Default Fortran runtime library to use (\"libflang_rt.runtime\"), leave empty for platform default.") if (NOT(FLANG_DEFAULT_RTLIB STREQUAL "")) message(WARNING "Resetting Flang's default runtime library to use platform default.") @@ -487,7 +489,9 @@ if (FLANG_BUILD_TOOLS) add_subdirectory(tools) endif() -add_subdirectory(runtime) +if (FLANG_INCLUDE_RUNTIME) + add_subdirectory(runtime) +endif () if (LLVM_INCLUDE_EXAMPLES) add_subdirectory(examples) diff --git a/flang/cmake/modules/AddFlang.cmake b/flang/cmake/modules/AddFlang.cmake index 1f178772067ed..c9f65eb73fef0 100644 --- a/flang/cmake/modules/AddFlang.cmake +++ b/flang/cmake/modules/AddFlang.cmake @@ -57,7 +57,7 @@ function(add_flang_library name) set(LIBTYPE SHARED) elseif(ARG_STATIC) # If BUILD_SHARED_LIBS and ARG_STATIC are both set, llvm_add_library prioritizes STATIC. - # This is required behavior for libFortranFloat128Math. + # This is required behavior for libflang_rt.quadmath. set(LIBTYPE STATIC) else() # Let llvm_add_library decide, taking BUILD_SHARED_LIBS into account. diff --git a/flang/docs/Directives.md b/flang/docs/Directives.md index f356f762b13a2..c6c2e29a420ea 100644 --- a/flang/docs/Directives.md +++ b/flang/docs/Directives.md @@ -39,15 +39,22 @@ A list of non-standard directives supported by Flang * `!dir$ vector always` forces vectorization on the following loop regardless of cost model decisions. The loop must still be vectorizable. [This directive currently only works on plain do loops without labels]. +* `!dir$ unroll [n]` specifies that the compiler ought to unroll the immediately + following loop `n` times. When `n` is `0` or `1`, the loop should not be unrolled + at all. When `n` is `2` or greater, the loop should be unrolled exactly `n` + times if possible. When `n` is omitted, the compiler should attempt to fully + unroll the loop. Some compilers accept an optional `=` before the `n` when `n` + is present in the directive. Flang does not. # Directive Details ## Introduction Directives are commonly used in Fortran programs to specify additional actions to be performed by the compiler. The directives are always specified with the -`!dir$` or `cdir$` prefix. +`!dir$` or `cdir$` prefix. ## Loop Directives + Some directives are associated with the following construct, for example loop directives. 
Directives on loops are used to specify additional transformation to be performed by the compiler like enabling vectorisation, unrolling, interchange diff --git a/flang/docs/FlangDriver.md b/flang/docs/FlangDriver.md index 309c5e2024dd8..97744f0bee069 100644 --- a/flang/docs/FlangDriver.md +++ b/flang/docs/FlangDriver.md @@ -175,19 +175,18 @@ like this: ``` $ flang -v -o example example.o -"/usr/bin/ld" [...] example.o [...] "-lFortranRuntime" "-lFortranDecimal" [...] +"/usr/bin/ld" [...] example.o [...] "-lflang_rt.runtime" [...] ``` The automatically added libraries are: -* `FortranRuntime`: Provides most of the Flang runtime library. -* `FortranDecimal`: Provides operations for decimal numbers. +* `flang_rt.runtime`: Provides most of the Flang runtime library. If the code is C/C++ based and invokes Fortran routines, one can either use Clang or Flang as the linker driver. If Clang is used, it will automatically all required runtime libraries needed by C++ (e.g., for STL) to the linker invocation. -In this case, one has to explicitly provide the Fortran runtime libraries -`FortranRuntime` and/or `FortranDecimal`. An alternative is to use Flang to link. +In this case, one has to explicitly provide the Fortran runtime library +`flang_rt.runtime`. An alternative is to use Flang to link. In this case, it may be required to explicitly supply C++ runtime libraries. On Darwin, the logical root where the system libraries are located (sysroot) diff --git a/flang/docs/GettingStarted.md b/flang/docs/GettingStarted.md index 1c85a6754b155..e422a31a0b402 100644 --- a/flang/docs/GettingStarted.md +++ b/flang/docs/GettingStarted.md @@ -216,7 +216,7 @@ cmake \ -DCMAKE_CUDA_COMPILER=clang \ -DCMAKE_CUDA_HOST_COMPILER=clang++ \ ../runtime/ -make -j FortranRuntime +make -j flang-rt ``` Note that the used version of `clang` must [support](https://releases.llvm.org/16.0.0/tools/clang/docs/ReleaseNotes.html#cuda-support) @@ -239,7 +239,7 @@ cmake \ -DCMAKE_CUDA_HOST_COMPILER=clang++ \ ../runtime/ -make -j FortranRuntime +make -j flang-rt ``` Note that `nvcc` might limit support to certain @@ -294,7 +294,7 @@ cmake \ -DFLANG_OMP_DEVICE_ARCHITECTURES="all" \ ../runtime/ -make -j FortranRuntime +make -j flang-rt ``` The result of the build is a "device-only" library, i.e. the host @@ -309,7 +309,7 @@ The same set of CMake variables works for Flang in-tree build. One may provide optional CMake variables to customize the build. Available options: * `-DFLANG_RUNTIME_F128_MATH_LIB=libquadmath`: enables build of - `FortranFloat128Math` library that provides `REAL(16)` math APIs + `flang_rt.quadmath` library that provides `REAL(16)` math APIs for intrinsics such as `SIN`, `COS`, etc. GCC `libquadmath`'s header file `quadmath.h` must be available to the build compiler. [More details](Real16MathSupport.md). diff --git a/flang/docs/OpenACC-descriptor-management.md b/flang/docs/OpenACC-descriptor-management.md index 52d00ae4daef8..008c57937e23b 100644 --- a/flang/docs/OpenACC-descriptor-management.md +++ b/flang/docs/OpenACC-descriptor-management.md @@ -427,7 +427,7 @@ The implementation's behavior may be described as (OpenACC 2.7.2): All the "is-present" checks and the data actions for the auxiliary pointers must be performed atomically with regards to the present counters bookkeeping. -The API relies on the primitives provided by `liboffload`, so it is provided by a new F18 runtime library, e.g. `FortranOffloadRuntime`, that depends on `FortranRuntime` and `liboffload`. 
The F18 driver adds `FortranOffloadRuntime` for linking under `-fopenacc`/`-fopenmp` (and maybe additional switches like `-fopenmp-targets`). +The API relies on the primitives provided by `liboffload`, so it is provided by a new F18 runtime library, e.g. `FortranOffloadRuntime`, that depends on `flang_rt.runtime` and `liboffload`. The F18 driver adds `FortranOffloadRuntime` for linking under `-fopenacc`/`-fopenmp` (and maybe additional switches like `-fopenmp-targets`). ## TODOs: diff --git a/flang/docs/Real16MathSupport.md b/flang/docs/Real16MathSupport.md index 21482c7be21af..93492c8b767c3 100644 --- a/flang/docs/Real16MathSupport.md +++ b/flang/docs/Real16MathSupport.md @@ -12,9 +12,9 @@ To support most `REAL(16)` (i.e. 128-bit float) math intrinsics Flang relies on third-party libraries providing the implementation. `-DFLANG_RUNTIME_F128_MATH_LIB=libquadmath` CMake option can be used -to build `FortranFloat128Math` library that has unresolved references +to build `libflang_rt.quadmath` library that has unresolved references to GCC `libquadmath` library. A Flang driver built with this option -will automatically link `FortranFloat128Math` and `libquadmath` libraries +will automatically link `libflang_rt.quadmath` and `libquadmath` libraries to any Fortran program. This implies that `libquadmath` library has to be available in the standard library paths, so that linker can find it. The `libquadmath` library installation into Flang project diff --git a/flang/docs/ReleaseNotes.md b/flang/docs/ReleaseNotes.md index f0c956281915f..387d4b2e62e0f 100644 --- a/flang/docs/ReleaseNotes.md +++ b/flang/docs/ReleaseNotes.md @@ -36,6 +36,13 @@ page](https://llvm.org/releases/). ## Build System Changes + * The FortranRuntime library has been renamed to `flang_rt.runtime`. + + * The FortranFloat128Math library has been renamed to `flang_rt.quadmath`. + + * The CufRuntime_cuda_${version} library has been renamed to + `flang_rt.cuda_${version}`. 
+ ## New Issues Found diff --git a/flang/examples/CMakeLists.txt b/flang/examples/CMakeLists.txt index 8cc66ddbbbb0e..23c6e790791fb 100644 --- a/flang/examples/CMakeLists.txt +++ b/flang/examples/CMakeLists.txt @@ -1,4 +1,6 @@ -add_subdirectory(ExternalHelloWorld) +if (FLANG_INCLUDE_RUNTIME) + add_subdirectory(ExternalHelloWorld) +endif () add_subdirectory(PrintFlangFunctionNames) add_subdirectory(FlangOmpReport) add_subdirectory(FeatureList) diff --git a/flang/examples/ExternalHelloWorld/CMakeLists.txt b/flang/examples/ExternalHelloWorld/CMakeLists.txt index 042d4b6238ba4..b61948718a5e3 100644 --- a/flang/examples/ExternalHelloWorld/CMakeLists.txt +++ b/flang/examples/ExternalHelloWorld/CMakeLists.txt @@ -5,5 +5,5 @@ add_llvm_example(external-hello-world target_link_libraries(external-hello-world PRIVATE - FortranRuntime + flang_rt.runtime ) diff --git a/flang/include/flang/Evaluate/target.h b/flang/include/flang/Evaluate/target.h index f4595dfe4e433..ead4481c32e12 100644 --- a/flang/include/flang/Evaluate/target.h +++ b/flang/include/flang/Evaluate/target.h @@ -143,9 +143,10 @@ class TargetCharacteristics { std::string compilerOptionsString_; std::string compilerVersionString_; IeeeFeatures ieeeFeatures_{IeeeFeature::Denormal, IeeeFeature::Divide, - IeeeFeature::Flags, IeeeFeature::Inf, IeeeFeature::Io, IeeeFeature::NaN, - IeeeFeature::Rounding, IeeeFeature::Sqrt, IeeeFeature::Standard, - IeeeFeature::Subnormal, IeeeFeature::UnderflowControl}; + IeeeFeature::Flags, IeeeFeature::Halting, IeeeFeature::Inf, + IeeeFeature::Io, IeeeFeature::NaN, IeeeFeature::Rounding, + IeeeFeature::Sqrt, IeeeFeature::Standard, IeeeFeature::Subnormal, + IeeeFeature::UnderflowControl}; }; } // namespace Fortran::evaluate diff --git a/flang/include/flang/Optimizer/Builder/IntrinsicCall.h b/flang/include/flang/Optimizer/Builder/IntrinsicCall.h index 32010ae83641e..47e8a77fa6aec 100644 --- a/flang/include/flang/Optimizer/Builder/IntrinsicCall.h +++ b/flang/include/flang/Optimizer/Builder/IntrinsicCall.h @@ -406,6 +406,7 @@ struct IntrinsicLibrary { mlir::Value genSyncThreadsAnd(mlir::Type, llvm::ArrayRef); mlir::Value genSyncThreadsCount(mlir::Type, llvm::ArrayRef); mlir::Value genSyncThreadsOr(mlir::Type, llvm::ArrayRef); + void genSyncWarp(llvm::ArrayRef); fir::ExtendedValue genSystem(std::optional, mlir::ArrayRef args); void genSystemClock(llvm::ArrayRef); diff --git a/flang/include/flang/Tools/TargetSetup.h b/flang/include/flang/Tools/TargetSetup.h index 5d23df6823a94..d167f44fe2fd7 100644 --- a/flang/include/flang/Tools/TargetSetup.h +++ b/flang/include/flang/Tools/TargetSetup.h @@ -25,8 +25,6 @@ namespace Fortran::tools { const llvm::Triple &targetTriple{targetMachine.getTargetTriple()}; - targetCharacteristics.set_ieeeFeature(evaluate::IeeeFeature::Halting, true); - if (targetTriple.getArch() == llvm::Triple::ArchType::x86_64) { targetCharacteristics.set_hasSubnormalFlushingControl(/*kind=*/3); targetCharacteristics.set_hasSubnormalFlushingControl(/*kind=*/4); @@ -37,6 +35,8 @@ namespace Fortran::tools { targetCharacteristics.set_haltingSupportIsUnknownAtCompileTime(); targetCharacteristics.set_ieeeFeature( evaluate::IeeeFeature::Halting, false); + targetCharacteristics.set_ieeeFeature( + evaluate::IeeeFeature::Standard, false); targetCharacteristics.set_hasSubnormalFlushingControl(/*kind=*/3); targetCharacteristics.set_hasSubnormalFlushingControl(/*kind=*/4); targetCharacteristics.set_hasSubnormalFlushingControl(/*kind=*/8); diff --git a/flang/lib/Decimal/CMakeLists.txt b/flang/lib/Decimal/CMakeLists.txt 
index 2fd2a429d7c22..477d44e0565eb 100644 --- a/flang/lib/Decimal/CMakeLists.txt +++ b/flang/lib/Decimal/CMakeLists.txt @@ -6,89 +6,7 @@ # #===------------------------------------------------------------------------===# -if (CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR) - cmake_minimum_required(VERSION 3.20.0) - - project(FortranDecimal C CXX) - - set(CMAKE_CXX_STANDARD 17) - set(CMAKE_CXX_STANDARD_REQUIRED TRUE) - set(CMAKE_CXX_EXTENSIONS OFF) - - set(FLANG_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/../..") - - set(LLVM_COMMON_CMAKE_UTILS "${FLANG_SOURCE_DIR}/../cmake") - set(LLVM_CMAKE_UTILS "${FLANG_SOURCE_DIR}/../llvm/cmake") - set(CLANG_CMAKE_UTILS "${FLANG_SOURCE_DIR}/../clang/cmake") - - # Add path for custom modules - list(INSERT CMAKE_MODULE_PATH 0 - "${FLANG_SOURCE_DIR}/cmake" - "${FLANG_SOURCE_DIR}/cmake/modules" - "${LLVM_COMMON_CMAKE_UTILS}" - "${LLVM_COMMON_CMAKE_UTILS}/Modules" - "${LLVM_CMAKE_UTILS}" - "${LLVM_CMAKE_UTILS}/modules" - "${CLANG_CMAKE_UTILS}/modules" - ) - - include(AddClang) - include(AddLLVM) - include(AddFlang) - include(HandleLLVMOptions) - - include(TestBigEndian) - test_big_endian(IS_BIGENDIAN) - if (IS_BIGENDIAN) - add_compile_definitions(FLANG_BIG_ENDIAN=1) - else () - add_compile_definitions(FLANG_LITTLE_ENDIAN=1) - endif () - include_directories(BEFORE - ${FLANG_SOURCE_DIR}/include) -endif() - -check_cxx_compiler_flag(-fno-lto FLANG_RUNTIME_HAS_FNO_LTO_FLAG) -if (FLANG_RUNTIME_HAS_FNO_LTO_FLAG) - append("-fno-lto" CMAKE_CXX_FLAGS) -endif() - -# Disable libstdc++ assertions, even in an LLVM_ENABLE_ASSERTIONS build, to -# avoid an unwanted dependency on libstdc++.so. -add_definitions(-U_GLIBCXX_ASSERTIONS) - -set(sources +add_flang_library(FortranDecimal binary-to-decimal.cpp decimal-to-binary.cpp ) - -include(AddFlangOffloadRuntime) -enable_cuda_compilation(FortranDecimal "${sources}") -enable_omp_offload_compilation("${sources}") - -add_flang_library(FortranDecimal INSTALL_WITH_TOOLCHAIN ${sources}) - -if (DEFINED MSVC) - set(CMAKE_MSVC_RUNTIME_LIBRARY MultiThreaded) - add_flang_library(FortranDecimal.static INSTALL_WITH_TOOLCHAIN - binary-to-decimal.cpp - decimal-to-binary.cpp - ) - set(CMAKE_MSVC_RUNTIME_LIBRARY MultiThreadedDLL) - add_flang_library(FortranDecimal.dynamic INSTALL_WITH_TOOLCHAIN - binary-to-decimal.cpp - decimal-to-binary.cpp - ) - set(CMAKE_MSVC_RUNTIME_LIBRARY MultiThreadedDebug) - add_flang_library(FortranDecimal.static_dbg INSTALL_WITH_TOOLCHAIN - binary-to-decimal.cpp - decimal-to-binary.cpp - ) - set(CMAKE_MSVC_RUNTIME_LIBRARY MultiThreadedDebugDLL) - add_flang_library(FortranDecimal.dynamic_dbg INSTALL_WITH_TOOLCHAIN - binary-to-decimal.cpp - decimal-to-binary.cpp - ) - add_dependencies(FortranDecimal FortranDecimal.static FortranDecimal.dynamic - FortranDecimal.static_dbg FortranDecimal.dynamic_dbg) -endif() diff --git a/flang/lib/Lower/Bridge.cpp b/flang/lib/Lower/Bridge.cpp index a31629b17cf29..36e58e456dea3 100644 --- a/flang/lib/Lower/Bridge.cpp +++ b/flang/lib/Lower/Bridge.cpp @@ -63,6 +63,7 @@ #include "flang/Semantics/tools.h" #include "flang/Support/Version.h" #include "mlir/Dialect/ControlFlow/IR/ControlFlowOps.h" +#include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/Matchers.h" #include "mlir/IR/PatternMatch.h" #include "mlir/Parser/Parser.h" @@ -2170,11 +2171,38 @@ class FirConverter : public Fortran::lower::AbstractConverter { return builder->createIntegerConstant(loc, controlType, 1); // step } + // For unroll directives without a value, force full unrolling. 
+ // For unroll directives with a value, if the value is greater than 1, + // force unrolling with the given factor. Otherwise, disable unrolling. + mlir::LLVM::LoopUnrollAttr + genLoopUnrollAttr(std::optional directiveArg) { + mlir::BoolAttr falseAttr = + mlir::BoolAttr::get(builder->getContext(), false); + mlir::BoolAttr trueAttr = mlir::BoolAttr::get(builder->getContext(), true); + mlir::IntegerAttr countAttr; + mlir::BoolAttr fullUnrollAttr; + bool shouldUnroll = true; + if (directiveArg.has_value()) { + auto unrollingFactor = directiveArg.value(); + if (unrollingFactor == 0 || unrollingFactor == 1) { + shouldUnroll = false; + } else { + countAttr = + builder->getIntegerAttr(builder->getI64Type(), unrollingFactor); + } + } else { + fullUnrollAttr = trueAttr; + } + + mlir::BoolAttr disableAttr = shouldUnroll ? falseAttr : trueAttr; + return mlir::LLVM::LoopUnrollAttr::get( + builder->getContext(), /*disable=*/disableAttr, /*count=*/countAttr, {}, + /*full=*/fullUnrollAttr, {}, {}, {}); + } + void addLoopAnnotationAttr( IncrementLoopInfo &info, llvm::SmallVectorImpl &dirs) { - mlir::BoolAttr f = mlir::BoolAttr::get(builder->getContext(), false); - mlir::BoolAttr t = mlir::BoolAttr::get(builder->getContext(), true); mlir::LLVM::LoopVectorizeAttr va; mlir::LLVM::LoopUnrollAttr ua; bool has_attrs = false; @@ -2182,20 +2210,15 @@ class FirConverter : public Fortran::lower::AbstractConverter { Fortran::common::visit( Fortran::common::visitors{ [&](const Fortran::parser::CompilerDirective::VectorAlways &) { + mlir::BoolAttr falseAttr = + mlir::BoolAttr::get(builder->getContext(), false); va = mlir::LLVM::LoopVectorizeAttr::get(builder->getContext(), - /*disable=*/f, {}, {}, - {}, {}, {}, {}); + /*disable=*/falseAttr, + {}, {}, {}, {}, {}, {}); has_attrs = true; }, [&](const Fortran::parser::CompilerDirective::Unroll &u) { - mlir::IntegerAttr countAttr; - if (u.v.has_value()) { - countAttr = builder->getIntegerAttr(builder->getI64Type(), - u.v.value()); - } - ua = mlir::LLVM::LoopUnrollAttr::get( - builder->getContext(), /*disable=*/f, /*count*/ countAttr, - {}, /*full*/ u.v.has_value() ? f : t, {}, {}, {}); + ua = genLoopUnrollAttr(u.v); has_attrs = true; }, [&](const auto &) {}}, diff --git a/flang/lib/Optimizer/Builder/IntrinsicCall.cpp b/flang/lib/Optimizer/Builder/IntrinsicCall.cpp index a6a77dd58677b..9a80e36efe837 100644 --- a/flang/lib/Optimizer/Builder/IntrinsicCall.cpp +++ b/flang/lib/Optimizer/Builder/IntrinsicCall.cpp @@ -680,6 +680,7 @@ static constexpr IntrinsicHandler handlers[]{ {"syncthreads_and", &I::genSyncThreadsAnd, {}, /*isElemental=*/false}, {"syncthreads_count", &I::genSyncThreadsCount, {}, /*isElemental=*/false}, {"syncthreads_or", &I::genSyncThreadsOr, {}, /*isElemental=*/false}, + {"syncwarp", &I::genSyncWarp, {}, /*isElemental=*/false}, {"system", &I::genSystem, {{{"command", asBox}, {"exitstat", asBox, handleDynamicOptional}}}, @@ -808,8 +809,8 @@ prettyPrintIntrinsicName(fir::FirOpBuilder &builder, mlir::Location loc, // Generate a call to the Fortran runtime library providing // support for 128-bit float math. // On 'HAS_LDBL128' targets the implementation -// is provided by FortranRuntime, otherwise, it is done via -// FortranFloat128Math library. In the latter case the compiler +// is provided by flang_rt, otherwise, it is done via the +// libflang_rt.quadmath library. In the latter case the compiler // has to be built with FLANG_RUNTIME_F128_MATH_LIB to guarantee // proper linking actions in the driver. 
static mlir::Value genLibF128Call(fir::FirOpBuilder &builder, @@ -7704,6 +7705,18 @@ IntrinsicLibrary::genSyncThreadsOr(mlir::Type resultType, return builder.create(loc, funcOp, args).getResult(0); } +// SYNCWARP +void IntrinsicLibrary::genSyncWarp(llvm::ArrayRef args) { + assert(args.size() == 1); + constexpr llvm::StringLiteral funcName = "llvm.nvvm.bar.warp.sync"; + mlir::Value mask = fir::getBase(args[0]); + mlir::FunctionType funcType = + mlir::FunctionType::get(builder.getContext(), {mask.getType()}, {}); + auto funcOp = builder.createFunction(loc, funcName, funcType); + llvm::SmallVector argsList{mask}; + builder.create(loc, funcOp, argsList); +} + // SYSTEM fir::ExtendedValue IntrinsicLibrary::genSystem(std::optional resultType, diff --git a/flang/lib/Semantics/check-declarations.cpp b/flang/lib/Semantics/check-declarations.cpp index 5c26469b9fa24..bf4dc16a15b4a 100644 --- a/flang/lib/Semantics/check-declarations.cpp +++ b/flang/lib/Semantics/check-declarations.cpp @@ -989,9 +989,9 @@ void CheckHelper::CheckObjectEntity( } break; case common::CUDADataAttr::Device: - if (isComponent && !IsAllocatable(symbol)) { + if (isComponent && !IsAllocatable(symbol) && !IsPointer(symbol)) { messages_.Say( - "Component '%s' with ATTRIBUTES(DEVICE) must also be allocatable"_err_en_US, + "Component '%s' with ATTRIBUTES(DEVICE) must also be allocatable or pointer"_err_en_US, symbol.name()); } break; diff --git a/flang/module/cudadevice.f90 b/flang/module/cudadevice.f90 index 47526bccd98fe..45b9f2c838638 100644 --- a/flang/module/cudadevice.f90 +++ b/flang/module/cudadevice.f90 @@ -49,7 +49,7 @@ attributes(device) integer function syncthreads_or(value) public :: syncthreads_or interface - attributes(device) subroutine syncwarp(mask) bind(c, name='__syncwarp') + attributes(device) subroutine syncwarp(mask) integer, value :: mask end subroutine end interface diff --git a/flang/runtime/CMakeLists.txt b/flang/runtime/CMakeLists.txt index bf27a121e4d17..7cc720e2df9af 100644 --- a/flang/runtime/CMakeLists.txt +++ b/flang/runtime/CMakeLists.txt @@ -59,7 +59,7 @@ if (CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR) ) endif() -set(linked_libraries FortranDecimal) +set(linked_libraries "") # function checks find_package(Backtrace) @@ -116,6 +116,8 @@ add_definitions(-U_LIBCPP_ENABLE_ASSERTIONS) add_subdirectory(Float128Math) set(sources + ${FLANG_SOURCE_DIR}/lib/Decimal/binary-to-decimal.cpp + ${FLANG_SOURCE_DIR}/lib/Decimal/decimal-to-binary.cpp ISO_Fortran_binding.cpp allocator-registry.cpp allocatable.cpp @@ -239,13 +241,13 @@ set(supported_files utf.cpp ) -enable_cuda_compilation(FortranRuntime "${supported_files}") +enable_cuda_compilation(flang_rt "${supported_files}") enable_omp_offload_compilation("${supported_files}") -if (NOT TARGET FortranFloat128Math) - # If FortranFloat128Math is not defined, then we are not building - # standalone FortranFloat128Math library. Instead, include - # the relevant sources into FortranRuntime itself. +if (NOT TARGET flang_rt.quadmath) + # If flang_rt.quadmath is not defined, then we are not building + # standalone flang_rt.quadmath library. Instead, include + # the relevant sources into flang_rt.runtime itself. # The information is provided via FortranFloat128MathILib # interface library. 
get_target_property(f128_sources @@ -273,7 +275,7 @@ if (NOT TARGET FortranFloat128Math) endif() if (NOT DEFINED MSVC) - add_flang_library(FortranRuntime + add_flang_library(flang_rt.runtime ${sources} LINK_LIBS ${linked_libraries} @@ -281,41 +283,36 @@ if (NOT DEFINED MSVC) INSTALL_WITH_TOOLCHAIN ) else() - add_flang_library(FortranRuntime + add_flang_library(flang_rt.runtime ${sources} LINK_LIBS ${linked_libraries} ) set(CMAKE_MSVC_RUNTIME_LIBRARY MultiThreaded) - add_flang_library(FortranRuntime.static ${sources} - LINK_LIBS - FortranDecimal.static + add_flang_library(flang_rt.runtime.static ${sources} INSTALL_WITH_TOOLCHAIN) - set_target_properties(FortranRuntime.static PROPERTIES FOLDER "Flang/Runtime Libraries") + set_target_properties(flang_rt.runtime.static PROPERTIES FOLDER "Flang/Runtime Libraries") set(CMAKE_MSVC_RUNTIME_LIBRARY MultiThreadedDLL) - add_flang_library(FortranRuntime.dynamic ${sources} - LINK_LIBS - FortranDecimal.dynamic + add_flang_library(flang_rt.runtime.dynamic ${sources} INSTALL_WITH_TOOLCHAIN) - set_target_properties(FortranRuntime.dynamic PROPERTIES FOLDER "Flang/Runtime Libraries") + set_target_properties(flang_rt.runtime.dynamic PROPERTIES FOLDER "Flang/Runtime Libraries") set(CMAKE_MSVC_RUNTIME_LIBRARY MultiThreadedDebug) - add_flang_library(FortranRuntime.static_dbg ${sources} - LINK_LIBS - FortranDecimal.static_dbg + add_flang_library(flang_rt.runtime.static_dbg ${sources} INSTALL_WITH_TOOLCHAIN) - set_target_properties(FortranRuntime.static_dbg PROPERTIES FOLDER "Flang/Runtime Libraries") + set_target_properties(flang_rt.runtime.static_dbg PROPERTIES FOLDER "Flang/Runtime Libraries") set(CMAKE_MSVC_RUNTIME_LIBRARY MultiThreadedDebugDLL) - add_flang_library(FortranRuntime.dynamic_dbg ${sources} - LINK_LIBS - FortranDecimal.dynamic_dbg + add_flang_library(flang_rt.runtime.dynamic_dbg ${sources} INSTALL_WITH_TOOLCHAIN) - set_target_properties(FortranRuntime.dynamic_dbg PROPERTIES FOLDER "Flang/Runtime Libraries") - add_dependencies(FortranRuntime FortranRuntime.static FortranRuntime.dynamic - FortranRuntime.static_dbg FortranRuntime.dynamic_dbg) + set_target_properties(flang_rt.runtime.dynamic_dbg PROPERTIES FOLDER "Flang/Runtime Libraries") + add_dependencies(flang_rt.runtime + flang_rt.runtime.static + flang_rt.runtime.dynamic + flang_rt.runtime.static_dbg + flang_rt.runtime.dynamic_dbg) endif() -set_target_properties(FortranRuntime PROPERTIES FOLDER "Flang/Runtime Libraries") +set_target_properties(flang_rt.runtime PROPERTIES FOLDER "Flang/Runtime Libraries") -# If FortranRuntime is part of a Flang build (and not a separate build) then +# If flang_rt is part of a Flang build (and not a separate build) then # add dependency to make sure that Fortran runtime library is being built after # we have the Flang compiler available. This also includes the MODULE files # that compile when the 'flang' target is built. @@ -323,9 +320,21 @@ set_target_properties(FortranRuntime PROPERTIES FOLDER "Flang/Runtime Libraries" # TODO: This is a workaround and should be updated when runtime build procedure # is changed to a regular runtime build. See discussion in PR #95388. if (TARGET flang AND TARGET module_files) - add_dependencies(FortranRuntime flang module_files) + add_dependencies(flang_rt.runtime flang module_files) endif() if (FLANG_CUF_RUNTIME) add_subdirectory(CUDA) endif() + +# Compatibility targets. 
+add_custom_target(flang-rt) +add_dependencies(flang-rt flang_rt.runtime) +if (TARGET flang_rt.quadmath) + add_dependencies(flang-rt flang_rt.quadmath) +endif () +if (TARGET flang_rt.cuda_${CUDAToolkit_VERSION_MAJOR}) + add_dependencies(flang-rt flang_rt.cuda_${CUDAToolkit_VERSION_MAJOR}) +endif () +add_custom_target(FortranRuntime) +add_dependencies(FortranRuntime flang_rt.runtime) diff --git a/flang/runtime/CUDA/CMakeLists.txt b/flang/runtime/CUDA/CMakeLists.txt index bfbae58086c1f..1fd3bf22a83cf 100644 --- a/flang/runtime/CUDA/CMakeLists.txt +++ b/flang/runtime/CUDA/CMakeLists.txt @@ -8,10 +8,10 @@ include_directories(${CUDAToolkit_INCLUDE_DIRS}) -# libCufRuntime depends on a certain version of CUDA. To be able to have +# libflang_rt.cuda depends on a certain version of CUDA. To be able to have # multiple build of this library with different CUDA version, the version is # added to the library name. -set(CUFRT_LIBNAME CufRuntime_cuda_${CUDAToolkit_VERSION_MAJOR}) +set(CUFRT_LIBNAME flang_rt.cuda_${CUDAToolkit_VERSION_MAJOR}) add_flang_library(${CUFRT_LIBNAME} allocator.cpp @@ -33,6 +33,6 @@ endif() target_link_libraries(${CUFRT_LIBNAME} PRIVATE - FortranRuntime + flang_rt.runtime ${CUDA_RT_TARGET} ) diff --git a/flang/runtime/Float128Math/CMakeLists.txt b/flang/runtime/Float128Math/CMakeLists.txt index 703f85fcaf8da..3c382d16a21cd 100644 --- a/flang/runtime/Float128Math/CMakeLists.txt +++ b/flang/runtime/Float128Math/CMakeLists.txt @@ -12,7 +12,7 @@ # It is distributed as a static library only. # Fortran programs/libraries that end up linking any of the provided # will have a dependency on the third-party library that is being -# used for building this FortranFloat128Math library. +# used for building this flang_rt.quadmath library. include(CheckLibraryExists) @@ -93,20 +93,20 @@ if (FLANG_RUNTIME_F128_MATH_LIB) ) endif() - add_flang_library(FortranFloat128Math STATIC INSTALL_WITH_TOOLCHAIN + add_flang_library(flang_rt.quadmath STATIC INSTALL_WITH_TOOLCHAIN ${sources}) if (DEFINED MSVC) set(CMAKE_MSVC_RUNTIME_LIBRARY MultiThreaded) - add_flang_library(FortranFloat128Math.static STATIC INSTALL_WITH_TOOLCHAIN + add_flang_library(flang_rt.quadmath.static STATIC INSTALL_WITH_TOOLCHAIN ${sources} ) set(CMAKE_MSVC_RUNTIME_LIBRARY MultiThreadedDebug) - add_flang_library(FortranFloat128Math.static_dbg STATIC INSTALL_WITH_TOOLCHAIN + add_flang_library(flang_rt.quadmath.static_dbg STATIC INSTALL_WITH_TOOLCHAIN ${sources} ) - add_dependencies(FortranFloat128Math FortranFloat128Math.static - FortranFloat128Math.static_dbg + add_dependencies(flang_rt.quadmath flang_rt.quadmath.static + flang_rt.quadmath.static_dbg ) endif() elseif (HAVE_LDBL_MANT_DIG_113) @@ -118,7 +118,7 @@ elseif (HAVE_LDBL_MANT_DIG_113) ) target_sources(FortranFloat128MathILib INTERFACE ${sources}) else() - message(FATAL_ERROR "FortranRuntime cannot build without libm") + message(FATAL_ERROR "flang_rt.quadmath cannot build without libm") endif() else() # We can use '__float128' version from libc, if it has them. diff --git a/flang/runtime/time-intrinsic.cpp b/flang/runtime/time-intrinsic.cpp index e6f6e81c7b50c..942604a92aaad 100644 --- a/flang/runtime/time-intrinsic.cpp +++ b/flang/runtime/time-intrinsic.cpp @@ -62,7 +62,7 @@ template double GetCpuTime(fallback_implementation) { #if defined __MINGW32__ // clock_gettime is implemented in the pthread library for MinGW. 
-// Using it here would mean that all programs that link libFortranRuntime are +// Using it here would mean that all programs that link libflang_rt are // required to also link to pthread. Instead, don't use the function. #undef CLOCKID_CPU_TIME #undef CLOCKID_ELAPSED_TIME diff --git a/flang/runtime/tools.h b/flang/runtime/tools.h index facbd23161057..75544098d47ab 100644 --- a/flang/runtime/tools.h +++ b/flang/runtime/tools.h @@ -348,7 +348,7 @@ inline RT_API_ATTRS RESULT ApplyFloatingPointKind( if constexpr (HasCppTypeFor) { // If FUNC implemenation relies on FP math functions, // then we should not be here. The compiler should have - // generated a call to an entry in FortranFloat128Math + // generated a call to an entry in flang_rt.quadmath // library. if constexpr (!NEEDSMATH) { return FUNC<16>{}(std::forward(x)...); diff --git a/flang/test/CMakeLists.txt b/flang/test/CMakeLists.txt index cab214c2ef4c8..3fac8717e9bd9 100644 --- a/flang/test/CMakeLists.txt +++ b/flang/test/CMakeLists.txt @@ -71,9 +71,13 @@ set(FLANG_TEST_DEPENDS llvm-objdump llvm-readobj split-file - FortranRuntime FortranDecimal ) + +if (FLANG_INCLUDE_RUNTIME) + list(APPEND FLANG_TEST_DEPENDS flang_rt.runtime) +endif () + if (LLVM_ENABLE_PLUGINS AND NOT WIN32) list(APPEND FLANG_TEST_DEPENDS Bye) endif() @@ -120,3 +124,9 @@ if (DEFINED FLANG_TEST_TARGET_TRIPLE) "to use FLANG_TEST_TARGET_TRIPLE.") endif() endif() + +# Compatibility targets. +if (FLANG_INCLUDE_RUNTIME) + add_custom_target(check-flang-rt) + add_dependencies(check-flang-rt check-flang) +endif () diff --git a/flang/test/Driver/ctofortran.f90 b/flang/test/Driver/ctofortran.f90 index 78eac32133b18..10c7adaccc958 100644 --- a/flang/test/Driver/ctofortran.f90 +++ b/flang/test/Driver/ctofortran.f90 @@ -1,4 +1,5 @@ ! UNSUPPORTED: system-windows +! REQUIRES: flang-rt ! RUN: split-file %s %t ! RUN: chmod +x %t/runtest.sh ! RUN: %t/runtest.sh %t %t/ffile.f90 %t/cfile.c %flang | FileCheck %s diff --git a/flang/test/Driver/exec.f90 b/flang/test/Driver/exec.f90 index fd174005ddf62..9ca91ee24011c 100644 --- a/flang/test/Driver/exec.f90 +++ b/flang/test/Driver/exec.f90 @@ -1,4 +1,5 @@ ! UNSUPPORTED: system-windows +! REQUIRES: flang-rt ! Verify that flang can correctly build executables. ! RUN: %flang %s -o %t diff --git a/flang/test/Driver/gcc-toolchain-install-dir.f90 b/flang/test/Driver/gcc-toolchain-install-dir.f90 index 5a073b0c51712..e195bdde6d2c9 100644 --- a/flang/test/Driver/gcc-toolchain-install-dir.f90 +++ b/flang/test/Driver/gcc-toolchain-install-dir.f90 @@ -1,5 +1,5 @@ !! Test that --gcc-toolchain and --gcc-install-dir options are working as expected. -!! It does not test cross-compiling (--sysroot), so crtbegin.o, libgcc/compiler-rt, libc, libFortranRuntime, etc. are not supposed to be affected. +!! It does not test cross-compiling (--sysroot), so crtbegin.o, libgcc/compiler-rt, libc, libflang_rt.runtime, etc. are not supposed to be affected. !! PREFIX is captured twice because the driver escapes backslashes (occuring in Windows paths) in the -### output, but not on the "Selected GCC installation:" line. ! 
RUN: %flang 2>&1 -### -v -o %t %s -no-integrated-as -fuse-ld=ld --target=i386-unknown-linux-gnu --gcc-install-dir=%S/Inputs/basic_cross_linux_tree/usr/lib/gcc/i386-unknown-linux-gnu/10.2.0 | FileCheck %s --check-prefix=CHECK-I386 diff --git a/flang/test/Driver/linker-flags.f90 b/flang/test/Driver/linker-flags.f90 index b998cbaa6227c..4e62a8c32d360 100644 --- a/flang/test/Driver/linker-flags.f90 +++ b/flang/test/Driver/linker-flags.f90 @@ -29,40 +29,37 @@ ! executable and may find the GNU linker from MinGW or Cygwin. ! UNIX-LABEL: "{{.*}}ld{{(\.exe)?}}" ! UNIX-SAME: "[[object_file]]" -! UNIX-F128NONE-NOT: FortranFloat128Math -! SOLARIS-F128NONE-NOT: FortranFloat128Math -! UNIX-F128LIBQUADMATH-SAME: "-lFortranFloat128Math" "--as-needed" "-lquadmath" "--no-as-needed" -! SOLARIS-F128LIBQUADMATH-SAME: "-lFortranFloat128Math" "-z" "ignore" "-lquadmath" "-z" "record" -! UNIX-SAME: "-lFortranRuntime" "-lFortranDecimal" "-lm" +! UNIX-F128NONE-NOT: lang_rt.quadmath +! SOLARIS-F128NONE-NOT: flang_rt.quadmath +! UNIX-F128LIBQUADMATH-SAME: "-lflang_rt.quadmath" "--as-needed" "-lquadmath" "--no-as-needed" +! SOLARIS-F128LIBQUADMATH-SAME: "-lflang_rt.quadmath" "-z" "ignore" "-lquadmath" "-z" "record" +! UNIX-SAME: "-lflang_rt.runtime" "-lm" ! COMPILER-RT: "{{.*}}{{\\|/}}libclang_rt.builtins.a" ! BSD-LABEL: "{{.*}}ld{{(\.exe)?}}" ! BSD-SAME: "[[object_file]]" -! BSD-F128NONE-NOT: FortranFloat128Math -! BSD-F128LIBQUADMATH-SAME: "-lFortranFloat128Math" "--as-needed" "-lquadmath" "--no-as-needed" -! BSD-SAME: -lFortranRuntime -! BSD-SAME: -lFortranDecimal +! BSD-F128NONE-NOT: flang_rt.quadmath +! BSD-F128LIBQUADMATH-SAME: "-lflang_rt.quadmath" "--as-needed" "-lquadmath" "--no-as-needed" +! BSD-SAME: -lflang_rt.runtime ! BSD-SAME: -lexecinfo ! DARWIN-LABEL: "{{.*}}ld{{(\.exe)?}}" ! DARWIN-SAME: "[[object_file]]" -! DARWIN-F128NONE-NOT: FortranFloat128Math -! DARWIN-F128LIBQUADMATH-SAME: "-lFortranFloat128Math" "--as-needed" "-lquadmath" "--no-as-needed" -! DARWIN-SAME: -lFortranRuntime -! DARWIN-SAME: -lFortranDecimal +! DARWIN-F128NONE-NOT: libflang_rt.quadmath +! DARWIN-F128LIBQUADMATH-SAME: "-lflang_rt.quadmath" "--as-needed" "-lquadmath" "--no-as-needed" +! DARWIN-SAME: -lflang_rt.runtime ! HAIKU-LABEL: "{{.*}}ld{{(\.exe)?}}" ! HAIKU-SAME: "[[object_file]]" -! HAIKU-F128NONE-NOT: FortranFloat128Math -! HAIKU-F128LIBQUADMATH-SAME: "-lFortranFloat128Math" "--as-needed" "-lquadmath" "--no-as-needed" -! HAIKU-SAME: "-lFortranRuntime" "-lFortranDecimal" +! HAIKU-F128NONE-NOT: libflang_rt.quadmath +! HAIKU-F128LIBQUADMATH-SAME: "-lflang_rt.quadmath" "--as-needed" "-lquadmath" "--no-as-needed" +! HAIKU-SAME: "-lflang_rt.runtime" ! MINGW-LABEL: "{{.*}}ld{{(\.exe)?}}" ! MINGW-SAME: "[[object_file]]" -! MINGW-F128NONE-NOT: FortranFloat128Math -! MINGW-F128LIBQUADMATH-SAME: "-lFortranFloat128Math" "--as-needed" "-lquadmath" "--no-as-needed" -! MINGW-SAME: -lFortranRuntime -! MINGW-SAME: -lFortranDecimal +! MINGW-F128NONE-NOT: libflang_rt.quadmath +! MINGW-F128LIBQUADMATH-SAME: "-lflang_rt.quadmath" "--as-needed" "-lquadmath" "--no-as-needed" +! MINGW-SAME: -lflang_rt.runtime ! NOTE: This also matches lld-link (when CLANG_DEFAULT_LINKER=lld) and ! any .exe suffix that is added when resolving to the full path of diff --git a/flang/test/Driver/msvc-dependent-lib-flags.f90 b/flang/test/Driver/msvc-dependent-lib-flags.f90 index 765917f07d8e7..641c73912c4d1 100644 --- a/flang/test/Driver/msvc-dependent-lib-flags.f90 +++ b/flang/test/Driver/msvc-dependent-lib-flags.f90 @@ -7,24 +7,21 @@ ! 
MSVC-SAME: --dependent-lib=clang_rt.builtins.lib ! MSVC-SAME: -D_MT ! MSVC-SAME: --dependent-lib=libcmt -! MSVC-SAME: --dependent-lib=FortranRuntime.static.lib -! MSVC-SAME: --dependent-lib=FortranDecimal.static.lib +! MSVC-SAME: --dependent-lib=flang_rt.runtime.static.lib ! MSVC-DEBUG: -fc1 ! MSVC-DEBUG-SAME: --dependent-lib=clang_rt.builtins.lib ! MSVC-DEBUG-SAME: -D_MT ! MSVC-DEBUG-SAME: -D_DEBUG ! MSVC-DEBUG-SAME: --dependent-lib=libcmtd -! MSVC-DEBUG-SAME: --dependent-lib=FortranRuntime.static_dbg.lib -! MSVC-DEBUG-SAME: --dependent-lib=FortranDecimal.static_dbg.lib +! MSVC-DEBUG-SAME: --dependent-lib=flang_rt.runtime.static_dbg.lib ! MSVC-DLL: -fc1 ! MSVC-DLL-SAME: --dependent-lib=clang_rt.builtins.lib ! MSVC-DLL-SAME: -D_MT ! MSVC-DLL-SAME: -D_DLL ! MSVC-DLL-SAME: --dependent-lib=msvcrt -! MSVC-DLL-SAME: --dependent-lib=FortranRuntime.dynamic.lib -! MSVC-DLL-SAME: --dependent-lib=FortranDecimal.dynamic.lib +! MSVC-DLL-SAME: --dependent-lib=flang_rt.runtime.dynamic.lib ! MSVC-DLL-DEBUG: -fc1 ! MSVC-DLL-DEBUG-SAME: --dependent-lib=clang_rt.builtins.lib @@ -32,5 +29,4 @@ ! MSVC-DLL-DEBUG-SAME: -D_DEBUG ! MSVC-DLL-DEBUG-SAME: -D_DLL ! MSVC-DLL-DEBUG-SAME: --dependent-lib=msvcrtd -! MSVC-DLL-DEBUG-SAME: --dependent-lib=FortranRuntime.dynamic_dbg.lib -! MSVC-DLL-DEBUG-SAME: --dependent-lib=FortranDecimal.dynamic_dbg.lib +! MSVC-DLL-DEBUG-SAME: --dependent-lib=flang_rt.runtime.dynamic_dbg.lib diff --git a/flang/test/Driver/nostdlib.f90 b/flang/test/Driver/nostdlib.f90 index cd707e632a45f..dc23be6462376 100644 --- a/flang/test/Driver/nostdlib.f90 +++ b/flang/test/Driver/nostdlib.f90 @@ -24,6 +24,5 @@ ! in certain cases. But it is not clear that it is worth checking for each ! platform individually. -! CHECK-NOT: "-lFortranRuntime" -! CHECK-NOT: "-lFortranDecimal" +! CHECK-NOT: "-lflang_rt.runtime" ! CHECK-NOT: "-lgcc" diff --git a/flang/test/Evaluate/fold-ieee.f90 b/flang/test/Evaluate/fold-ieee.f90 index 99f8526fd23db..a393fcc6b4297 100644 --- a/flang/test/Evaluate/fold-ieee.f90 +++ b/flang/test/Evaluate/fold-ieee.f90 @@ -54,9 +54,6 @@ module m logical, parameter :: test_sq_all = ieee_support_sqrt() logical, parameter :: test_sq_4 = ieee_support_sqrt(1.) logical, parameter :: test_sq_8 = ieee_support_sqrt(1.d0) - logical, parameter :: test_std_all = ieee_support_standard() - logical, parameter :: test_std_4 = ieee_support_standard(1.) - logical, parameter :: test_std_8 = ieee_support_standard(1.d0) logical, parameter :: test_sn_all = ieee_support_subnormal() logical, parameter :: test_sn_4 = ieee_support_subnormal(1.) logical, parameter :: test_sn_8 = ieee_support_subnormal(1.d0) @@ -64,5 +61,8 @@ module m logical, parameter :: test_uc_all = .not. ieee_support_underflow_control() logical, parameter :: test_uc_4 = ieee_support_underflow_control(1.) logical, parameter :: test_uc_8 = ieee_support_underflow_control(1.d0) + logical, parameter :: test_std_all = ieee_support_standard() + logical, parameter :: test_std_4 = ieee_support_standard(1.) + logical, parameter :: test_std_8 = ieee_support_standard(1.d0) #endif end diff --git a/flang/test/Evaluate/folding18.f90 b/flang/test/Evaluate/folding18.f90 index 52aeb6a3532d0..a27eeabefae55 100644 --- a/flang/test/Evaluate/folding18.f90 +++ b/flang/test/Evaluate/folding18.f90 @@ -51,6 +51,7 @@ module m .and. ieee_support_sqrt(1.0_8) & .and. ieee_support_sqrt(1.0_10) & .and. ieee_support_sqrt(1.0_16) +#if __x86_64__ logical, parameter :: test_ieee_support_standard = ieee_support_standard() & .and. ieee_support_standard(1.0_2) & .and. 
ieee_support_standard(1.0_3) & @@ -58,6 +59,7 @@ module m .and. ieee_support_standard(1.0_8) & .and. ieee_support_standard(1.0_10) & .and. ieee_support_standard(1.0_16) +#endif logical, parameter :: test_ieee_support_subnormal = ieee_support_subnormal() & .and. ieee_support_subnormal(1.0_2) & .and. ieee_support_subnormal(1.0_3) & diff --git a/flang/test/Integration/unroll.f90 b/flang/test/Integration/unroll.f90 index 9d69605e10d1b..aa47e465b63fc 100644 --- a/flang/test/Integration/unroll.f90 +++ b/flang/test/Integration/unroll.f90 @@ -3,14 +3,47 @@ ! CHECK-LABEL: unroll_dir subroutine unroll_dir integer :: a(10) - !dir$ unroll - ! CHECK: br i1 {{.*}}, label {{.*}}, label {{.*}}, !llvm.loop ![[ANNOTATION:.*]] + !dir$ unroll + ! CHECK: br i1 {{.*}}, label {{.*}}, label {{.*}}, !llvm.loop ![[UNROLL_ENABLE_FULL_ANNO:.*]] do i=1,10 - a(i)=i + a(i)=i end do end subroutine unroll_dir -! CHECK: ![[ANNOTATION]] = distinct !{![[ANNOTATION]], ![[UNROLL:.*]], ![[UNROLL_FULL:.*]]} -! CHECK: ![[UNROLL]] = !{!"llvm.loop.unroll.enable"} -! CHECK: ![[UNROLL_FULL]] = !{!"llvm.loop.unroll.full"} +! CHECK-LABEL: unroll_dir_0 +subroutine unroll_dir_0 + integer :: a(10) + !dir$ unroll 0 + ! CHECK: br i1 {{.*}}, label {{.*}}, label {{.*}}, !llvm.loop ![[UNROLL_DISABLE_ANNO:.*]] + do i=1,10 + a(i)=i + end do +end subroutine unroll_dir_0 + +! CHECK-LABEL: unroll_dir_1 +subroutine unroll_dir_1 + integer :: a(10) + !dir$ unroll 1 + ! CHECK: br i1 {{.*}}, label {{.*}}, label {{.*}}, !llvm.loop ![[UNROLL_DISABLE_ANNO]] + do i=1,10 + a(i)=i + end do +end subroutine unroll_dir_1 + +! CHECK-LABEL: unroll_dir_2 +subroutine unroll_dir_2 + integer :: a(10) + !dir$ unroll 2 + ! CHECK: br i1 {{.*}}, label {{.*}}, label {{.*}}, !llvm.loop ![[UNROLL_ENABLE_COUNT_2:.*]] + do i=1,10 + a(i)=i + end do +end subroutine unroll_dir_2 +! CHECK: ![[UNROLL_ENABLE_FULL_ANNO]] = distinct !{![[UNROLL_ENABLE_FULL_ANNO]], ![[UNROLL_ENABLE:.*]], ![[UNROLL_FULL:.*]]} +! CHECK: ![[UNROLL_ENABLE:.*]] = !{!"llvm.loop.unroll.enable"} +! CHECK: ![[UNROLL_FULL:.*]] = !{!"llvm.loop.unroll.full"} +! CHECK: ![[UNROLL_DISABLE_ANNO]] = distinct !{![[UNROLL_DISABLE_ANNO]], ![[UNROLL_DISABLE:.*]]} +! CHECK: ![[UNROLL_DISABLE]] = !{!"llvm.loop.unroll.disable"} +! CHECK: ![[UNROLL_ENABLE_COUNT_2]] = distinct !{![[UNROLL_ENABLE_COUNT_2]], ![[UNROLL_ENABLE]], ![[UNROLL_COUNT_2:.*]]} +! CHECK: ![[UNROLL_COUNT_2]] = !{!"llvm.loop.unroll.count", i32 2} diff --git a/flang/test/Lower/CUDA/cuda-device-proc.cuf b/flang/test/Lower/CUDA/cuda-device-proc.cuf index ec825263474c1..17a6a1d965640 100644 --- a/flang/test/Lower/CUDA/cuda-device-proc.cuf +++ b/flang/test/Lower/CUDA/cuda-device-proc.cuf @@ -47,7 +47,7 @@ end ! CHECK-LABEL: func.func @_QPdevsub() attributes {cuf.proc_attr = #cuf.cuda_proc} ! CHECK: fir.call @llvm.nvvm.barrier0() fastmath : () -> () -! CHECK: fir.call @__syncwarp(%{{.*}}) proc_attrs fastmath : (i32) -> () +! CHECK: fir.call @llvm.nvvm.bar.warp.sync(%c1{{.*}}) fastmath : (i32) -> () ! CHECK: fir.call @llvm.nvvm.membar.gl() fastmath : () -> () ! CHECK: fir.call @llvm.nvvm.membar.cta() fastmath : () -> () ! CHECK: fir.call @llvm.nvvm.membar.sys() fastmath : () -> () @@ -102,13 +102,13 @@ end ! CHECK-LABEL: func.func @_QPhost1() ! CHECK: cuf.kernel ! CHECK: fir.call @llvm.nvvm.barrier0() fastmath : () -> () -! CHECK: fir.call @__syncwarp(%c1{{.*}}) proc_attrs fastmath : (i32) -> () +! CHECK: fir.call @llvm.nvvm.bar.warp.sync(%c1{{.*}}) fastmath : (i32) -> () ! CHECK: fir.call @llvm.nvvm.barrier0.and(%c1{{.*}}) fastmath : (i32) -> i32 ! 
CHECK: fir.call @llvm.nvvm.barrier0.popc(%c1{{.*}}) fastmath : (i32) -> i32 ! CHECK: fir.call @llvm.nvvm.barrier0.or(%c1{{.*}}) fastmath : (i32) -> i32 ! CHECK: func.func private @llvm.nvvm.barrier0() -! CHECK: func.func private @__syncwarp(i32) attributes {cuf.proc_attr = #cuf.cuda_proc, fir.bindc_name = "__syncwarp", fir.proc_attrs = #fir.proc_attrs} +! CHECK: func.func private @llvm.nvvm.bar.warp.sync(i32) ! CHECK: func.func private @llvm.nvvm.membar.gl() ! CHECK: func.func private @llvm.nvvm.membar.cta() ! CHECK: func.func private @llvm.nvvm.membar.sys() diff --git a/flang/test/Lower/PowerPC/ppc-vec-load-elem-order.f90 b/flang/test/Lower/PowerPC/ppc-vec-load-elem-order.f90 index 214fe423628d6..355fd6c3a742a 100644 --- a/flang/test/Lower/PowerPC/ppc-vec-load-elem-order.f90 +++ b/flang/test/Lower/PowerPC/ppc-vec-load-elem-order.f90 @@ -719,8 +719,8 @@ subroutine vec_xlds_testi64a(arg1, arg2, res) ! LLVMIR: %[[arg1:.*]] = load i64, ptr %0, align 8 ! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i64 %[[arg1]] ! LLVMIR: %[[ld:.*]] = load i64, ptr %[[addr]], align 8 -! LLVMIR: %[[insrt:.*]] = insertelement <2 x i64> undef, i64 %[[ld]], i32 0 -! LLVMIR: %[[shflv:.*]] = shufflevector <2 x i64> %[[insrt]], <2 x i64> undef, <2 x i32> zeroinitializer +! LLVMIR: %[[insrt:.*]] = insertelement <2 x i64> poison, i64 %[[ld]], i32 0 +! LLVMIR: %[[shflv:.*]] = shufflevector <2 x i64> %[[insrt]], <2 x i64> poison, <2 x i32> zeroinitializer ! LLVMIR: store <2 x i64> %[[shflv]], ptr %2, align 16 end subroutine vec_xlds_testi64a @@ -734,8 +734,8 @@ subroutine vec_xlds_testf64a(arg1, arg2, res) ! LLVMIR: %[[arg1:.*]] = load i64, ptr %0, align 8 ! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i64 %[[arg1]] ! LLVMIR: %[[ld:.*]] = load i64, ptr %[[addr]], align 8 -! LLVMIR: %[[insrt:.*]] = insertelement <2 x i64> undef, i64 %[[ld]], i32 0 -! LLVMIR: %[[shflv:.*]] = shufflevector <2 x i64> %[[insrt]], <2 x i64> undef, <2 x i32> zeroinitializer +! LLVMIR: %[[insrt:.*]] = insertelement <2 x i64> poison, i64 %[[ld]], i32 0 +! LLVMIR: %[[shflv:.*]] = shufflevector <2 x i64> %[[insrt]], <2 x i64> poison, <2 x i32> zeroinitializer ! LLVMIR: %[[bc:.*]] = bitcast <2 x i64> %[[shflv]] to <2 x double> ! LLVMIR: store <2 x double> %[[bc]], ptr %2, align 16 end subroutine vec_xlds_testf64a diff --git a/flang/test/Lower/PowerPC/ppc-vec-load.f90 b/flang/test/Lower/PowerPC/ppc-vec-load.f90 index a81ed055ce08c..f2c918ecf5bfe 100644 --- a/flang/test/Lower/PowerPC/ppc-vec-load.f90 +++ b/flang/test/Lower/PowerPC/ppc-vec-load.f90 @@ -683,8 +683,8 @@ subroutine vec_xlds_testi64a(arg1, arg2, res) ! LLVMIR: %[[arg1:.*]] = load i64, ptr %0, align 8 ! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i64 %[[arg1]] ! LLVMIR: %[[ld:.*]] = load i64, ptr %[[addr]], align 8 -! LLVMIR: %[[insrt:.*]] = insertelement <2 x i64> undef, i64 %[[ld]], i32 0 -! LLVMIR: %[[shfl:.*]] = shufflevector <2 x i64> %[[insrt]], <2 x i64> undef, <2 x i32> zeroinitializer +! LLVMIR: %[[insrt:.*]] = insertelement <2 x i64> poison, i64 %[[ld]], i32 0 +! LLVMIR: %[[shfl:.*]] = shufflevector <2 x i64> %[[insrt]], <2 x i64> poison, <2 x i32> zeroinitializer ! LLVMIR: store <2 x i64> %[[shfl]], ptr %2, align 16 end subroutine vec_xlds_testi64a @@ -698,8 +698,8 @@ subroutine vec_xlds_testf64a(arg1, arg2, res) ! LLVMIR: %[[arg1:.*]] = load i64, ptr %0, align 8 ! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i64 %[[arg1]] ! LLVMIR: %[[ld:.*]] = load i64, ptr %[[addr]], align 8 -! LLVMIR: %[[insrt:.*]] = insertelement <2 x i64> undef, i64 %[[ld]], i32 0 -! 
LLVMIR: %[[shfl:.*]] = shufflevector <2 x i64> %[[insrt]], <2 x i64> undef, <2 x i32> zeroinitializer +! LLVMIR: %[[insrt:.*]] = insertelement <2 x i64> poison, i64 %[[ld]], i32 0 +! LLVMIR: %[[shfl:.*]] = shufflevector <2 x i64> %[[insrt]], <2 x i64> poison, <2 x i32> zeroinitializer ! LLVMIR: %[[bc:.*]] = bitcast <2 x i64> %[[shfl]] to <2 x double> ! LLVMIR: store <2 x double> %[[bc]], ptr %2, align 16 end subroutine vec_xlds_testf64a diff --git a/flang/test/Lower/PowerPC/ppc-vec-splat-elem-order.f90 b/flang/test/Lower/PowerPC/ppc-vec-splat-elem-order.f90 index d95e9828531cd..50604e1f720f3 100644 --- a/flang/test/Lower/PowerPC/ppc-vec-splat-elem-order.f90 +++ b/flang/test/Lower/PowerPC/ppc-vec-splat-elem-order.f90 @@ -8,8 +8,8 @@ subroutine vec_splat_testf32i64(x) ! LLVMIR: %[[x:.*]] = load <4 x float>, ptr %{{[0-9]}}, align 16 ! LLVMIR: %[[ele:.*]] = extractelement <4 x float> %[[x]], i64 3 -! LLVMIR: %[[ins:.*]] = insertelement <4 x float> undef, float %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <4 x float> %[[ins]], <4 x float> undef, <4 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <4 x float> poison, float %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <4 x float> %[[ins]], <4 x float> poison, <4 x i32> zeroinitializer ! LLVMIR: store <4 x float> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testf32i64 @@ -20,7 +20,7 @@ subroutine vec_splat_testu8i16(x) ! LLVMIR: %[[x:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16 ! LLVMIR: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i16 15 -! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> undef, i8 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> undef, <16 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> poison, i8 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> poison, <16 x i32> zeroinitializer ! LLVMIR: store <16 x i8> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testu8i16 diff --git a/flang/test/Lower/PowerPC/ppc-vec-splat.f90 b/flang/test/Lower/PowerPC/ppc-vec-splat.f90 index 17558926afd5f..f3c1f19d5877d 100644 --- a/flang/test/Lower/PowerPC/ppc-vec-splat.f90 +++ b/flang/test/Lower/PowerPC/ppc-vec-splat.f90 @@ -14,8 +14,8 @@ subroutine vec_splat_testi8i8(x) ! LLVMIR: %[[x:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i8 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i8 15 -! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> undef, i8 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> undef, <16 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> poison, i8 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> poison, <16 x i32> zeroinitializer ! LLVMIR: store <16 x i8> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testi8i8 @@ -27,8 +27,8 @@ subroutine vec_splat_testi8i16(x) ! LLVMIR: %[[x:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i16 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i16 15 -! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> undef, i8 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> undef, <16 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> poison, i8 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> poison, <16 x i32> zeroinitializer ! 
LLVMIR: store <16 x i8> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testi8i16 @@ -40,8 +40,8 @@ subroutine vec_splat_testi8i32(x) ! LLVMIR: %[[x:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i32 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i32 15 -! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> undef, i8 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> undef, <16 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> poison, i8 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> poison, <16 x i32> zeroinitializer ! LLVMIR: store <16 x i8> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testi8i32 @@ -53,8 +53,8 @@ subroutine vec_splat_testi8i64(x) ! LLVMIR: %[[x:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i64 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i64 15 -! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> undef, i8 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> undef, <16 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> poison, i8 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> poison, <16 x i32> zeroinitializer ! LLVMIR: store <16 x i8> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testi8i64 @@ -66,8 +66,8 @@ subroutine vec_splat_testi16i8(x) ! LLVMIR: %[[x:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i8 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i8 7 -! LLVMIR: %[[ins:.*]] = insertelement <8 x i16> undef, i16 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <8 x i16> %[[ins]], <8 x i16> undef, <8 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <8 x i16> poison, i16 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <8 x i16> %[[ins]], <8 x i16> poison, <8 x i32> zeroinitializer ! LLVMIR: store <8 x i16> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testi16i8 @@ -79,8 +79,8 @@ subroutine vec_splat_testi16i16(x) ! LLVMIR: %[[x:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i16 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i16 7 -! LLVMIR: %[[ins:.*]] = insertelement <8 x i16> undef, i16 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <8 x i16> %[[ins]], <8 x i16> undef, <8 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <8 x i16> poison, i16 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <8 x i16> %[[ins]], <8 x i16> poison, <8 x i32> zeroinitializer ! LLVMIR: store <8 x i16> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testi16i16 @@ -92,8 +92,8 @@ subroutine vec_splat_testi16i32(x) ! LLVMIR: %[[x:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i32 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i32 7 -! LLVMIR: %[[ins:.*]] = insertelement <8 x i16> undef, i16 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <8 x i16> %[[ins]], <8 x i16> undef, <8 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <8 x i16> poison, i16 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <8 x i16> %[[ins]], <8 x i16> poison, <8 x i32> zeroinitializer ! 
LLVMIR: store <8 x i16> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testi16i32 @@ -105,8 +105,8 @@ subroutine vec_splat_testi16i64(x) ! LLVMIR: %[[x:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i64 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i64 7 -! LLVMIR: %[[ins:.*]] = insertelement <8 x i16> undef, i16 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <8 x i16> %[[ins]], <8 x i16> undef, <8 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <8 x i16> poison, i16 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <8 x i16> %[[ins]], <8 x i16> poison, <8 x i32> zeroinitializer ! LLVMIR: store <8 x i16> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testi16i64 @@ -118,8 +118,8 @@ subroutine vec_splat_testi32i8(x) ! LLVMIR: %[[x:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i8 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i8 3 -! LLVMIR: %[[ins:.*]] = insertelement <4 x i32> undef, i32 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <4 x i32> %[[ins]], <4 x i32> undef, <4 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <4 x i32> poison, i32 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <4 x i32> %[[ins]], <4 x i32> poison, <4 x i32> zeroinitializer ! LLVMIR: store <4 x i32> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testi32i8 @@ -131,8 +131,8 @@ subroutine vec_splat_testi32i16(x) ! LLVMIR: %[[x:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i16 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i16 3 -! LLVMIR: %[[ins:.*]] = insertelement <4 x i32> undef, i32 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <4 x i32> %[[ins]], <4 x i32> undef, <4 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <4 x i32> poison, i32 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <4 x i32> %[[ins]], <4 x i32> poison, <4 x i32> zeroinitializer ! LLVMIR: store <4 x i32> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testi32i16 @@ -144,8 +144,8 @@ subroutine vec_splat_testi32i32(x) ! LLVMIR: %[[x:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i32 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i32 3 -! LLVMIR: %[[ins:.*]] = insertelement <4 x i32> undef, i32 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <4 x i32> %[[ins]], <4 x i32> undef, <4 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <4 x i32> poison, i32 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <4 x i32> %[[ins]], <4 x i32> poison, <4 x i32> zeroinitializer ! LLVMIR: store <4 x i32> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testi32i32 @@ -157,8 +157,8 @@ subroutine vec_splat_testi32i64(x) ! LLVMIR: %[[x:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i64 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i64 3 -! LLVMIR: %[[ins:.*]] = insertelement <4 x i32> undef, i32 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <4 x i32> %[[ins]], <4 x i32> undef, <4 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <4 x i32> poison, i32 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <4 x i32> %[[ins]], <4 x i32> poison, <4 x i32> zeroinitializer ! 
LLVMIR: store <4 x i32> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testi32i64 @@ -170,8 +170,8 @@ subroutine vec_splat_testi64i8(x) ! LLVMIR: %[[x:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i8 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i8 1 -! LLVMIR: %[[ins:.*]] = insertelement <2 x i64> undef, i64 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <2 x i64> %[[ins]], <2 x i64> undef, <2 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <2 x i64> poison, i64 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <2 x i64> %[[ins]], <2 x i64> poison, <2 x i32> zeroinitializer ! LLVMIR: store <2 x i64> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testi64i8 @@ -183,8 +183,8 @@ subroutine vec_splat_testi64i16(x) ! LLVMIR: %[[x:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i16 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i16 1 -! LLVMIR: %[[ins:.*]] = insertelement <2 x i64> undef, i64 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <2 x i64> %[[ins]], <2 x i64> undef, <2 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <2 x i64> poison, i64 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <2 x i64> %[[ins]], <2 x i64> poison, <2 x i32> zeroinitializer ! LLVMIR: store <2 x i64> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testi64i16 @@ -196,8 +196,8 @@ subroutine vec_splat_testi64i32(x) ! LLVMIR: %[[x:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i32 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i32 1 -! LLVMIR: %[[ins:.*]] = insertelement <2 x i64> undef, i64 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <2 x i64> %[[ins]], <2 x i64> undef, <2 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <2 x i64> poison, i64 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <2 x i64> %[[ins]], <2 x i64> poison, <2 x i32> zeroinitializer ! LLVMIR: store <2 x i64> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testi64i32 @@ -209,8 +209,8 @@ subroutine vec_splat_testi64i64(x) ! LLVMIR: %[[x:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i64 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i64 1 -! LLVMIR: %[[ins:.*]] = insertelement <2 x i64> undef, i64 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <2 x i64> %[[ins]], <2 x i64> undef, <2 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <2 x i64> poison, i64 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <2 x i64> %[[ins]], <2 x i64> poison, <2 x i32> zeroinitializer ! LLVMIR: store <2 x i64> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testi64i64 @@ -222,8 +222,8 @@ subroutine vec_splat_testf32i8(x) ! LLVMIR: %[[x:.*]] = load <4 x float>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <4 x float> %[[x]], i8 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <4 x float> %[[x]], i8 3 -! LLVMIR: %[[ins:.*]] = insertelement <4 x float> undef, float %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <4 x float> %[[ins]], <4 x float> undef, <4 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <4 x float> poison, float %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <4 x float> %[[ins]], <4 x float> poison, <4 x i32> zeroinitializer ! 
LLVMIR: store <4 x float> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testf32i8 @@ -235,8 +235,8 @@ subroutine vec_splat_testf32i16(x) ! LLVMIR: %[[x:.*]] = load <4 x float>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <4 x float> %[[x]], i16 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <4 x float> %[[x]], i16 3 -! LLVMIR: %[[ins:.*]] = insertelement <4 x float> undef, float %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <4 x float> %[[ins]], <4 x float> undef, <4 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <4 x float> poison, float %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <4 x float> %[[ins]], <4 x float> poison, <4 x i32> zeroinitializer ! LLVMIR: store <4 x float> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testf32i16 @@ -248,8 +248,8 @@ subroutine vec_splat_testf32i32(x) ! LLVMIR: %[[x:.*]] = load <4 x float>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <4 x float> %[[x]], i32 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <4 x float> %[[x]], i32 3 -! LLVMIR: %[[ins:.*]] = insertelement <4 x float> undef, float %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <4 x float> %[[ins]], <4 x float> undef, <4 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <4 x float> poison, float %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <4 x float> %[[ins]], <4 x float> poison, <4 x i32> zeroinitializer ! LLVMIR: store <4 x float> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testf32i32 @@ -261,8 +261,8 @@ subroutine vec_splat_testf32i64(x) ! LLVMIR: %[[x:.*]] = load <4 x float>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <4 x float> %[[x]], i64 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <4 x float> %[[x]], i64 3 -! LLVMIR: %[[ins:.*]] = insertelement <4 x float> undef, float %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <4 x float> %[[ins]], <4 x float> undef, <4 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <4 x float> poison, float %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <4 x float> %[[ins]], <4 x float> poison, <4 x i32> zeroinitializer ! LLVMIR: store <4 x float> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testf32i64 @@ -274,8 +274,8 @@ subroutine vec_splat_testf64i8(x) ! LLVMIR: %[[x:.*]] = load <2 x double>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <2 x double> %[[x]], i8 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <2 x double> %[[x]], i8 1 -! LLVMIR: %[[ins:.*]] = insertelement <2 x double> undef, double %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <2 x double> %[[ins]], <2 x double> undef, <2 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <2 x double> poison, double %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <2 x double> %[[ins]], <2 x double> poison, <2 x i32> zeroinitializer ! LLVMIR: store <2 x double> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testf64i8 @@ -287,8 +287,8 @@ subroutine vec_splat_testf64i16(x) ! LLVMIR: %[[x:.*]] = load <2 x double>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <2 x double> %[[x]], i16 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <2 x double> %[[x]], i16 1 -! LLVMIR: %[[ins:.*]] = insertelement <2 x double> undef, double %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <2 x double> %[[ins]], <2 x double> undef, <2 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <2 x double> poison, double %[[ele]], i32 0 +! 
LLVMIR: %[[y:.*]] = shufflevector <2 x double> %[[ins]], <2 x double> poison, <2 x i32> zeroinitializer ! LLVMIR: store <2 x double> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testf64i16 @@ -300,8 +300,8 @@ subroutine vec_splat_testf64i32(x) ! LLVMIR: %[[x:.*]] = load <2 x double>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <2 x double> %[[x]], i32 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <2 x double> %[[x]], i32 1 -! LLVMIR: %[[ins:.*]] = insertelement <2 x double> undef, double %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <2 x double> %[[ins]], <2 x double> undef, <2 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <2 x double> poison, double %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <2 x double> %[[ins]], <2 x double> poison, <2 x i32> zeroinitializer ! LLVMIR: store <2 x double> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testf64i32 @@ -313,8 +313,8 @@ subroutine vec_splat_testf64i64(x) ! LLVMIR: %[[x:.*]] = load <2 x double>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <2 x double> %[[x]], i64 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <2 x double> %[[x]], i64 1 -! LLVMIR: %[[ins:.*]] = insertelement <2 x double> undef, double %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <2 x double> %[[ins]], <2 x double> undef, <2 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <2 x double> poison, double %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <2 x double> %[[ins]], <2 x double> poison, <2 x i32> zeroinitializer ! LLVMIR: store <2 x double> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testf64i64 @@ -326,8 +326,8 @@ subroutine vec_splat_testu8i8(x) ! LLVMIR: %[[x:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i8 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i8 15 -! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> undef, i8 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> undef, <16 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> poison, i8 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> poison, <16 x i32> zeroinitializer ! LLVMIR: store <16 x i8> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testu8i8 @@ -339,8 +339,8 @@ subroutine vec_splat_testu8i16(x) ! LLVMIR: %[[x:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i16 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i16 15 -! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> undef, i8 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> undef, <16 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> poison, i8 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> poison, <16 x i32> zeroinitializer ! LLVMIR: store <16 x i8> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testu8i16 @@ -352,8 +352,8 @@ subroutine vec_splat_testu8i32(x) ! LLVMIR: %[[x:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i32 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i32 15 -! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> undef, i8 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> undef, <16 x i32> zeroinitializer +! 
LLVMIR: %[[ins:.*]] = insertelement <16 x i8> poison, i8 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> poison, <16 x i32> zeroinitializer ! LLVMIR: store <16 x i8> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testu8i32 @@ -365,8 +365,8 @@ subroutine vec_splat_testu8i64(x) ! LLVMIR: %[[x:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i64 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i64 15 -! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> undef, i8 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> undef, <16 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> poison, i8 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> poison, <16 x i32> zeroinitializer ! LLVMIR: store <16 x i8> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testu8i64 @@ -378,8 +378,8 @@ subroutine vec_splat_testu16i8(x) ! LLVMIR: %[[x:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i8 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i8 7 -! LLVMIR: %[[ins:.*]] = insertelement <8 x i16> undef, i16 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <8 x i16> %[[ins]], <8 x i16> undef, <8 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <8 x i16> poison, i16 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <8 x i16> %[[ins]], <8 x i16> poison, <8 x i32> zeroinitializer ! LLVMIR: store <8 x i16> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testu16i8 @@ -391,8 +391,8 @@ subroutine vec_splat_testu16i16(x) ! LLVMIR: %[[x:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i16 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i16 7 -! LLVMIR: %[[ins:.*]] = insertelement <8 x i16> undef, i16 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <8 x i16> %[[ins]], <8 x i16> undef, <8 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <8 x i16> poison, i16 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <8 x i16> %[[ins]], <8 x i16> poison, <8 x i32> zeroinitializer ! LLVMIR: store <8 x i16> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testu16i16 @@ -404,8 +404,8 @@ subroutine vec_splat_testu16i32(x) ! LLVMIR: %[[x:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i32 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i32 7 -! LLVMIR: %[[ins:.*]] = insertelement <8 x i16> undef, i16 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <8 x i16> %[[ins]], <8 x i16> undef, <8 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <8 x i16> poison, i16 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <8 x i16> %[[ins]], <8 x i16> poison, <8 x i32> zeroinitializer ! LLVMIR: store <8 x i16> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testu16i32 @@ -417,8 +417,8 @@ subroutine vec_splat_testu16i64(x) ! LLVMIR: %[[x:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i64 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i64 7 -! LLVMIR: %[[ins:.*]] = insertelement <8 x i16> undef, i16 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <8 x i16> %[[ins]], <8 x i16> undef, <8 x i32> zeroinitializer +! 
LLVMIR: %[[ins:.*]] = insertelement <8 x i16> poison, i16 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <8 x i16> %[[ins]], <8 x i16> poison, <8 x i32> zeroinitializer ! LLVMIR: store <8 x i16> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testu16i64 @@ -430,8 +430,8 @@ subroutine vec_splat_testu32i8(x) ! LLVMIR: %[[x:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i8 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i8 3 -! LLVMIR: %[[ins:.*]] = insertelement <4 x i32> undef, i32 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <4 x i32> %[[ins]], <4 x i32> undef, <4 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <4 x i32> poison, i32 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <4 x i32> %[[ins]], <4 x i32> poison, <4 x i32> zeroinitializer ! LLVMIR: store <4 x i32> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testu32i8 @@ -443,8 +443,8 @@ subroutine vec_splat_testu32i16(x) ! LLVMIR: %[[x:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i16 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i16 3 -! LLVMIR: %[[ins:.*]] = insertelement <4 x i32> undef, i32 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <4 x i32> %[[ins]], <4 x i32> undef, <4 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <4 x i32> poison, i32 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <4 x i32> %[[ins]], <4 x i32> poison, <4 x i32> zeroinitializer ! LLVMIR: store <4 x i32> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testu32i16 @@ -456,8 +456,8 @@ subroutine vec_splat_testu32i32(x) ! LLVMIR: %[[x:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i32 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i32 3 -! LLVMIR: %[[ins:.*]] = insertelement <4 x i32> undef, i32 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <4 x i32> %[[ins]], <4 x i32> undef, <4 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <4 x i32> poison, i32 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <4 x i32> %[[ins]], <4 x i32> poison, <4 x i32> zeroinitializer ! LLVMIR: store <4 x i32> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testu32i32 @@ -469,8 +469,8 @@ subroutine vec_splat_testu32i64(x) ! LLVMIR: %[[x:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i64 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i64 3 -! LLVMIR: %[[ins:.*]] = insertelement <4 x i32> undef, i32 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <4 x i32> %[[ins]], <4 x i32> undef, <4 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <4 x i32> poison, i32 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <4 x i32> %[[ins]], <4 x i32> poison, <4 x i32> zeroinitializer ! LLVMIR: store <4 x i32> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testu32i64 @@ -482,8 +482,8 @@ subroutine vec_splat_testu64i8(x) ! LLVMIR: %[[x:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i8 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i8 1 -! LLVMIR: %[[ins:.*]] = insertelement <2 x i64> undef, i64 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <2 x i64> %[[ins]], <2 x i64> undef, <2 x i32> zeroinitializer +! 
LLVMIR: %[[ins:.*]] = insertelement <2 x i64> poison, i64 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <2 x i64> %[[ins]], <2 x i64> poison, <2 x i32> zeroinitializer ! LLVMIR: store <2 x i64> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testu64i8 @@ -495,8 +495,8 @@ subroutine vec_splat_testu64i16(x) ! LLVMIR: %[[x:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i16 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i16 1 -! LLVMIR: %[[ins:.*]] = insertelement <2 x i64> undef, i64 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <2 x i64> %[[ins]], <2 x i64> undef, <2 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <2 x i64> poison, i64 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <2 x i64> %[[ins]], <2 x i64> poison, <2 x i32> zeroinitializer ! LLVMIR: store <2 x i64> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testu64i16 @@ -508,8 +508,8 @@ subroutine vec_splat_testu64i32(x) ! LLVMIR: %[[x:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i32 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i32 1 -! LLVMIR: %[[ins:.*]] = insertelement <2 x i64> undef, i64 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <2 x i64> %[[ins]], <2 x i64> undef, <2 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <2 x i64> poison, i64 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <2 x i64> %[[ins]], <2 x i64> poison, <2 x i32> zeroinitializer ! LLVMIR: store <2 x i64> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testu64i32 @@ -521,8 +521,8 @@ subroutine vec_splat_testu64i64(x) ! LLVMIR: %[[x:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i64 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i64 1 -! LLVMIR: %[[ins:.*]] = insertelement <2 x i64> undef, i64 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <2 x i64> %[[ins]], <2 x i64> undef, <2 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <2 x i64> poison, i64 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <2 x i64> %[[ins]], <2 x i64> poison, <2 x i32> zeroinitializer ! LLVMIR: store <2 x i64> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testu64i64 @@ -537,8 +537,8 @@ subroutine vec_splats_testi8(x) y = vec_splats(x) ! LLVMIR: %[[x:.*]] = load i8, ptr %{{[0-9]}}, align 1 -! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> undef, i8 %[[x]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> undef, <16 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> poison, i8 %[[x]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> poison, <16 x i32> zeroinitializer ! LLVMIR: store <16 x i8> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splats_testi8 @@ -549,8 +549,8 @@ subroutine vec_splats_testi16(x) y = vec_splats(x) ! LLVMIR: %[[x:.*]] = load i16, ptr %{{[0-9]}}, align 2 -! LLVMIR: %[[ins:.*]] = insertelement <8 x i16> undef, i16 %[[x]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <8 x i16> %[[ins]], <8 x i16> undef, <8 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <8 x i16> poison, i16 %[[x]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <8 x i16> %[[ins]], <8 x i16> poison, <8 x i32> zeroinitializer ! 
LLVMIR: store <8 x i16> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splats_testi16 @@ -561,8 +561,8 @@ subroutine vec_splats_testi32(x) y = vec_splats(x) ! LLVMIR: %[[x:.*]] = load i32, ptr %{{[0-9]}}, align 4 -! LLVMIR: %[[ins:.*]] = insertelement <4 x i32> undef, i32 %[[x]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <4 x i32> %[[ins]], <4 x i32> undef, <4 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <4 x i32> poison, i32 %[[x]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <4 x i32> %[[ins]], <4 x i32> poison, <4 x i32> zeroinitializer ! LLVMIR: store <4 x i32> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splats_testi32 @@ -573,8 +573,8 @@ subroutine vec_splats_testi64(x) y = vec_splats(x) ! LLVMIR: %[[x:.*]] = load i64, ptr %{{[0-9]}}, align 8 -! LLVMIR: %[[ins:.*]] = insertelement <2 x i64> undef, i64 %[[x]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <2 x i64> %[[ins]], <2 x i64> undef, <2 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <2 x i64> poison, i64 %[[x]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <2 x i64> %[[ins]], <2 x i64> poison, <2 x i32> zeroinitializer ! LLVMIR: store <2 x i64> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splats_testi64 @@ -585,8 +585,8 @@ subroutine vec_splats_testf32(x) y = vec_splats(x) ! LLVMIR: %[[x:.*]] = load float, ptr %{{[0-9]}}, align 4 -! LLVMIR: %[[ins:.*]] = insertelement <4 x float> undef, float %[[x]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <4 x float> %[[ins]], <4 x float> undef, <4 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <4 x float> poison, float %[[x]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <4 x float> %[[ins]], <4 x float> poison, <4 x i32> zeroinitializer ! LLVMIR: store <4 x float> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splats_testf32 @@ -597,8 +597,8 @@ subroutine vec_splats_testf64(x) y = vec_splats(x) ! LLVMIR: %[[x:.*]] = load double, ptr %{{[0-9]}}, align 8 -! LLVMIR: %[[ins:.*]] = insertelement <2 x double> undef, double %[[x]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <2 x double> %[[ins]], <2 x double> undef, <2 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <2 x double> poison, double %[[x]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <2 x double> %[[ins]], <2 x double> poison, <2 x i32> zeroinitializer ! LLVMIR: store <2 x double> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splats_testf64 diff --git a/flang/test/Runtime/no-cpp-dep.c b/flang/test/Runtime/no-cpp-dep.c index 606a5d189f719..4fcf8f9d478d8 100644 --- a/flang/test/Runtime/no-cpp-dep.c +++ b/flang/test/Runtime/no-cpp-dep.c @@ -1,12 +1,12 @@ /* This test makes sure that flang's runtime does not depend on the C++ runtime -library. It tries to link this simple file against libFortranRuntime.a with +library. It tries to link this simple file against libflang_rt.runtime.a with a C compiler. 
-REQUIRES: c-compiler +REQUIRES: c-compiler, flang-rt RUN: %if system-aix %{ export OBJECT_MODE=64 %} -RUN: %cc -std=c99 %s -I%include %libruntime %libdecimal -lm \ +RUN: %cc -std=c99 %s -I%include %libruntime -lm \ RUN: %if system-aix %{-lpthread %} RUN: rm a.out */ diff --git a/flang/test/Semantics/cuf03.cuf b/flang/test/Semantics/cuf03.cuf index 93b136ad7d315..fe9dd5b3ecf05 100644 --- a/flang/test/Semantics/cuf03.cuf +++ b/flang/test/Semantics/cuf03.cuf @@ -54,6 +54,14 @@ module m !ERROR: Object 'um' with ATTRIBUTES(UNIFIED) must be declared in a host subprogram real, unified :: um + type :: t3 + !ERROR: Component 'r' with ATTRIBUTES(DEVICE) must also be allocatable or pointer + real, device :: r + real, device, pointer :: rp ! ok + real, device, allocatable :: ra ! ok + real, device, pointer, contiguous :: rpc ! ok + end type + contains attributes(device) subroutine devsubr(n,da,rs) integer, intent(in) :: n diff --git a/flang/test/lit.cfg.py b/flang/test/lit.cfg.py index f43234fb125b7..c6266f3976f7c 100644 --- a/flang/test/lit.cfg.py +++ b/flang/test/lit.cfg.py @@ -163,17 +163,18 @@ ToolSubst("%not_todo_abort_cmd", command=FindTool("not"), unresolved="fatal") ) +if config.flang_include_runtime: + config.available_features.add("flang-rt") + # Define some variables to help us test that the flang runtime doesn't depend on # the C++ runtime libraries. For this we need a C compiler. If for some reason # we don't have one, we can just disable the test. -if config.cc: - libruntime = os.path.join(config.flang_lib_dir, "libFortranRuntime.a") - libdecimal = os.path.join(config.flang_lib_dir, "libFortranDecimal.a") +if config.flang_include_runtime and config.cc: + libruntime = os.path.join(config.flang_lib_dir, "libflang_rt.runtime.a") include = os.path.join(config.flang_src_dir, "include") if ( os.path.isfile(libruntime) - and os.path.isfile(libdecimal) and os.path.isdir(include) ): config.available_features.add("c-compiler") @@ -183,7 +184,6 @@ ) ) tools.append(ToolSubst("%libruntime", command=libruntime, unresolved="fatal")) - tools.append(ToolSubst("%libdecimal", command=libdecimal, unresolved="fatal")) tools.append(ToolSubst("%include", command=include, unresolved="fatal")) # Add all the tools and their substitutions (if applicable). Use the search paths provided for diff --git a/flang/test/lit.site.cfg.py.in b/flang/test/lit.site.cfg.py.in index d1a0ac763cf8a..697ba3fa79763 100644 --- a/flang/test/lit.site.cfg.py.in +++ b/flang/test/lit.site.cfg.py.in @@ -1,6 +1,7 @@ @LIT_SITE_CFG_IN_HEADER@ import sys +import lit.util config.llvm_tools_dir = lit_config.substitute("@LLVM_TOOLS_DIR@") config.llvm_shlib_dir = lit_config.substitute(path(r"@SHLIBDIR@")) @@ -32,6 +33,7 @@ else: config.openmp_module_dir = None config.flang_runtime_f128_math_lib = "@FLANG_RUNTIME_F128_MATH_LIB@" config.have_ldbl_mant_dig_113 = "@HAVE_LDBL_MANT_DIG_113@" +config.flang_include_runtime = lit.util.pythonize_bool("@FLANG_INCLUDE_RUNTIME@") import lit.llvm lit.llvm.initialize(lit_config, config) diff --git a/flang/tools/f18/CMakeLists.txt b/flang/tools/f18/CMakeLists.txt index cc2bc5b8eb5ce..5b5f23b5dc73c 100644 --- a/flang/tools/f18/CMakeLists.txt +++ b/flang/tools/f18/CMakeLists.txt @@ -5,7 +5,7 @@ set(LLVM_LINK_COMPONENTS ) # Define the list of Fortran module files that need to be compiled -# to produce an object file for inclusion into the FortranRuntime +# to produce an object file for inclusion into the flang_rt.runtime # library. 
set(MODULES_WITH_IMPLEMENTATION "iso_fortran_env_impl" @@ -105,11 +105,11 @@ if (NOT CMAKE_CROSSCOMPILING) endif() # Some modules have an implementation part that needs to be added to the - # FortranRuntime library. + # flang_rt.runtime library. set(compile_with "-fsyntax-only") set(object_output "") set(include_in_link FALSE) - if(${filename} IN_LIST MODULES_WITH_IMPLEMENTATION) + if(${filename} IN_LIST MODULES_WITH_IMPLEMENTATION AND FLANG_INCLUDE_RUNTIME) set(object_output "${CMAKE_CURRENT_BINARY_DIR}/${filename}${CMAKE_CXX_OUTPUT_EXTENSION}") set(compile_with -c -o ${object_output}) set(include_in_link TRUE) @@ -127,14 +127,14 @@ if (NOT CMAKE_CROSSCOMPILING) install(FILES ${base}.mod DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/flang") # If a module has been compiled into an object file, add the file to - # the link line for the FortranRuntime library. + # the link line for the flang_rt.runtime library. if(include_in_link) list(APPEND module_objects ${object_output}) endif() endforeach() # Set a CACHE variable that is visible to the CMakeLists.txt in runtime/, so that - # the compiled Fortran modules can be added to the link line of the FortranRuntime + # the compiled Fortran modules can be added to the link line of the flang_rt.runtime # library. set(FORTRAN_MODULE_OBJECTS ${module_objects} CACHE INTERNAL "" FORCE) diff --git a/flang/unittests/CMakeLists.txt b/flang/unittests/CMakeLists.txt index 945067fed4f82..c54ceb3332abf 100644 --- a/flang/unittests/CMakeLists.txt +++ b/flang/unittests/CMakeLists.txt @@ -1,6 +1,8 @@ +include(AddFlangOffloadRuntime) + if (FLANG_EXPERIMENTAL_CUDA_RUNTIME) # If Fortran runtime is built as CUDA library, the linking - # of targets that link FortranRuntime must be done + # of targets that link flang_rt.runtime must be done # with CUDA_RESOLVE_DEVICE_SYMBOLS. # CUDA language must be enabled for CUDA_RESOLVE_DEVICE_SYMBOLS # to take effect. @@ -11,6 +13,11 @@ add_custom_target(FlangUnitTests) set_target_properties(FlangUnitTests PROPERTIES FOLDER "Flang/Tests") function(add_flang_unittest_offload_properties target) + # Do not apply runtime properties if not even compiling the runtime. + if (NOT FLANG_INCLUDE_RUNTIME) + return () + endif () + # Set CUDA_RESOLVE_DEVICE_SYMBOLS. 
if (FLANG_EXPERIMENTAL_CUDA_RUNTIME) set_target_properties(${target} @@ -75,5 +82,7 @@ add_subdirectory(Optimizer) add_subdirectory(Common) add_subdirectory(Decimal) add_subdirectory(Evaluate) -add_subdirectory(Runtime) +if (FLANG_INCLUDE_RUNTIME) + add_subdirectory(Runtime) +endif () add_subdirectory(Frontend) diff --git a/flang/unittests/Evaluate/CMakeLists.txt b/flang/unittests/Evaluate/CMakeLists.txt index 8111ecd72cfc7..2278d61febcb1 100644 --- a/flang/unittests/Evaluate/CMakeLists.txt +++ b/flang/unittests/Evaluate/CMakeLists.txt @@ -33,7 +33,6 @@ add_flang_nongtest_unittest(intrinsics FortranDecimal FortranSemantics FortranParser - FortranRuntime ) add_flang_nongtest_unittest(logical @@ -56,19 +55,21 @@ add_flang_nongtest_unittest(real ) llvm_update_compile_flags(real.test) -add_flang_nongtest_unittest(reshape - NonGTestTesting - FortranSemantics - FortranEvaluate - FortranRuntime -) +if (FLANG_INCLUDE_RUNTIME) + add_flang_nongtest_unittest(reshape + NonGTestTesting + FortranSemantics + FortranEvaluate + flang_rt.runtime + ) -add_flang_nongtest_unittest(ISO-Fortran-binding - NonGTestTesting - FortranEvaluate - FortranSemantics - FortranRuntime -) + add_flang_nongtest_unittest(ISO-Fortran-binding + NonGTestTesting + FortranEvaluate + FortranSemantics + flang_rt.runtime + ) +endif () add_flang_nongtest_unittest(folding FortranSupport diff --git a/flang/unittests/Runtime/CMakeLists.txt b/flang/unittests/Runtime/CMakeLists.txt index 179e439917ff2..f3743be49b015 100644 --- a/flang/unittests/Runtime/CMakeLists.txt +++ b/flang/unittests/Runtime/CMakeLists.txt @@ -33,7 +33,7 @@ add_flang_unittest(FlangRuntimeTests target_link_libraries(FlangRuntimeTests PRIVATE - FortranRuntime + flang_rt.runtime ) target_compile_definitions(FlangRuntimeTests PRIVATE NOT_EXE="$") diff --git a/flang/unittests/Runtime/CUDA/CMakeLists.txt b/flang/unittests/Runtime/CUDA/CMakeLists.txt index a7fe604d687bd..860b2664d623b 100644 --- a/flang/unittests/Runtime/CUDA/CMakeLists.txt +++ b/flang/unittests/Runtime/CUDA/CMakeLists.txt @@ -15,8 +15,8 @@ endif() target_link_libraries(FlangCufRuntimeTests PRIVATE ${CUDA_RT_TARGET} - CufRuntime_cuda_${CUDAToolkit_VERSION_MAJOR} - FortranRuntime + flang_rt.cuda_${CUDAToolkit_VERSION_MAJOR} + flang_rt.runtime ) target_include_directories(FlangCufRuntimeTests PRIVATE ${CUDAToolkit_INCLUDE_DIRS}) diff --git a/libc/CMakeLists.txt b/libc/CMakeLists.txt index c061e2a05ebd8..1c4c0cd5aa22b 100644 --- a/libc/CMakeLists.txt +++ b/libc/CMakeLists.txt @@ -51,7 +51,8 @@ set(LIBC_KERNEL_HEADERS "/usr/include" CACHE STRING "Path to Linux kernel header # Defining a global namespace to enclose all libc functions. set(default_namespace "__llvm_libc") if(LLVM_VERSION_MAJOR) - set(default_namespace "__llvm_libc_${LLVM_VERSION_MAJOR}_${LLVM_VERSION_MINOR}_${LLVM_VERSION_PATCH}_${LLVM_VERSION_SUFFIX}") + string(REPLACE "-" "" NS_LLVM_VERSION_SUFFIX ${LLVM_VERSION_SUFFIX}) + set(default_namespace "__llvm_libc_${LLVM_VERSION_MAJOR}_${LLVM_VERSION_MINOR}_${LLVM_VERSION_PATCH}_${NS_LLVM_VERSION_SUFFIX}") endif() set(LIBC_NAMESPACE ${default_namespace} CACHE STRING "The namespace to use to enclose internal implementations. Must start with '__llvm_libc'." 
diff --git a/libc/cmake/modules/LLVMLibCTestRules.cmake b/libc/cmake/modules/LLVMLibCTestRules.cmake index 10bb9c9487d63..ffbdb40cd5091 100644 --- a/libc/cmake/modules/LLVMLibCTestRules.cmake +++ b/libc/cmake/modules/LLVMLibCTestRules.cmake @@ -38,9 +38,8 @@ function(_get_common_test_compile_options output_var c_test flags) endif() # list(APPEND compile_options "-Wconversion") # list(APPEND compile_options "-Wno-sign-conversion") - # list(APPEND compile_options "-Wimplicit-fallthrough") - # list(APPEND compile_options "-Wwrite-strings") - list(APPEND compile_options "-Wextra-semi") + list(APPEND compile_options "-Wimplicit-fallthrough") + list(APPEND compile_options "-Wwrite-strings") # Silence this warning because _Complex is a part of C99. if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") if(NOT c_test) @@ -51,13 +50,14 @@ function(_get_common_test_compile_options output_var c_test flags) list(APPEND compile_options "-Wno-gnu-imaginary-constant") endif() list(APPEND compile_options "-Wno-pedantic") - # if(NOT CMAKE_COMPILER_IS_GNUCXX) - # list(APPEND compile_options "-Wnewline-eof") - # list(APPEND compile_options "-Wnonportable-system-include-path") - # list(APPEND compile_options "-Wstrict-prototypes") - # list(APPEND compile_options "-Wthread-safety") - # list(APPEND compile_options "-Wglobal-constructors") - # endif() + if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang") + list(APPEND compile_options "-Wstrict-prototypes") + list(APPEND compile_options "-Wextra-semi") + list(APPEND compile_options "-Wnewline-eof") + list(APPEND compile_options "-Wnonportable-system-include-path") + list(APPEND compile_options "-Wthread-safety") + # list(APPEND compile_options "-Wglobal-constructors") + endif() endif() set(${output_var} ${compile_options} PARENT_SCOPE) endfunction() @@ -223,6 +223,8 @@ function(create_libc_unittest fq_target_name) _get_common_test_compile_options(compile_options "${LIBC_UNITTEST_C_TEST}" "${LIBC_UNITTEST_FLAGS}") + # TODO: Ideally we would have a separate function for link options. + set(link_options ${compile_options}) list(APPEND compile_options ${LIBC_UNITTEST_COMPILE_OPTIONS}) if(SHOW_INTERMEDIATE_OBJECTS) @@ -277,7 +279,7 @@ function(create_libc_unittest fq_target_name) target_include_directories(${fq_build_target_name} SYSTEM PRIVATE ${LIBC_INCLUDE_DIR}) target_include_directories(${fq_build_target_name} PRIVATE ${LIBC_SOURCE_DIR}) target_compile_options(${fq_build_target_name} PRIVATE ${compile_options}) - target_link_options(${fq_build_target_name} PRIVATE ${compile_options}) + target_link_options(${fq_build_target_name} PRIVATE ${link_options}) if(NOT LIBC_UNITTEST_CXX_STANDARD) set(LIBC_UNITTEST_CXX_STANDARD ${CMAKE_CXX_STANDARD}) diff --git a/libc/hdr/types/CMakeLists.txt b/libc/hdr/types/CMakeLists.txt index dfc90009ef54a..84a2647ba664d 100644 --- a/libc/hdr/types/CMakeLists.txt +++ b/libc/hdr/types/CMakeLists.txt @@ -250,15 +250,6 @@ add_proxy_header_library( libc.include.locale ) -add_proxy_header_library( - sighandler_t - HDRS - sighandler_t.h - FULL_BUILD_DEPENDS - libc.include.llvm-libc-types.__sighandler_t - libc.include.signal -) - add_proxy_header_library( stack_t HDRS diff --git a/libc/hdr/types/sighandler_t.h b/libc/hdr/types/sighandler_t.h deleted file mode 100644 index bc40dd8b4c8f4..0000000000000 --- a/libc/hdr/types/sighandler_t.h +++ /dev/null @@ -1,24 +0,0 @@ -//===-- Definition of macros from __sighandler_t.h ------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 
-// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef LLVM_LIBC_HDR_TYPES_SIGHANDLER_T_H -#define LLVM_LIBC_HDR_TYPES_SIGHANDLER_T_H - -#ifdef LIBC_FULL_BUILD - -#include "include/llvm-libc-types/__sighandler_t.h" - -using sighandler_t = __sighandler_t; - -#else // overlay mode - -#include - -#endif // LLVM_LIBC_FULL_BUILD - -#endif // LLVM_LIBC_HDR_TYPES_SIGHANDLER_T_H diff --git a/libc/include/CMakeLists.txt b/libc/include/CMakeLists.txt index 63745542662d5..867bd1e5ee20f 100644 --- a/libc/include/CMakeLists.txt +++ b/libc/include/CMakeLists.txt @@ -284,13 +284,14 @@ add_header_macro( signal.h DEPENDS .llvm-libc-macros.signal_macros + .llvm-libc-types.pid_t .llvm-libc-types.sig_atomic_t + .llvm-libc-types.sighandler_t + .llvm-libc-types.siginfo_t .llvm-libc-types.sigset_t + .llvm-libc-types.stack_t .llvm-libc-types.struct_sigaction .llvm-libc-types.union_sigval - .llvm-libc-types.siginfo_t - .llvm-libc-types.stack_t - .llvm-libc-types.pid_t ) add_header_macro( diff --git a/libc/include/llvm-libc-macros/gpu/signal-macros.h b/libc/include/llvm-libc-macros/gpu/signal-macros.h index 2d8159240de8b..f0d49ea34fe0e 100644 --- a/libc/include/llvm-libc-macros/gpu/signal-macros.h +++ b/libc/include/llvm-libc-macros/gpu/signal-macros.h @@ -16,9 +16,9 @@ #define SIGSEGV 11 #define SIGTERM 15 -#define SIG_DFL ((__sighandler_t)(0)) -#define SIG_IGN ((__sighandler_t)(1)) -#define SIG_ERR ((__sighandler_t)(-1)) +#define SIG_DFL ((void (*)(int))(0)) +#define SIG_IGN ((void (*)(int))(1)) +#define SIG_ERR ((void (*)(int))(-1)) // Max signal number #define NSIG 64 diff --git a/libc/include/llvm-libc-macros/linux/signal-macros.h b/libc/include/llvm-libc-macros/linux/signal-macros.h index 0b7317ebc9b80..d220241a38206 100644 --- a/libc/include/llvm-libc-macros/linux/signal-macros.h +++ b/libc/include/llvm-libc-macros/linux/signal-macros.h @@ -86,9 +86,9 @@ #error "Signal stack sizes not defined for your platform." 
#endif -#define SIG_DFL ((__sighandler_t)0) -#define SIG_IGN ((__sighandler_t)1) -#define SIG_ERR ((__sighandler_t)-1) +#define SIG_DFL ((void (*)(int))0) +#define SIG_IGN ((void (*)(int))1) +#define SIG_ERR ((void (*)(int))(-1)) // SIGCHLD si_codes #define CLD_EXITED 1 // child has exited diff --git a/libc/include/llvm-libc-types/CMakeLists.txt b/libc/include/llvm-libc-types/CMakeLists.txt index 9e8d2f818d4ed..7ed69ab1af6d9 100644 --- a/libc/include/llvm-libc-types/CMakeLists.txt +++ b/libc/include/llvm-libc-types/CMakeLists.txt @@ -15,7 +15,6 @@ add_header(__pthread_start_t HDR __pthread_start_t.h) add_header(__pthread_tss_dtor_t HDR __pthread_tss_dtor_t.h) add_header(__qsortcompare_t HDR __qsortcompare_t.h) add_header(__qsortrcompare_t HDR __qsortrcompare_t.h) -add_header(__sighandler_t HDR __sighandler_t.h) add_header(__thread_type HDR __thread_type.h) add_header(blkcnt_t HDR blkcnt_t.h) add_header(blksize_t HDR blksize_t.h) @@ -66,6 +65,7 @@ if(LIBC_TYPES_TIME_T_IS_32_BIT) else() add_header(time_t HDR time_t_64.h DEST_HDR time_t.h) endif() +add_header(sighandler_t HDR sighandler_t.h) add_header(stack_t HDR stack_t.h DEPENDS .size_t) add_header(suseconds_t HDR suseconds_t.h) add_header(struct_dirent HDR struct_dirent.h DEPENDS .ino_t .off_t) diff --git a/libc/include/llvm-libc-types/__sighandler_t.h b/libc/include/llvm-libc-types/sighandler_t.h similarity index 52% rename from libc/include/llvm-libc-types/__sighandler_t.h rename to libc/include/llvm-libc-types/sighandler_t.h index 9c1ac997fc4ee..f39ab04685200 100644 --- a/libc/include/llvm-libc-types/__sighandler_t.h +++ b/libc/include/llvm-libc-types/sighandler_t.h @@ -1,4 +1,4 @@ -//===-- Definition of struct __sighandler_t -------------------------------===// +//===-- Definition of sighandler_t ----------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. @@ -6,9 +6,12 @@ // //===----------------------------------------------------------------------===// -#ifndef LLVM_LIBC_TYPES___SIGHANDLER_T_H -#define LLVM_LIBC_TYPES___SIGHANDLER_T_H +#ifndef LLVM_LIBC_TYPES_SIGHANDLER_T_H +#define LLVM_LIBC_TYPES_SIGHANDLER_T_H -typedef void (*__sighandler_t)(int); +#ifdef __linux__ +// For compatibility with glibc. +typedef void (*sighandler_t)(int); +#endif -#endif // LLVM_LIBC_TYPES___SIGHANDLER_T_H +#endif // LLVM_LIBC_TYPES_SIGHANDLER_T_H diff --git a/libc/include/llvm-libc-types/struct_sigaction.h b/libc/include/llvm-libc-types/struct_sigaction.h index b4d0c965a4c63..907418b5e0f9a 100644 --- a/libc/include/llvm-libc-types/struct_sigaction.h +++ b/libc/include/llvm-libc-types/struct_sigaction.h @@ -25,6 +25,4 @@ struct sigaction { #endif }; -typedef void (*__sighandler_t)(int); - #endif // LLVM_LIBC_TYPES_STRUCT_SIGACTION_H diff --git a/libc/include/signal.yaml b/libc/include/signal.yaml index 576e77576ac74..6fdd8c97ccbe2 100644 --- a/libc/include/signal.yaml +++ b/libc/include/signal.yaml @@ -3,12 +3,13 @@ header_template: signal.h.def macros: [] types: - type_name: pid_t - - type_name: stack_t + - type_name: sig_atomic_t + - type_name: sighandler_t - type_name: siginfo_t - - type_name: struct_sigaction - type_name: sigset_t + - type_name: stack_t + - type_name: struct_sigaction - type_name: union_sigval - - type_name: sig_atomic_t enums: [] objects: [] functions: @@ -69,10 +70,15 @@ functions: - name: signal standards: - stdc - return_type: __sighandler_t + # May the Geneva Convention have mercy on my soul... 
Why this insanity? + # Well: signal returns a function pointer to a function with no return + # value and which accepts an int. The parameter list appears on the far + # right of the declaration. i.e. + # void (*signal(int, void (*)(int)))(int); + return_type: void (* arguments: - type: int - - type: __sighandler_t + - type: void (*)(int)))(int - name: sigprocmask standards: - POSIX diff --git a/libc/src/math/generic/CMakeLists.txt b/libc/src/math/generic/CMakeLists.txt index 0e57051807b33..14e63d6cc1395 100644 --- a/libc/src/math/generic/CMakeLists.txt +++ b/libc/src/math/generic/CMakeLists.txt @@ -534,7 +534,7 @@ add_entrypoint_object( libc.src.__support.macros.optimization libc.src.__support.macros.properties.types COMPILE_OPTIONS - -O3 + ${libc_opt_high_flag} ) add_entrypoint_object( diff --git a/libc/src/signal/linux/CMakeLists.txt b/libc/src/signal/linux/CMakeLists.txt index f7457d31cf4f8..c0dd61e473881 100644 --- a/libc/src/signal/linux/CMakeLists.txt +++ b/libc/src/signal/linux/CMakeLists.txt @@ -127,7 +127,6 @@ add_entrypoint_object( DEPENDS .sigaction libc.hdr.signal_macros - libc.hdr.types.sighandler_t ) add_entrypoint_object( diff --git a/libc/src/signal/linux/signal.cpp b/libc/src/signal/linux/signal.cpp index 1da0ef8c97a20..7c8ea16c6cd2e 100644 --- a/libc/src/signal/linux/signal.cpp +++ b/libc/src/signal/linux/signal.cpp @@ -8,14 +8,17 @@ #include "src/signal/signal.h" #include "hdr/signal_macros.h" -#include "hdr/types/sighandler_t.h" #include "src/__support/common.h" #include "src/__support/macros/config.h" #include "src/signal/sigaction.h" namespace LIBC_NAMESPACE_DECL { -LLVM_LIBC_FUNCTION(sighandler_t, signal, (int signum, sighandler_t handler)) { +// Our LLVM_LIBC_FUNCTION macro doesn't handle function pointer return types. +using signal_handler = void (*)(int); + +LLVM_LIBC_FUNCTION(signal_handler, signal, + (int signum, signal_handler handler)) { struct sigaction action, old; action.sa_handler = handler; action.sa_flags = SA_RESTART; diff --git a/libc/src/signal/signal.h b/libc/src/signal/signal.h index 06e77e11bf0bd..e1f31a8e126c5 100644 --- a/libc/src/signal/signal.h +++ b/libc/src/signal/signal.h @@ -9,12 +9,11 @@ #ifndef LLVM_LIBC_SRC_SIGNAL_SIGNAL_H #define LLVM_LIBC_SRC_SIGNAL_SIGNAL_H -#include "hdr/types/sighandler_t.h" #include "src/__support/macros/config.h" namespace LIBC_NAMESPACE_DECL { -sighandler_t signal(int signum, sighandler_t handler); +void (*signal(int signum, void (*handler)(int)))(int); } // namespace LIBC_NAMESPACE_DECL diff --git a/libc/test/UnitTest/FPExceptMatcher.cpp b/libc/test/UnitTest/FPExceptMatcher.cpp index 119a06985b8f1..d66066023984e 100644 --- a/libc/test/UnitTest/FPExceptMatcher.cpp +++ b/libc/test/UnitTest/FPExceptMatcher.cpp @@ -37,7 +37,7 @@ static void sigfpeHandler(int sig) { } FPExceptMatcher::FPExceptMatcher(FunctionCaller *func) { - sighandler_t oldSIGFPEHandler = signal(SIGFPE, &sigfpeHandler); + auto *oldSIGFPEHandler = signal(SIGFPE, &sigfpeHandler); caughtExcept = false; fenv_t oldEnv; diff --git a/libc/test/src/__support/CMakeLists.txt b/libc/test/src/__support/CMakeLists.txt index aeb8edf305d05..8d175e857fcd1 100644 --- a/libc/test/src/__support/CMakeLists.txt +++ b/libc/test/src/__support/CMakeLists.txt @@ -234,7 +234,7 @@ add_libc_test( libc.src.stdlib.srand libc.src.string.memset COMPILE_OPTIONS - -O3 + ${libc_opt_high_flag} UNIT_TEST_ONLY # Aligned Allocation is not supported in hermetic builds. 
) diff --git a/libc/test/src/math/CMakeLists.txt b/libc/test/src/math/CMakeLists.txt index bbcdf2363c1e2..f000ff6f3cf47 100644 --- a/libc/test/src/math/CMakeLists.txt +++ b/libc/test/src/math/CMakeLists.txt @@ -1597,7 +1597,7 @@ add_fp_unittest( libc.src.math.sqrtf libc.src.__support.FPUtil.generic.sqrt COMPILE_OPTIONS - -O3 + ${libc_opt_high_flag} ) add_fp_unittest( @@ -1613,7 +1613,7 @@ add_fp_unittest( libc.src.math.sqrt libc.src.__support.FPUtil.generic.sqrt COMPILE_OPTIONS - -O3 + ${libc_opt_high_flag} ) add_fp_unittest( @@ -1629,7 +1629,7 @@ add_fp_unittest( libc.src.math.sqrtl libc.src.__support.FPUtil.generic.sqrt COMPILE_OPTIONS - -O3 + ${libc_opt_high_flag} ) add_fp_unittest( diff --git a/libc/test/src/math/exhaustive/CMakeLists.txt b/libc/test/src/math/exhaustive/CMakeLists.txt index 423c3b7a8bfd1..b1927dbc19a3b 100644 --- a/libc/test/src/math/exhaustive/CMakeLists.txt +++ b/libc/test/src/math/exhaustive/CMakeLists.txt @@ -305,7 +305,7 @@ add_fp_unittest( SRCS hypotf_test.cpp COMPILE_OPTIONS - -O3 + ${libc_opt_high_flag} DEPENDS .exhaustive_test libc.src.math.hypotf diff --git a/libc/test/src/math/smoke/CMakeLists.txt b/libc/test/src/math/smoke/CMakeLists.txt index e0cb531b40421..f3ecba3737e38 100644 --- a/libc/test/src/math/smoke/CMakeLists.txt +++ b/libc/test/src/math/smoke/CMakeLists.txt @@ -2993,7 +2993,7 @@ add_fp_unittest( DEPENDS libc.src.__support.FPUtil.generic.sqrt COMPILE_OPTIONS - -O3 + ${libc_opt_high_flag} ) add_fp_unittest( @@ -3007,7 +3007,7 @@ add_fp_unittest( DEPENDS libc.src.__support.FPUtil.generic.sqrt COMPILE_OPTIONS - -O3 + ${libc_opt_high_flag} ) add_fp_unittest( @@ -3021,7 +3021,7 @@ add_fp_unittest( DEPENDS libc.src.__support.FPUtil.generic.sqrt COMPILE_OPTIONS - -O3 + ${libc_opt_high_flag} ) add_fp_unittest( @@ -3038,7 +3038,7 @@ add_fp_unittest( libc.src.math.sqrtf128 libc.src.__support.FPUtil.generic.sqrt COMPILE_OPTIONS - -O3 + ${libc_opt_high_flag} ) add_fp_unittest( diff --git a/libc/test/src/signal/CMakeLists.txt b/libc/test/src/signal/CMakeLists.txt index a27f5b8f1000e..f86ce2ae96857 100644 --- a/libc/test/src/signal/CMakeLists.txt +++ b/libc/test/src/signal/CMakeLists.txt @@ -74,7 +74,6 @@ add_libc_unittest( SRCS signal_test.cpp DEPENDS - libc.hdr.types.sighandler_t libc.src.errno.errno libc.src.signal.raise libc.src.signal.signal diff --git a/libc/test/src/signal/signal_test.cpp b/libc/test/src/signal/signal_test.cpp index 4b57311eee2d8..bac9c3b8b68bb 100644 --- a/libc/test/src/signal/signal_test.cpp +++ b/libc/test/src/signal/signal_test.cpp @@ -13,14 +13,12 @@ #include "test/UnitTest/ErrnoSetterMatcher.h" #include "test/UnitTest/Test.h" -#include "hdr/types/sighandler_t.h" - using LIBC_NAMESPACE::testing::ErrnoSetterMatcher::Fails; using LIBC_NAMESPACE::testing::ErrnoSetterMatcher::Succeeds; TEST(LlvmLibcSignal, Invalid) { LIBC_NAMESPACE::libc_errno = 0; - sighandler_t valid = +[](int) {}; + auto *valid = +[](int) {}; EXPECT_THAT((void *)LIBC_NAMESPACE::signal(0, valid), Fails(EINVAL, (void *)SIG_ERR)); EXPECT_THAT((void *)LIBC_NAMESPACE::signal(65, valid), diff --git a/libc/test/src/stdfix/CMakeLists.txt b/libc/test/src/stdfix/CMakeLists.txt index 90d20438edb4b..e4d4fc5b52558 100644 --- a/libc/test/src/stdfix/CMakeLists.txt +++ b/libc/test/src/stdfix/CMakeLists.txt @@ -14,7 +14,7 @@ foreach(suffix IN ITEMS hr r lr hk k lk) SRCS abs${suffix}_test.cpp COMPILE_OPTIONS - -O3 + ${libc_opt_high_flag} DEPENDS libc.src.stdfix.abs${suffix} libc.src.__support.fixed_point.fx_bits @@ -31,7 +31,7 @@ foreach(suffix IN ITEMS uhr ur ulr uhk uk) SRCS 
sqrt${suffix}_test.cpp COMPILE_OPTIONS - -O3 + ${libc_opt_high_flag} DEPENDS libc.src.stdfix.sqrt${suffix} libc.src.__support.CPP.bit @@ -52,7 +52,7 @@ foreach(suffix IN ITEMS hr r lr hk k lk uhr ur ulr uhk uk ulk) SRCS round${suffix}_test.cpp COMPILE_OPTIONS - -O3 + ${libc_opt_high_flag} DEPENDS libc.src.stdfix.round${suffix} libc.src.__support.fixed_point.fx_bits @@ -67,7 +67,7 @@ foreach(suffix IN ITEMS hr r lr hk k lk uhr ur ulr uhk uk ulk) SRCS ${suffix}bits_test.cpp COMPILE_OPTIONS - -O3 + ${libc_opt_high_flag} DEPENDS libc.src.stdfix.${suffix}bits libc.src.__support.CPP.bit @@ -84,7 +84,7 @@ add_libc_test( SRCS uhksqrtus_test.cpp COMPILE_OPTIONS - -O3 + ${libc_opt_high_flag} DEPENDS libc.src.stdfix.uhksqrtus libc.src.__support.CPP.bit @@ -103,7 +103,7 @@ add_libc_test( SRCS uksqrtui_test.cpp COMPILE_OPTIONS - -O3 + ${libc_opt_high_flag} DEPENDS libc.src.stdfix.uksqrtui libc.src.__support.CPP.bit @@ -122,7 +122,7 @@ add_libc_test( SRCS exphk_test.cpp COMPILE_OPTIONS - -O3 + ${libc_opt_high_flag} DEPENDS libc.src.stdfix.exphk libc.src.math.exp @@ -140,7 +140,7 @@ add_libc_test( SRCS expk_test.cpp COMPILE_OPTIONS - -O3 + ${libc_opt_high_flag} DEPENDS libc.src.stdfix.expk libc.src.math.exp diff --git a/libc/utils/MPFRWrapper/CMakeLists.txt b/libc/utils/MPFRWrapper/CMakeLists.txt index f7df9146c8d48..9ff7fa109ff97 100644 --- a/libc/utils/MPFRWrapper/CMakeLists.txt +++ b/libc/utils/MPFRWrapper/CMakeLists.txt @@ -34,7 +34,7 @@ if(LIBC_TESTS_CAN_USE_MPFR) _get_common_test_compile_options(compile_options "" "") # mpfr/gmp headers do not work with -ffreestanding flag. list(REMOVE_ITEM compile_options "-ffreestanding") - target_compile_options(libcMPFRWrapper PRIVATE -O3 ${compile_options}) + target_compile_options(libcMPFRWrapper PRIVATE ${libc_opt_high_flag} ${compile_options}) add_dependencies( libcMPFRWrapper libcMPCommon diff --git a/libclc/CMakeLists.txt b/libclc/CMakeLists.txt index ff52153354e0a..8e3f5097ba84a 100644 --- a/libclc/CMakeLists.txt +++ b/libclc/CMakeLists.txt @@ -114,7 +114,7 @@ endforeach() if( TARGET llvm-spirv ) get_host_tool_path( llvm-spirv LLVM_SPIRV llvm-spirv_exe llvm-spirv_target ) else() - find_program( LLVM_SPIRV llvm-spirv PATHS ${LLVM_TOOLS_BINARY_DIR} NO_DEFAULT_PATH ) + find_program( LLVM_SPIRV llvm-spirv HINTS ${LLVM_TOOLS_BINARY_DIR} ) set( llvm-spirv_exe "${LLVM_SPIRV}" ) set( llvm-spirv_target ) endif() diff --git a/libcxx/include/__type_traits/conjunction.h b/libcxx/include/__type_traits/conjunction.h index 6b6717a50a468..ad9656acd47ec 100644 --- a/libcxx/include/__type_traits/conjunction.h +++ b/libcxx/include/__type_traits/conjunction.h @@ -10,6 +10,8 @@ #define _LIBCPP___TYPE_TRAITS_CONJUNCTION_H #include <__config> +#include <__type_traits/conditional.h> +#include <__type_traits/enable_if.h> #include <__type_traits/integral_constant.h> #include <__type_traits/is_same.h> @@ -19,29 +21,22 @@ _LIBCPP_BEGIN_NAMESPACE_STD -template -struct _AndImpl; +template +using __expand_to_true _LIBCPP_NODEBUG = true_type; -template <> -struct _AndImpl { - template - using _Result _LIBCPP_NODEBUG = - typename _AndImpl::template _Result<_First, _Rest...>; -}; +template +__expand_to_true<__enable_if_t<_Pred::value>...> __and_helper(int); -template <> -struct _AndImpl { - template - using _Result _LIBCPP_NODEBUG = _Res; -}; +template +false_type __and_helper(...); // _And always performs lazy evaluation of its arguments. 
// // However, `_And<_Pred...>` itself will evaluate its result immediately (without having to // be instantiated) since it is an alias, unlike `conjunction<_Pred...>`, which is a struct. // If you want to defer the evaluation of `_And<_Pred...>` itself, use `_Lazy<_And, _Pred...>`. -template -using _And _LIBCPP_NODEBUG = typename _AndImpl::template _Result; +template +using _And _LIBCPP_NODEBUG = decltype(std::__and_helper<_Pred...>(0)); template struct __all_dummy; @@ -51,11 +46,22 @@ struct __all : _IsSame<__all_dummy<_Pred...>, __all_dummy<((void)_Pred, true)... #if _LIBCPP_STD_VER >= 17 -template -struct _LIBCPP_NO_SPECIALIZATIONS conjunction : _And<_Args...> {}; +template +struct _LIBCPP_NO_SPECIALIZATIONS conjunction : true_type {}; + +_LIBCPP_DIAGNOSTIC_PUSH +# if __has_warning("-Winvalid-specialization") +_LIBCPP_CLANG_DIAGNOSTIC_IGNORED("-Winvalid-specialization") +# endif +template +struct conjunction<_Arg> : _Arg {}; + +template +struct conjunction<_Arg, _Args...> : conditional_t> {}; +_LIBCPP_DIAGNOSTIC_POP template -_LIBCPP_NO_SPECIALIZATIONS inline constexpr bool conjunction_v = _And<_Args...>::value; +_LIBCPP_NO_SPECIALIZATIONS inline constexpr bool conjunction_v = conjunction<_Args...>::value; #endif // _LIBCPP_STD_VER >= 17 diff --git a/libcxx/test/benchmarks/containers/container_benchmarks.h b/libcxx/test/benchmarks/containers/container_benchmarks.h deleted file mode 100644 index e24bd767177e8..0000000000000 --- a/libcxx/test/benchmarks/containers/container_benchmarks.h +++ /dev/null @@ -1,609 +0,0 @@ -// -*- C++ -*- -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef TEST_BENCHMARKS_CONTAINERS_CONTAINER_BENCHMARKS_H -#define TEST_BENCHMARKS_CONTAINERS_CONTAINER_BENCHMARKS_H - -#include -#include -#include -#include // for std::from_range -#include -#include -#include - -#include "benchmark/benchmark.h" -#include "test_iterators.h" -#include "test_macros.h" -#include "../GenerateInput.h" - -namespace ContainerBenchmarks { - -template -void DoNotOptimizeData(Container& c) { - if constexpr (requires { c.data(); }) { - benchmark::DoNotOptimize(c.data()); - } else { - benchmark::DoNotOptimize(&c); - } -} - -// -// Sequence container operations -// -template -void BM_ctor_size(benchmark::State& st) { - auto size = st.range(0); - - for (auto _ : st) { - Container c(size); // we assume the destructor doesn't dominate the benchmark - DoNotOptimizeData(c); - } -} - -template -void BM_ctor_size_value(benchmark::State& st, Generator gen) { - using ValueType = typename Container::value_type; - const auto size = st.range(0); - ValueType value = gen(); - benchmark::DoNotOptimize(value); - - for (auto _ : st) { - Container c(size, value); // we assume the destructor doesn't dominate the benchmark - DoNotOptimizeData(c); - } -} - -template -void BM_ctor_iter_iter(benchmark::State& st, Generator gen) { - using ValueType = typename Container::value_type; - const auto size = st.range(0); - std::vector in; - std::generate_n(std::back_inserter(in), size, gen); - const auto begin = in.begin(); - const auto end = in.end(); - benchmark::DoNotOptimize(in); - - for (auto _ : st) { - Container c(begin, end); // we assume the destructor doesn't dominate the benchmark - DoNotOptimizeData(c); - } -} - -#if TEST_STD_VER >= 23 -template -void BM_ctor_from_range(benchmark::State& st, Generator gen) { - using ValueType = typename Container::value_type; - const auto size = st.range(0); - std::vector in; - std::generate_n(std::back_inserter(in), size, gen); - benchmark::DoNotOptimize(in); - - for (auto _ : st) { - Container c(std::from_range, in); // we assume the destructor doesn't dominate the benchmark - DoNotOptimizeData(c); - } -} -#endif - -template -void BM_ctor_copy(benchmark::State& st, Generator gen) { - auto size = st.range(0); - Container in; - std::generate_n(std::back_inserter(in), size, gen); - DoNotOptimizeData(in); - - for (auto _ : st) { - Container c(in); // we assume the destructor doesn't dominate the benchmark - DoNotOptimizeData(c); - DoNotOptimizeData(in); - } -} - -template -void BM_assignment(benchmark::State& st, Generator gen) { - auto size = st.range(0); - Container in1, in2; - std::generate_n(std::back_inserter(in1), size, gen); - std::generate_n(std::back_inserter(in2), size, gen); - DoNotOptimizeData(in1); - DoNotOptimizeData(in2); - - // Assign from one of two containers in succession to avoid - // hitting a self-assignment corner-case - Container c(in1); - bool toggle = false; - for (auto _ : st) { - c = toggle ? in1 : in2; - toggle = !toggle; - DoNotOptimizeData(c); - DoNotOptimizeData(in1); - DoNotOptimizeData(in2); - } -} - -// Benchmark Container::assign(input-iter, input-iter) when the container already contains -// the same number of elements that we're assigning. The intent is to check whether the -// implementation basically creates a new container from scratch or manages to reuse the -// pre-existing storage. 
-template -void BM_assign_input_iter_full(benchmark::State& st, Generator gen) { - using ValueType = typename Container::value_type; - auto size = st.range(0); - std::vector in1, in2; - std::generate_n(std::back_inserter(in1), size, gen); - std::generate_n(std::back_inserter(in2), size, gen); - DoNotOptimizeData(in1); - DoNotOptimizeData(in2); - - Container c(in1.begin(), in1.end()); - bool toggle = false; - for (auto _ : st) { - std::vector& in = toggle ? in1 : in2; - auto first = in.data(); - auto last = in.data() + in.size(); - c.assign(cpp17_input_iterator(first), cpp17_input_iterator(last)); - toggle = !toggle; - DoNotOptimizeData(c); - } -} - -template -void BM_insert_begin(benchmark::State& st, Generator gen) { - using ValueType = typename Container::value_type; - const int size = st.range(0); - std::vector in; - std::generate_n(std::back_inserter(in), size, gen); - DoNotOptimizeData(in); - - Container c(in.begin(), in.end()); - DoNotOptimizeData(c); - - ValueType value = gen(); - benchmark::DoNotOptimize(value); - - for (auto _ : st) { - c.insert(c.begin(), value); - DoNotOptimizeData(c); - - c.erase(std::prev(c.end())); // avoid growing indefinitely - } -} - -template - requires std::random_access_iterator -void BM_insert_middle(benchmark::State& st, Generator gen) { - using ValueType = typename Container::value_type; - const int size = st.range(0); - std::vector in; - std::generate_n(std::back_inserter(in), size, gen); - DoNotOptimizeData(in); - - Container c(in.begin(), in.end()); - DoNotOptimizeData(c); - - ValueType value = gen(); - benchmark::DoNotOptimize(value); - - for (auto _ : st) { - auto mid = c.begin() + (size / 2); // requires random-access iterators in order to make sense - c.insert(mid, value); - DoNotOptimizeData(c); - - c.erase(c.end() - 1); // avoid growing indefinitely - } -} - -// Insert at the start of a vector in a scenario where the vector already -// has enough capacity to hold all the elements we are inserting. -template -void BM_insert_begin_input_iter_with_reserve_no_realloc(benchmark::State& st, Generator gen) { - using ValueType = typename Container::value_type; - const int size = st.range(0); - std::vector in; - std::generate_n(std::back_inserter(in), size, gen); - DoNotOptimizeData(in); - auto first = in.data(); - auto last = in.data() + in.size(); - - const int small = 100; // arbitrary - Container c; - c.reserve(size + small); // ensure no reallocation - std::generate_n(std::back_inserter(c), small, gen); - - for (auto _ : st) { - c.insert(c.begin(), cpp17_input_iterator(first), cpp17_input_iterator(last)); - DoNotOptimizeData(c); - - st.PauseTiming(); - c.erase(c.begin() + small, c.end()); // avoid growing indefinitely - st.ResumeTiming(); - } -} - -// Insert at the start of a vector in a scenario where the vector already -// has almost enough capacity to hold all the elements we are inserting, -// but does need to reallocate. 
-template -void BM_insert_begin_input_iter_with_reserve_almost_no_realloc(benchmark::State& st, Generator gen) { - using ValueType = typename Container::value_type; - const int size = st.range(0); - std::vector in; - std::generate_n(std::back_inserter(in), size, gen); - DoNotOptimizeData(in); - auto first = in.data(); - auto last = in.data() + in.size(); - - const int overflow = size / 10; // 10% of elements won't fit in the vector when we insert - Container c; - for (auto _ : st) { - st.PauseTiming(); - c = Container(); - c.reserve(size); - std::generate_n(std::back_inserter(c), overflow, gen); - st.ResumeTiming(); - - c.insert(c.begin(), cpp17_input_iterator(first), cpp17_input_iterator(last)); - DoNotOptimizeData(c); - } -} - -// Insert at the start of a vector in a scenario where the vector can fit a few -// more elements, but needs to reallocate almost immediately to fit the remaining -// elements. -template -void BM_insert_begin_input_iter_with_reserve_near_full(benchmark::State& st, Generator gen) { - using ValueType = typename Container::value_type; - const int size = st.range(0); - std::vector in; - std::generate_n(std::back_inserter(in), size, gen); - DoNotOptimizeData(in); - auto first = in.data(); - auto last = in.data() + in.size(); - - const int overflow = 9 * (size / 10); // 90% of elements won't fit in the vector when we insert - Container c; - for (auto _ : st) { - st.PauseTiming(); - c = Container(); - c.reserve(size); - std::generate_n(std::back_inserter(c), overflow, gen); - st.ResumeTiming(); - - c.insert(c.begin(), cpp17_input_iterator(first), cpp17_input_iterator(last)); - DoNotOptimizeData(c); - } -} - -template -void BM_erase_begin(benchmark::State& st, Generator gen) { - using ValueType = typename Container::value_type; - const int size = st.range(0); - std::vector in; - std::generate_n(std::back_inserter(in), size, gen); - DoNotOptimizeData(in); - - Container c(in.begin(), in.end()); - DoNotOptimizeData(c); - - ValueType value = gen(); - benchmark::DoNotOptimize(value); - - for (auto _ : st) { - c.erase(c.begin()); - DoNotOptimizeData(c); - - c.insert(c.end(), value); // re-insert an element at the end to avoid needing a new container - } -} - -template - requires std::random_access_iterator -void BM_erase_middle(benchmark::State& st, Generator gen) { - using ValueType = typename Container::value_type; - const int size = st.range(0); - std::vector in; - std::generate_n(std::back_inserter(in), size, gen); - DoNotOptimizeData(in); - - Container c(in.begin(), in.end()); - DoNotOptimizeData(c); - - ValueType value = gen(); - benchmark::DoNotOptimize(value); - - for (auto _ : st) { - auto mid = c.begin() + (size / 2); - c.erase(mid); - DoNotOptimizeData(c); - - c.insert(c.end(), value); // re-insert an element at the end to avoid needing a new container - } -} - -template -void BM_push_back(benchmark::State& st, Generator gen) { - using ValueType = typename Container::value_type; - const int size = st.range(0); - std::vector in; - std::generate_n(std::back_inserter(in), size, gen); - DoNotOptimizeData(in); - - Container c; - DoNotOptimizeData(c); - while (st.KeepRunningBatch(size)) { - c.clear(); - for (int i = 0; i != size; ++i) { - c.push_back(in[i]); - } - DoNotOptimizeData(c); - } -} - -template -void BM_push_back_with_reserve(benchmark::State& st, Generator gen) { - using ValueType = typename Container::value_type; - const int size = st.range(0); - std::vector in; - std::generate_n(std::back_inserter(in), size, gen); - DoNotOptimizeData(in); - - Container c; - 
c.reserve(size); - DoNotOptimizeData(c); - while (st.KeepRunningBatch(size)) { - c.clear(); - for (int i = 0; i != size; ++i) { - c.push_back(in[i]); - } - DoNotOptimizeData(c); - } -} - -template -void sequence_container_benchmarks(std::string container) { - using ValueType = typename Container::value_type; - - using Generator = ValueType (*)(); - Generator cheap = [] { return Generate::cheap(); }; - Generator expensive = [] { return Generate::expensive(); }; - auto tostr = [&](Generator gen) { return gen == cheap ? " (cheap elements)" : " (expensive elements)"; }; - std::vector generators; - generators.push_back(cheap); - if constexpr (!std::is_integral_v) { - generators.push_back(expensive); - } - - // constructors - if constexpr (std::is_constructible_v) { - // not all containers provide this one - benchmark::RegisterBenchmark(container + "::ctor(size)", BM_ctor_size)->Arg(1024); - } - for (auto gen : generators) - benchmark::RegisterBenchmark(container + "::ctor(size, value_type)" + tostr(gen), [=](auto& st) { - BM_ctor_size_value(st, gen); - })->Arg(1024); - for (auto gen : generators) - benchmark::RegisterBenchmark(container + "::ctor(Iterator, Iterator)" + tostr(gen), [=](auto& st) { - BM_ctor_iter_iter(st, gen); - })->Arg(1024); -#if TEST_STD_VER >= 23 - for (auto gen : generators) - benchmark::RegisterBenchmark(container + "::ctor(Range)" + tostr(gen), [=](auto& st) { - BM_ctor_from_range(st, gen); - })->Arg(1024); -#endif - for (auto gen : generators) - benchmark::RegisterBenchmark(container + "::ctor(const&)" + tostr(gen), [=](auto& st) { - BM_ctor_copy(st, gen); - })->Arg(1024); - - // assignment - for (auto gen : generators) - benchmark::RegisterBenchmark(container + "::operator=(const&)" + tostr(gen), [=](auto& st) { - BM_assignment(st, gen); - })->Arg(1024); - for (auto gen : generators) - benchmark::RegisterBenchmark(container + "::assign(input-iter, input-iter) (full container)" + tostr(gen), - [=](auto& st) { BM_assign_input_iter_full(st, gen); }) - ->Arg(1024); - - // insert - for (auto gen : generators) - benchmark::RegisterBenchmark(container + "::insert(begin)" + tostr(gen), [=](auto& st) { - BM_insert_begin(st, gen); - })->Arg(1024); - if constexpr (std::random_access_iterator) { - for (auto gen : generators) - benchmark::RegisterBenchmark(container + "::insert(middle)" + tostr(gen), [=](auto& st) { - BM_insert_middle(st, gen); - })->Arg(1024); - } - if constexpr (requires(Container c) { c.reserve(0); }) { - for (auto gen : generators) - benchmark::RegisterBenchmark( - container + "::insert(begin, input-iter, input-iter) (no realloc)" + tostr(gen), - [=](auto& st) { BM_insert_begin_input_iter_with_reserve_no_realloc(st, gen); }) - ->Arg(1024); - for (auto gen : generators) - benchmark::RegisterBenchmark( - container + "::insert(begin, input-iter, input-iter) (half filled)" + tostr(gen), - [=](auto& st) { BM_insert_begin_input_iter_with_reserve_almost_no_realloc(st, gen); }) - ->Arg(1024); - for (auto gen : generators) - benchmark::RegisterBenchmark( - container + "::insert(begin, input-iter, input-iter) (near full)" + tostr(gen), - [=](auto& st) { BM_insert_begin_input_iter_with_reserve_near_full(st, gen); }) - ->Arg(1024); - } - - // erase - for (auto gen : generators) - benchmark::RegisterBenchmark(container + "::erase(begin)" + tostr(gen), [=](auto& st) { - BM_erase_begin(st, gen); - })->Arg(1024); - if constexpr (std::random_access_iterator) { - for (auto gen : generators) - benchmark::RegisterBenchmark(container + "::erase(middle)" + tostr(gen), [=](auto& st) { 
- BM_erase_middle(st, gen); - })->Arg(1024); - } - - // push_back (optional) - if constexpr (requires(Container c, ValueType v) { c.push_back(v); }) { - for (auto gen : generators) - benchmark::RegisterBenchmark(container + "::push_back()" + tostr(gen), [=](auto& st) { - BM_push_back(st, gen); - })->Arg(1024); - if constexpr (requires(Container c) { c.reserve(0); }) { - for (auto gen : generators) - benchmark::RegisterBenchmark(container + "::push_back() (with reserve)" + tostr(gen), [=](auto& st) { - BM_push_back_with_reserve(st, gen); - })->Arg(1024); - } - } -} - -// -// Misc operations -// -template -void BM_InsertValue(benchmark::State& st, Container c, GenInputs gen) { - auto in = gen(st.range(0)); - const auto end = in.end(); - while (st.KeepRunning()) { - c.clear(); - for (auto it = in.begin(); it != end; ++it) { - benchmark::DoNotOptimize(&(*c.insert(*it).first)); - } - benchmark::ClobberMemory(); - } -} - -template -void BM_InsertValueRehash(benchmark::State& st, Container c, GenInputs gen) { - auto in = gen(st.range(0)); - const auto end = in.end(); - while (st.KeepRunning()) { - c.clear(); - c.rehash(16); - for (auto it = in.begin(); it != end; ++it) { - benchmark::DoNotOptimize(&(*c.insert(*it).first)); - } - benchmark::ClobberMemory(); - } -} - -template -void BM_InsertDuplicate(benchmark::State& st, Container c, GenInputs gen) { - auto in = gen(st.range(0)); - const auto end = in.end(); - c.insert(in.begin(), in.end()); - benchmark::DoNotOptimize(c); - benchmark::DoNotOptimize(in); - while (st.KeepRunning()) { - for (auto it = in.begin(); it != end; ++it) { - benchmark::DoNotOptimize(&(*c.insert(*it).first)); - } - benchmark::ClobberMemory(); - } -} - -template -void BM_EmplaceDuplicate(benchmark::State& st, Container c, GenInputs gen) { - auto in = gen(st.range(0)); - const auto end = in.end(); - c.insert(in.begin(), in.end()); - benchmark::DoNotOptimize(c); - benchmark::DoNotOptimize(in); - while (st.KeepRunning()) { - for (auto it = in.begin(); it != end; ++it) { - benchmark::DoNotOptimize(&(*c.emplace(*it).first)); - } - benchmark::ClobberMemory(); - } -} - -template -void BM_Find(benchmark::State& st, Container c, GenInputs gen) { - auto in = gen(st.range(0)); - c.insert(in.begin(), in.end()); - benchmark::DoNotOptimize(&(*c.begin())); - const auto end = in.data() + in.size(); - while (st.KeepRunning()) { - for (auto it = in.data(); it != end; ++it) { - benchmark::DoNotOptimize(&(*c.find(*it))); - } - benchmark::ClobberMemory(); - } -} - -template -void BM_FindRehash(benchmark::State& st, Container c, GenInputs gen) { - c.rehash(8); - auto in = gen(st.range(0)); - c.insert(in.begin(), in.end()); - benchmark::DoNotOptimize(&(*c.begin())); - const auto end = in.data() + in.size(); - while (st.KeepRunning()) { - for (auto it = in.data(); it != end; ++it) { - benchmark::DoNotOptimize(&(*c.find(*it))); - } - benchmark::ClobberMemory(); - } -} - -template -void BM_Rehash(benchmark::State& st, Container c, GenInputs gen) { - auto in = gen(st.range(0)); - c.max_load_factor(3.0); - c.insert(in.begin(), in.end()); - benchmark::DoNotOptimize(c); - const auto bucket_count = c.bucket_count(); - while (st.KeepRunning()) { - c.rehash(bucket_count + 1); - c.rehash(bucket_count); - benchmark::ClobberMemory(); - } -} - -template -void BM_Compare_same_container(benchmark::State& st, Container, GenInputs gen) { - auto in = gen(st.range(0)); - Container c1(in.begin(), in.end()); - Container c2 = c1; - - benchmark::DoNotOptimize(&(*c1.begin())); - benchmark::DoNotOptimize(&(*c2.begin())); - 
while (st.KeepRunning()) { - bool res = c1 == c2; - benchmark::DoNotOptimize(&res); - benchmark::ClobberMemory(); - } -} - -template -void BM_Compare_different_containers(benchmark::State& st, Container, GenInputs gen) { - auto in1 = gen(st.range(0)); - auto in2 = gen(st.range(0)); - Container c1(in1.begin(), in1.end()); - Container c2(in2.begin(), in2.end()); - - benchmark::DoNotOptimize(&(*c1.begin())); - benchmark::DoNotOptimize(&(*c2.begin())); - while (st.KeepRunning()) { - bool res = c1 == c2; - benchmark::DoNotOptimize(&res); - benchmark::ClobberMemory(); - } -} - -} // namespace ContainerBenchmarks - -#endif // TEST_BENCHMARKS_CONTAINERS_CONTAINER_BENCHMARKS_H diff --git a/libcxx/test/benchmarks/containers/deque.bench.cpp b/libcxx/test/benchmarks/containers/sequence/deque.bench.cpp similarity index 73% rename from libcxx/test/benchmarks/containers/deque.bench.cpp rename to libcxx/test/benchmarks/containers/sequence/deque.bench.cpp index 6a650fa4dce2a..e37c9fef4ac23 100644 --- a/libcxx/test/benchmarks/containers/deque.bench.cpp +++ b/libcxx/test/benchmarks/containers/sequence/deque.bench.cpp @@ -11,12 +11,12 @@ #include #include -#include "container_benchmarks.h" +#include "sequence_container_benchmarks.h" #include "benchmark/benchmark.h" int main(int argc, char** argv) { - ContainerBenchmarks::sequence_container_benchmarks>("std::deque"); - ContainerBenchmarks::sequence_container_benchmarks>("std::deque"); + support::sequence_container_benchmarks>("std::deque"); + support::sequence_container_benchmarks>("std::deque"); benchmark::Initialize(&argc, argv); benchmark::RunSpecifiedBenchmarks(); diff --git a/libcxx/test/benchmarks/containers/list.bench.cpp b/libcxx/test/benchmarks/containers/sequence/list.bench.cpp similarity index 73% rename from libcxx/test/benchmarks/containers/list.bench.cpp rename to libcxx/test/benchmarks/containers/sequence/list.bench.cpp index 2212affa02ba4..e40aae6cf9fa5 100644 --- a/libcxx/test/benchmarks/containers/list.bench.cpp +++ b/libcxx/test/benchmarks/containers/sequence/list.bench.cpp @@ -11,12 +11,12 @@ #include #include -#include "container_benchmarks.h" +#include "sequence_container_benchmarks.h" #include "benchmark/benchmark.h" int main(int argc, char** argv) { - ContainerBenchmarks::sequence_container_benchmarks>("std::list"); - ContainerBenchmarks::sequence_container_benchmarks>("std::list"); + support::sequence_container_benchmarks>("std::list"); + support::sequence_container_benchmarks>("std::list"); benchmark::Initialize(&argc, argv); benchmark::RunSpecifiedBenchmarks(); diff --git a/libcxx/test/benchmarks/containers/sequence/sequence_container_benchmarks.h b/libcxx/test/benchmarks/containers/sequence/sequence_container_benchmarks.h new file mode 100644 index 0000000000000..dcd251d6997dd --- /dev/null +++ b/libcxx/test/benchmarks/containers/sequence/sequence_container_benchmarks.h @@ -0,0 +1,455 @@ +// -*- C++ -*- +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef TEST_BENCHMARKS_CONTAINERS_SEQUENCE_SEQUENCE_CONTAINER_BENCHMARKS_H +#define TEST_BENCHMARKS_CONTAINERS_SEQUENCE_SEQUENCE_CONTAINER_BENCHMARKS_H + +#include +#include +#include +#include +#include // for std::from_range +#include +#include +#include + +#include "benchmark/benchmark.h" +#include "test_iterators.h" +#include "../../GenerateInput.h" + +namespace support { + +template +void DoNotOptimizeData(Container& c) { + if constexpr (requires { c.data(); }) { + benchmark::DoNotOptimize(c.data()); + } else { + benchmark::DoNotOptimize(&c); + } +} + +template +void sequence_container_benchmarks(std::string container) { + using ValueType = typename Container::value_type; + + using Generator = ValueType (*)(); + Generator cheap = [] { return Generate::cheap(); }; + Generator expensive = [] { return Generate::expensive(); }; + auto tostr = [&](Generator gen) -> std::string { + return gen == cheap ? " (cheap elements)" : " (expensive elements)"; + }; + std::vector generators; + generators.push_back(cheap); + if constexpr (!std::is_integral_v) { + generators.push_back(expensive); + } + + // Some of these benchmarks are structured to perform the operation being benchmarked + // a small number of times at each iteration, in order to offset the cost of + // PauseTiming() and ResumeTiming(). + static constexpr std::size_t BatchSize = 32; + + auto bench = [&](std::string operation, auto f) { + benchmark::RegisterBenchmark(container + "::" + operation, f)->Arg(32)->Arg(1024)->Arg(8192); + }; + + ///////////////////////// + // Constructors + ///////////////////////// + if constexpr (std::is_constructible_v) { + // not all containers provide this constructor + bench("ctor(size)", [](auto& st) { + auto const size = st.range(0); + + for ([[maybe_unused]] auto _ : st) { + Container c(size); // we assume the destructor doesn't dominate the benchmark + DoNotOptimizeData(c); + } + }); + } + + for (auto gen : generators) + bench("ctor(size, value_type)" + tostr(gen), [gen](auto& st) { + auto const size = st.range(0); + ValueType value = gen(); + benchmark::DoNotOptimize(value); + + for ([[maybe_unused]] auto _ : st) { + Container c(size, value); // we assume the destructor doesn't dominate the benchmark + DoNotOptimizeData(c); + } + }); + + for (auto gen : generators) + bench("ctor(Iterator, Iterator)" + tostr(gen), [gen](auto& st) { + auto const size = st.range(0); + std::vector in; + std::generate_n(std::back_inserter(in), size, gen); + const auto begin = in.begin(); + const auto end = in.end(); + benchmark::DoNotOptimize(in); + + for ([[maybe_unused]] auto _ : st) { + Container c(begin, end); // we assume the destructor doesn't dominate the benchmark + DoNotOptimizeData(c); + } + }); + +#if defined(__cpp_lib_containers_ranges) && __cpp_lib_containers_ranges >= 202202L + for (auto gen : generators) + bench("ctor(Range)" + tostr(gen), [gen](auto& st) { + auto const size = st.range(0); + std::vector in; + std::generate_n(std::back_inserter(in), size, gen); + benchmark::DoNotOptimize(in); + + for ([[maybe_unused]] auto _ : st) { + Container c(std::from_range, in); // we assume the destructor doesn't dominate the benchmark + DoNotOptimizeData(c); + } + }); +#endif + + for (auto gen : generators) + bench("ctor(const&)" + tostr(gen), [gen](auto& st) { + auto const size = st.range(0); + Container in; + std::generate_n(std::back_inserter(in), size, gen); + 
DoNotOptimizeData(in); + + for ([[maybe_unused]] auto _ : st) { + Container c(in); // we assume the destructor doesn't dominate the benchmark + DoNotOptimizeData(c); + DoNotOptimizeData(in); + } + }); + + ///////////////////////// + // Assignment + ///////////////////////// + for (auto gen : generators) + bench("operator=(const&)" + tostr(gen), [gen](auto& st) { + auto const size = st.range(0); + Container in1, in2; + std::generate_n(std::back_inserter(in1), size, gen); + std::generate_n(std::back_inserter(in2), size, gen); + DoNotOptimizeData(in1); + DoNotOptimizeData(in2); + + // Assign from one of two containers in succession to avoid + // hitting a self-assignment corner-case + Container c(in1); + bool toggle = false; + for ([[maybe_unused]] auto _ : st) { + c = toggle ? in1 : in2; + toggle = !toggle; + DoNotOptimizeData(c); + DoNotOptimizeData(in1); + DoNotOptimizeData(in2); + } + }); + + // Benchmark Container::assign(input-iter, input-iter) when the container already contains + // the same number of elements that we're assigning. The intent is to check whether the + // implementation basically creates a new container from scratch or manages to reuse the + // pre-existing storage. + for (auto gen : generators) + bench("assign(input-iter, input-iter) (full container)" + tostr(gen), [gen](auto& st) { + auto const size = st.range(0); + std::vector in1, in2; + std::generate_n(std::back_inserter(in1), size, gen); + std::generate_n(std::back_inserter(in2), size, gen); + DoNotOptimizeData(in1); + DoNotOptimizeData(in2); + + Container c(in1.begin(), in1.end()); + bool toggle = false; + for ([[maybe_unused]] auto _ : st) { + std::vector& in = toggle ? in1 : in2; + auto first = in.data(); + auto last = in.data() + in.size(); + c.assign(cpp17_input_iterator(first), cpp17_input_iterator(last)); + toggle = !toggle; + DoNotOptimizeData(c); + } + }); + + ///////////////////////// + // Insertion + ///////////////////////// + for (auto gen : generators) + bench("insert(begin)" + tostr(gen), [gen](auto& st) { + auto const size = st.range(0); + std::vector in; + std::generate_n(std::back_inserter(in), size, gen); + DoNotOptimizeData(in); + + Container c(in.begin(), in.end()); + DoNotOptimizeData(c); + + ValueType value = gen(); + benchmark::DoNotOptimize(value); + + for ([[maybe_unused]] auto _ : st) { + c.insert(c.begin(), value); + DoNotOptimizeData(c); + + c.erase(std::prev(c.end())); // avoid growing indefinitely + } + }); + + if constexpr (std::random_access_iterator) { + for (auto gen : generators) + bench("insert(middle)" + tostr(gen), [gen](auto& st) { + auto const size = st.range(0); + std::vector in; + std::generate_n(std::back_inserter(in), size, gen); + DoNotOptimizeData(in); + + Container c(in.begin(), in.end()); + DoNotOptimizeData(c); + + ValueType value = gen(); + benchmark::DoNotOptimize(value); + + for ([[maybe_unused]] auto _ : st) { + auto mid = c.begin() + (size / 2); // requires random-access iterators in order to make sense + c.insert(mid, value); + DoNotOptimizeData(c); + + c.erase(c.end() - 1); // avoid growing indefinitely + } + }); + } + + if constexpr (requires(Container c) { c.reserve(0); }) { + // Insert at the start of a vector in a scenario where the vector already + // has enough capacity to hold all the elements we are inserting. 
+ for (auto gen : generators) + bench("insert(begin, input-iter, input-iter) (no realloc)" + tostr(gen), [gen](auto& st) { + auto const size = st.range(0); + std::vector in; + std::generate_n(std::back_inserter(in), size, gen); + DoNotOptimizeData(in); + auto first = in.data(); + auto last = in.data() + in.size(); + + const int small = 100; // arbitrary + Container c; + c.reserve(size + small); // ensure no reallocation + std::generate_n(std::back_inserter(c), small, gen); + + for ([[maybe_unused]] auto _ : st) { + c.insert(c.begin(), cpp17_input_iterator(first), cpp17_input_iterator(last)); + DoNotOptimizeData(c); + + st.PauseTiming(); + c.erase(c.begin() + small, c.end()); // avoid growing indefinitely + st.ResumeTiming(); + } + }); + + // Insert at the start of a vector in a scenario where the vector already + // has almost enough capacity to hold all the elements we are inserting, + // but does need to reallocate. + for (auto gen : generators) + bench("insert(begin, input-iter, input-iter) (half filled)" + tostr(gen), [gen](auto& st) { + auto const size = st.range(0); + std::vector in; + std::generate_n(std::back_inserter(in), size, gen); + DoNotOptimizeData(in); + auto first = in.data(); + auto last = in.data() + in.size(); + + const int overflow = size / 10; // 10% of elements won't fit in the vector when we insert + Container c; + for ([[maybe_unused]] auto _ : st) { + st.PauseTiming(); + c = Container(); + c.reserve(size); + std::generate_n(std::back_inserter(c), overflow, gen); + st.ResumeTiming(); + + c.insert(c.begin(), cpp17_input_iterator(first), cpp17_input_iterator(last)); + DoNotOptimizeData(c); + } + }); + + // Insert at the start of a vector in a scenario where the vector can fit a few + // more elements, but needs to reallocate almost immediately to fit the remaining + // elements. 
+ for (auto gen : generators) + bench("insert(begin, input-iter, input-iter) (near full)" + tostr(gen), [gen](auto& st) { + auto const size = st.range(0); + std::vector in; + std::generate_n(std::back_inserter(in), size, gen); + DoNotOptimizeData(in); + auto first = in.data(); + auto last = in.data() + in.size(); + + auto const overflow = 9 * (size / 10); // 90% of elements won't fit in the vector when we insert + Container c; + for ([[maybe_unused]] auto _ : st) { + st.PauseTiming(); + c = Container(); + c.reserve(size); + std::generate_n(std::back_inserter(c), overflow, gen); + st.ResumeTiming(); + + c.insert(c.begin(), cpp17_input_iterator(first), cpp17_input_iterator(last)); + DoNotOptimizeData(c); + } + }); + } + + ///////////////////////// + // Variations of push_back + ///////////////////////// + static constexpr bool has_push_back = requires(Container c, ValueType v) { c.push_back(v); }; + static constexpr bool has_capacity = requires(Container c) { c.capacity(); }; + static constexpr bool has_reserve = requires(Container c) { c.reserve(0); }; + if constexpr (has_push_back) { + if constexpr (has_capacity) { + // For containers where we can observe capacity(), push_back a single element + // without reserving to ensure the container needs to grow + for (auto gen : generators) + bench("push_back() (growing)" + tostr(gen), [gen](auto& st) { + auto const size = st.range(0); + std::vector in; + std::generate_n(std::back_inserter(in), size, gen); + DoNotOptimizeData(in); + + auto at_capacity = [](Container c) { + while (c.size() < c.capacity()) + c.push_back(c.back()); + return c; + }; + + std::vector c(BatchSize, at_capacity(Container(in.begin(), in.end()))); + std::vector const original = c; + + while (st.KeepRunningBatch(BatchSize)) { + for (std::size_t i = 0; i != BatchSize; ++i) { + c[i].push_back(in[i]); + DoNotOptimizeData(c[i]); + } + + st.PauseTiming(); + for (std::size_t i = 0; i != BatchSize; ++i) { + c[i] = at_capacity(Container(in.begin(), in.end())); + assert(c[i].size() == c[i].capacity()); + } + st.ResumeTiming(); + } + }); + } + + // For containers where we can reserve, push_back a single element after reserving to + // ensure the container doesn't grow + if constexpr (has_reserve) { + for (auto gen : generators) + bench("push_back() (with reserve)" + tostr(gen), [gen](auto& st) { + auto const size = st.range(0); + std::vector in; + std::generate_n(std::back_inserter(in), size, gen); + DoNotOptimizeData(in); + + Container c(in.begin(), in.end()); + // Ensure the container has enough capacity + c.reserve(c.size() + BatchSize); + DoNotOptimizeData(c); + + while (st.KeepRunningBatch(BatchSize)) { + for (std::size_t i = 0; i != BatchSize; ++i) { + c.push_back(in[i]); + } + DoNotOptimizeData(c); + + st.PauseTiming(); + c.erase(c.end() - BatchSize, c.end()); + st.ResumeTiming(); + } + }); + } + + // push_back many elements: this is amortized constant for std::vector but not all containers + for (auto gen : generators) + bench("push_back() (many elements)" + tostr(gen), [gen](auto& st) { + auto const size = st.range(0); + std::vector in; + std::generate_n(std::back_inserter(in), size, gen); + DoNotOptimizeData(in); + + Container c; + DoNotOptimizeData(c); + while (st.KeepRunningBatch(size)) { + for (int i = 0; i != size; ++i) { + c.push_back(in[i]); + } + DoNotOptimizeData(c); + + st.PauseTiming(); + c.clear(); + st.ResumeTiming(); + } + }); + } + + ///////////////////////// + // Erasure + ///////////////////////// + for (auto gen : generators) + bench("erase(begin)" + 
tostr(gen), [gen](auto& st) { + auto const size = st.range(0); + std::vector in; + std::generate_n(std::back_inserter(in), size, gen); + DoNotOptimizeData(in); + + Container c(in.begin(), in.end()); + DoNotOptimizeData(c); + + ValueType value = gen(); + benchmark::DoNotOptimize(value); + + for ([[maybe_unused]] auto _ : st) { + c.erase(c.begin()); + DoNotOptimizeData(c); + + c.insert(c.end(), value); // re-insert an element at the end to avoid needing a new container + } + }); + + if constexpr (std::random_access_iterator) { + for (auto gen : generators) + bench("erase(middle)" + tostr(gen), [gen](auto& st) { + auto const size = st.range(0); + std::vector in; + std::generate_n(std::back_inserter(in), size, gen); + DoNotOptimizeData(in); + + Container c(in.begin(), in.end()); + DoNotOptimizeData(c); + + ValueType value = gen(); + benchmark::DoNotOptimize(value); + + for ([[maybe_unused]] auto _ : st) { + auto mid = c.begin() + (size / 2); + c.erase(mid); + DoNotOptimizeData(c); + + c.insert(c.end(), value); // re-insert an element at the end to avoid needing a new container + } + }); + } +} + +} // namespace support + +#endif // TEST_BENCHMARKS_CONTAINERS_SEQUENCE_SEQUENCE_CONTAINER_BENCHMARKS_H diff --git a/libcxx/test/benchmarks/containers/vector.bench.cpp b/libcxx/test/benchmarks/containers/sequence/vector.bench.cpp similarity index 73% rename from libcxx/test/benchmarks/containers/vector.bench.cpp rename to libcxx/test/benchmarks/containers/sequence/vector.bench.cpp index eef23d2981642..599db1d90fa9a 100644 --- a/libcxx/test/benchmarks/containers/vector.bench.cpp +++ b/libcxx/test/benchmarks/containers/sequence/vector.bench.cpp @@ -11,12 +11,12 @@ #include #include -#include "container_benchmarks.h" +#include "sequence_container_benchmarks.h" #include "benchmark/benchmark.h" int main(int argc, char** argv) { - ContainerBenchmarks::sequence_container_benchmarks>("std::vector"); - ContainerBenchmarks::sequence_container_benchmarks>("std::vector"); + support::sequence_container_benchmarks>("std::vector"); + support::sequence_container_benchmarks>("std::vector"); benchmark::Initialize(&argc, argv); benchmark::RunSpecifiedBenchmarks(); diff --git a/libcxx/test/std/strings/basic.string/string.nonmembers/string_op+/char_string.pass.cpp b/libcxx/test/std/strings/basic.string/string.nonmembers/string_op+/char_string.pass.cpp index 2ca5788e6d8a3..68a250c593318 100644 --- a/libcxx/test/std/strings/basic.string/string.nonmembers/string_op+/char_string.pass.cpp +++ b/libcxx/test/std/strings/basic.string/string.nonmembers/string_op+/char_string.pass.cpp @@ -16,39 +16,32 @@ // basic_string&& // operator+(charT lhs, basic_string&& rhs); // constexpr since C++20 +#include #include #include -#include #include "test_macros.h" #include "min_allocator.h" #include "asan_testing.h" -template -TEST_CONSTEXPR_CXX20 void test0(typename S::value_type lhs, const S& rhs, const S& x) { - assert(lhs + rhs == x); - LIBCPP_ASSERT(is_string_asan_correct(lhs + rhs)); -} - -#if TEST_STD_VER >= 11 -template -TEST_CONSTEXPR_CXX20 void test1(typename S::value_type lhs, S&& rhs, const S& x) { - assert(lhs + std::move(rhs) == x); -} -#endif - template TEST_CONSTEXPR_CXX20 void test_string() { - test0('a', S(""), S("a")); - test0('a', S("12345"), S("a12345")); - test0('a', S("1234567890"), S("a1234567890")); - test0('a', S("12345678901234567890"), S("a12345678901234567890")); + const char* test_data[] = {"", "12345", "1234567890", "12345678901234567890"}; + const char* results[] = {"a", "a12345", "a1234567890", 
"a12345678901234567890"}; + + for (size_t i = 0; i != 4; ++i) { + { // operator+(value_type, const string&); + const S str(test_data[i]); + assert('a' + str == results[i]); + LIBCPP_ASSERT(is_string_asan_correct('a' + str)); + } #if TEST_STD_VER >= 11 - test1('a', S(""), S("a")); - test1('a', S("12345"), S("a12345")); - test1('a', S("1234567890"), S("a1234567890")); - test1('a', S("12345678901234567890"), S("a12345678901234567890")); + { // operator+(value_type, string&&); + S str(test_data[i]); + assert('a' + std::move(str) == results[i]); + } #endif + } } TEST_CONSTEXPR_CXX20 bool test() { @@ -63,7 +56,7 @@ TEST_CONSTEXPR_CXX20 bool test() { int main(int, char**) { test(); -#if TEST_STD_VER > 17 +#if TEST_STD_VER >= 20 static_assert(test()); #endif diff --git a/libcxx/test/std/strings/basic.string/string.nonmembers/string_op+/pointer_string.pass.cpp b/libcxx/test/std/strings/basic.string/string.nonmembers/string_op+/pointer_string.pass.cpp index fa14f657d8269..434f7292c149c 100644 --- a/libcxx/test/std/strings/basic.string/string.nonmembers/string_op+/pointer_string.pass.cpp +++ b/libcxx/test/std/strings/basic.string/string.nonmembers/string_op+/pointer_string.pass.cpp @@ -16,68 +16,49 @@ // basic_string&& // operator+(const charT* lhs, basic_string&& rhs); // constexpr since C++20 +#include #include #include -#include #include "test_macros.h" #include "min_allocator.h" -template -TEST_CONSTEXPR_CXX20 void test0(const typename S::value_type* lhs, const S& rhs, const S& x) { - assert(lhs + rhs == x); -} - -#if TEST_STD_VER >= 11 -template -TEST_CONSTEXPR_CXX20 void test1(const typename S::value_type* lhs, S&& rhs, const S& x) { - assert(lhs + std::move(rhs) == x); -} -#endif - template TEST_CONSTEXPR_CXX20 void test_string() { - test0("", S(""), S("")); - test0("", S("12345"), S("12345")); - test0("", S("1234567890"), S("1234567890")); - test0("", S("12345678901234567890"), S("12345678901234567890")); - test0("abcde", S(""), S("abcde")); - test0("abcde", S("12345"), S("abcde12345")); - test0("abcde", S("1234567890"), S("abcde1234567890")); - test0("abcde", S("12345678901234567890"), S("abcde12345678901234567890")); - test0("abcdefghij", S(""), S("abcdefghij")); - test0("abcdefghij", S("12345"), S("abcdefghij12345")); - test0("abcdefghij", S("1234567890"), S("abcdefghij1234567890")); - test0("abcdefghij", S("12345678901234567890"), S("abcdefghij12345678901234567890")); - test0("abcdefghijklmnopqrst", S(""), S("abcdefghijklmnopqrst")); - test0("abcdefghijklmnopqrst", S("12345"), S("abcdefghijklmnopqrst12345")); - test0("abcdefghijklmnopqrst", S("1234567890"), S("abcdefghijklmnopqrst1234567890")); - test0("abcdefghijklmnopqrst", S("12345678901234567890"), S("abcdefghijklmnopqrst12345678901234567890")); + const char* test_data[2][4] = { + {"", "abcde", "abcdefghij", "abcdefghijklmnopqrst"}, {"", "12345", "1234567890", "12345678901234567890"}}; + + const char* results[4][4] = { + {"", "12345", "1234567890", "12345678901234567890"}, + {"abcde", "abcde12345", "abcde1234567890", "abcde12345678901234567890"}, + {"abcdefghij", "abcdefghij12345", "abcdefghij1234567890", "abcdefghij12345678901234567890"}, + {"abcdefghijklmnopqrst", + "abcdefghijklmnopqrst12345", + "abcdefghijklmnopqrst1234567890", + "abcdefghijklmnopqrst12345678901234567890"}}; + for (size_t i = 0; i != 4; ++i) { + for (size_t k = 0; k != 4; ++k) { + { // operator+(const value_type*, const string&); + const char* lhs = test_data[0][i]; + const S rhs(test_data[1][k]); + assert(lhs + rhs == results[i][k]); + } #if TEST_STD_VER >= 11 - 
test1("", S(""), S("")); - test1("", S("12345"), S("12345")); - test1("", S("1234567890"), S("1234567890")); - test1("", S("12345678901234567890"), S("12345678901234567890")); - test1("abcde", S(""), S("abcde")); - test1("abcde", S("12345"), S("abcde12345")); - test1("abcde", S("1234567890"), S("abcde1234567890")); - test1("abcde", S("12345678901234567890"), S("abcde12345678901234567890")); - test1("abcdefghij", S(""), S("abcdefghij")); - test1("abcdefghij", S("12345"), S("abcdefghij12345")); - test1("abcdefghij", S("1234567890"), S("abcdefghij1234567890")); - test1("abcdefghij", S("12345678901234567890"), S("abcdefghij12345678901234567890")); - test1("abcdefghijklmnopqrst", S(""), S("abcdefghijklmnopqrst")); - test1("abcdefghijklmnopqrst", S("12345"), S("abcdefghijklmnopqrst12345")); - test1("abcdefghijklmnopqrst", S("1234567890"), S("abcdefghijklmnopqrst1234567890")); - test1("abcdefghijklmnopqrst", S("12345678901234567890"), S("abcdefghijklmnopqrst12345678901234567890")); + { // operator+(const value_type*, string&&); + const char* lhs = test_data[0][i]; + S rhs(test_data[1][k]); + assert(lhs + std::move(rhs) == results[i][k]); + } #endif + } + } } TEST_CONSTEXPR_CXX20 bool test() { test_string(); #if TEST_STD_VER >= 11 - test_string, min_allocator > >(); + test_string, min_allocator>>(); #endif return true; @@ -85,7 +66,7 @@ TEST_CONSTEXPR_CXX20 bool test() { int main(int, char**) { test(); -#if TEST_STD_VER > 17 +#if TEST_STD_VER >= 20 static_assert(test()); #endif diff --git a/libcxx/test/std/strings/basic.string/string.nonmembers/string_op+/string_char.pass.cpp b/libcxx/test/std/strings/basic.string/string.nonmembers/string_op+/string_char.pass.cpp index 9c0fb56e1f1c9..5c3989ae304e9 100644 --- a/libcxx/test/std/strings/basic.string/string.nonmembers/string_op+/string_char.pass.cpp +++ b/libcxx/test/std/strings/basic.string/string.nonmembers/string_op+/string_char.pass.cpp @@ -16,41 +16,32 @@ // basic_string&& // operator+(basic_string&& lhs, charT rhs); // constexpr since C++20 +#include #include #include -#include -#include "test_macros.h" -#include "min_allocator.h" #include "asan_testing.h" - -template -TEST_CONSTEXPR_CXX20 void test0(const S& lhs, typename S::value_type rhs, const S& x) { - assert(lhs + rhs == x); - LIBCPP_ASSERT(is_string_asan_correct(lhs + rhs)); -} - -#if TEST_STD_VER >= 11 -template -TEST_CONSTEXPR_CXX20 void test1(S&& lhs, typename S::value_type rhs, const S& x) { - assert(std::move(lhs) + rhs == x); -} -#endif +#include "min_allocator.h" +#include "test_macros.h" template TEST_CONSTEXPR_CXX20 void test_string() { - test0(S(""), '1', S("1")); - test0(S(""), '1', S("1")); - test0(S("abcde"), '1', S("abcde1")); - test0(S("abcdefghij"), '1', S("abcdefghij1")); - test0(S("abcdefghijklmnopqrst"), '1', S("abcdefghijklmnopqrst1")); + const char* test_data[] = {"", "12345", "1234567890", "12345678901234567890"}; + const char* results[] = {"a", "12345a", "1234567890a", "12345678901234567890a"}; + for (size_t i = 0; i != 4; ++i) { + { // operator+(const string&, value_type); + const S str(test_data[i]); + assert(str + 'a' == results[i]); + LIBCPP_ASSERT(is_string_asan_correct(str + 'a')); + } #if TEST_STD_VER >= 11 - test1(S(""), '1', S("1")); - test1(S("abcde"), '1', S("abcde1")); - test1(S("abcdefghij"), '1', S("abcdefghij1")); - test1(S("abcdefghijklmnopqrst"), '1', S("abcdefghijklmnopqrst1")); + { // operator+(string&&, value_type); + S str(test_data[i]); + assert(std::move(str) + 'a' == results[i]); + } #endif + } } TEST_CONSTEXPR_CXX20 bool test() { @@ -65,7 
+56,7 @@ TEST_CONSTEXPR_CXX20 bool test() { int main(int, char**) { test(); -#if TEST_STD_VER > 17 +#if TEST_STD_VER >= 20 static_assert(test()); #endif diff --git a/libcxx/test/std/strings/basic.string/string.nonmembers/string_op+/string_pointer.pass.cpp b/libcxx/test/std/strings/basic.string/string.nonmembers/string_op+/string_pointer.pass.cpp index 3e1aaebf112eb..589c737348a11 100644 --- a/libcxx/test/std/strings/basic.string/string.nonmembers/string_op+/string_pointer.pass.cpp +++ b/libcxx/test/std/strings/basic.string/string.nonmembers/string_op+/string_pointer.pass.cpp @@ -16,13 +16,13 @@ // basic_string&& // operator+(basic_string&& lhs, const charT* rhs); // constexpr since C++20 +#include #include #include -#include -#include "test_macros.h" -#include "min_allocator.h" #include "asan_testing.h" +#include "min_allocator.h" +#include "test_macros.h" template TEST_CONSTEXPR_CXX20 void test0(const S& lhs, const typename S::value_type* rhs, const S& x) { @@ -39,40 +39,34 @@ TEST_CONSTEXPR_CXX20 void test1(S&& lhs, const typename S::value_type* rhs, cons template TEST_CONSTEXPR_CXX20 void test_string() { - test0(S(""), "", S("")); - test0(S(""), "12345", S("12345")); - test0(S(""), "1234567890", S("1234567890")); - test0(S(""), "12345678901234567890", S("12345678901234567890")); - test0(S("abcde"), "", S("abcde")); - test0(S("abcde"), "12345", S("abcde12345")); - test0(S("abcde"), "1234567890", S("abcde1234567890")); - test0(S("abcde"), "12345678901234567890", S("abcde12345678901234567890")); - test0(S("abcdefghij"), "", S("abcdefghij")); - test0(S("abcdefghij"), "12345", S("abcdefghij12345")); - test0(S("abcdefghij"), "1234567890", S("abcdefghij1234567890")); - test0(S("abcdefghij"), "12345678901234567890", S("abcdefghij12345678901234567890")); - test0(S("abcdefghijklmnopqrst"), "", S("abcdefghijklmnopqrst")); - test0(S("abcdefghijklmnopqrst"), "12345", S("abcdefghijklmnopqrst12345")); - test0(S("abcdefghijklmnopqrst"), "1234567890", S("abcdefghijklmnopqrst1234567890")); - test0(S("abcdefghijklmnopqrst"), "12345678901234567890", S("abcdefghijklmnopqrst12345678901234567890")); + const char* test_data[2][4] = { + {"", "abcde", "abcdefghij", "abcdefghijklmnopqrst"}, {"", "12345", "1234567890", "12345678901234567890"}}; + + const char* results[4][4] = { + {"", "12345", "1234567890", "12345678901234567890"}, + {"abcde", "abcde12345", "abcde1234567890", "abcde12345678901234567890"}, + {"abcdefghij", "abcdefghij12345", "abcdefghij1234567890", "abcdefghij12345678901234567890"}, + {"abcdefghijklmnopqrst", + "abcdefghijklmnopqrst12345", + "abcdefghijklmnopqrst1234567890", + "abcdefghijklmnopqrst12345678901234567890"}}; + + for (size_t i = 0; i != 4; ++i) { + for (size_t k = 0; k != 4; ++k) { + { // operator+(const value_type*, const string&); + const S lhs(test_data[0][i]); + const char* rhs = test_data[1][k]; + assert(lhs + rhs == results[i][k]); + } #if TEST_STD_VER >= 11 - test1(S(""), "", S("")); - test1(S(""), "12345", S("12345")); - test1(S(""), "1234567890", S("1234567890")); - test1(S(""), "12345678901234567890", S("12345678901234567890")); - test1(S("abcde"), "", S("abcde")); - test1(S("abcde"), "12345", S("abcde12345")); - test1(S("abcde"), "1234567890", S("abcde1234567890")); - test1(S("abcde"), "12345678901234567890", S("abcde12345678901234567890")); - test1(S("abcdefghij"), "", S("abcdefghij")); - test1(S("abcdefghij"), "12345", S("abcdefghij12345")); - test1(S("abcdefghij"), "1234567890", S("abcdefghij1234567890")); - test1(S("abcdefghij"), "12345678901234567890", 
S("abcdefghij12345678901234567890")); - test1(S("abcdefghijklmnopqrst"), "", S("abcdefghijklmnopqrst")); - test1(S("abcdefghijklmnopqrst"), "12345", S("abcdefghijklmnopqrst12345")); - test1(S("abcdefghijklmnopqrst"), "1234567890", S("abcdefghijklmnopqrst1234567890")); - test1(S("abcdefghijklmnopqrst"), "12345678901234567890", S("abcdefghijklmnopqrst12345678901234567890")); + { // operator+(const value_type*, string&&); + S lhs(test_data[0][i]); + const char* rhs = test_data[1][k]; + assert(std::move(lhs) + rhs == results[i][k]); + } #endif + } + } } TEST_CONSTEXPR_CXX20 bool test() { @@ -87,7 +81,7 @@ TEST_CONSTEXPR_CXX20 bool test() { int main(int, char**) { test(); -#if TEST_STD_VER > 17 +#if TEST_STD_VER >= 20 static_assert(test()); #endif diff --git a/libcxx/test/std/strings/basic.string/string.nonmembers/string_op+/string_string.pass.cpp b/libcxx/test/std/strings/basic.string/string.nonmembers/string_op+/string_string.pass.cpp index fcc8ceab87071..2badb51753395 100644 --- a/libcxx/test/std/strings/basic.string/string.nonmembers/string_op+/string_string.pass.cpp +++ b/libcxx/test/std/strings/basic.string/string.nonmembers/string_op+/string_string.pass.cpp @@ -28,114 +28,62 @@ // operator+(const basic_string&& lhs, // const basic_string&& rhs); // constexpr since C++20 +#include #include #include -#include -#include "test_macros.h" -#include "min_allocator.h" #include "asan_testing.h" - -template -TEST_CONSTEXPR_CXX20 void test0(const S& lhs, const S& rhs, const S& x) { - assert(lhs + rhs == x); - LIBCPP_ASSERT(is_string_asan_correct(lhs + rhs)); -} - -#if TEST_STD_VER >= 11 -template -TEST_CONSTEXPR_CXX20 void test1(S&& lhs, const S& rhs, const S& x) { - assert(std::move(lhs) + rhs == x); -} - -template -TEST_CONSTEXPR_CXX20 void test2(const S& lhs, S&& rhs, const S& x) { - assert(lhs + std::move(rhs) == x); -} - -template -TEST_CONSTEXPR_CXX20 void test3(S&& lhs, S&& rhs, const S& x) { - assert(std::move(lhs) + std::move(rhs) == x); -} -#endif +#include "min_allocator.h" +#include "test_macros.h" template TEST_CONSTEXPR_CXX20 void test_string() { - test0(S(""), S(""), S("")); - test0(S(""), S("12345"), S("12345")); - test0(S(""), S("1234567890"), S("1234567890")); - test0(S(""), S("12345678901234567890"), S("12345678901234567890")); - test0(S("abcde"), S(""), S("abcde")); - test0(S("abcde"), S("12345"), S("abcde12345")); - test0(S("abcde"), S("1234567890"), S("abcde1234567890")); - test0(S("abcde"), S("12345678901234567890"), S("abcde12345678901234567890")); - test0(S("abcdefghij"), S(""), S("abcdefghij")); - test0(S("abcdefghij"), S("12345"), S("abcdefghij12345")); - test0(S("abcdefghij"), S("1234567890"), S("abcdefghij1234567890")); - test0(S("abcdefghij"), S("12345678901234567890"), S("abcdefghij12345678901234567890")); - test0(S("abcdefghijklmnopqrst"), S(""), S("abcdefghijklmnopqrst")); - test0(S("abcdefghijklmnopqrst"), S("12345"), S("abcdefghijklmnopqrst12345")); - test0(S("abcdefghijklmnopqrst"), S("1234567890"), S("abcdefghijklmnopqrst1234567890")); - test0(S("abcdefghijklmnopqrst"), S("12345678901234567890"), S("abcdefghijklmnopqrst12345678901234567890")); + const char* test_data[2][4] = { + {"", "abcde", "abcdefghij", "abcdefghijklmnopqrst"}, {"", "12345", "1234567890", "12345678901234567890"}}; + + const char* results[4][4] = { + {"", "12345", "1234567890", "12345678901234567890"}, + {"abcde", "abcde12345", "abcde1234567890", "abcde12345678901234567890"}, + {"abcdefghij", "abcdefghij12345", "abcdefghij1234567890", "abcdefghij12345678901234567890"}, + 
{"abcdefghijklmnopqrst", + "abcdefghijklmnopqrst12345", + "abcdefghijklmnopqrst1234567890", + "abcdefghijklmnopqrst12345678901234567890"}}; + + for (size_t i = 0; i != 4; ++i) { + for (size_t k = 0; k != 4; ++k) { + { // operator+(const string&, const string&); + const S lhs(test_data[0][i]); + const S rhs(test_data[1][k]); + assert(lhs + rhs == results[i][k]); + LIBCPP_ASSERT(is_string_asan_correct(lhs + rhs)); + } #if TEST_STD_VER >= 11 - test1(S(""), S(""), S("")); - test1(S(""), S("12345"), S("12345")); - test1(S(""), S("1234567890"), S("1234567890")); - test1(S(""), S("12345678901234567890"), S("12345678901234567890")); - test1(S("abcde"), S(""), S("abcde")); - test1(S("abcde"), S("12345"), S("abcde12345")); - test1(S("abcde"), S("1234567890"), S("abcde1234567890")); - test1(S("abcde"), S("12345678901234567890"), S("abcde12345678901234567890")); - test1(S("abcdefghij"), S(""), S("abcdefghij")); - test1(S("abcdefghij"), S("12345"), S("abcdefghij12345")); - test1(S("abcdefghij"), S("1234567890"), S("abcdefghij1234567890")); - test1(S("abcdefghij"), S("12345678901234567890"), S("abcdefghij12345678901234567890")); - test1(S("abcdefghijklmnopqrst"), S(""), S("abcdefghijklmnopqrst")); - test1(S("abcdefghijklmnopqrst"), S("12345"), S("abcdefghijklmnopqrst12345")); - test1(S("abcdefghijklmnopqrst"), S("1234567890"), S("abcdefghijklmnopqrst1234567890")); - test1(S("abcdefghijklmnopqrst"), S("12345678901234567890"), S("abcdefghijklmnopqrst12345678901234567890")); - - test2(S(""), S(""), S("")); - test2(S(""), S("12345"), S("12345")); - test2(S(""), S("1234567890"), S("1234567890")); - test2(S(""), S("12345678901234567890"), S("12345678901234567890")); - test2(S("abcde"), S(""), S("abcde")); - test2(S("abcde"), S("12345"), S("abcde12345")); - test2(S("abcde"), S("1234567890"), S("abcde1234567890")); - test2(S("abcde"), S("12345678901234567890"), S("abcde12345678901234567890")); - test2(S("abcdefghij"), S(""), S("abcdefghij")); - test2(S("abcdefghij"), S("12345"), S("abcdefghij12345")); - test2(S("abcdefghij"), S("1234567890"), S("abcdefghij1234567890")); - test2(S("abcdefghij"), S("12345678901234567890"), S("abcdefghij12345678901234567890")); - test2(S("abcdefghijklmnopqrst"), S(""), S("abcdefghijklmnopqrst")); - test2(S("abcdefghijklmnopqrst"), S("12345"), S("abcdefghijklmnopqrst12345")); - test2(S("abcdefghijklmnopqrst"), S("1234567890"), S("abcdefghijklmnopqrst1234567890")); - test2(S("abcdefghijklmnopqrst"), S("12345678901234567890"), S("abcdefghijklmnopqrst12345678901234567890")); - - test3(S(""), S(""), S("")); - test3(S(""), S("12345"), S("12345")); - test3(S(""), S("1234567890"), S("1234567890")); - test3(S(""), S("12345678901234567890"), S("12345678901234567890")); - test3(S("abcde"), S(""), S("abcde")); - test3(S("abcde"), S("12345"), S("abcde12345")); - test3(S("abcde"), S("1234567890"), S("abcde1234567890")); - test3(S("abcde"), S("12345678901234567890"), S("abcde12345678901234567890")); - test3(S("abcdefghij"), S(""), S("abcdefghij")); - test3(S("abcdefghij"), S("12345"), S("abcdefghij12345")); - test3(S("abcdefghij"), S("1234567890"), S("abcdefghij1234567890")); - test3(S("abcdefghij"), S("12345678901234567890"), S("abcdefghij12345678901234567890")); - test3(S("abcdefghijklmnopqrst"), S(""), S("abcdefghijklmnopqrst")); - test3(S("abcdefghijklmnopqrst"), S("12345"), S("abcdefghijklmnopqrst12345")); - test3(S("abcdefghijklmnopqrst"), S("1234567890"), S("abcdefghijklmnopqrst1234567890")); - test3(S("abcdefghijklmnopqrst"), S("12345678901234567890"), 
S("abcdefghijklmnopqrst12345678901234567890")); + { // operator+(string&&, const string&); + S lhs(test_data[0][i]); + const S rhs(test_data[1][k]); + assert(std::move(lhs) + rhs == results[i][k]); + } + { // operator+(const string&, string&&); + const S lhs(test_data[0][i]); + S rhs(test_data[1][k]); + assert(lhs + std::move(rhs) == results[i][k]); + } + { // operator+(string&&, string&&); + S lhs(test_data[0][i]); + S rhs(test_data[1][k]); + assert(std::move(lhs) + std::move(rhs) == results[i][k]); + } #endif + } + } } TEST_CONSTEXPR_CXX20 bool test() { test_string(); #if TEST_STD_VER >= 11 - test_string, min_allocator > >(); - test_string, safe_allocator > >(); + test_string, min_allocator>>(); + test_string, safe_allocator>>(); #endif return true; @@ -143,7 +91,7 @@ TEST_CONSTEXPR_CXX20 bool test() { int main(int, char**) { test(); -#if TEST_STD_VER > 17 +#if TEST_STD_VER >= 20 static_assert(test()); #endif diff --git a/libcxx/test/support/MinSequenceContainer.h b/libcxx/test/support/MinSequenceContainer.h index d0e29ae40c400..7fee4dd0fbdc1 100644 --- a/libcxx/test/support/MinSequenceContainer.h +++ b/libcxx/test/support/MinSequenceContainer.h @@ -31,7 +31,7 @@ struct MinSequenceContainer { const_iterator cbegin() const { return const_iterator(data_.data()); } iterator end() { return begin() + size(); } const_iterator end() const { return begin() + size(); } - size_type size() const { return data_.size(); } + size_type size() const { return static_cast(data_.size()); } bool empty() const { return data_.empty(); } void clear() { data_.clear(); } diff --git a/libcxx/test/support/min_allocator.h b/libcxx/test/support/min_allocator.h index 18f51f8072640..d3ee27a23bc89 100644 --- a/libcxx/test/support/min_allocator.h +++ b/libcxx/test/support/min_allocator.h @@ -394,15 +394,9 @@ class min_allocator template TEST_CONSTEXPR_CXX20 min_allocator(min_allocator) {} - TEST_CONSTEXPR_CXX20 pointer allocate(std::ptrdiff_t n) - { - return pointer(std::allocator().allocate(n)); - } + TEST_CONSTEXPR_CXX20 pointer allocate(std::size_t n) { return pointer(std::allocator().allocate(n)); } - TEST_CONSTEXPR_CXX20 void deallocate(pointer p, std::ptrdiff_t n) - { - std::allocator().deallocate(p.ptr_, n); - } + TEST_CONSTEXPR_CXX20 void deallocate(pointer p, std::size_t n) { std::allocator().deallocate(p.ptr_, n); } TEST_CONSTEXPR_CXX20 friend bool operator==(min_allocator, min_allocator) {return true;} TEST_CONSTEXPR_CXX20 friend bool operator!=(min_allocator x, min_allocator y) {return !(x == y);} diff --git a/libunwind/src/UnwindCursor.hpp b/libunwind/src/UnwindCursor.hpp index 3831d8e071ef3..0923052b1b588 100644 --- a/libunwind/src/UnwindCursor.hpp +++ b/libunwind/src/UnwindCursor.hpp @@ -31,8 +31,9 @@ #endif #if defined(_LIBUNWIND_TARGET_LINUX) && \ - (defined(_LIBUNWIND_TARGET_AARCH64) || defined(_LIBUNWIND_TARGET_RISCV) || \ - defined(_LIBUNWIND_TARGET_S390X)) + (defined(_LIBUNWIND_TARGET_AARCH64) || \ + defined(_LIBUNWIND_TARGET_LOONGARCH) || \ + defined(_LIBUNWIND_TARGET_RISCV) || defined(_LIBUNWIND_TARGET_S390X)) #include #include #include @@ -996,6 +997,10 @@ class UnwindCursor : public AbstractUnwindCursor{ bool setInfoForSigReturn(Registers_arm64 &); int stepThroughSigReturn(Registers_arm64 &); #endif +#if defined(_LIBUNWIND_TARGET_LOONGARCH) + bool setInfoForSigReturn(Registers_loongarch &); + int stepThroughSigReturn(Registers_loongarch &); +#endif #if defined(_LIBUNWIND_TARGET_RISCV) bool setInfoForSigReturn(Registers_riscv &); int stepThroughSigReturn(Registers_riscv &); @@ -2815,6 +2820,61 @@ 
int UnwindCursor::stepThroughSigReturn() { #endif // defined(_LIBUNWIND_CHECK_LINUX_SIGRETURN) && // defined(_LIBUNWIND_TARGET_AARCH64) +#if defined(_LIBUNWIND_CHECK_LINUX_SIGRETURN) && \ + defined(_LIBUNWIND_TARGET_LOONGARCH) +template +bool UnwindCursor::setInfoForSigReturn(Registers_loongarch &) { + const pint_t pc = static_cast(getReg(UNW_REG_IP)); + // The PC might contain an invalid address if the unwind info is bad, so + // directly accessing it could cause a SIGSEGV. + if (!isReadableAddr(pc)) + return false; + const auto *instructions = reinterpret_cast(pc); + // Look for the two instructions used in the sigreturn trampoline + // __vdso_rt_sigreturn: + // + // 0x03822c0b li a7,0x8b + // 0x002b0000 syscall 0 + if (instructions[0] != 0x03822c0b || instructions[1] != 0x002b0000) + return false; + + _info = {}; + _info.start_ip = pc; + _info.end_ip = pc + 4; + _isSigReturn = true; + return true; +} + +template +int UnwindCursor::stepThroughSigReturn(Registers_loongarch &) { + // In the signal trampoline frame, sp points to an rt_sigframe[1], which is: + // - 128-byte siginfo struct + // - ucontext_t struct: + // - 8-byte long (__uc_flags) + // - 8-byte pointer (*uc_link) + // - 24-byte uc_stack + // - 8-byte uc_sigmask + // - 120-byte of padding to allow sigset_t to be expanded in the future + // - 8 bytes of padding because sigcontext has 16-byte alignment + // - struct sigcontext uc_mcontext + // [1] + // https://github.com/torvalds/linux/blob/master/arch/loongarch/kernel/signal.c + const pint_t kOffsetSpToSigcontext = 128 + 8 + 8 + 24 + 8 + 128; + + const pint_t sigctx = _registers.getSP() + kOffsetSpToSigcontext; + _registers.setIP(_addressSpace.get64(sigctx)); + for (int i = UNW_LOONGARCH_R1; i <= UNW_LOONGARCH_R31; ++i) { + // skip R0 + uint64_t value = + _addressSpace.get64(sigctx + static_cast((i + 1) * 8)); + _registers.setRegister(i, value); + } + _isSignalFrame = true; + return UNW_STEP_SUCCESS; +} +#endif // defined(_LIBUNWIND_CHECK_LINUX_SIGRETURN) && + // defined(_LIBUNWIND_TARGET_LOONGARCH) + #if defined(_LIBUNWIND_CHECK_LINUX_SIGRETURN) && \ defined(_LIBUNWIND_TARGET_RISCV) template diff --git a/libunwind/test/signal_unwind.pass.cpp b/libunwind/test/signal_unwind.pass.cpp index 1c1566415a4d4..4de271ecb886b 100644 --- a/libunwind/test/signal_unwind.pass.cpp +++ b/libunwind/test/signal_unwind.pass.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // Ensure that the unwinder can cope with the signal handler. -// REQUIRES: target={{(aarch64|riscv64|s390x|x86_64)-.+linux.*}} +// REQUIRES: target={{(aarch64|loongarch64|riscv64|s390x|x86_64)-.+linux.*}} // TODO: Figure out why this fails with Memory Sanitizer. // XFAIL: msan diff --git a/libunwind/test/unwind_leaffunction.pass.cpp b/libunwind/test/unwind_leaffunction.pass.cpp index 98de7dc43260c..d336c159c131b 100644 --- a/libunwind/test/unwind_leaffunction.pass.cpp +++ b/libunwind/test/unwind_leaffunction.pass.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // Ensure that leaf function can be unwund. -// REQUIRES: target={{(aarch64|riscv64|s390x|x86_64)-.+linux.*}} +// REQUIRES: target={{(aarch64|loongarch64|riscv64|s390x|x86_64)-.+linux.*}} // TODO: Figure out why this fails with Memory Sanitizer. 
// XFAIL: msan diff --git a/lld/COFF/MinGW.cpp b/lld/COFF/MinGW.cpp index a6407bc279200..62db04cbe507e 100644 --- a/lld/COFF/MinGW.cpp +++ b/lld/COFF/MinGW.cpp @@ -49,8 +49,7 @@ AutoExporter::AutoExporter( "libclang_rt.profile-x86_64", "libc++", "libc++abi", - "libFortranRuntime", - "libFortranDecimal", + "libflang_rt.runtime", "libunwind", "libmsvcrt", "libucrtbase", @@ -93,6 +92,7 @@ AutoExporter::AutoExporter( "__fmode", "_environ", "___dso_handle", + "__load_config_used", // These are the MinGW names that differ from the standard // ones (lacking an extra underscore). "_DllMain@12", @@ -110,6 +110,7 @@ AutoExporter::AutoExporter( "_fmode", "environ", "__dso_handle", + "_load_config_used", // These are the MinGW names that differ from the standard // ones (lacking an extra underscore). "DllMain", @@ -118,6 +119,10 @@ AutoExporter::AutoExporter( }; excludeSymbolPrefixes.insert("_head_"); } + if (symtab.isEC()) { + excludeSymbols.insert("__chpe_metadata"); + excludeSymbolPrefixes.insert("__os_arm64x_"); + } } void AutoExporter::addWholeArchive(StringRef path) { diff --git a/lld/test/COFF/arm64ec-patchable-thunks.test b/lld/test/COFF/arm64ec-patchable-thunks.test index 5cebe7cc27ad6..1e1ff1f7f2ee4 100644 --- a/lld/test/COFF/arm64ec-patchable-thunks.test +++ b/lld/test/COFF/arm64ec-patchable-thunks.test @@ -5,6 +5,7 @@ RUN: llvm-mc -filetype=obj -triple=arm64ec-windows arm64ec-patchable.s -o arm64e RUN: llvm-mc -filetype=obj -triple=arm64ec-windows arm64ec-alias.s -o arm64ec-alias.obj RUN: llvm-mc -filetype=obj -triple=arm64ec-windows test-sec.s -o test-sec.obj RUN: llvm-mc -filetype=obj -triple=arm64ec-windows %S/Inputs/loadconfig-arm64ec.s -o loadconfig-arm64ec.obj +RUN: llvm-mc -filetype=obj -triple=aarch64-windows %S/Inputs/loadconfig-arm64.s -o loadconfig-arm64.obj RUN: lld-link -out:test.dll -machine:arm64ec arm64ec-patchable.obj test-sec.obj loadconfig-arm64ec.obj -dll -noentry @@ -59,6 +60,18 @@ RUN: not lld-link -out:test4.dll -machine:arm64ec test-sec.obj loadconfig-arm64e ERR: error: undefined symbol: EXP+#patchable_func +RUN: lld-link -out:testx.dll -machine:arm64x arm64ec-patchable.obj test-sec.obj \ +RUN: loadconfig-arm64.obj loadconfig-arm64ec.obj -dll -noentry +RUN: llvm-objdump -d testx.dll | FileCheck -check-prefix=PATCH-DISASM %s +RUN: llvm-readobj --coff-load-config testx.dll | FileCheck -check-prefix=PATCH-CHPE %s + + +RUN: lld-link -out:testx2.dll -machine:arm64x arm64ec-alias.obj loadconfig-arm64.obj \ +RUN: loadconfig-arm64ec.obj -dll -noentry +RUN: llvm-objdump -d testx2.dll | FileCheck -check-prefix=PATCH-DISASM %s +RUN: llvm-readobj --coff-load-config testx2.dll | FileCheck -check-prefix=PATCH-CHPE %s + + #--- arm64ec-patchable.s .section ".text", "x", discard, "#patchable_func$hp_target" .globl "#patchable_func$hp_target" diff --git a/lld/test/COFF/arm64x-export-all.s b/lld/test/COFF/arm64x-export-all.s index 831edfe0b6f88..06ea9ec50259e 100644 --- a/lld/test/COFF/arm64x-export-all.s +++ b/lld/test/COFF/arm64x-export-all.s @@ -15,16 +15,11 @@ // EXP-NEXT: AddressSize: 64bit // EXP-NEXT: Export { // EXP-NEXT: Ordinal: 1 -// EXP-NEXT: Name: _load_config_used -// EXP-NEXT: RVA: -// EXP-NEXT: } -// EXP-NEXT: Export { -// EXP-NEXT: Ordinal: 2 // EXP-NEXT: Name: sym // EXP-NEXT: RVA: 0x2000 // EXP-NEXT: } // EXP-NEXT: Export { -// EXP-NEXT: Ordinal: 3 +// EXP-NEXT: Ordinal: 2 // EXP-NEXT: Name: sym2 // EXP-NEXT: RVA: 0x2004 // EXP-NEXT: } @@ -34,31 +29,11 @@ // EXP-NEXT: AddressSize: 64bit // EXP-NEXT: Export { // EXP-NEXT: Ordinal: 1 -// EXP-NEXT: Name: 
__chpe_metadata -// EXP-NEXT: RVA: -// EXP-NEXT: } -// EXP-NEXT: Export { -// EXP-NEXT: Ordinal: 2 -// EXP-NEXT: Name: __os_arm64x_dispatch_icall -// EXP-NEXT: RVA: 0x12B0 -// EXP-NEXT: } -// EXP-NEXT: Export { -// EXP-NEXT: Ordinal: 3 -// EXP-NEXT: Name: __os_arm64x_dispatch_ret -// EXP-NEXT: RVA: -// EXP-NEXT: } -// EXP-NEXT: Export { -// EXP-NEXT: Ordinal: 4 -// EXP-NEXT: Name: _load_config_used -// EXP-NEXT: RVA: -// EXP-NEXT: } -// EXP-NEXT: Export { -// EXP-NEXT: Ordinal: 5 // EXP-NEXT: Name: sym // EXP-NEXT: RVA: 0x2008 // EXP-NEXT: } // EXP-NEXT: Export { -// EXP-NEXT: Ordinal: 6 +// EXP-NEXT: Ordinal: 2 // EXP-NEXT: Name: sym2 // EXP-NEXT: RVA: 0x200C // EXP-NEXT: } diff --git a/lldb/include/lldb/Symbol/UnwindTable.h b/lldb/include/lldb/Symbol/UnwindTable.h index 29b7fa61b4849..3166fdec6ebaa 100644 --- a/lldb/include/lldb/Symbol/UnwindTable.h +++ b/lldb/include/lldb/Symbol/UnwindTable.h @@ -38,8 +38,9 @@ class UnwindTable { ArmUnwindInfo *GetArmUnwindInfo(); SymbolFile *GetSymbolFile(); - lldb::FuncUnwindersSP GetFuncUnwindersContainingAddress(const Address &addr, - SymbolContext &sc); + lldb::FuncUnwindersSP + GetFuncUnwindersContainingAddress(const Address &addr, + const SymbolContext &sc); bool GetAllowAssemblyEmulationUnwindPlans(); diff --git a/lldb/include/lldb/Target/LanguageRuntime.h b/lldb/include/lldb/Target/LanguageRuntime.h index 4a0214b04e235..f9ae2dc589632 100644 --- a/lldb/include/lldb/Target/LanguageRuntime.h +++ b/lldb/include/lldb/Target/LanguageRuntime.h @@ -105,12 +105,16 @@ class LanguageRuntime : public Runtime, public PluginInterface { "language doesn't support getting vtable information"); } - // this call should return true if it could set the name and/or the type - virtual bool GetDynamicTypeAndAddress(ValueObject &in_value, - lldb::DynamicValueType use_dynamic, - TypeAndOrName &class_type_or_name, - Address &address, - Value::ValueType &value_type) = 0; + /// This call should return true if it could set the name and/or the type. + /// Sets address to the address of the dynamic type if value_type is set to + /// a file or load address. Sets local_buffer to a buffer containing the data + /// of the dynamic type if value_type is set to a host address. Callers should + /// copy local_buffer over into their own buffer if they want to keep the data + /// alive. + virtual bool GetDynamicTypeAndAddress( + ValueObject &in_value, lldb::DynamicValueType use_dynamic, + TypeAndOrName &class_type_or_name, Address &address, + Value::ValueType &value_type, llvm::ArrayRef<uint8_t> &local_buffer) = 0; // This call should return a CompilerType given a generic type name and an // ExecutionContextScope in which one can actually fetch any specialization diff --git a/lldb/include/lldb/Target/Process.h b/lldb/include/lldb/Target/Process.h index a184e6dd891af..c3622a29bc772 100644 --- a/lldb/include/lldb/Target/Process.h +++ b/lldb/include/lldb/Target/Process.h @@ -1491,10 +1491,11 @@ class Process : public std::enable_shared_from_this<Process>, /// otherwise. virtual bool IsAlive(); + /// Check if a process is a live debug session, or a corefile/post-mortem. virtual bool IsLiveDebugSession() const { return true; }; /// Provide a way to retrieve the core dump file that is loaded for debugging. - /// Only available if IsLiveDebugSession() returns true. + /// Only available if IsLiveDebugSession() returns false. /// /// \return /// File path to the core file.
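
The new GetDynamicTypeAndAddress doc comment in the LanguageRuntime.h hunk above says that when value_type is a host address, the runtime points local_buffer at bytes the runtime owns, and that callers must copy those bytes if they need them to outlive the call. Below is a minimal standalone sketch of that ownership rule; it is plain C++20 with made-up FakeRuntime/GetDynamicValueBytes names for illustration, not the lldb API.

```cpp
#include <cassert>
#include <cstdint>
#include <span>
#include <vector>

// Hypothetical stand-in for a language runtime: on success it points
// local_buffer at storage the runtime owns, mirroring the contract that
// the patch documents for GetDynamicTypeAndAddress.
struct FakeRuntime {
  std::vector<uint8_t> scratch;
  bool GetDynamicValueBytes(std::span<const uint8_t> &local_buffer) {
    scratch = {0xde, 0xad, 0xbe, 0xef};
    local_buffer = scratch; // a view into runtime-owned storage, not a copy
    return true;
  }
};

int main() {
  FakeRuntime runtime;
  std::span<const uint8_t> local_buffer;
  if (runtime.GetDynamicValueBytes(local_buffer)) {
    // Per the documented contract: copy the bytes if they must outlive the
    // call, because the runtime may reuse or free its own storage later.
    std::vector<uint8_t> owned(local_buffer.begin(), local_buffer.end());
    runtime.scratch.assign(16, 0);                  // runtime recycles its buffer...
    assert(owned.size() == 4 && owned[0] == 0xde);  // ...the copy stays valid
  }
  return 0;
}
```
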
diff --git a/lldb/include/lldb/ValueObject/ValueObject.h b/lldb/include/lldb/ValueObject/ValueObject.h index 4f77384bb8f13..a0f53d20327cd 100644 --- a/lldb/include/lldb/ValueObject/ValueObject.h +++ b/lldb/include/lldb/ValueObject/ValueObject.h @@ -865,6 +865,19 @@ class ValueObject { virtual void SetLanguageFlags(uint64_t flags) { m_language_flags = flags; } + /// Returns the local buffer that this ValueObject points to if it's + /// available. + /// \return + /// The local buffer if this value object's value points to a + /// host address, and if that buffer can be determined. Otherwise, returns + /// an empty ArrayRef. + /// + /// TODO: Because a ValueObject's Value can point to any arbitrary memory + /// location, it is possible that we can't find what buffer we're + /// pointing to, and thus also can't know its size. See the comment in + /// Value::m_value for a more thorough explanation of why that is. + llvm::ArrayRef<uint8_t> GetLocalBuffer() const; + protected: typedef ClusterManager<ValueObject> ValueObjectManager; diff --git a/lldb/source/Commands/CommandObjectTarget.cpp b/lldb/source/Commands/CommandObjectTarget.cpp index d0092c237b4c9..da50fe04fa2b6 100644 --- a/lldb/source/Commands/CommandObjectTarget.cpp +++ b/lldb/source/Commands/CommandObjectTarget.cpp @@ -3474,6 +3474,17 @@ class CommandObjectTargetModulesShowUnwind : public CommandObjectParsed { m_type = eLookupTypeFunctionOrSymbol; break; + case 'c': + bool value, success; + value = OptionArgParser::ToBoolean(option_arg, false, &success); + if (success) { + m_cached = value; + } else { + return Status::FromErrorStringWithFormatv( + "invalid boolean value '{0}' passed for -c option", option_arg); + } + break; + default: llvm_unreachable("Unimplemented option"); } @@ -3485,6 +3496,7 @@ class CommandObjectTargetModulesShowUnwind : public CommandObjectParsed { m_type = eLookupTypeInvalid; m_str.clear(); m_addr = LLDB_INVALID_ADDRESS; + m_cached = false; } llvm::ArrayRef<OptionDefinition> GetDefinitions() override { @@ -3497,6 +3509,7 @@ class CommandObjectTargetModulesShowUnwind : public CommandObjectParsed { // parsing options std::string m_str; // Holds name lookup lldb::addr_t m_addr = LLDB_INVALID_ADDRESS; // Holds the address to lookup + bool m_cached = true; }; CommandObjectTargetModulesShowUnwind(CommandInterpreter &interpreter) @@ -3583,9 +3596,12 @@ class CommandObjectTargetModulesShowUnwind : public CommandObjectParsed { if (abi) start_addr = abi->FixCodeAddress(start_addr); - FuncUnwindersSP func_unwinders_sp( - sc.module_sp->GetUnwindTable() - .GetUncachedFuncUnwindersContainingAddress(start_addr, sc)); + UnwindTable &uw_table = sc.module_sp->GetUnwindTable(); + FuncUnwindersSP func_unwinders_sp = + m_options.m_cached + ?
uw_table.GetFuncUnwindersContainingAddress(start_addr, sc) + : uw_table.GetUncachedFuncUnwindersContainingAddress(start_addr, + sc); if (!func_unwinders_sp) continue; diff --git a/lldb/source/Commands/Options.td b/lldb/source/Commands/Options.td index 777f8c36c4916..8831fed38435b 100644 --- a/lldb/source/Commands/Options.td +++ b/lldb/source/Commands/Options.td @@ -965,6 +965,8 @@ let Command = "target modules show unwind" in { def target_modules_show_unwind_address : Option<"address", "a">, Group<2>, Arg<"AddressOrExpression">, Desc<"Show unwind instructions for a function " "or symbol containing an address">; + def target_modules_show_unwind_cached : Option<"cached", "c">, + Arg<"Boolean">, Desc<"Show cached unwind information">; } let Command = "target modules lookup" in { diff --git a/lldb/source/Interpreter/OptionArgParser.cpp b/lldb/source/Interpreter/OptionArgParser.cpp index 800f22b6169dc..2d393a57452ee 100644 --- a/lldb/source/Interpreter/OptionArgParser.cpp +++ b/lldb/source/Interpreter/OptionArgParser.cpp @@ -262,8 +262,10 @@ OptionArgParser::DoToAddress(const ExecutionContext *exe_ctx, llvm::StringRef s, // 3: The symbol/reg name if there is an offset // 4: +/- // 5: The offset value. + // clang-format off static RegularExpression g_symbol_plus_offset_regex( - "^(\\$[^ +-]+)|(([^ +-]+)([-\\+])[[:space:]]*(0x[0-9A-Fa-f]+|[0-9]+)[[:space:]]*)$"); + "^(\\$[^ +-]+)|(([^ +-]+)[[:space:]]*([-\\+])[[:space:]]*(0x[0-9A-Fa-f]+|[0-9]+)[[:space:]]*)$"); + // clang-format on llvm::SmallVector matches; if (g_symbol_plus_offset_regex.Execute(sref, &matches)) { diff --git a/lldb/source/Plugins/ABI/X86/ABISysV_x86_64.cpp b/lldb/source/Plugins/ABI/X86/ABISysV_x86_64.cpp index 54028b1b3261a..83b01b14aedc5 100644 --- a/lldb/source/Plugins/ABI/X86/ABISysV_x86_64.cpp +++ b/lldb/source/Plugins/ABI/X86/ABISysV_x86_64.cpp @@ -79,6 +79,7 @@ ABISysV_x86_64::CreateInstance(lldb::ProcessSP process_sp, const ArchSpec &arch) case llvm::Triple::OSType::IOS: case llvm::Triple::OSType::TvOS: case llvm::Triple::OSType::WatchOS: + case llvm::Triple::OSType::XROS: switch (os_env) { case llvm::Triple::EnvironmentType::MacABI: case llvm::Triple::EnvironmentType::Simulator: diff --git a/lldb/source/Plugins/DynamicLoader/MacOSX-DYLD/DynamicLoaderDarwin.cpp b/lldb/source/Plugins/DynamicLoader/MacOSX-DYLD/DynamicLoaderDarwin.cpp index b5cf0d62b976f..14d05a1a4494c 100644 --- a/lldb/source/Plugins/DynamicLoader/MacOSX-DYLD/DynamicLoaderDarwin.cpp +++ b/lldb/source/Plugins/DynamicLoader/MacOSX-DYLD/DynamicLoaderDarwin.cpp @@ -419,6 +419,8 @@ bool DynamicLoaderDarwin::JSONImageInformationIntoImageInfo( image_infos[i].os_type = llvm::Triple::WatchOS; else if (os_name == "bridgeos") image_infos[i].os_type = llvm::Triple::BridgeOS; + else if (os_name == "xros") + image_infos[i].os_type = llvm::Triple::XROS; else if (os_name == "maccatalyst") { image_infos[i].os_type = llvm::Triple::IOS; image_infos[i].os_env = llvm::Triple::MacABI; @@ -431,6 +433,9 @@ bool DynamicLoaderDarwin::JSONImageInformationIntoImageInfo( } else if (os_name == "watchossimulator") { image_infos[i].os_type = llvm::Triple::WatchOS; image_infos[i].os_env = llvm::Triple::Simulator; + } else if (os_name == "xrsimulator") { + image_infos[i].os_type = llvm::Triple::XROS; + image_infos[i].os_env = llvm::Triple::Simulator; } } if (image->HasKey("min_version_os_sdk")) { @@ -765,7 +770,8 @@ bool DynamicLoaderDarwin::AddModulesUsingPreloadedModules( (dyld_triple.getEnvironment() == llvm::Triple::Simulator && (dyld_triple.getOS() == llvm::Triple::IOS || 
dyld_triple.getOS() == llvm::Triple::TvOS || - dyld_triple.getOS() == llvm::Triple::WatchOS))) + dyld_triple.getOS() == llvm::Triple::WatchOS || + dyld_triple.getOS() == llvm::Triple::XROS))) image_module_sp->MergeArchitecture(dyld_spec); } } @@ -835,7 +841,7 @@ lldb_private::ArchSpec DynamicLoaderDarwin::ImageInfo::GetArchitecture() const { } if (os_env == llvm::Triple::Simulator && (os_type == llvm::Triple::IOS || os_type == llvm::Triple::TvOS || - os_type == llvm::Triple::WatchOS)) { + os_type == llvm::Triple::WatchOS || os_type == llvm::Triple::XROS)) { llvm::Triple triple(llvm::Twine(arch_spec.GetArchitectureName()) + "-apple-" + llvm::Triple::getOSTypeName(os_type) + min_version_os_sdk + "-simulator"); @@ -1208,35 +1214,46 @@ DynamicLoaderDarwin::GetThreadLocalData(const lldb::ModuleSP module_sp, bool DynamicLoaderDarwin::UseDYLDSPI(Process *process) { Log *log = GetLog(LLDBLog::DynamicLoader); - bool use_new_spi_interface = false; + bool use_new_spi_interface = true; llvm::VersionTuple version = process->GetHostOSVersion(); if (!version.empty()) { - const llvm::Triple::OSType os_type = + using namespace llvm; + const Triple::OSType os_type = process->GetTarget().GetArchitecture().GetTriple().getOS(); - // macOS 10.12 and newer - if (os_type == llvm::Triple::MacOSX && - version >= llvm::VersionTuple(10, 12)) - use_new_spi_interface = true; + auto OlderThan = [os_type, version](llvm::Triple::OSType o, + llvm::VersionTuple v) -> bool { + return os_type == o && version < v; + }; + + if (OlderThan(Triple::MacOSX, VersionTuple(10, 12))) + use_new_spi_interface = false; - // iOS 10 and newer - if (os_type == llvm::Triple::IOS && version >= llvm::VersionTuple(10)) - use_new_spi_interface = true; + if (OlderThan(Triple::IOS, VersionTuple(10))) + use_new_spi_interface = false; - // tvOS 10 and newer - if (os_type == llvm::Triple::TvOS && version >= llvm::VersionTuple(10)) - use_new_spi_interface = true; + if (OlderThan(Triple::TvOS, VersionTuple(10))) + use_new_spi_interface = false; - // watchOS 3 and newer - if (os_type == llvm::Triple::WatchOS && version >= llvm::VersionTuple(3)) - use_new_spi_interface = true; + if (OlderThan(Triple::WatchOS, VersionTuple(3))) + use_new_spi_interface = false; - // NEED_BRIDGEOS_TRIPLE // Any BridgeOS - // NEED_BRIDGEOS_TRIPLE if (os_type == llvm::Triple::BridgeOS) - // NEED_BRIDGEOS_TRIPLE use_new_spi_interface = true; + // llvm::Triple::BridgeOS and llvm::Triple::XROS always use the new + // libdyld SPI interface. + } else { + // We could not get an OS version string, we are likely not + // connected to debugserver and the packets to call the libdyld SPI + // will not exist. + use_new_spi_interface = false; } + // Corefiles cannot use the libdyld SPI to get the inferior's + // binaries, we must find it through metadata or a scan + // of the corefile memory. 
+ if (!process->IsLiveDebugSession()) + use_new_spi_interface = false; + if (log) { if (use_new_spi_interface) LLDB_LOGF( diff --git a/lldb/source/Plugins/LanguageRuntime/CPlusPlus/ItaniumABI/ItaniumABILanguageRuntime.cpp b/lldb/source/Plugins/LanguageRuntime/CPlusPlus/ItaniumABI/ItaniumABILanguageRuntime.cpp index 66cdab1307ce9..8faf7135217ac 100644 --- a/lldb/source/Plugins/LanguageRuntime/CPlusPlus/ItaniumABI/ItaniumABILanguageRuntime.cpp +++ b/lldb/source/Plugins/LanguageRuntime/CPlusPlus/ItaniumABI/ItaniumABILanguageRuntime.cpp @@ -289,7 +289,7 @@ llvm::Expected bool ItaniumABILanguageRuntime::GetDynamicTypeAndAddress( ValueObject &in_value, lldb::DynamicValueType use_dynamic, TypeAndOrName &class_type_or_name, Address &dynamic_address, - Value::ValueType &value_type) { + Value::ValueType &value_type, llvm::ArrayRef &local_buffer) { // For Itanium, if the type has a vtable pointer in the object, it will be at // offset 0 in the object. That will point to the "address point" within the // vtable (not the beginning of the vtable.) We can then look up the symbol diff --git a/lldb/source/Plugins/LanguageRuntime/CPlusPlus/ItaniumABI/ItaniumABILanguageRuntime.h b/lldb/source/Plugins/LanguageRuntime/CPlusPlus/ItaniumABI/ItaniumABILanguageRuntime.h index 0f7e73cfee075..7abf2f8547cd5 100644 --- a/lldb/source/Plugins/LanguageRuntime/CPlusPlus/ItaniumABI/ItaniumABILanguageRuntime.h +++ b/lldb/source/Plugins/LanguageRuntime/CPlusPlus/ItaniumABI/ItaniumABILanguageRuntime.h @@ -54,8 +54,8 @@ class ItaniumABILanguageRuntime : public lldb_private::CPPLanguageRuntime { bool GetDynamicTypeAndAddress(ValueObject &in_value, lldb::DynamicValueType use_dynamic, TypeAndOrName &class_type_or_name, - Address &address, - Value::ValueType &value_type) override; + Address &address, Value::ValueType &value_type, + llvm::ArrayRef &local_buffer) override; TypeAndOrName FixUpDynamicType(const TypeAndOrName &type_and_or_name, ValueObject &static_value) override; diff --git a/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntime.cpp b/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntime.cpp index ceee19c136d25..ad60290382c02 100644 --- a/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntime.cpp +++ b/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntime.cpp @@ -276,7 +276,7 @@ bool AppleObjCRuntime::CouldHaveDynamicValue(ValueObject &in_value) { bool AppleObjCRuntime::GetDynamicTypeAndAddress( ValueObject &in_value, lldb::DynamicValueType use_dynamic, TypeAndOrName &class_type_or_name, Address &address, - Value::ValueType &value_type) { + Value::ValueType &value_type, llvm::ArrayRef &local_buffer) { return false; } diff --git a/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntime.h b/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntime.h index da58d44db19a8..425a608d65c2c 100644 --- a/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntime.h +++ b/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntime.h @@ -54,8 +54,8 @@ class AppleObjCRuntime : public lldb_private::ObjCLanguageRuntime { bool GetDynamicTypeAndAddress(ValueObject &in_value, lldb::DynamicValueType use_dynamic, TypeAndOrName &class_type_or_name, - Address &address, - Value::ValueType &value_type) override; + Address &address, Value::ValueType &value_type, + llvm::ArrayRef &local_buffer) override; TypeAndOrName FixUpDynamicType(const TypeAndOrName &type_and_or_name, ValueObject 
&static_value) override; diff --git a/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntimeV1.cpp b/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntimeV1.cpp index 93168c23f3547..db1317d70d060 100644 --- a/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntimeV1.cpp +++ b/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntimeV1.cpp @@ -48,7 +48,7 @@ AppleObjCRuntimeV1::AppleObjCRuntimeV1(Process *process) bool AppleObjCRuntimeV1::GetDynamicTypeAndAddress( ValueObject &in_value, lldb::DynamicValueType use_dynamic, TypeAndOrName &class_type_or_name, Address &address, - Value::ValueType &value_type) { + Value::ValueType &value_type, llvm::ArrayRef &local_buffer) { class_type_or_name.Clear(); value_type = Value::ValueType::Scalar; if (CouldHaveDynamicValue(in_value)) { diff --git a/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntimeV1.h b/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntimeV1.h index 46d8e89c906e3..c51ac24e690b8 100644 --- a/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntimeV1.h +++ b/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntimeV1.h @@ -100,8 +100,8 @@ class AppleObjCRuntimeV1 : public AppleObjCRuntime { bool GetDynamicTypeAndAddress(ValueObject &in_value, lldb::DynamicValueType use_dynamic, TypeAndOrName &class_type_or_name, - Address &address, - Value::ValueType &value_type) override; + Address &address, Value::ValueType &value_type, + llvm::ArrayRef &local_buffer) override; llvm::Expected> CreateObjectChecker(std::string, ExecutionContext &exe_ctx) override; diff --git a/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntimeV2.cpp b/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntimeV2.cpp index c43871b08191d..a57099f3df454 100644 --- a/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntimeV2.cpp +++ b/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntimeV2.cpp @@ -770,7 +770,7 @@ AppleObjCRuntimeV2::GetPreferredLanguageRuntime(ValueObject &in_value) { bool AppleObjCRuntimeV2::GetDynamicTypeAndAddress( ValueObject &in_value, lldb::DynamicValueType use_dynamic, TypeAndOrName &class_type_or_name, Address &address, - Value::ValueType &value_type) { + Value::ValueType &value_type, llvm::ArrayRef &local_buffer) { // We should never get here with a null process... 
assert(m_process != nullptr); diff --git a/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntimeV2.h b/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntimeV2.h index 2422539b13f13..79840f9be79b3 100644 --- a/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntimeV2.h +++ b/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntimeV2.h @@ -53,8 +53,8 @@ class AppleObjCRuntimeV2 : public AppleObjCRuntime { bool GetDynamicTypeAndAddress(ValueObject &in_value, lldb::DynamicValueType use_dynamic, TypeAndOrName &class_type_or_name, - Address &address, - Value::ValueType &value_type) override; + Address &address, Value::ValueType &value_type, + llvm::ArrayRef &local_buffer) override; llvm::Expected> CreateObjectChecker(std::string name, ExecutionContext &exe_ctx) override; diff --git a/lldb/source/Plugins/LanguageRuntime/ObjC/GNUstepObjCRuntime/GNUstepObjCRuntime.cpp b/lldb/source/Plugins/LanguageRuntime/ObjC/GNUstepObjCRuntime/GNUstepObjCRuntime.cpp index d6ffb03ab55e2..a4b3e26474a55 100644 --- a/lldb/source/Plugins/LanguageRuntime/ObjC/GNUstepObjCRuntime/GNUstepObjCRuntime.cpp +++ b/lldb/source/Plugins/LanguageRuntime/ObjC/GNUstepObjCRuntime/GNUstepObjCRuntime.cpp @@ -127,7 +127,7 @@ bool GNUstepObjCRuntime::CouldHaveDynamicValue(ValueObject &in_value) { bool GNUstepObjCRuntime::GetDynamicTypeAndAddress( ValueObject &in_value, DynamicValueType use_dynamic, TypeAndOrName &class_type_or_name, Address &address, - Value::ValueType &value_type) { + Value::ValueType &value_type, llvm::ArrayRef &local_buffer) { return false; } diff --git a/lldb/source/Plugins/LanguageRuntime/ObjC/GNUstepObjCRuntime/GNUstepObjCRuntime.h b/lldb/source/Plugins/LanguageRuntime/ObjC/GNUstepObjCRuntime/GNUstepObjCRuntime.h index de24466ebb003..94a5c9e1261a8 100644 --- a/lldb/source/Plugins/LanguageRuntime/ObjC/GNUstepObjCRuntime/GNUstepObjCRuntime.h +++ b/lldb/source/Plugins/LanguageRuntime/ObjC/GNUstepObjCRuntime/GNUstepObjCRuntime.h @@ -67,8 +67,8 @@ class GNUstepObjCRuntime : public lldb_private::ObjCLanguageRuntime { bool GetDynamicTypeAndAddress(ValueObject &in_value, lldb::DynamicValueType use_dynamic, TypeAndOrName &class_type_or_name, - Address &address, - Value::ValueType &value_type) override; + Address &address, Value::ValueType &value_type, + llvm::ArrayRef &local_buffer) override; TypeAndOrName FixUpDynamicType(const TypeAndOrName &type_and_or_name, ValueObject &static_value) override; diff --git a/lldb/source/Plugins/ObjectFile/Mach-O/ObjectFileMachO.cpp b/lldb/source/Plugins/ObjectFile/Mach-O/ObjectFileMachO.cpp index bf2d293d2012c..4b69fa6e2bfb2 100644 --- a/lldb/source/Plugins/ObjectFile/Mach-O/ObjectFileMachO.cpp +++ b/lldb/source/Plugins/ObjectFile/Mach-O/ObjectFileMachO.cpp @@ -2848,7 +2848,7 @@ void ObjectFileMachO::ParseSymtab(Symtab &symtab) { "DSC unmapped local symbol[{0}] has invalid " "string table offset {1:x} in {2}, ignoring symbol", nlist_index, nlist.n_strx, - module_sp->GetFileSpec().GetPath()); + module_sp->GetFileSpec().GetPath())); continue; } if (symbol_name[0] == '\0') @@ -6557,9 +6557,8 @@ bool ObjectFileMachO::SaveCore(const lldb::ProcessSP &process_sp, target_triple.getOS() == llvm::Triple::IOS || target_triple.getOS() == llvm::Triple::WatchOS || target_triple.getOS() == llvm::Triple::TvOS || + target_triple.getOS() == llvm::Triple::BridgeOS || target_triple.getOS() == llvm::Triple::XROS)) { - // NEED_BRIDGEOS_TRIPLE target_triple.getOS() == llvm::Triple::BridgeOS)) - // { bool make_core = false; switch 
(target_arch.GetMachine()) { case llvm::Triple::aarch64: diff --git a/lldb/source/Plugins/Platform/MacOSX/PlatformDarwinKernel.cpp b/lldb/source/Plugins/Platform/MacOSX/PlatformDarwinKernel.cpp index 6f75e5ea70b6a..605e3d5704969 100644 --- a/lldb/source/Plugins/Platform/MacOSX/PlatformDarwinKernel.cpp +++ b/lldb/source/Plugins/Platform/MacOSX/PlatformDarwinKernel.cpp @@ -126,6 +126,7 @@ PlatformSP PlatformDarwinKernel::CreateInstance(bool force, case llvm::Triple::MacOSX: case llvm::Triple::IOS: case llvm::Triple::WatchOS: + case llvm::Triple::XROS: case llvm::Triple::TvOS: case llvm::Triple::BridgeOS: break; @@ -329,6 +330,8 @@ void PlatformDarwinKernel::CollectKextAndKernelDirectories() { "/Platforms/AppleTVOS.platform/Developer/SDKs"); AddSDKSubdirsToSearchPaths(developer_dir + "/Platforms/WatchOS.platform/Developer/SDKs"); + AddSDKSubdirsToSearchPaths(developer_dir + + "/Platforms/XROS.platform/Developer/SDKs"); AddSDKSubdirsToSearchPaths(developer_dir + "/Platforms/BridgeOS.platform/Developer/SDKs"); } diff --git a/lldb/source/Plugins/Process/Linux/NativeRegisterContextLinux_loongarch64.cpp b/lldb/source/Plugins/Process/Linux/NativeRegisterContextLinux_loongarch64.cpp index b04018ee243fd..c4841950f1e07 100644 --- a/lldb/source/Plugins/Process/Linux/NativeRegisterContextLinux_loongarch64.cpp +++ b/lldb/source/Plugins/Process/Linux/NativeRegisterContextLinux_loongarch64.cpp @@ -27,18 +27,46 @@ // struct iovec definition #include +// LoongArch SIMD eXtension registers #ifndef NT_LOONGARCH_LSX -#define NT_LOONGARCH_LSX 0xa02 /* LoongArch SIMD eXtension registers */ +#define NT_LOONGARCH_LSX 0xa02 #endif +// LoongArch Advanced SIMD eXtension registers #ifndef NT_LOONGARCH_LASX -#define NT_LOONGARCH_LASX \ - 0xa03 /* LoongArch Advanced SIMD eXtension registers */ +#define NT_LOONGARCH_LASX 0xa03 +#endif + +// LoongArch hardware breakpoint registers +#ifndef NT_LOONGARCH_HW_BREAK +#define NT_LOONGARCH_HW_BREAK 0xa05 +#endif + +// LoongArch hardware watchpoint registers +#ifndef NT_LOONGARCH_HW_WATCH +#define NT_LOONGARCH_HW_WATCH 0xa06 #endif #define REG_CONTEXT_SIZE \ (GetGPRSize() + GetFPRSize() + sizeof(m_lsx) + sizeof(m_lasx)) +// ptrace has a struct type user_watch_state, which was replaced by +// user_watch_state_v2 when more watchpoints were added, so this file +// may be built on systems with one or both in the system headers. +// The type below has the same layout as user_watch_state_v2 but will +// not clash with that name if it exists. We can use the v2 layout even +// on old kernels as we will only see 8 watchpoints and the kernel will +// truncate any extra data we send to it. 
+struct loongarch_user_watch_state { + uint64_t dbg_info; + struct { + uint64_t addr; + uint64_t mask; + uint32_t ctrl; + uint32_t pad; + } dbg_regs[14]; +}; + using namespace lldb; using namespace lldb_private; using namespace lldb_private::process_linux; @@ -528,7 +556,7 @@ llvm::Error NativeRegisterContextLinux_loongarch64::ReadHardwareDebugInfo() { int regset = NT_LOONGARCH_HW_WATCH; struct iovec ioVec; - struct user_watch_state dreg_state; + struct loongarch_user_watch_state dreg_state; Status error; ioVec.iov_base = &dreg_state; @@ -556,7 +584,7 @@ llvm::Error NativeRegisterContextLinux_loongarch64::ReadHardwareDebugInfo() { llvm::Error NativeRegisterContextLinux_loongarch64::WriteHardwareDebugRegs( DREGType hwbType) { struct iovec ioVec; - struct user_watch_state dreg_state; + struct loongarch_user_watch_state dreg_state; int regset; memset(&dreg_state, 0, sizeof(dreg_state)); diff --git a/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationServerCommon.cpp b/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationServerCommon.cpp index c2fe05cad566e..67ba42f33d1dd 100644 --- a/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationServerCommon.cpp +++ b/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationServerCommon.cpp @@ -212,6 +212,8 @@ GDBRemoteCommunicationServerCommon::Handle_qHostInfo( response.PutCString("ostype:tvos;"); #elif defined(TARGET_OS_WATCH) && TARGET_OS_WATCH == 1 response.PutCString("ostype:watchos;"); +#elif defined(TARGET_OS_XR) && TARGET_OS_XR == 1 + response.PutCString("ostype:xros;"); #elif defined(TARGET_OS_BRIDGE) && TARGET_OS_BRIDGE == 1 response.PutCString("ostype:bridgeos;"); #else diff --git a/lldb/source/Plugins/SymbolFile/DWARF/DWARFASTParserClang.cpp b/lldb/source/Plugins/SymbolFile/DWARF/DWARFASTParserClang.cpp index 05625925d7cae..ec0004c70c6da 100644 --- a/lldb/source/Plugins/SymbolFile/DWARF/DWARFASTParserClang.cpp +++ b/lldb/source/Plugins/SymbolFile/DWARF/DWARFASTParserClang.cpp @@ -492,6 +492,10 @@ ParsedDWARFTypeAttributes::ParsedDWARFTypeAttributes(const DWARFDIE &die) { case DW_AT_reference: ref_qual = clang::RQ_LValue; break; + case DW_AT_APPLE_enum_kind: + enum_kind = static_cast( + form_value.Unsigned()); + break; } } } @@ -1001,9 +1005,10 @@ TypeSP DWARFASTParserClang::ParseEnum(const SymbolContext &sc, } CompilerType clang_type = m_ast.CreateEnumerationType( - attrs.name.GetStringRef(), GetClangDeclContextContainingDIE(def_die, nullptr), + attrs.name.GetStringRef(), + GetClangDeclContextContainingDIE(def_die, nullptr), GetOwningClangModule(def_die), attrs.decl, enumerator_clang_type, - attrs.is_scoped_enum); + attrs.is_scoped_enum, attrs.enum_kind); TypeSP type_sp = dwarf->MakeType(def_die.GetID(), attrs.name, attrs.byte_size, nullptr, attrs.type.Reference().GetID(), Type::eEncodingIsUID, @@ -3190,7 +3195,7 @@ void DWARFASTParserClang::ParseChildParameters( } } - assert(function_param_names.size() == function_param_names.size()); + assert(function_param_names.size() == function_param_types.size()); } clang::Decl *DWARFASTParserClang::GetClangDeclForDIE(const DWARFDIE &die) { diff --git a/lldb/source/Plugins/SymbolFile/DWARF/DWARFASTParserClang.h b/lldb/source/Plugins/SymbolFile/DWARF/DWARFASTParserClang.h index 36fb381d3e291..135dd06186c4b 100644 --- a/lldb/source/Plugins/SymbolFile/DWARF/DWARFASTParserClang.h +++ b/lldb/source/Plugins/SymbolFile/DWARF/DWARFASTParserClang.h @@ -568,6 +568,10 @@ struct ParsedDWARFTypeAttributes { ///< Indicates ref-qualifier of C++ member function if present. 
///< Is RQ_None otherwise. clang::RefQualifierKind ref_qual = clang::RQ_None; + + ///< Has a value if this DIE represents an enum that was declared + ///< with enum_extensibility. + std::optional enum_kind; }; #endif // LLDB_SOURCE_PLUGINS_SYMBOLFILE_DWARF_DWARFASTPARSERCLANG_H diff --git a/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp b/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp index 1da8fbe0bcd6d..4901b6029d9ce 100644 --- a/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp +++ b/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp @@ -1666,6 +1666,12 @@ TypeSystemClang::CreateClassTemplateSpecializationDecl( ast.getTypeDeclType(class_template_specialization_decl, nullptr); class_template_specialization_decl->setDeclName( class_template_decl->getDeclName()); + + // FIXME: set to fixed value for now so it's not uninitialized. + // One way to determine StrictPackMatch would be + // Sema::CheckTemplateTemplateArgument. + class_template_specialization_decl->setStrictPackMatch(false); + SetOwningModule(class_template_specialization_decl, owning_module); decl_ctx->addDecl(class_template_specialization_decl); @@ -2297,7 +2303,8 @@ CompilerType TypeSystemClang::GetOrCreateStructForIdentifier( CompilerType TypeSystemClang::CreateEnumerationType( llvm::StringRef name, clang::DeclContext *decl_ctx, OptionalClangModuleID owning_module, const Declaration &decl, - const CompilerType &integer_clang_type, bool is_scoped) { + const CompilerType &integer_clang_type, bool is_scoped, + std::optional enum_kind) { // TODO: Do something intelligent with the Declaration object passed in // like maybe filling in the SourceLocation with it... ASTContext &ast = getASTContext(); @@ -2315,6 +2322,10 @@ CompilerType TypeSystemClang::CreateEnumerationType( if (decl_ctx) decl_ctx->addDecl(enum_decl); + if (enum_kind) + enum_decl->addAttr( + clang::EnumExtensibilityAttr::CreateImplicit(ast, *enum_kind)); + // TODO: check if we should be setting the promotion type too? 
enum_decl->setIntegerType(ClangUtil::GetQualType(integer_clang_type)); diff --git a/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.h b/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.h index e70ad4c2973a5..99d9becffd128 100644 --- a/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.h +++ b/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.h @@ -22,6 +22,7 @@ #include "clang/AST/ASTContext.h" #include "clang/AST/ASTFwd.h" +#include "clang/AST/Attr.h" #include "clang/AST/Decl.h" #include "clang/AST/TemplateBase.h" #include "clang/AST/Type.h" @@ -498,12 +499,12 @@ class TypeSystemClang : public TypeSystem { bool is_vector); // Enumeration Types - CompilerType CreateEnumerationType(llvm::StringRef name, - clang::DeclContext *decl_ctx, - OptionalClangModuleID owning_module, - const Declaration &decl, - const CompilerType &integer_qual_type, - bool is_scoped); + CompilerType CreateEnumerationType( + llvm::StringRef name, clang::DeclContext *decl_ctx, + OptionalClangModuleID owning_module, const Declaration &decl, + const CompilerType &integer_qual_type, bool is_scoped, + std::optional enum_kind = + std::nullopt); // Integer type functions diff --git a/lldb/source/Symbol/UnwindTable.cpp b/lldb/source/Symbol/UnwindTable.cpp index da88b0c9c4ea1..61d51192bf3d1 100644 --- a/lldb/source/Symbol/UnwindTable.cpp +++ b/lldb/source/Symbol/UnwindTable.cpp @@ -86,6 +86,7 @@ void UnwindTable::Initialize() { void UnwindTable::ModuleWasUpdated() { std::lock_guard guard(m_mutex); m_scanned_all_unwind_sources = false; + m_unwinds.clear(); } UnwindTable::~UnwindTable() = default; @@ -118,7 +119,7 @@ UnwindTable::GetAddressRange(const Address &addr, const SymbolContext &sc) { FuncUnwindersSP UnwindTable::GetFuncUnwindersContainingAddress(const Address &addr, - SymbolContext &sc) { + const SymbolContext &sc) { Initialize(); std::lock_guard guard(m_mutex); diff --git a/lldb/source/ValueObject/ValueObject.cpp b/lldb/source/ValueObject/ValueObject.cpp index 2864af107b925..9d98f62c0379b 100644 --- a/lldb/source/ValueObject/ValueObject.cpp +++ b/lldb/source/ValueObject/ValueObject.cpp @@ -849,6 +849,22 @@ bool ValueObject::SetData(DataExtractor &data, Status &error) { return true; } +llvm::ArrayRef ValueObject::GetLocalBuffer() const { + if (m_value.GetValueType() != Value::ValueType::HostAddress) + return {}; + auto start = m_value.GetScalar().ULongLong(LLDB_INVALID_ADDRESS); + if (start == LLDB_INVALID_ADDRESS) + return {}; + // Does our pointer point to this value object's m_data buffer? + if ((uint64_t)m_data.GetDataStart() == start) + return m_data.GetData(); + // Does our pointer point to the value's buffer? + if ((uint64_t)m_value.GetBuffer().GetBytes() == start) + return m_value.GetBuffer().GetData(); + // Our pointer points to something else. We can't know what the size is. 
+ return {}; +} + static bool CopyStringDataToBufferSP(const StreamString &source, lldb::WritableDataBufferSP &destination) { llvm::StringRef src = source.GetString(); diff --git a/lldb/source/ValueObject/ValueObjectDynamicValue.cpp b/lldb/source/ValueObject/ValueObjectDynamicValue.cpp index 588c644bbfd07..ecd663af68c2d 100644 --- a/lldb/source/ValueObject/ValueObjectDynamicValue.cpp +++ b/lldb/source/ValueObject/ValueObjectDynamicValue.cpp @@ -145,6 +145,7 @@ bool ValueObjectDynamicValue::UpdateValue() { Address dynamic_address; bool found_dynamic_type = false; Value::ValueType value_type; + llvm::ArrayRef local_buffer; LanguageRuntime *runtime = nullptr; @@ -157,7 +158,7 @@ bool ValueObjectDynamicValue::UpdateValue() { // Try the preferred runtime first. found_dynamic_type = preferred_runtime->GetDynamicTypeAndAddress( *m_parent, m_use_dynamic, class_type_or_name, dynamic_address, - value_type); + value_type, local_buffer); if (found_dynamic_type) // Set the operative `runtime` for later use in this function. runtime = preferred_runtime; @@ -166,20 +167,20 @@ bool ValueObjectDynamicValue::UpdateValue() { // Fallback to the runtime for `known_type`. found_dynamic_type = runtime->GetDynamicTypeAndAddress( *m_parent, m_use_dynamic, class_type_or_name, dynamic_address, - value_type); + value_type, local_buffer); } else { runtime = process->GetLanguageRuntime(lldb::eLanguageTypeC_plus_plus); if (runtime) found_dynamic_type = runtime->GetDynamicTypeAndAddress( *m_parent, m_use_dynamic, class_type_or_name, dynamic_address, - value_type); + value_type, local_buffer); if (!found_dynamic_type) { runtime = process->GetLanguageRuntime(lldb::eLanguageTypeObjC); if (runtime) found_dynamic_type = runtime->GetDynamicTypeAndAddress( *m_parent, m_use_dynamic, class_type_or_name, dynamic_address, - value_type); + value_type, local_buffer); } } @@ -239,11 +240,29 @@ bool ValueObjectDynamicValue::UpdateValue() { if (m_address.IsValid()) SetValueDidChange(true); - // We've moved, so we should be fine... - m_address = dynamic_address; - lldb::TargetSP target_sp(GetTargetSP()); - lldb::addr_t load_address = m_address.GetLoadAddress(target_sp.get()); - m_value.GetScalar() = load_address; + // If we found a host address, and the dynamic type fits in the local buffer + // that was found, point to that buffer. Later on this function will copy + // the buffer over. + if (value_type == Value::ValueType::HostAddress && !local_buffer.empty()) { + auto *exe_scope = exe_ctx.GetBestExecutionContextScope(); + // If we found a host address but it doesn't fit in the buffer, there's + // nothing we can do. + if (local_buffer.size() < + m_dynamic_type_info.GetCompilerType().GetByteSize(exe_scope)) { + SetValueIsValid(false); + return false; + } + + m_value.GetScalar() = (uint64_t)local_buffer.data(); + m_address = LLDB_INVALID_ADDRESS; + } else { + // Otherwise we have a legitimate address on the target. Point to the load + // address. + m_address = dynamic_address; + lldb::TargetSP target_sp(GetTargetSP()); + lldb::addr_t load_address = m_address.GetLoadAddress(target_sp.get()); + m_value.GetScalar() = load_address; + } } if (runtime) @@ -258,7 +277,11 @@ bool ValueObjectDynamicValue::UpdateValue() { LLDB_LOGF(log, "[%s %p] has a new dynamic type %s", GetName().GetCString(), static_cast(this), GetTypeName().GetCString()); - if (m_address.IsValid() && m_dynamic_type_info) { + // m_address could be invalid but we could still have a local buffer + // containing the dynamic value. 
+ if ((m_address.IsValid() || + m_value.GetValueType() == Value::ValueType::HostAddress) && + m_dynamic_type_info) { // The variable value is in the Scalar value inside the m_value. We can // point our m_data right to it. m_error = m_value.GetValueAsData(&exe_ctx, m_data, GetModule().get()); diff --git a/lldb/test/Shell/Commands/command-breakpoint-by-addr.test b/lldb/test/Shell/Commands/command-breakpoint-by-addr.test new file mode 100644 index 0000000000000..0a9dfd916a9c7 --- /dev/null +++ b/lldb/test/Shell/Commands/command-breakpoint-by-addr.test @@ -0,0 +1,14 @@ +# RUN: %clang_host -g -O0 %S/Inputs/main.c -o %t.out +# RUN: %lldb %t.out -b -s %s | FileCheck %s + +breakpoint set -a "main+26" +breakpoint set -a "main+ 26" +breakpoint set -a "main +26" +breakpoint set -a "main + 26" +breakpoint set -a "main + 26" + +# CHECK: Breakpoint 1: address = +# CHECK: Breakpoint 2: address = +# CHECK: Breakpoint 3: address = +# CHECK: Breakpoint 4: address = +# CHECK: Breakpoint 5: address = diff --git a/lldb/test/Shell/Expr/TestEnumExtensibility.m b/lldb/test/Shell/Expr/TestEnumExtensibility.m new file mode 100644 index 0000000000000..738b4fa2c7786 --- /dev/null +++ b/lldb/test/Shell/Expr/TestEnumExtensibility.m @@ -0,0 +1,33 @@ +// UNSUPPORTED: system-linux, system-windows + +// RUN: %clangxx_host %s -c -g -o %t +// RUN: %lldb %t \ +// RUN: -o "target var gClosed gOpen gNS gNSOpts" \ +// RUN: -o "image dump ast" \ +// RUN: 2>&1 | FileCheck %s + +#import + +enum __attribute__((enum_extensibility(closed))) Closed { C1 } gClosed; + +enum __attribute__((enum_extensibility(open))) Open { O1 } gOpen; + +typedef NS_ENUM(int, NS) { N1 } gNS; + +typedef NS_OPTIONS(int, NSO) { OPT1 } gNSOpts; + +// CHECK: EnumDecl {{.*}} Closed +// CHECK-NEXT: |-EnumExtensibilityAttr {{.*}} Closed +// CHECK-NEXT: `-EnumConstantDecl {{.*}} C1 'Closed' + +// CHECK: EnumDecl {{.*}} Open +// CHECK-NEXT: |-EnumExtensibilityAttr {{.*}} Open +// CHECK-NEXT: `-EnumConstantDecl {{.*}} O1 'Open' + +// CHECK: EnumDecl {{.*}} NS +// CHECK-NEXT: |-EnumExtensibilityAttr {{.*}} Open +// CHECK-NEXT: `-EnumConstantDecl {{.*}} N1 'NS' + +// CHECK: EnumDecl {{.*}} NSO +// CHECK-NEXT: |-EnumExtensibilityAttr {{.*}} Open +// CHECK-NEXT: `-EnumConstantDecl {{.*}} OPT1 'NSO' diff --git a/lldb/test/Shell/SymbolFile/Inputs/target-symbols-add-unwind.c b/lldb/test/Shell/SymbolFile/Inputs/target-symbols-add-unwind.c deleted file mode 100644 index 237c8ce181774..0000000000000 --- a/lldb/test/Shell/SymbolFile/Inputs/target-symbols-add-unwind.c +++ /dev/null @@ -1 +0,0 @@ -int main() {} diff --git a/lldb/test/Shell/SymbolFile/target-symbols-add-unwind.test b/lldb/test/Shell/SymbolFile/target-symbols-add-unwind.test index 5420213d405e8..249d2ef79a891 100644 --- a/lldb/test/Shell/SymbolFile/target-symbols-add-unwind.test +++ b/lldb/test/Shell/SymbolFile/target-symbols-add-unwind.test @@ -1,27 +1,107 @@ -# TODO: When it's possible to run "image show-unwind" without a running -# process, we can remove the unsupported line below, and hard-code an ELF -# triple in the test. 
-# UNSUPPORTED: system-windows, system-darwin - -# RUN: cd %T -# RUN: %clang_host %S/Inputs/target-symbols-add-unwind.c -g \ -# RUN: -fno-unwind-tables -fno-asynchronous-unwind-tables \ -# RUN: -o target-symbols-add-unwind.debug -# RUN: llvm-objcopy --strip-debug target-symbols-add-unwind.debug \ -# RUN: target-symbols-add-unwind.stripped -# RUN: %lldb target-symbols-add-unwind.stripped -s %s -o quit | FileCheck %s - -process launch --stop-at-entry -image show-unwind -n main -# CHECK-LABEL: image show-unwind -n main +# NB: The minidump core file exists only because "image show-unwind" currently +# requires a process to exist. If that changes, it can be removed. + +# REQUIRES: x86, lld + +# RUN: split-file %s %t +# RUN: yaml2obj %t/a.core.yaml -o %t/a.core +# RUN: %clang -c --target=x86_64-pc-linux %t/a.s -o %t/a.o +# RUN: ld.lld --shared %t/a.o -o %t/a.debug --build-id=0xdeadbeef \ +# RUN: --image-base=0x10000 +# RUN: llvm-objcopy --strip-all %t/a.debug %t/a.stripped +# RUN: cd %t +# RUN: %lldb -c %t/a.core \ +# RUN: -o "settings set interpreter.stop-command-source-on-error false" \ +# RUN: -s %t/commands -o quit | FileCheck %s + +#--- commands + +image add a.stripped +image load --file a.stripped --slide 0 +image list +# CHECK-LABEL: image list +# CHECK: [ 0] DEADBEEF 0x0000000000010000 a.stripped + +## Due to missing symbol information this (incorrectly) prints the unwind +## information for public_fn1 +image show-unwind -n public_fn1 --cached true +# CHECK-LABEL: image show-unwind -n public_fn1 +# CHECK-NEXT: UNWIND PLANS for a.stripped`public_fn1 (start addr 0x12000) # CHECK-NOT: debug_frame UnwindPlan: -target symbols add -s target-symbols-add-unwind.stripped target-symbols-add-unwind.debug +target symbols add -s a.stripped a.debug # CHECK-LABEL: target symbols add # CHECK: symbol file {{.*}} has been added to {{.*}} -image show-unwind -n main -# CHECK-LABEL: image show-unwind -n main +image show-unwind -n private_fn --cached true +# CHECK-LABEL: image show-unwind -n private_fn +# CHECK-NEXT: UNWIND PLANS for a.stripped`private_fn (start addr 0x12010) # CHECK: debug_frame UnwindPlan: # CHECK-NEXT: This UnwindPlan originally sourced from DWARF CFI # CHECK-NEXT: This UnwindPlan is sourced from the compiler: yes. +# CHECK-NEXT: This UnwindPlan is valid at all instruction locations: no. +# CHECK-NEXT: This UnwindPlan is for a trap handler function: no. 
+# CHECK-NEXT: Address range of this UnwindPlan: [a.stripped.PT_LOAD[1]..text + 16-0x0000000000000013) + + +#--- a.s + + .text + .cfi_sections .debug_frame + .globl public_fn1, public_fn2 + + .p2align 12 +public_fn1: + .cfi_startproc + pushq %rbp + .cfi_def_cfa_offset 16 + .cfi_offset %rbp, -16 + popq %rbp + .cfi_def_cfa %rsp, 8 + retq + .cfi_endproc + + .p2align 4 +private_fn: + .cfi_startproc + pushq %rbp + .cfi_def_cfa_offset 16 + .cfi_offset %rbp, -16 + popq %rbp + .cfi_def_cfa %rsp, 8 + retq + .cfi_endproc + + .p2align 4 +public_fn2: + .cfi_startproc + pushq %rbp + .cfi_def_cfa_offset 16 + .cfi_offset %rbp, -16 + popq %rbp + .cfi_def_cfa %rsp, 8 + retq + .cfi_endproc + +#--- a.core.yaml +--- !minidump +Streams: + - Type: SystemInfo + Processor Arch: AMD64 + Platform ID: Linux + CPU: + Vendor ID: GenuineIntel + Version Info: 0x00000000 + Feature Info: 0x00000000 + - Type: ThreadList + Threads: + - Thread Id: 0x000074F3 + Context: 0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000B001000000000006CAE000000006B7FC05A0000C81D415A0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000A2BF9E5A6B7F0000000000000000000000000000000000008850C14BFD7F00009850C14BFD7F00000100000000000000B04AC14BFD7F0000000000000000000060812D01000000000800000000000000B065E05A6B7F00008004400000000000E050C14BFD7F00000000000000000000000000000000000004400000000000007F03FFFF0000FFFFFFFFFFFF000000000000000000000000801F00006B7F00000400000000000000B84CC14BFD7F0000304D405A6B7F0000C84DC14BFD7F0000C0AA405A6B7F00004F033D0000000000B84DC14BFD7F0000E84DC14BFD7F0000000000000000000000000000000000000070E05A6B7F000078629E5A6B7F0000C81D415A6B7F0000804F9E5A6B7F00000000000001000000E603000001000000E093115A6B7F0000804EC14BFD7F0000584EC14BFD7F000099ADC05A6B7F00000100000000000000AAAAD77D0000000002000000000000000800000000000000B065E05A6B7F0000E6B7C05A6B7F0000010000006B7F0000884DC14BFD7F0000106F7C5A6B7F0000984EC14BFD7F0000488B7C5A6B7F0000C4A71CB90000000001000000000000000800000000000000B065E05A6B7F000048B6C05A6B7F0000702AE25A6B7F0000D84DC14BFD7F000030489E5A6B7F0000E84EC14BFD7F0000E05E9E5A6B7F00000991F0460000000001000000000000000800000000000000B065E05A6B7F000048B6C05A6B7F00000100000000000000284EC14BFD7F00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 + Stack: + Start of Memory Range: 0x00007FFD4BC15080 + Content: 30044000000000000000000000000000 + - Type: MemoryList + Memory Ranges: + - 
Start of Memory Range: 0x00007FFD4BC15080 + Content: 30044000000000000000000000000000 +... diff --git a/lldb/tools/debugserver/source/RNBRemote.cpp b/lldb/tools/debugserver/source/RNBRemote.cpp index efa015920c0d5..8a53094429aba 100644 --- a/lldb/tools/debugserver/source/RNBRemote.cpp +++ b/lldb/tools/debugserver/source/RNBRemote.cpp @@ -6369,6 +6369,8 @@ rnb_err_t RNBRemote::HandlePacket_qProcessInfo(const char *p) { rep << "ostype:bridgeos;"; #elif defined(TARGET_OS_OSX) && TARGET_OS_OSX == 1 rep << "ostype:macosx;"; +#elif defined(TARGET_OS_XR) && TARGET_OS_XR == 1 + rep << "ostype:xros;"; #else rep << "ostype:ios;"; #endif @@ -6422,6 +6424,8 @@ rnb_err_t RNBRemote::HandlePacket_qProcessInfo(const char *p) { rep << "ostype:watchos;"; #elif defined(TARGET_OS_BRIDGE) && TARGET_OS_BRIDGE == 1 rep << "ostype:bridgeos;"; +#elif defined(TARGET_OS_XR) && TARGET_OS_XR == 1 + rep << "ostype:xros;"; #else rep << "ostype:ios;"; #endif diff --git a/lldb/unittests/TestingSupport/Symbol/ClangTestUtils.h b/lldb/unittests/TestingSupport/Symbol/ClangTestUtils.h index 21525266119b4..63b2ba8c8688a 100644 --- a/lldb/unittests/TestingSupport/Symbol/ClangTestUtils.h +++ b/lldb/unittests/TestingSupport/Symbol/ClangTestUtils.h @@ -21,20 +21,21 @@ inline clang::DeclarationName getDeclarationName(TypeSystemClang &ast, return ast.getASTContext().DeclarationNames.getIdentifier(&II); } -inline CompilerType createRecord(TypeSystemClang &ast, llvm::StringRef name) { +inline CompilerType +createRecord(TypeSystemClang &ast, llvm::StringRef name, + lldb::LanguageType lang = lldb::LanguageType::eLanguageTypeC) { return ast.CreateRecordType(ast.getASTContext().getTranslationUnitDecl(), OptionalClangModuleID(), - lldb::AccessType::eAccessPublic, name, 0, - lldb::LanguageType::eLanguageTypeC); + lldb::AccessType::eAccessPublic, name, 0, lang); } /// Create a record with the given name and a field with the given type /// and name. 
-inline CompilerType createRecordWithField(TypeSystemClang &ast, - llvm::StringRef record_name, - CompilerType field_type, - llvm::StringRef field_name) { - CompilerType t = createRecord(ast, record_name); +inline CompilerType createRecordWithField( + TypeSystemClang &ast, llvm::StringRef record_name, CompilerType field_type, + llvm::StringRef field_name, + lldb::LanguageType lang = lldb::LanguageType::eLanguageTypeC) { + CompilerType t = createRecord(ast, record_name, lang); TypeSystemClang::StartTagDeclarationDefinition(t); ast.AddFieldToRecordType(t, field_name, field_type, @@ -63,12 +64,13 @@ struct SourceASTWithRecord { CompilerType record_type; clang::RecordDecl *record_decl = nullptr; clang::FieldDecl *field_decl = nullptr; - SourceASTWithRecord() { + SourceASTWithRecord( + lldb::LanguageType lang = lldb::LanguageType::eLanguageTypeC) { holder = std::make_unique("test ASTContext"); ast = holder->GetAST(); record_type = createRecordWithField( *ast, "Source", ast->GetBasicType(lldb::BasicType::eBasicTypeChar), - "a_field"); + "a_field", lang); record_decl = llvm::cast(ClangUtil::GetAsTagDecl(record_type)); field_decl = *record_decl->fields().begin(); diff --git a/lldb/unittests/ValueObject/CMakeLists.txt b/lldb/unittests/ValueObject/CMakeLists.txt index 14808aa2f213a..6ef0091647a59 100644 --- a/lldb/unittests/ValueObject/CMakeLists.txt +++ b/lldb/unittests/ValueObject/CMakeLists.txt @@ -1,6 +1,7 @@ add_lldb_unittest(LLDBValueObjectTests DumpValueObjectOptionsTests.cpp DILLexerTests.cpp + DynamicValueObjectLocalBuffer.cpp LINK_LIBS lldbValueObject diff --git a/lldb/unittests/ValueObject/DynamicValueObjectLocalBuffer.cpp b/lldb/unittests/ValueObject/DynamicValueObjectLocalBuffer.cpp new file mode 100644 index 0000000000000..417708dd2dc22 --- /dev/null +++ b/lldb/unittests/ValueObject/DynamicValueObjectLocalBuffer.cpp @@ -0,0 +1,240 @@ +//===---DynamicValueObjectLocalBuffer.cpp-----------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "Plugins/Platform/Linux/PlatformLinux.h" +#include "Plugins/ScriptInterpreter/None/ScriptInterpreterNone.h" +#include "Plugins/TypeSystem/Clang/TypeSystemClang.h" +#include "TestingSupport/SubsystemRAII.h" +#include "TestingSupport/Symbol/ClangTestUtils.h" +#include "lldb/Core/Debugger.h" +#include "lldb/Core/PluginManager.h" +#include "lldb/Target/Language.h" +#include "lldb/Target/LanguageRuntime.h" +#include "lldb/ValueObject/ValueObject.h" +#include "lldb/ValueObject/ValueObjectConstResult.h" + +#include "gtest/gtest.h" + +using namespace lldb; +using namespace lldb_private; +using namespace lldb_private::clang_utils; + +// This entire class is boilerplate. 
+struct MockLanguage : public Language { + + llvm::StringRef GetPluginName() override { return "MockLanguage"; } + lldb::LanguageType GetLanguageType() const override { + return lldb::eLanguageTypeC_plus_plus; + }; + + static Language *CreateInstance(lldb::LanguageType language) { + return new MockLanguage(); + } + static void Initialize() { + PluginManager::RegisterPlugin("MockLanguage", "Mock Language", + CreateInstance); + }; + + static void Terminate() { PluginManager::UnregisterPlugin(CreateInstance); } + bool IsSourceFile(llvm::StringRef file_path) const override { return true; } +}; +LLDB_PLUGIN_DEFINE(MockLanguage) + +struct MockLanguageRuntime : public LanguageRuntime { + // This is the only method in this class that matters for this test. + // This will unconditionally succeed and return a type with size 4, + // a value_type of HostAddress, and a local buffer that points to the parent's + // local buffer. + // The tests will set that buffer to be either be larger or smaller than the + // type we're returning. + bool + GetDynamicTypeAndAddress(ValueObject &in_value, + lldb::DynamicValueType use_dynamic, + TypeAndOrName &class_type_or_name, Address &address, + Value::ValueType &value_type, + llvm::ArrayRef &local_buffer) override { + auto ast = in_value.GetCompilerType() + .GetTypeSystem() + .dyn_cast_or_null(); + + auto int_type = createRecordWithField( + *ast, "TypeWitInt", ast->GetBasicType(lldb::BasicType::eBasicTypeInt), + "theIntField", LanguageType::eLanguageTypeC_plus_plus); + class_type_or_name.SetCompilerType(int_type); + local_buffer = in_value.GetLocalBuffer(); + value_type = Value::ValueType::HostAddress; + return true; + } + + // All of this is boilerplate. + MockLanguageRuntime(Process *process) : LanguageRuntime(process) {} + llvm::StringRef GetPluginName() override { return "MockLanguageRuntime"; } + lldb::LanguageType GetLanguageType() const override { + return lldb::eLanguageTypeC_plus_plus; + } + + llvm::Error GetObjectDescription(Stream &str, ValueObject &object) override { + return llvm::Error::success(); + } + + llvm::Error GetObjectDescription(Stream &str, Value &value, + ExecutionContextScope *exe_scope) override { + return llvm::Error::success(); + } + + bool CouldHaveDynamicValue(ValueObject &in_value) override { return true; } + + TypeAndOrName FixUpDynamicType(const TypeAndOrName &type_and_or_name, + ValueObject &static_value) override { + return type_and_or_name; + } + + lldb::BreakpointResolverSP + CreateExceptionResolver(const lldb::BreakpointSP &bkpt, bool catch_bp, + bool throw_bp) override { + return lldb::BreakpointResolverSP(); + } + + lldb::ThreadPlanSP GetStepThroughTrampolinePlan(Thread &thread, + bool stop_others) override { + return {}; + } + + static LanguageRuntime *CreateInstance(Process *process, + LanguageType language) { + return new MockLanguageRuntime(process); + } + + static void Initialize() { + PluginManager::RegisterPlugin( + "MockLanguageRuntime", "MockLanguageRuntime", CreateInstance, + [](CommandInterpreter &interpreter) -> lldb::CommandObjectSP { + return {}; + }, + [](lldb::LanguageType language, + bool throw_bp) -> BreakpointPreconditionSP { return {}; }); + } + + static void Terminate() { PluginManager::UnregisterPlugin(CreateInstance); } +}; +LLDB_PLUGIN_DEFINE(MockLanguageRuntime) + +// This entire class is boilerplate. 
+struct MockProcess : Process { + MockProcess(lldb::TargetSP target_sp, lldb::ListenerSP listener_sp) + : Process(target_sp, listener_sp) {} + + llvm::StringRef GetPluginName() override { return "mock process"; } + + bool CanDebug(lldb::TargetSP target, bool plugin_specified_by_name) override { + return false; + }; + + Status DoDestroy() override { return {}; } + + void RefreshStateAfterStop() override {} + + bool DoUpdateThreadList(ThreadList &old_thread_list, + ThreadList &new_thread_list) override { + return false; + }; + + size_t DoReadMemory(lldb::addr_t vm_addr, void *buf, size_t size, + Status &error) override { + // No need to read memory in these tests. + return size; + } +}; + +class DynamicValueObjectLocalBufferTest : public ::testing::Test { +public: + void SetUp() override { + ArchSpec arch("i386-pc-linux"); + Platform::SetHostPlatform( + platform_linux::PlatformLinux::CreateInstance(true, &arch)); + // std::call_once(TestUtilities::g_debugger_initialize_flag, + // []() { Debugger::Initialize(nullptr); }); + m_debugger_sp = Debugger::CreateInstance(); + ASSERT_TRUE(m_debugger_sp); + m_debugger_sp->GetTargetList().CreateTarget(*m_debugger_sp, "", arch, + eLoadDependentsNo, + m_platform_sp, m_target_sp); + ASSERT_TRUE(m_target_sp); + ASSERT_TRUE(m_target_sp->GetArchitecture().IsValid()); + ASSERT_TRUE(m_platform_sp); + m_listener_sp = Listener::MakeListener("dummy"); + m_process_sp = std::make_shared(m_target_sp, m_listener_sp); + ASSERT_TRUE(m_process_sp); + m_exe_ctx = ExecutionContext(m_process_sp); + + m_holder = std::make_unique("test"); + m_type_system = m_holder->GetAST(); + LLDB_PLUGIN_INITIALIZE(MockLanguage); + LLDB_PLUGIN_INITIALIZE(MockLanguageRuntime); + } + void TearDown() override { + LLDB_PLUGIN_TERMINATE(MockLanguage); + LLDB_PLUGIN_TERMINATE(MockLanguageRuntime); + } + + void TestValueObjectWithLocalBuffer(DataExtractor &data_extractor, + bool should_succeed) { + std::unique_ptr holder = + std::make_unique("test ASTContext"); + TypeSystemClang *ast = holder->GetAST(); + auto char_type = createRecordWithField( + *ast, "TypeWithChar", + ast->GetBasicType(lldb::BasicType::eBasicTypeChar), "theField"); + + ExecutionContextScope *exe_scope = m_exe_ctx.GetBestExecutionContextScope(); + ConstString var_name("test_var"); + auto valobj_sp = ValueObjectConstResult::Create(exe_scope, char_type, + var_name, data_extractor); + auto dyn_valobj = valobj_sp->GetDynamicValue(lldb::eDynamicCanRunTarget); + ASSERT_TRUE(dyn_valobj->GetValueIsValid() == should_succeed); + } + + SubsystemRAII + m_subsystems; + std::unique_ptr m_holder; + lldb::DebuggerSP m_debugger_sp; + lldb::TargetSP m_target_sp; + lldb::PlatformSP m_platform_sp; + lldb::ListenerSP m_listener_sp; + lldb::ProcessSP m_process_sp; + ExecutionContext m_exe_ctx; + TypeSystemClang *m_type_system; +}; + +TEST_F(DynamicValueObjectLocalBufferTest, BufferTooSmall) { + /// Test that a value object with a buffer to small to fit the + /// "dynamic" type will return an invalid dynamic value object. + uint8_t value = 1; + ByteOrder endian = endian::InlHostByteOrder(); + DataExtractor data_extractor{&value, sizeof(value), endian, 4}; + TestValueObjectWithLocalBuffer(data_extractor, false); +} + +TEST_F(DynamicValueObjectLocalBufferTest, BufferTooBig) { + /// Test that a value object with a buffer big enough fit the + /// "dynamic" type will return a valid dynamic value object. 
+ uint64_t value = 1; + ByteOrder endian = endian::InlHostByteOrder(); + DataExtractor data_extractor{&value, sizeof(value), endian, 4}; + TestValueObjectWithLocalBuffer(data_extractor, true); +} + +TEST_F(DynamicValueObjectLocalBufferTest, BufferExactlyRight) { + /// Test that a value object with a buffer exactly the size of the + /// "dynamic" type will return a valid dynamic value object. + uint32_t value = 1; + ByteOrder endian = endian::InlHostByteOrder(); + DataExtractor data_extractor{&value, sizeof(value), endian, 4}; + TestValueObjectWithLocalBuffer(data_extractor, true); +} diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst index 8891aedcb58e5..f57c29ccdd588 100644 --- a/llvm/docs/LangRef.rst +++ b/llvm/docs/LangRef.rst @@ -16122,6 +16122,65 @@ of the argument. When specified with the fast-math-flag 'afn', the result may be approximated using a less accurate calculation. +'``llvm.modf.*``' Intrinsic +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Syntax: +""""""" + +This is an overloaded intrinsic. You can use ``llvm.modf`` on any floating-point +or vector of floating-point type. However, not all targets support all types. + +:: + + declare { float, float } @llvm.modf.f32(float %Val) + declare { double, double } @llvm.modf.f64(double %Val) + declare { x86_fp80, x86_fp80 } @llvm.modf.f80(x86_fp80 %Val) + declare { fp128, fp128 } @llvm.modf.f128(fp128 %Val) + declare { ppc_fp128, ppc_fp128 } @llvm.modf.ppcf128(ppc_fp128 %Val) + declare { <4 x float>, <4 x float> } @llvm.modf.v4f32(<4 x float> %Val) + +Overview: +""""""""" + +The '``llvm.modf.*``' intrinsics return the operand's integral and fractional +parts. + +Arguments: +"""""""""" + +The argument is a :ref:`floating-point ` value or +:ref:`vector ` of floating-point values. Returns two values matching +the argument type in a struct. + +Semantics: +"""""""""" + +Return the same values as a corresponding libm '``modf``' function without +trapping or setting ``errno``. + +The first result is the fractional part of the operand and the second result is +the integral part of the operand. Both results have the same sign as the operand. + +Not including exceptional inputs (listed below), ``llvm.modf.*`` is semantically +equivalent to: + +:: + + %fp = frem %x, 1.0 ; Fractional part + %ip = fsub %x, %fp ; Integral part + +(assuming no floating-point precision errors) + +If the argument is a zero, returns a zero with the same sign for both the +fractional and integral parts. + +If the argument is an infinity, returns a fractional part of zero with the same +sign, and infinity with the same sign as the integral part. + +When specified with the fast-math-flag 'afn', the result may be approximated +using a less accurate calculation. + '``llvm.pow.*``' Intrinsic ^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/llvm/docs/NVPTXUsage.rst b/llvm/docs/NVPTXUsage.rst index dec6ad4e54115..dcd0a3ac3639b 100644 --- a/llvm/docs/NVPTXUsage.rst +++ b/llvm/docs/NVPTXUsage.rst @@ -1060,6 +1060,81 @@ flavors of the instruction respectively. For more information, refer to the PTX ISA ``_. +'``llvm.nvvm.tcgen05.commit``' +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Syntax: +""""""" + +.. 
code-block:: llvm + + declare void @llvm.nvvm.tcgen05.commit.{cg1,cg2}(ptr %mbar) + declare void @llvm.nvvm.tcgen05.commit.shared.{cg1,cg2}(ptr addrspace(3) %mbar) + declare void @llvm.nvvm.tcgen05.commit.mc.{cg1,cg2}(ptr %mbar, i16 %mc) + declare void @llvm.nvvm.tcgen05.commit.mc.shared.{cg1,cg2}(ptr addrspace(3) %mbar, i16 %mc) + +Overview: +""""""""" + +The '``@llvm.nvvm.tcgen05.commit.*``' intrinsics correspond to the +``tcgen05.commit.{cg1/cg2}.mbarrier::arrive::one.*`` set of PTX instructions. +The ``tcgen05.commit`` is an asynchronous instruction which makes the mbarrier +object (``%mbar``) track the completion of all prior asynchronous tcgen05 operations. +The ``.mc`` variants allow signaling on the mbarrier objects of multiple CTAs +(specified by ``%mc``) in the cluster. The ``.cg1`` and ``.cg2`` variants generate +``cta_group::1`` and ``cta_group::2`` flavors of the instruction respectively. + +For more information, refer to the PTX ISA +``_. + +'``llvm.nvvm.tcgen05.wait``' +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Syntax: +""""""" + +.. code-block:: llvm + + declare void @llvm.nvvm.tcgen05.wait.ld() + declare void @llvm.nvvm.tcgen05.wait.st() + +Overview: +""""""""" + +The '``@llvm.nvvm.tcgen05.wait.ld/st``' intrinsics correspond to +the ``tcgen05.wait::{ld/st}.sync.aligned`` pair of PTX instructions. +The ``tcgen05.wait::ld`` causes the executing thread to block until +all prior ``tcgen05.ld`` operations issued by the executing thread +have completed. The ``tcgen05.wait::st`` causes the executing thread +to block until all prior ``tcgen05.st`` operations issued by the +executing thread have completed. + +For more information, refer to the PTX ISA +``_. + +'``llvm.nvvm.tcgen05.fence``' +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Syntax: +""""""" + +.. code-block:: llvm + + declare void @llvm.nvvm.tcgen05.fence.before.thread.sync() + declare void @llvm.nvvm.tcgen05.fence.after.thread.sync() + +Overview: +""""""""" + +The '``@llvm.nvvm.tcgen05.fence.*``' intrinsics correspond to +the ``tcgen05.fence::{before/after}_thread_sync`` pair of PTX instructions. +These instructions act as code motion fences for asynchronous tcgen05 +operations. + +For more information, refer to the PTX ISA +``_. 
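To make the tcgen05 documentation above more concrete, here is a hedged C++ sketch (not part of the patch) of emitting one of the new intrinsics through IRBuilder; the Intrinsic:: enumerator name is assumed to follow the usual mangling of the IntrinsicsNVVM.td entry added later in this patch (int_nvvm_tcgen05_commit_cg1):

  #include "llvm/IR/IRBuilder.h"
  #include "llvm/IR/IntrinsicsNVPTX.h"

  // Make the mbarrier pointed to by MBarPtr track the completion of all prior
  // asynchronous tcgen05 operations (cta_group::1 flavor).
  void emitTcgen05Commit(llvm::IRBuilderBase &B, llvm::Value *MBarPtr) {
    B.CreateIntrinsic(llvm::Intrinsic::nvvm_tcgen05_commit_cg1,
                      /*Types=*/{}, /*Args=*/{MBarPtr});
  }
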
+ + Other Intrinsics ---------------- diff --git a/llvm/include/llvm/BinaryFormat/DXContainer.h b/llvm/include/llvm/BinaryFormat/DXContainer.h index 21e28d546286e..fbab066bf4517 100644 --- a/llvm/include/llvm/BinaryFormat/DXContainer.h +++ b/llvm/include/llvm/BinaryFormat/DXContainer.h @@ -14,6 +14,8 @@ #define LLVM_BINARYFORMAT_DXCONTAINER_H #include "llvm/ADT/StringRef.h" +#include "llvm/Support/BinaryStreamError.h" +#include "llvm/Support/Error.h" #include "llvm/Support/SwapByteOrder.h" #include "llvm/TargetParser/Triple.h" @@ -152,6 +154,11 @@ enum class FeatureFlags : uint64_t { static_assert((uint64_t)FeatureFlags::NextUnusedBit <= 1ull << 63, "Shader flag bits exceed enum size."); +#define ROOT_ELEMENT_FLAG(Num, Val) Val = 1ull << Num, +enum class RootElementFlag : uint32_t { +#include "DXContainerConstants.def" +}; + PartType parsePartType(StringRef S); struct VertexPSVInfo { @@ -541,6 +548,23 @@ struct ProgramSignatureElement { static_assert(sizeof(ProgramSignatureElement) == 32, "ProgramSignatureElement is misaligned"); +struct RootSignatureValidations { + + static Expected validateRootFlag(uint32_t Flags) { + if ((Flags & ~0x80000fff) != 0) + return llvm::make_error("Invalid Root Signature flag"); + return Flags; + } + + static Expected validateVersion(uint32_t Version) { + if (Version == 1 || Version == 2) + return Version; + + return llvm::make_error( + "Invalid Root Signature Version"); + } +}; + } // namespace dxbc } // namespace llvm diff --git a/llvm/include/llvm/BinaryFormat/DXContainerConstants.def b/llvm/include/llvm/BinaryFormat/DXContainerConstants.def index 96d4499c9cadc..6d44ea14df444 100644 --- a/llvm/include/llvm/BinaryFormat/DXContainerConstants.def +++ b/llvm/include/llvm/BinaryFormat/DXContainerConstants.def @@ -4,6 +4,7 @@ CONTAINER_PART(DXIL) CONTAINER_PART(SFI0) CONTAINER_PART(HASH) CONTAINER_PART(PSV0) +CONTAINER_PART(RTS0) CONTAINER_PART(ISG1) CONTAINER_PART(OSG1) CONTAINER_PART(PSG1) @@ -52,6 +53,26 @@ SHADER_FEATURE_FLAG(31, 36, NextUnusedBit, "Next reserved shader flag bit (not a #undef SHADER_FEATURE_FLAG #endif // SHADER_FEATURE_FLAG + +// ROOT_ELEMENT_FLAG(bit offset for the flag, name). +#ifdef ROOT_ELEMENT_FLAG + +ROOT_ELEMENT_FLAG(0, AllowInputAssemblerInputLayout) +ROOT_ELEMENT_FLAG(1, DenyVertexShaderRootAccess) +ROOT_ELEMENT_FLAG(2, DenyHullShaderRootAccess) +ROOT_ELEMENT_FLAG(3, DenyDomainShaderRootAccess) +ROOT_ELEMENT_FLAG(4, DenyGeometryShaderRootAccess) +ROOT_ELEMENT_FLAG(5, DenyPixelShaderRootAccess) +ROOT_ELEMENT_FLAG(6, AllowStreamOutput) +ROOT_ELEMENT_FLAG(7, LocalRootSignature) +ROOT_ELEMENT_FLAG(8, DenyAmplificationShaderRootAccess) +ROOT_ELEMENT_FLAG(9, DenyMeshShaderRootAccess) +ROOT_ELEMENT_FLAG(10, CBVSRVUAVHeapDirectlyIndexed) +ROOT_ELEMENT_FLAG(11, SamplerHeapDirectlyIndexed) +#undef ROOT_ELEMENT_FLAG +#endif // ROOT_ELEMENT_FLAG + + #ifdef DXIL_MODULE_FLAG // Only save DXIL module flags which not map to feature flags here. 
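The ROOT_ELEMENT_FLAG records added above are expanded via the X-macro into the dxbc::RootElementFlag enum in DXContainer.h (shown earlier in this patch) and into boolean fields of the YAML description later on. A small sketch, not from the patch, of how the flags combine and how the new validator behaves:

  #include "llvm/BinaryFormat/DXContainer.h"

  // Combine two root signature flags and validate them; any bit outside the
  // mask accepted by validateRootFlag() makes it return an error instead.
  llvm::Expected<uint32_t> checkRootFlags() {
    uint32_t Flags =
        uint32_t(llvm::dxbc::RootElementFlag::AllowInputAssemblerInputLayout) |
        uint32_t(llvm::dxbc::RootElementFlag::DenyPixelShaderRootAccess);
    return llvm::dxbc::RootSignatureValidations::validateRootFlag(Flags);
  }
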
diff --git a/llvm/include/llvm/BinaryFormat/ELF.h b/llvm/include/llvm/BinaryFormat/ELF.h index 8853c4a88b0b5..4b826bbf58f17 100644 --- a/llvm/include/llvm/BinaryFormat/ELF.h +++ b/llvm/include/llvm/BinaryFormat/ELF.h @@ -1798,6 +1798,7 @@ enum : unsigned { GNU_PROPERTY_AARCH64_FEATURE_1_AND = 0xc0000000, GNU_PROPERTY_AARCH64_FEATURE_PAUTH = 0xc0000001, GNU_PROPERTY_X86_FEATURE_1_AND = 0xc0000002, + GNU_PROPERTY_RISCV_FEATURE_1_AND = 0xc0000000, GNU_PROPERTY_X86_UINT32_OR_LO = 0xc0008000, GNU_PROPERTY_X86_FEATURE_2_NEEDED = GNU_PROPERTY_X86_UINT32_OR_LO + 1, @@ -1862,6 +1863,13 @@ enum : unsigned { GNU_PROPERTY_X86_ISA_1_V4 = 1 << 3, }; +// RISC-V processor feature bits. +enum : unsigned { + GNU_PROPERTY_RISCV_FEATURE_1_CFI_LP_UNLABELED = 1 << 0, + GNU_PROPERTY_RISCV_FEATURE_1_CFI_SS = 1 << 1, + GNU_PROPERTY_RISCV_FEATURE_1_CFI_LP_FUNC_SIG = 1 << 2, +}; + // FreeBSD note types. enum { NT_FREEBSD_ABI_TAG = 1, diff --git a/llvm/include/llvm/CodeGen/BasicTTIImpl.h b/llvm/include/llvm/CodeGen/BasicTTIImpl.h index a76de251c7138..8468992ed4b7a 100644 --- a/llvm/include/llvm/CodeGen/BasicTTIImpl.h +++ b/llvm/include/llvm/CodeGen/BasicTTIImpl.h @@ -2101,6 +2101,9 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase { case Intrinsic::sincos: ISD = ISD::FSINCOS; break; + case Intrinsic::modf: + ISD = ISD::FMODF; + break; case Intrinsic::tan: ISD = ISD::FTAN; break; diff --git a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h index 5d7e03bbaeb7d..9472aa196f9b4 100644 --- a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h +++ b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h @@ -119,20 +119,17 @@ struct LegalityQuery { MemDesc(LLT MemoryTy, uint64_t AlignInBits, AtomicOrdering Ordering) : MemoryTy(MemoryTy), AlignInBits(AlignInBits), Ordering(Ordering) {} MemDesc(const MachineMemOperand &MMO) - : MemoryTy(MMO.getMemoryType()), - AlignInBits(MMO.getAlign().value() * 8), - Ordering(MMO.getSuccessOrdering()) {} + : MemDesc(MMO.getMemoryType(), MMO.getAlign().value() * 8, + MMO.getSuccessOrdering()) {} }; /// Operations which require memory can use this to place requirements on the /// memory type for each MMO. ArrayRef MMODescrs; - constexpr LegalityQuery(unsigned Opcode, const ArrayRef Types, - const ArrayRef MMODescrs) + constexpr LegalityQuery(unsigned Opcode, ArrayRef Types, + ArrayRef MMODescrs = {}) : Opcode(Opcode), Types(Types), MMODescrs(MMODescrs) {} - constexpr LegalityQuery(unsigned Opcode, const ArrayRef Types) - : LegalityQuery(Opcode, Types, {}) {} raw_ostream &print(raw_ostream &OS) const; }; diff --git a/llvm/include/llvm/CodeGen/ISDOpcodes.h b/llvm/include/llvm/CodeGen/ISDOpcodes.h index fd8784a4c1003..046d9befd0e91 100644 --- a/llvm/include/llvm/CodeGen/ISDOpcodes.h +++ b/llvm/include/llvm/CodeGen/ISDOpcodes.h @@ -1058,6 +1058,10 @@ enum NodeType { /// FSINCOS - Compute both fsin and fcos as a single operation. FSINCOS, + /// FMODF - Decomposes the operand into integral and fractional parts, each + /// having the same type and sign as the operand. + FMODF, + /// Gets the current floating-point environment. The first operand is a token /// chain. The results are FP environment, represented by an integer value, /// and a token chain. 
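Tying the modf pieces together: the llvm.modf intrinsic documented earlier in this patch is lowered through the new ISD::FMODF node shown above (and, further down, through the MODF_* libcalls). A hedged sketch, not from the patch, of how a front end might emit the intrinsic from C++; the Intrinsic::modf enumerator name is assumed from the new int_modf Intrinsics.td entry:

  #include "llvm/IR/IRBuilder.h"
  #include "llvm/IR/Intrinsics.h"

  // Emit llvm.modf and unpack its result pair: per the LangRef wording above,
  // element 0 is the fractional part and element 1 is the integral part.
  llvm::Value *emitModf(llvm::IRBuilderBase &B, llvm::Value *X,
                        llvm::Value *&IntegralPart) {
    llvm::CallInst *Parts =
        B.CreateIntrinsic(llvm::Intrinsic::modf, {X->getType()}, {X});
    IntegralPart = B.CreateExtractValue(Parts, 1);
    return B.CreateExtractValue(Parts, 0);
  }
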
diff --git a/llvm/include/llvm/CodeGen/MachineScheduler.h b/llvm/include/llvm/CodeGen/MachineScheduler.h index e1f1a1efecc72..4762494e6ccb7 100644 --- a/llvm/include/llvm/CodeGen/MachineScheduler.h +++ b/llvm/include/llvm/CodeGen/MachineScheduler.h @@ -1385,24 +1385,6 @@ std::unique_ptr createCopyConstrainDAGMutation(const TargetInstrInfo *TII, const TargetRegisterInfo *TRI); -class MachineSchedulerPass : public PassInfoMixin { - const TargetMachine *TM; - -public: - MachineSchedulerPass(const TargetMachine *TM) : TM(TM) {} - PreservedAnalyses run(MachineFunction &MF, - MachineFunctionAnalysisManager &MFAM); -}; - -class PostMachineSchedulerPass - : public PassInfoMixin { - const TargetMachine *TM; - -public: - PostMachineSchedulerPass(const TargetMachine *TM) : TM(TM) {} - PreservedAnalyses run(MachineFunction &MF, - MachineFunctionAnalysisManager &MFAM); -}; } // end namespace llvm #endif // LLVM_CODEGEN_MACHINESCHEDULER_H diff --git a/llvm/include/llvm/CodeGen/RDFGraph.h b/llvm/include/llvm/CodeGen/RDFGraph.h index cf7344e8c3e74..8a93afbcb5491 100644 --- a/llvm/include/llvm/CodeGen/RDFGraph.h +++ b/llvm/include/llvm/CodeGen/RDFGraph.h @@ -865,8 +865,9 @@ struct DataFlowGraph { using BlockRefsMap = RegisterAggrMap; void buildStmt(Block BA, MachineInstr &In); - void recordDefsForDF(BlockRefsMap &PhiM, Block BA); - void buildPhis(BlockRefsMap &PhiM, Block BA); + void recordDefsForDF(BlockRefsMap &PhiM, BlockRefsMap &PhiClobberM, Block BA); + void buildPhis(BlockRefsMap &PhiM, Block BA, + const DefStackMap &DefM = DefStackMap()); void removeUnusedPhis(); void pushClobbers(Instr IA, DefStackMap &DM); @@ -874,7 +875,7 @@ struct DataFlowGraph { template void linkRefUp(Instr IA, NodeAddr TA, DefStack &DS); template void linkStmtRefs(DefStackMap &DefM, Stmt SA, Predicate P); - void linkBlockRefs(DefStackMap &DefM, Block BA); + void linkBlockRefs(DefStackMap &DefM, BlockRefsMap &PhiClobberM, Block BA); void unlinkUseDF(Use UA); void unlinkDefDF(Def DA); diff --git a/llvm/include/llvm/CodeGen/RuntimeLibcallUtil.h b/llvm/include/llvm/CodeGen/RuntimeLibcallUtil.h index 045ec7d365311..59313520e0d83 100644 --- a/llvm/include/llvm/CodeGen/RuntimeLibcallUtil.h +++ b/llvm/include/llvm/CodeGen/RuntimeLibcallUtil.h @@ -66,6 +66,10 @@ Libcall getFREXP(EVT RetVT); /// UNKNOWN_LIBCALL if there is none. Libcall getFSINCOS(EVT RetVT); +/// getMODF - Return the MODF_* value for the given types, or +/// UNKNOWN_LIBCALL if there is none. +Libcall getMODF(EVT RetVT); + /// Return the SYNC_FETCH_AND_* value for the given opcode and type, or /// UNKNOWN_LIBCALL if there is none. Libcall getSYNC(unsigned Opc, MVT VT); diff --git a/llvm/include/llvm/ExecutionEngine/Orc/Core.h b/llvm/include/llvm/ExecutionEngine/Orc/Core.h index 3eddaf4c9c59f..cecb4094c9a57 100644 --- a/llvm/include/llvm/ExecutionEngine/Orc/Core.h +++ b/llvm/include/llvm/ExecutionEngine/Orc/Core.h @@ -1556,6 +1556,30 @@ class ExecutionSession { EPC->getDispatcher().dispatch(std::move(T)); } + /// Returns the bootstrap map. + const StringMap> &getBootstrapMap() const { + return EPC->getBootstrapMap(); + } + + /// Look up and SPS-deserialize a bootstrap map value. + template + Error getBootstrapMapValue(StringRef Key, std::optional &Val) const { + return EPC->getBootstrapMapValue(Key, Val); + } + + /// Returns the bootstrap symbol map. 
+ const StringMap &getBootstrapSymbolsMap() const { + return EPC->getBootstrapSymbolsMap(); + } + + /// For each (ExecutorAddr&, StringRef) pair, looks up the string in the + /// bootstrap symbols map and writes its address to the ExecutorAddr if + /// found. If any symbol is not found then the function returns an error. + Error getBootstrapSymbols( + ArrayRef> Pairs) const { + return EPC->getBootstrapSymbols(Pairs); + } + /// Run a wrapper function in the executor. /// /// The wrapper function should be callable as: diff --git a/llvm/include/llvm/ExecutionEngine/Orc/MachOPlatform.h b/llvm/include/llvm/ExecutionEngine/Orc/MachOPlatform.h index 6e99f6c03a7c6..91842714f6c4c 100644 --- a/llvm/include/llvm/ExecutionEngine/Orc/MachOPlatform.h +++ b/llvm/include/llvm/ExecutionEngine/Orc/MachOPlatform.h @@ -368,6 +368,7 @@ class MachOPlatform : public Platform { DenseMap RegisteredInitSymbols; std::mutex PlatformMutex; + bool ForceEHFrames = false; BootstrapInfo *Bootstrap = nullptr; DenseMap JITDylibToHeaderAddr; DenseMap HeaderAddrToJITDylib; diff --git a/llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/DefaultHostBootstrapValues.h b/llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/DefaultHostBootstrapValues.h new file mode 100644 index 0000000000000..d3277e61eeb7b --- /dev/null +++ b/llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/DefaultHostBootstrapValues.h @@ -0,0 +1,28 @@ +//===- DefaultHostBootstrapValues.h - Defaults for host process -*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// Set sensible default bootstrap values for JIT execution in the host process. 
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_EXECUTIONENGINE_ORC_TARGETPROCESS_DEFAULTHOSTBOOTSTRAPVALUES_H +#define LLVM_EXECUTIONENGINE_ORC_TARGETPROCESS_DEFAULTHOSTBOOTSTRAPVALUES_H + +#include "llvm/ADT/StringMap.h" +#include "llvm/ExecutionEngine/Orc/Shared/ExecutorAddress.h" +#include + +namespace llvm::orc { + +void addDefaultBootstrapValuesForHostProcess( + StringMap> &BootstrapMap, + StringMap &BootstrapSymbols); + +} // namespace llvm::orc + +#endif // LLVM_EXECUTIONENGINE_ORC_TARGETPROCESS_DEFAULTHOSTBOOTSTRAPVALUES_H diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td index d4ce4b1d199d7..9671c81ab3d32 100644 --- a/llvm/include/llvm/IR/Intrinsics.td +++ b/llvm/include/llvm/IR/Intrinsics.td @@ -1075,6 +1075,8 @@ let IntrProperties = [IntrNoMem, IntrSpeculatable, IntrWillReturn] in { def int_roundeven : DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>; def int_sincos : DefaultAttrsIntrinsic<[LLVMMatchType<0>, LLVMMatchType<0>], [llvm_anyfloat_ty]>; + def int_modf : DefaultAttrsIntrinsic<[LLVMMatchType<0>, LLVMMatchType<0>], + [llvm_anyfloat_ty]>; // Truncate a floating point number with a specific rounding mode def int_fptrunc_round : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ], diff --git a/llvm/include/llvm/IR/IntrinsicsNVVM.td b/llvm/include/llvm/IR/IntrinsicsNVVM.td index abbe25bf0040a..f299a145ac73b 100644 --- a/llvm/include/llvm/IR/IntrinsicsNVVM.td +++ b/llvm/include/llvm/IR/IntrinsicsNVVM.td @@ -5083,6 +5083,38 @@ foreach cta_group = ["cg1", "cg2"] in { def int_nvvm_tcgen05_relinq_alloc_permit_ # cta_group : Intrinsic<[], [], [IntrConvergent, IntrInaccessibleMemOnly]>; + + def int_nvvm_tcgen05_commit_ # cta_group : Intrinsic<[], + [llvm_ptr_ty], // mbar_ptr + [IntrConvergent, IntrInaccessibleMemOrArgMemOnly, + NoCapture>]>; + + def int_nvvm_tcgen05_commit_shared_ # cta_group : Intrinsic<[], + [llvm_shared_ptr_ty], // mbar_ptr + [IntrConvergent, IntrInaccessibleMemOrArgMemOnly, + NoCapture>]>; + + def int_nvvm_tcgen05_commit_mc_ # cta_group : Intrinsic<[], + [llvm_ptr_ty, llvm_i16_ty], // mbar_ptr, cta_mask + [IntrConvergent, IntrInaccessibleMemOrArgMemOnly, + NoCapture>]>; + + def int_nvvm_tcgen05_commit_mc_shared_ # cta_group : Intrinsic<[], + [llvm_shared_ptr_ty, llvm_i16_ty], // mbar_ptr, cta_mask + [IntrConvergent, IntrInaccessibleMemOrArgMemOnly, + NoCapture>]>; } +// Tcgen05 wait_ld/st intrinsics +def int_nvvm_tcgen05_wait_ld : Intrinsic<[], [], + [IntrConvergent, IntrInaccessibleMemOnly]>; +def int_nvvm_tcgen05_wait_st : Intrinsic<[], [], + [IntrConvergent, IntrInaccessibleMemOnly]>; + +// Tcgen05 Fence intrinsics +def int_nvvm_tcgen05_fence_before_thread_sync : Intrinsic<[], [], + [IntrNoMem, IntrHasSideEffects]>; +def int_nvvm_tcgen05_fence_after_thread_sync : Intrinsic<[], [], + [IntrNoMem, IntrHasSideEffects]>; + } // let TargetPrefix = "nvvm" diff --git a/llvm/include/llvm/IR/RuntimeLibcalls.def b/llvm/include/llvm/IR/RuntimeLibcalls.def index 8153845b52c7a..dc69b1ae19769 100644 --- a/llvm/include/llvm/IR/RuntimeLibcalls.def +++ b/llvm/include/llvm/IR/RuntimeLibcalls.def @@ -354,6 +354,11 @@ HANDLE_LIBCALL(FREXP_F64, "frexp") HANDLE_LIBCALL(FREXP_F80, "frexpl") HANDLE_LIBCALL(FREXP_F128, "frexpl") HANDLE_LIBCALL(FREXP_PPCF128, "frexpl") +HANDLE_LIBCALL(MODF_F32, "modff") +HANDLE_LIBCALL(MODF_F64, "modf") +HANDLE_LIBCALL(MODF_F80, "modfl") +HANDLE_LIBCALL(MODF_F128, "modfl") +HANDLE_LIBCALL(MODF_PPCF128, "modfl") // Floating point environment 
HANDLE_LIBCALL(FEGETENV, "fegetenv") diff --git a/llvm/include/llvm/InitializePasses.h b/llvm/include/llvm/InitializePasses.h index b8df4d1ecab1d..6d74d7f24bf9a 100644 --- a/llvm/include/llvm/InitializePasses.h +++ b/llvm/include/llvm/InitializePasses.h @@ -209,7 +209,7 @@ void initializeMachinePipelinerPass(PassRegistry &); void initializeMachinePostDominatorTreeWrapperPassPass(PassRegistry &); void initializeMachineRegionInfoPassPass(PassRegistry &); void initializeMachineSanitizerBinaryMetadataPass(PassRegistry &); -void initializeMachineSchedulerLegacyPass(PassRegistry &); +void initializeMachineSchedulerPass(PassRegistry &); void initializeMachineSinkingPass(PassRegistry &); void initializeMachineTraceMetricsWrapperPassPass(PassRegistry &); void initializeMachineUniformityInfoPrinterPassPass(PassRegistry &); @@ -238,7 +238,7 @@ void initializePostDomPrinterWrapperPassPass(PassRegistry &); void initializePostDomViewerWrapperPassPass(PassRegistry &); void initializePostDominatorTreeWrapperPassPass(PassRegistry &); void initializePostInlineEntryExitInstrumenterPass(PassRegistry &); -void initializePostMachineSchedulerLegacyPass(PassRegistry &); +void initializePostMachineSchedulerPass(PassRegistry &); void initializePostRAHazardRecognizerPass(PassRegistry &); void initializePostRAMachineSinkingPass(PassRegistry &); void initializePostRASchedulerLegacyPass(PassRegistry &); diff --git a/llvm/include/llvm/MC/DXContainerRootSignature.h b/llvm/include/llvm/MC/DXContainerRootSignature.h new file mode 100644 index 0000000000000..e1a9be5fc52d8 --- /dev/null +++ b/llvm/include/llvm/MC/DXContainerRootSignature.h @@ -0,0 +1,28 @@ +//===- llvm/MC/DXContainerRootSignature.h - RootSignature -*- C++ -*- ========// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include +#include + +namespace llvm { + +class raw_ostream; + +namespace mcdxbc { +struct RootSignatureHeader { + uint32_t Version = 2; + uint32_t NumParameters = 0; + uint32_t RootParametersOffset = 0; + uint32_t NumStaticSamplers = 0; + uint32_t StaticSamplersOffset = 0; + uint32_t Flags = 0; + + void write(raw_ostream &OS); +}; +} // namespace mcdxbc +} // namespace llvm diff --git a/llvm/include/llvm/Object/DXContainer.h b/llvm/include/llvm/Object/DXContainer.h index 19c83ba6c6e85..c3a2f756bd683 100644 --- a/llvm/include/llvm/Object/DXContainer.h +++ b/llvm/include/llvm/Object/DXContainer.h @@ -116,6 +116,28 @@ template struct ViewArray { }; namespace DirectX { + +class RootSignature { +private: + uint32_t Version; + uint32_t NumParameters; + uint32_t RootParametersOffset; + uint32_t NumStaticSamplers; + uint32_t StaticSamplersOffset; + uint32_t Flags; + +public: + RootSignature() {} + + Error parse(StringRef Data); + uint32_t getVersion() const { return Version; } + uint32_t getNumParameters() const { return NumParameters; } + uint32_t getRootParametersOffset() const { return RootParametersOffset; } + uint32_t getNumStaticSamplers() const { return NumStaticSamplers; } + uint32_t getStaticSamplersOffset() const { return StaticSamplersOffset; } + uint32_t getFlags() const { return Flags; } +}; + class PSVRuntimeInfo { using ResourceArray = ViewArray; @@ -287,6 +309,7 @@ class DXContainer { std::optional ShaderFeatureFlags; std::optional Hash; std::optional PSVInfo; + std::optional RootSignature; DirectX::Signature InputSignature; DirectX::Signature OutputSignature; DirectX::Signature PatchConstantSignature; @@ -296,6 +319,7 @@ class DXContainer { Error parseDXILHeader(StringRef Part); Error parseShaderFeatureFlags(StringRef Part); Error parseHash(StringRef Part); + Error parseRootSignature(StringRef Part); Error parsePSVInfo(StringRef Part); Error parseSignature(StringRef Part, DirectX::Signature &Array); friend class PartIterator; @@ -382,6 +406,10 @@ class DXContainer { std::optional getShaderHash() const { return Hash; } + std::optional getRootSignature() const { + return RootSignature; + } + const std::optional &getPSVInfo() const { return PSVInfo; }; diff --git a/llvm/include/llvm/ObjectYAML/DXContainerYAML.h b/llvm/include/llvm/ObjectYAML/DXContainerYAML.h index 66ad057ab0e30..0200f5cb196ff 100644 --- a/llvm/include/llvm/ObjectYAML/DXContainerYAML.h +++ b/llvm/include/llvm/ObjectYAML/DXContainerYAML.h @@ -17,6 +17,7 @@ #include "llvm/ADT/StringRef.h" #include "llvm/BinaryFormat/DXContainer.h" +#include "llvm/Object/DXContainer.h" #include "llvm/ObjectYAML/YAML.h" #include "llvm/Support/YAMLTraits.h" #include @@ -72,6 +73,22 @@ struct ShaderHash { std::vector Digest; }; +#define ROOT_ELEMENT_FLAG(Num, Val) bool Val = false; +struct RootSignatureDesc { + RootSignatureDesc() = default; + RootSignatureDesc(const object::DirectX::RootSignature &Data); + + uint32_t Version; + uint32_t NumParameters; + uint32_t RootParametersOffset; + uint32_t NumStaticSamplers; + uint32_t StaticSamplersOffset; + + uint32_t getEncodedFlags(); + +#include "llvm/BinaryFormat/DXContainerConstants.def" +}; + using ResourceFlags = dxbc::PSV::ResourceFlags; using ResourceBindInfo = dxbc::PSV::v2::ResourceBindInfo; @@ -159,6 +176,7 @@ struct Part { std::optional Hash; std::optional Info; std::optional Signature; + std::optional RootSignature; }; struct Object { @@ 
-241,6 +259,11 @@ template <> struct MappingTraits { static void mapping(IO &IO, llvm::DXContainerYAML::Signature &El); }; +template <> struct MappingTraits { + static void mapping(IO &IO, + DXContainerYAML::RootSignatureDesc &RootSignature); +}; + } // namespace yaml } // namespace llvm diff --git a/llvm/include/llvm/Passes/CodeGenPassBuilder.h b/llvm/include/llvm/Passes/CodeGenPassBuilder.h index 1458318ff021a..7f91dd7ebf49d 100644 --- a/llvm/include/llvm/Passes/CodeGenPassBuilder.h +++ b/llvm/include/llvm/Passes/CodeGenPassBuilder.h @@ -50,7 +50,6 @@ #include "llvm/CodeGen/MachineLICM.h" #include "llvm/CodeGen/MachineModuleInfo.h" #include "llvm/CodeGen/MachinePassManager.h" -#include "llvm/CodeGen/MachineScheduler.h" #include "llvm/CodeGen/MachineVerifier.h" #include "llvm/CodeGen/OptimizePHIs.h" #include "llvm/CodeGen/PHIElimination.h" @@ -961,7 +960,7 @@ Error CodeGenPassBuilder::addMachinePasses( if (getOptLevel() != CodeGenOptLevel::None && !TM.targetSchedulesPostRAScheduling()) { if (Opt.MISchedPostRA) - addPass(PostMachineSchedulerPass(&TM)); + addPass(PostMachineSchedulerPass()); else addPass(PostRASchedulerPass(&TM)); } @@ -1145,7 +1144,7 @@ void CodeGenPassBuilder::addOptimizedRegAlloc( addPass(RenameIndependentSubregsPass()); // PreRA instruction scheduling. - addPass(MachineSchedulerPass(&TM)); + addPass(MachineSchedulerPass()); if (derived().addRegAssignmentOptimized(addPass)) { // Allow targets to expand pseudo instructions depending on the choice of diff --git a/llvm/include/llvm/Passes/MachinePassRegistry.def b/llvm/include/llvm/Passes/MachinePassRegistry.def index e6b4a4b0a56ae..9f9922dfa5673 100644 --- a/llvm/include/llvm/Passes/MachinePassRegistry.def +++ b/llvm/include/llvm/Passes/MachinePassRegistry.def @@ -142,13 +142,11 @@ MACHINE_FUNCTION_PASS("finalize-isel", FinalizeISelPass()) MACHINE_FUNCTION_PASS("localstackalloc", LocalStackSlotAllocationPass()) MACHINE_FUNCTION_PASS("machine-cp", MachineCopyPropagationPass()) MACHINE_FUNCTION_PASS("machine-cse", MachineCSEPass()) -MACHINE_FUNCTION_PASS("machine-scheduler", MachineSchedulerPass(TM)) MACHINE_FUNCTION_PASS("machinelicm", MachineLICMPass()) MACHINE_FUNCTION_PASS("no-op-machine-function", NoOpMachineFunctionPass()) MACHINE_FUNCTION_PASS("opt-phis", OptimizePHIsPass()) MACHINE_FUNCTION_PASS("peephole-opt", PeepholeOptimizerPass()) MACHINE_FUNCTION_PASS("phi-node-elimination", PHIEliminationPass()) -MACHINE_FUNCTION_PASS("postmisched", PostMachineSchedulerPass(TM)) MACHINE_FUNCTION_PASS("post-RA-sched", PostRASchedulerPass(TM)) MACHINE_FUNCTION_PASS("print", PrintMIRPass()) MACHINE_FUNCTION_PASS("print", LiveDebugVariablesPrinterPass(errs())) @@ -245,11 +243,13 @@ DUMMY_MACHINE_FUNCTION_PASS("static-data-splitter", StaticDataSplitter) DUMMY_MACHINE_FUNCTION_PASS("machine-function-splitter", MachineFunctionSplitterPass) DUMMY_MACHINE_FUNCTION_PASS("machine-latecleanup", MachineLateInstrsCleanupPass) DUMMY_MACHINE_FUNCTION_PASS("machine-sanmd", MachineSanitizerBinaryMetadata) +DUMMY_MACHINE_FUNCTION_PASS("machine-scheduler", MachineSchedulerPass) DUMMY_MACHINE_FUNCTION_PASS("machine-sink", MachineSinkingPass) DUMMY_MACHINE_FUNCTION_PASS("machine-uniformity", MachineUniformityInfoWrapperPass) DUMMY_MACHINE_FUNCTION_PASS("machineinstr-printer", MachineFunctionPrinterPass) DUMMY_MACHINE_FUNCTION_PASS("mirfs-discriminators", MIRAddFSDiscriminatorsPass) DUMMY_MACHINE_FUNCTION_PASS("patchable-function", PatchableFunctionPass) +DUMMY_MACHINE_FUNCTION_PASS("postmisched", PostMachineSchedulerPass) 
DUMMY_MACHINE_FUNCTION_PASS("postra-machine-sink", PostRAMachineSinkingPass) DUMMY_MACHINE_FUNCTION_PASS("postrapseudos", ExpandPostRAPseudosPass) DUMMY_MACHINE_FUNCTION_PASS("print-machine-cycles", MachineCycleInfoPrinterPass) diff --git a/llvm/include/llvm/Passes/PassBuilder.h b/llvm/include/llvm/Passes/PassBuilder.h index 1b54855a5c6f4..51ccaa53447d7 100644 --- a/llvm/include/llvm/Passes/PassBuilder.h +++ b/llvm/include/llvm/Passes/PassBuilder.h @@ -60,6 +60,10 @@ class PipelineTuningOptions { /// Tuning option to enable/disable loop unrolling. Its default value is true. bool LoopUnrolling; + /// Tuning option to enable/disable loop interchange. Its default value is + /// false. + bool LoopInterchange; + /// Tuning option to forget all SCEV loops in LoopUnroll. Its default value /// is that of the flag: `-forget-scev-loop-unroll`. bool ForgetAllSCEVInLoopUnroll; diff --git a/llvm/include/llvm/SandboxIR/Tracker.h b/llvm/include/llvm/SandboxIR/Tracker.h index 9a031f3270837..f7b469965eae8 100644 --- a/llvm/include/llvm/SandboxIR/Tracker.h +++ b/llvm/include/llvm/SandboxIR/Tracker.h @@ -440,8 +440,9 @@ class ShuffleVectorSetMask final : public IRChangeBase { class Tracker { public: enum class TrackerState { - Disabled, ///> Tracking is disabled - Record, ///> Tracking changes + Disabled, ///> Tracking is disabled + Record, ///> Tracking changes + Reverting, ///> Reverting changes }; private: @@ -473,6 +474,8 @@ class Tracker { ~Tracker(); Context &getContext() const { return Ctx; } + /// \Returns true if there are no changes tracked. + bool empty() const { return Changes.empty(); } /// Record \p Change and take ownership. This is the main function used to /// track Sandbox IR changes. void track(std::unique_ptr &&Change) { diff --git a/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/Passes/TransactionAcceptOrRevert.h b/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/Passes/TransactionAcceptOrRevert.h new file mode 100644 index 0000000000000..fce9cc0c1bde7 --- /dev/null +++ b/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/Passes/TransactionAcceptOrRevert.h @@ -0,0 +1,30 @@ +//===- TransactionAcceptOrRevert.h ------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This is a region pass that checks the region cost before/after vectorization +// and accepts the state of Sandbox IR if the cost is better, or otherwise +// reverts it. 
+// + +#ifndef LLVM_TRANSFORMS_VECTORIZE_SANDBOXVECTORIZER_PASSES_TRANSACTIONACCEPTORREVERT_H +#define LLVM_TRANSFORMS_VECTORIZE_SANDBOXVECTORIZER_PASSES_TRANSACTIONACCEPTORREVERT_H + +#include "llvm/SandboxIR/Pass.h" +#include "llvm/SandboxIR/Region.h" + +namespace llvm::sandboxir { + +class TransactionAcceptOrRevert : public RegionPass { +public: + TransactionAcceptOrRevert() : RegionPass("tr-accept-or-revert") {} + bool runOnRegion(Region &Rgn, const Analyses &A) final; +}; + +} // namespace llvm::sandboxir + +#endif // LLVM_TRANSFORMS_VECTORIZE_SANDBOXVECTORIZER_PASSES_TRANSACTIONACCEPTORREVERT_H diff --git a/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/Passes/TransactionAlwaysAccept.h b/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/Passes/TransactionAlwaysAccept.h new file mode 100644 index 0000000000000..ed6cf1bf7cf51 --- /dev/null +++ b/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/Passes/TransactionAlwaysAccept.h @@ -0,0 +1,34 @@ +//===- TransactionAlwaysAccept.h --------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This is a region pass that always accepts the transaction without checking +// its cost. This is mainly used as a final pass in lit tests. +// + +#ifndef LLVM_TRANSFORMS_VECTORIZE_SANDBOXVECTORIZER_PASSES_TRANSACTIONALWAYSACCEPT_H +#define LLVM_TRANSFORMS_VECTORIZE_SANDBOXVECTORIZER_PASSES_TRANSACTIONALWAYSACCEPT_H + +#include "llvm/SandboxIR/Pass.h" +#include "llvm/SandboxIR/Region.h" + +namespace llvm::sandboxir { + +class TransactionAlwaysAccept : public RegionPass { +public: + TransactionAlwaysAccept() : RegionPass("tr-accept") {} + bool runOnRegion(Region &Rgn, const Analyses &A) final { + auto &Tracker = Rgn.getContext().getTracker(); + bool HasChanges = !Tracker.empty(); + Tracker.accept(); + return HasChanges; + } +}; + +} // namespace llvm::sandboxir + +#endif // LLVM_TRANSFORMS_VECTORIZE_SANDBOXVECTORIZER_PASSES_TRANSACTIONALWAYSACCEPT_H diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp index 6eba6c0f08c3f..8a9ad55366ee7 100644 --- a/llvm/lib/Analysis/ValueTracking.cpp +++ b/llvm/lib/Analysis/ValueTracking.cpp @@ -8599,7 +8599,7 @@ bool llvm::isKnownInversion(const Value *X, const Value *Y) { return false; // They must both have samesign flag or not. - if (cast(X)->hasSameSign() != cast(Y)->hasSameSign()) + if (Pred1.hasSameSign() != Pred2.hasSameSign()) return false; if (B == C) @@ -8611,8 +8611,7 @@ bool llvm::isKnownInversion(const Value *X, const Value *Y) { return false; // Sign bits of two RHSCs should match. 
- if (cast(X)->hasSameSign() && - RHSC1->isNonNegative() != RHSC2->isNonNegative()) + if (Pred1.hasSameSign() && RHSC1->isNonNegative() != RHSC2->isNonNegative()) return false; const auto CR1 = ConstantRange::makeExactICmpRegion(Pred1, *RHSC1); diff --git a/llvm/lib/CodeGen/CodeGen.cpp b/llvm/lib/CodeGen/CodeGen.cpp index 35df2a479a545..d69a24f00871e 100644 --- a/llvm/lib/CodeGen/CodeGen.cpp +++ b/llvm/lib/CodeGen/CodeGen.cpp @@ -94,7 +94,7 @@ void llvm::initializeCodeGen(PassRegistry &Registry) { initializeModuloScheduleTestPass(Registry); initializeMachinePostDominatorTreeWrapperPassPass(Registry); initializeMachineRegionInfoPassPass(Registry); - initializeMachineSchedulerLegacyPass(Registry); + initializeMachineSchedulerPass(Registry); initializeMachineSinkingPass(Registry); initializeMachineUniformityAnalysisPassPass(Registry); initializeMachineUniformityInfoPrinterPassPass(Registry); @@ -105,7 +105,7 @@ void llvm::initializeCodeGen(PassRegistry &Registry) { initializePHIEliminationPass(Registry); initializePatchableFunctionPass(Registry); initializePeepholeOptimizerLegacyPass(Registry); - initializePostMachineSchedulerLegacyPass(Registry); + initializePostMachineSchedulerPass(Registry); initializePostRAHazardRecognizerPass(Registry); initializePostRAMachineSinkingPass(Registry); initializePostRASchedulerLegacyPass(Registry); diff --git a/llvm/lib/CodeGen/MachineCopyPropagation.cpp b/llvm/lib/CodeGen/MachineCopyPropagation.cpp index 460749a739c76..4d9d7128f73a8 100644 --- a/llvm/lib/CodeGen/MachineCopyPropagation.cpp +++ b/llvm/lib/CodeGen/MachineCopyPropagation.cpp @@ -1065,8 +1065,9 @@ void MachineCopyPropagation::ForwardCopyPropagateBlock(MachineBasicBlock &MBB) { assert(!MRI->isReserved(DestReg)); // Update matching debug values, if any. - SmallVector MaybeDeadDbgUsers( - CopyDbgUsers[MaybeDead].begin(), CopyDbgUsers[MaybeDead].end()); + const auto &DbgUsers = CopyDbgUsers[MaybeDead]; + SmallVector MaybeDeadDbgUsers(DbgUsers.begin(), + DbgUsers.end()); MRI->updateDbgUsersToReg(DestReg.asMCReg(), SrcReg.asMCReg(), MaybeDeadDbgUsers); @@ -1238,8 +1239,9 @@ void MachineCopyPropagation::BackwardCopyPropagateBlock( isCopyInstr(*Copy, *TII, UseCopyInstr); Register Src = CopyOperands->Source->getReg(); Register Def = CopyOperands->Destination->getReg(); - SmallVector MaybeDeadDbgUsers(CopyDbgUsers[Copy].begin(), - CopyDbgUsers[Copy].end()); + const auto &DbgUsers = CopyDbgUsers[Copy]; + SmallVector MaybeDeadDbgUsers(DbgUsers.begin(), + DbgUsers.end()); MRI->updateDbgUsersToReg(Src.asMCReg(), Def.asMCReg(), MaybeDeadDbgUsers); Copy->eraseFromParent(); diff --git a/llvm/lib/CodeGen/MachineScheduler.cpp b/llvm/lib/CodeGen/MachineScheduler.cpp index df90077b15f33..3f72e8486c06e 100644 --- a/llvm/lib/CodeGen/MachineScheduler.cpp +++ b/llvm/lib/CodeGen/MachineScheduler.cpp @@ -216,85 +216,67 @@ MachineSchedContext::~MachineSchedContext() { namespace { -/// Base class for the machine scheduler classes. -class MachineSchedulerBase : public MachineSchedContext { -protected: - void scheduleRegions(ScheduleDAGInstrs &Scheduler, bool FixKillFlags); -}; - -/// Impl class for MachineScheduler. 
-class MachineSchedulerImpl : public MachineSchedulerBase { - MachineFunctionPass *P = nullptr; - MachineFunctionAnalysisManager *MFAM = nullptr; - -public: - MachineSchedulerImpl(MachineFunction &Func, MachineFunctionPass *P); - MachineSchedulerImpl(MachineFunction &Func, - MachineFunctionAnalysisManager &MFAM, - const TargetMachine *TargetM); - bool run(); - -protected: - ScheduleDAGInstrs *createMachineScheduler(); -}; - -/// Impl class for PostMachineScheduler. -class PostMachineSchedulerImpl : public MachineSchedulerBase { - MachineFunctionPass *P = nullptr; - MachineFunctionAnalysisManager *MFAM = nullptr; - +/// Base class for a machine scheduler class that can run at any point. +class MachineSchedulerBase : public MachineSchedContext, + public MachineFunctionPass { public: - PostMachineSchedulerImpl(MachineFunction &Func, MachineFunctionPass *P); - PostMachineSchedulerImpl(MachineFunction &Func, - MachineFunctionAnalysisManager &MFAM, - const TargetMachine *TargetM); - bool run(); + MachineSchedulerBase(char &ID) : MachineFunctionPass(ID) {} protected: - ScheduleDAGInstrs *createPostMachineScheduler(); + void scheduleRegions(ScheduleDAGInstrs &Scheduler, bool FixKillFlags); }; /// MachineScheduler runs after coalescing and before register allocation. -class MachineSchedulerLegacy : public MachineFunctionPass { +class MachineScheduler : public MachineSchedulerBase { public: - MachineSchedulerLegacy(); + MachineScheduler(); + void getAnalysisUsage(AnalysisUsage &AU) const override; + bool runOnMachineFunction(MachineFunction&) override; static char ID; // Class identification, replacement for typeinfo + +protected: + ScheduleDAGInstrs *createMachineScheduler(); }; /// PostMachineScheduler runs after shortly before code emission. -class PostMachineSchedulerLegacy : public MachineFunctionPass { +class PostMachineScheduler : public MachineSchedulerBase { public: - PostMachineSchedulerLegacy(); + PostMachineScheduler(); + void getAnalysisUsage(AnalysisUsage &AU) const override; + bool runOnMachineFunction(MachineFunction&) override; static char ID; // Class identification, replacement for typeinfo + +protected: + ScheduleDAGInstrs *createPostMachineScheduler(); }; } // end anonymous namespace -char MachineSchedulerLegacy::ID = 0; +char MachineScheduler::ID = 0; -char &llvm::MachineSchedulerID = MachineSchedulerLegacy::ID; +char &llvm::MachineSchedulerID = MachineScheduler::ID; -INITIALIZE_PASS_BEGIN(MachineSchedulerLegacy, DEBUG_TYPE, +INITIALIZE_PASS_BEGIN(MachineScheduler, DEBUG_TYPE, "Machine Instruction Scheduler", false, false) INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) INITIALIZE_PASS_DEPENDENCY(MachineDominatorTreeWrapperPass) INITIALIZE_PASS_DEPENDENCY(MachineLoopInfoWrapperPass) INITIALIZE_PASS_DEPENDENCY(SlotIndexesWrapperPass) INITIALIZE_PASS_DEPENDENCY(LiveIntervalsWrapperPass) -INITIALIZE_PASS_END(MachineSchedulerLegacy, DEBUG_TYPE, +INITIALIZE_PASS_END(MachineScheduler, DEBUG_TYPE, "Machine Instruction Scheduler", false, false) -MachineSchedulerLegacy::MachineSchedulerLegacy() : MachineFunctionPass(ID) { - initializeMachineSchedulerLegacyPass(*PassRegistry::getPassRegistry()); +MachineScheduler::MachineScheduler() : MachineSchedulerBase(ID) { + initializeMachineSchedulerPass(*PassRegistry::getPassRegistry()); } -void MachineSchedulerLegacy::getAnalysisUsage(AnalysisUsage &AU) const { +void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const { AU.setPreservesCFG(); AU.addRequired(); AU.addRequired(); @@ -307,24 +289,23 @@ void 
MachineSchedulerLegacy::getAnalysisUsage(AnalysisUsage &AU) const { MachineFunctionPass::getAnalysisUsage(AU); } -char PostMachineSchedulerLegacy::ID = 0; +char PostMachineScheduler::ID = 0; -char &llvm::PostMachineSchedulerID = PostMachineSchedulerLegacy::ID; +char &llvm::PostMachineSchedulerID = PostMachineScheduler::ID; -INITIALIZE_PASS_BEGIN(PostMachineSchedulerLegacy, "postmisched", +INITIALIZE_PASS_BEGIN(PostMachineScheduler, "postmisched", "PostRA Machine Instruction Scheduler", false, false) INITIALIZE_PASS_DEPENDENCY(MachineDominatorTreeWrapperPass) INITIALIZE_PASS_DEPENDENCY(MachineLoopInfoWrapperPass) INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) -INITIALIZE_PASS_END(PostMachineSchedulerLegacy, "postmisched", +INITIALIZE_PASS_END(PostMachineScheduler, "postmisched", "PostRA Machine Instruction Scheduler", false, false) -PostMachineSchedulerLegacy::PostMachineSchedulerLegacy() - : MachineFunctionPass(ID) { - initializePostMachineSchedulerLegacyPass(*PassRegistry::getPassRegistry()); +PostMachineScheduler::PostMachineScheduler() : MachineSchedulerBase(ID) { + initializePostMachineSchedulerPass(*PassRegistry::getPassRegistry()); } -void PostMachineSchedulerLegacy::getAnalysisUsage(AnalysisUsage &AU) const { +void PostMachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const { AU.setPreservesCFG(); AU.addRequired(); AU.addRequired(); @@ -403,40 +384,18 @@ nextIfDebug(MachineBasicBlock::iterator I, .getNonConstIterator(); } -MachineSchedulerImpl::MachineSchedulerImpl(MachineFunction &Func, - MachineFunctionPass *P) - : P(P) { - MF = &Func; - MLI = &P->getAnalysis().getLI(); - MDT = &P->getAnalysis().getDomTree(); - TM = &P->getAnalysis().getTM(); - AA = &P->getAnalysis().getAAResults(); - LIS = &P->getAnalysis().getLIS(); -} - -MachineSchedulerImpl::MachineSchedulerImpl(MachineFunction &Func, - MachineFunctionAnalysisManager &MFAM, - const TargetMachine *TargetM) - : MFAM(&MFAM) { - MF = &Func; - TM = TargetM; - MLI = &MFAM.getResult(Func); - MDT = &MFAM.getResult(Func); - auto &FAM = MFAM.getResult(Func) - .getManager(); - AA = &FAM.getResult(Func.getFunction()); - LIS = &MFAM.getResult(Func); -} - /// Instantiate a ScheduleDAGInstrs that will be owned by the caller. -ScheduleDAGInstrs *MachineSchedulerImpl::createMachineScheduler() { +ScheduleDAGInstrs *MachineScheduler::createMachineScheduler() { // Select the scheduler, or set the default. MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt; if (Ctor != useDefaultMachineSched) return Ctor(this); + const TargetMachine &TM = + getAnalysis().getTM(); + // Get the default scheduler set by the target for this function. - ScheduleDAGInstrs *Scheduler = TM->createMachineScheduler(this); + ScheduleDAGInstrs *Scheduler = TM.createMachineScheduler(this); if (Scheduler) return Scheduler; @@ -444,60 +403,14 @@ ScheduleDAGInstrs *MachineSchedulerImpl::createMachineScheduler() { return createGenericSchedLive(this); } -bool MachineSchedulerImpl::run() { - if (VerifyScheduling) { - LLVM_DEBUG(LIS->dump()); - const char *MSchedBanner = "Before machine scheduling."; - if (P) - MF->verify(P, MSchedBanner, &errs()); - else - MF->verify(*MFAM, MSchedBanner, &errs()); - } - RegClassInfo->runOnMachineFunction(*MF); - - // Instantiate the selected scheduler for this target, function, and - // optimization level. 
- std::unique_ptr Scheduler(createMachineScheduler()); - scheduleRegions(*Scheduler, false); - - LLVM_DEBUG(LIS->dump()); - if (VerifyScheduling) { - const char *MSchedBanner = "After machine scheduling."; - if (P) - MF->verify(P, MSchedBanner, &errs()); - else - MF->verify(*MFAM, MSchedBanner, &errs()); - } - return true; -} - -PostMachineSchedulerImpl::PostMachineSchedulerImpl(MachineFunction &Func, - MachineFunctionPass *P) - : P(P) { - MF = &Func; - MLI = &P->getAnalysis().getLI(); - TM = &P->getAnalysis().getTM(); - AA = &P->getAnalysis().getAAResults(); -} - -PostMachineSchedulerImpl::PostMachineSchedulerImpl( - MachineFunction &Func, MachineFunctionAnalysisManager &MFAM, - const TargetMachine *TargetM) - : MFAM(&MFAM) { - MF = &Func; - TM = TargetM; - MLI = &MFAM.getResult(Func); - auto &FAM = MFAM.getResult(Func) - .getManager(); - AA = &FAM.getResult(Func.getFunction()); -} - /// Instantiate a ScheduleDAGInstrs for PostRA scheduling that will be owned by /// the caller. We don't have a command line option to override the postRA /// scheduler. The Target must configure it. -ScheduleDAGInstrs *PostMachineSchedulerImpl::createPostMachineScheduler() { +ScheduleDAGInstrs *PostMachineScheduler::createPostMachineScheduler() { + const TargetMachine &TM = + getAnalysis().getTM(); // Get the postRA scheduler set by the target for this function. - ScheduleDAGInstrs *Scheduler = TM->createPostMachineScheduler(this); + ScheduleDAGInstrs *Scheduler = TM.createPostMachineScheduler(this); if (Scheduler) return Scheduler; @@ -505,30 +418,6 @@ ScheduleDAGInstrs *PostMachineSchedulerImpl::createPostMachineScheduler() { return createGenericSchedPostRA(this); } -bool PostMachineSchedulerImpl::run() { - if (VerifyScheduling) { - const char *PostMSchedBanner = "Before post machine scheduling."; - if (P) - MF->verify(P, PostMSchedBanner, &errs()); - else - MF->verify(*MFAM, PostMSchedBanner, &errs()); - } - - // Instantiate the selected scheduler for this target, function, and - // optimization level. - std::unique_ptr Scheduler(createPostMachineScheduler()); - scheduleRegions(*Scheduler, true); - - if (VerifyScheduling) { - const char *PostMSchedBanner = "After post machine scheduling."; - if (P) - MF->verify(P, PostMSchedBanner, &errs()); - else - MF->verify(*MFAM, PostMSchedBanner, &errs()); - } - return true; -} - /// Top-level MachineScheduler pass driver. /// /// Visit blocks in function order. Divide each block into scheduling regions @@ -545,84 +434,72 @@ bool PostMachineSchedulerImpl::run() { /// ScheduleDAGInstrs whenever adding or removing instructions. A much simpler /// design would be to split blocks at scheduling boundaries, but LLVM has a /// general bias against block splitting purely for implementation simplicity. -bool MachineSchedulerLegacy::runOnMachineFunction(MachineFunction &MF) { - if (skipFunction(MF.getFunction())) +bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) { + if (skipFunction(mf.getFunction())) return false; if (EnableMachineSched.getNumOccurrences()) { if (!EnableMachineSched) return false; - } else if (!MF.getSubtarget().enableMachineScheduler()) { + } else if (!mf.getSubtarget().enableMachineScheduler()) return false; - } - LLVM_DEBUG(dbgs() << "Before MISched:\n"; MF.print(dbgs())); + LLVM_DEBUG(dbgs() << "Before MISched:\n"; mf.print(dbgs())); - MachineSchedulerImpl Impl(MF, this); - return Impl.run(); -} + // Initialize the context of the pass. 
+ MF = &mf; + MLI = &getAnalysis().getLI(); + MDT = &getAnalysis().getDomTree(); + AA = &getAnalysis().getAAResults(); -PreservedAnalyses -MachineSchedulerPass::run(MachineFunction &MF, - MachineFunctionAnalysisManager &MFAM) { - if (EnableMachineSched.getNumOccurrences()) { - if (!EnableMachineSched) - return PreservedAnalyses::all(); - } else if (!MF.getSubtarget().enableMachineScheduler()) { - return PreservedAnalyses::all(); - } + LIS = &getAnalysis().getLIS(); - LLVM_DEBUG(dbgs() << "Before MISched:\n"; MF.print(dbgs())); + if (VerifyScheduling) { + LLVM_DEBUG(LIS->dump()); + MF->verify(this, "Before machine scheduling.", &errs()); + } + RegClassInfo->runOnMachineFunction(*MF); - MachineSchedulerImpl Impl(MF, MFAM, TM); - bool Changed = Impl.run(); - if (!Changed) - return PreservedAnalyses::all(); + // Instantiate the selected scheduler for this target, function, and + // optimization level. + std::unique_ptr Scheduler(createMachineScheduler()); + scheduleRegions(*Scheduler, false); - PreservedAnalyses PA = getMachineFunctionPassPreservedAnalyses(); - PA.preserveSet(); - PA.preserve(); - PA.preserve(); - return PA; + LLVM_DEBUG(LIS->dump()); + if (VerifyScheduling) + MF->verify(this, "After machine scheduling.", &errs()); + return true; } -bool PostMachineSchedulerLegacy::runOnMachineFunction(MachineFunction &MF) { - if (skipFunction(MF.getFunction())) +bool PostMachineScheduler::runOnMachineFunction(MachineFunction &mf) { + if (skipFunction(mf.getFunction())) return false; if (EnablePostRAMachineSched.getNumOccurrences()) { if (!EnablePostRAMachineSched) return false; - } else if (!MF.getSubtarget().enablePostRAMachineScheduler()) { + } else if (!mf.getSubtarget().enablePostRAMachineScheduler()) { LLVM_DEBUG(dbgs() << "Subtarget disables post-MI-sched.\n"); return false; } - LLVM_DEBUG(dbgs() << "Before post-MI-sched:\n"; MF.print(dbgs())); + LLVM_DEBUG(dbgs() << "Before post-MI-sched:\n"; mf.print(dbgs())); - PostMachineSchedulerImpl Impl(MF, this); - return Impl.run(); -} + // Initialize the context of the pass. + MF = &mf; + MLI = &getAnalysis().getLI(); + AA = &getAnalysis().getAAResults(); -PreservedAnalyses -PostMachineSchedulerPass::run(MachineFunction &MF, - MachineFunctionAnalysisManager &MFAM) { - if (EnablePostRAMachineSched.getNumOccurrences()) { - if (!EnablePostRAMachineSched) - return PreservedAnalyses::all(); - } else if (!MF.getSubtarget().enablePostRAMachineScheduler()) { - LLVM_DEBUG(dbgs() << "Subtarget disables post-MI-sched.\n"); - return PreservedAnalyses::all(); - } - LLVM_DEBUG(dbgs() << "Before post-MI-sched:\n"; MF.print(dbgs())); + if (VerifyScheduling) + MF->verify(this, "Before post machine scheduling.", &errs()); - PostMachineSchedulerImpl Impl(MF, MFAM, TM); - bool Changed = Impl.run(); - if (!Changed) - return PreservedAnalyses::all(); + // Instantiate the selected scheduler for this target, function, and + // optimization level. 
+ std::unique_ptr Scheduler(createPostMachineScheduler()); + scheduleRegions(*Scheduler, true); - PreservedAnalyses PA = getMachineFunctionPassPreservedAnalyses(); - PA.preserveSet(); - return PA; + if (VerifyScheduling) + MF->verify(this, "After post machine scheduling.", &errs()); + return true; } /// Return true of the given instruction should not be included in a scheduling diff --git a/llvm/lib/CodeGen/RDFGraph.cpp b/llvm/lib/CodeGen/RDFGraph.cpp index 483e61db788f4..805b0ee7be0bc 100644 --- a/llvm/lib/CodeGen/RDFGraph.cpp +++ b/llvm/lib/CodeGen/RDFGraph.cpp @@ -966,15 +966,18 @@ void DataFlowGraph::build(const Config &config) { // Build a map "PhiM" which will contain, for each block, the set // of references that will require phi definitions in that block. + // "PhiClobberM" map contains references that require phis for clobbering defs BlockRefsMap PhiM(getPRI()); + BlockRefsMap PhiClobberM(getPRI()); for (Block BA : Blocks) - recordDefsForDF(PhiM, BA); + recordDefsForDF(PhiM, PhiClobberM, BA); for (Block BA : Blocks) buildPhis(PhiM, BA); // Link all the refs. This will recursively traverse the dominator tree. + // Phis for clobbering defs are added here. DefStackMap DM; - linkBlockRefs(DM, EA); + linkBlockRefs(DM, PhiClobberM, EA); // Finally, remove all unused phi nodes. if (!(BuildCfg.Options & BuildOptions::KeepDeadPhis)) @@ -1378,7 +1381,9 @@ void DataFlowGraph::buildStmt(Block BA, MachineInstr &In) { // Scan all defs in the block node BA and record in PhiM the locations of // phi nodes corresponding to these defs. -void DataFlowGraph::recordDefsForDF(BlockRefsMap &PhiM, Block BA) { +// Clobbering defs in BA are recorded in PhiClobberM +void DataFlowGraph::recordDefsForDF(BlockRefsMap &PhiM, + BlockRefsMap &PhiClobberM, Block BA) { // Check all defs from block BA and record them in each block in BA's // iterated dominance frontier. This information will later be used to // create phi nodes. @@ -1394,11 +1399,17 @@ void DataFlowGraph::recordDefsForDF(BlockRefsMap &PhiM, Block BA) { // This is done to make sure that each defined reference gets only one // phi node, even if it is defined multiple times. RegisterAggr Defs(getPRI()); + RegisterAggr ClobberDefs(getPRI()); for (Instr IA : BA.Addr->members(*this)) { for (Ref RA : IA.Addr->members_if(IsDef, *this)) { RegisterRef RR = RA.Addr->getRegRef(*this); - if (RR.isReg() && isTracked(RR)) + if (!isTracked(RR)) + continue; + if (RR.isReg()) Defs.insert(RR); + // Clobbering def + else if (RR.isMask()) + ClobberDefs.insert(RR); } } @@ -1416,12 +1427,14 @@ void DataFlowGraph::recordDefsForDF(BlockRefsMap &PhiM, Block BA) { for (auto *DB : IDF) { Block DBA = findBlock(DB); PhiM[DBA.Id].insert(Defs); + PhiClobberM[DBA.Id].insert(ClobberDefs); } } // Given the locations of phi nodes in the map PhiM, create the phi nodes // that are located in the block node BA. -void DataFlowGraph::buildPhis(BlockRefsMap &PhiM, Block BA) { +void DataFlowGraph::buildPhis(BlockRefsMap &PhiM, Block BA, + const DefStackMap &DefM) { // Check if this blocks has any DF defs, i.e. if there are any defs // that this block is in the iterated dominance frontier of. 
auto HasDF = PhiM.find(BA.Id); @@ -1434,10 +1447,37 @@ void DataFlowGraph::buildPhis(BlockRefsMap &PhiM, Block BA) { for (MachineBasicBlock *PB : MBB->predecessors()) Preds.push_back(findBlock(PB)); + RegisterAggr PhiDefs(getPRI()); + // DefM will be non empty when we are building phis + // for clobbering defs + if (!DefM.empty()) { + for (Instr IA : BA.Addr->members_if(IsPhi, *this)) { + for (Def DA : IA.Addr->members_if(IsDef, *this)) { + auto DR = DA.Addr->getRegRef(*this); + PhiDefs.insert(DR); + } + } + } + + MachineRegisterInfo &MRI = MF.getRegInfo(); const RegisterAggr &Defs = PhiM[BA.Id]; uint16_t PhiFlags = NodeAttrs::PhiRef | NodeAttrs::Preserving; for (RegisterRef RR : Defs.refs()) { + if (!DefM.empty()) { + auto F = DefM.find(RR.Reg); + // Do not create a phi for unallocatable registers, or for registers + // that are never livein to BA. + // If a phi exists for RR, do not create another. + if (!MRI.isAllocatable(RR.Reg) || PhiDefs.hasCoverOf(RR) || + F == DefM.end() || F->second.empty()) + continue; + // Do not create a phi, if all reaching defs are clobbering + auto RDef = F->second.top(); + if (RDef->Addr->getFlags() & NodeAttrs::Clobbering) + continue; + PhiDefs.insert(RR); + } Phi PA = newPhi(BA); PA.Addr->addMember(newDef(PA, RR, PhiFlags), *this); @@ -1576,7 +1616,15 @@ void DataFlowGraph::linkStmtRefs(DefStackMap &DefM, Stmt SA, Predicate P) { // Create data-flow links for all instructions in the block node BA. This // will include updating any phi nodes in BA. -void DataFlowGraph::linkBlockRefs(DefStackMap &DefM, Block BA) { +void DataFlowGraph::linkBlockRefs(DefStackMap &DefM, BlockRefsMap &PhiClobberM, + Block BA) { + // Create phi nodes for clobbering defs. + // Since a huge number of registers can get clobbered, it would result in many + // phi nodes being created in the graph. Only create phi nodes that have a non + // clobbering reaching def. Use DefM to get not clobbering defs reaching a + // block. + buildPhis(PhiClobberM, BA, DefM); + // Push block delimiters. markBlock(BA.Id, DefM); @@ -1613,7 +1661,7 @@ void DataFlowGraph::linkBlockRefs(DefStackMap &DefM, Block BA) { for (auto *I : *N) { MachineBasicBlock *SB = I->getBlock(); Block SBA = findBlock(SB); - linkBlockRefs(DefM, SBA); + linkBlockRefs(DefM, PhiClobberM, SBA); } // Link the phi uses from the successor blocks. 
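The RDFGraph change above places phi nodes for clobbering defs the same way as for ordinary defs: every block in the iterated dominance frontier (IDF) of the defining blocks gets a phi, and PhiClobberM is simply a second such map that is consumed later, while linking blocks, so clobber-only phis can be filtered against the reaching-def stack. As a rough standalone illustration of the placement step only (this is not the RDFGraph API; the block IDs and frontier map are invented for the sketch), the classic worklist computation of an IDF looks like this:

// Sketch: compute the iterated dominance frontier of a set of defining
// blocks, given each block's dominance frontier DF(B). A phi for the
// register is then created in every block of the result.
#include <map>
#include <set>
#include <vector>

using Block = int;
using FrontierMap = std::map<Block, std::set<Block>>; // B -> DF(B)

std::set<Block> iteratedDominanceFrontier(const FrontierMap &DF,
                                          const std::set<Block> &DefBlocks) {
  std::set<Block> IDF;
  std::vector<Block> Worklist(DefBlocks.begin(), DefBlocks.end());
  while (!Worklist.empty()) {
    Block B = Worklist.back();
    Worklist.pop_back();
    auto It = DF.find(B);
    if (It == DF.end())
      continue;
    for (Block F : It->second)
      if (IDF.insert(F).second) // newly discovered: needs a phi, and may
        Worklist.push_back(F);  // contribute further frontier blocks
  }
  return IDF;
}

The patch runs this placement once with the ordinary defs (PhiM) and once with the clobbering defs (PhiClobberM), and only materializes a clobber phi when a non-clobbering reaching def is present on the DefM stack, which keeps the graph from being flooded with phis for mass-clobbered registers.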
diff --git a/llvm/lib/CodeGen/RegAllocBasic.cpp b/llvm/lib/CodeGen/RegAllocBasic.cpp index 51e047b2fa3f0..e1f05406297d2 100644 --- a/llvm/lib/CodeGen/RegAllocBasic.cpp +++ b/llvm/lib/CodeGen/RegAllocBasic.cpp @@ -135,7 +135,7 @@ INITIALIZE_PASS_DEPENDENCY(LiveDebugVariablesWrapperLegacy) INITIALIZE_PASS_DEPENDENCY(SlotIndexesWrapperPass) INITIALIZE_PASS_DEPENDENCY(LiveIntervalsWrapperPass) INITIALIZE_PASS_DEPENDENCY(RegisterCoalescerLegacy) -INITIALIZE_PASS_DEPENDENCY(MachineSchedulerLegacy) +INITIALIZE_PASS_DEPENDENCY(MachineScheduler) INITIALIZE_PASS_DEPENDENCY(LiveStacksWrapperLegacy) INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) INITIALIZE_PASS_DEPENDENCY(MachineDominatorTreeWrapperPass) diff --git a/llvm/lib/CodeGen/RegAllocGreedy.cpp b/llvm/lib/CodeGen/RegAllocGreedy.cpp index 2e43ad78e5d9b..465c4e8feffbb 100644 --- a/llvm/lib/CodeGen/RegAllocGreedy.cpp +++ b/llvm/lib/CodeGen/RegAllocGreedy.cpp @@ -155,7 +155,7 @@ INITIALIZE_PASS_DEPENDENCY(LiveDebugVariablesWrapperLegacy) INITIALIZE_PASS_DEPENDENCY(SlotIndexesWrapperPass) INITIALIZE_PASS_DEPENDENCY(LiveIntervalsWrapperPass) INITIALIZE_PASS_DEPENDENCY(RegisterCoalescerLegacy) -INITIALIZE_PASS_DEPENDENCY(MachineSchedulerLegacy) +INITIALIZE_PASS_DEPENDENCY(MachineScheduler) INITIALIZE_PASS_DEPENDENCY(LiveStacksWrapperLegacy) INITIALIZE_PASS_DEPENDENCY(MachineDominatorTreeWrapperPass) INITIALIZE_PASS_DEPENDENCY(MachineLoopInfoWrapperPass) diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp index 6c9c96ceaa4ba..f61928a66eb3c 100644 --- a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp @@ -4609,12 +4609,15 @@ void SelectionDAGLegalize::ConvertNodeToLibcall(SDNode *Node) { ExpandFPLibCall(Node, RTLIB::LDEXP_F32, RTLIB::LDEXP_F64, RTLIB::LDEXP_F80, RTLIB::LDEXP_F128, RTLIB::LDEXP_PPCF128, Results); break; + case ISD::FMODF: case ISD::FFREXP: { - RTLIB::Libcall LC = RTLIB::getFREXP(Node->getValueType(0)); + EVT VT = Node->getValueType(0); + RTLIB::Libcall LC = Node->getOpcode() == ISD::FMODF ? 
RTLIB::getMODF(VT) + : RTLIB::getFREXP(VT); bool Expanded = DAG.expandMultipleResultFPLibCall(LC, Node, Results, /*CallRetResNo=*/0); if (!Expanded) - llvm_unreachable("Expected scalar FFREXP to expand to libcall!"); + llvm_unreachable("Expected scalar FFREXP/FMODF to expand to libcall!"); break; } case ISD::FPOWI: @@ -5503,9 +5506,10 @@ void SelectionDAGLegalize::PromoteNode(SDNode *Node) { Results.push_back(Tmp2.getValue(1)); break; } + case ISD::FMODF: case ISD::FSINCOS: { Tmp1 = DAG.getNode(ISD::FP_EXTEND, dl, NVT, Node->getOperand(0)); - Tmp2 = DAG.getNode(ISD::FSINCOS, dl, DAG.getVTList(NVT, NVT), Tmp1, + Tmp2 = DAG.getNode(Node->getOpcode(), dl, DAG.getVTList(NVT, NVT), Tmp1, Node->getFlags()); Tmp3 = DAG.getIntPtrConstant(0, dl, /*isTarget=*/true); for (unsigned ResNum = 0; ResNum < Node->getNumValues(); ResNum++) diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp index 71f100bfa0343..2a4eed1ed527a 100644 --- a/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp @@ -2766,10 +2766,10 @@ void DAGTypeLegalizer::PromoteFloatResult(SDNode *N, unsigned ResNo) { case ISD::FLDEXP: R = PromoteFloatRes_ExpOp(N); break; case ISD::FFREXP: R = PromoteFloatRes_FFREXP(N); break; + case ISD::FMODF: case ISD::FSINCOS: R = PromoteFloatRes_UnaryWithTwoFPResults(N); break; - case ISD::FP_ROUND: R = PromoteFloatRes_FP_ROUND(N); break; case ISD::STRICT_FP_ROUND: R = PromoteFloatRes_STRICT_FP_ROUND(N); @@ -3228,6 +3228,7 @@ void DAGTypeLegalizer::SoftPromoteHalfResult(SDNode *N, unsigned ResNo) { case ISD::FFREXP: R = SoftPromoteHalfRes_FFREXP(N); break; + case ISD::FMODF: case ISD::FSINCOS: R = SoftPromoteHalfRes_UnaryWithTwoFPResults(N); break; diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp index 6ad08bce44b0a..416da1bb7bfcf 100644 --- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp @@ -454,6 +454,7 @@ SDValue VectorLegalizer::LegalizeOp(SDValue Op) { case ISD::UMULO: case ISD::FCANONICALIZE: case ISD::FFREXP: + case ISD::FMODF: case ISD::FSINCOS: case ISD::SADDSAT: case ISD::UADDSAT: @@ -1223,6 +1224,14 @@ void VectorLegalizer::Expand(SDNode *Node, SmallVectorImpl &Results) { return; break; } + case ISD::FMODF: { + RTLIB::Libcall LC = + RTLIB::getMODF(Node->getValueType(0).getVectorElementType()); + if (DAG.expandMultipleResultFPLibCall(LC, Node, Results, + /*CallRetResNo=*/0)) + return; + break; + } case ISD::VECTOR_COMPRESS: Results.push_back(TLI.expandVECTOR_COMPRESS(Node, DAG)); return; diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp index e00be6c395876..da793a34879b8 100644 --- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp @@ -133,6 +133,7 @@ void DAGTypeLegalizer::ScalarizeVectorResult(SDNode *N, unsigned ResNo) { case ISD::ADDRSPACECAST: R = ScalarizeVecRes_ADDRSPACECAST(N); break; + case ISD::FMODF: case ISD::FFREXP: case ISD::FSINCOS: R = ScalarizeVecRes_UnaryOpWithTwoResults(N, ResNo); @@ -1261,6 +1262,7 @@ void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) { case ISD::ADDRSPACECAST: SplitVecRes_ADDRSPACECAST(N, Lo, Hi); break; + case ISD::FMODF: case ISD::FFREXP: case ISD::FSINCOS: SplitVecRes_UnaryOpWithTwoResults(N, ResNo, Lo, Hi); @@ -4811,6 +4813,7 @@ void 
DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) { case ISD::VP_FSHR: Res = WidenVecRes_Ternary(N); break; + case ISD::FMODF: case ISD::FFREXP: case ISD::FSINCOS: { if (!unrollExpandedOp()) diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp index 4e1ce6af3abc8..4a9ac8580e4e2 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -6977,6 +6977,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, getValue(I.getArgOperand(0)), getValue(I.getArgOperand(1)), Flags)); return; + case Intrinsic::modf: case Intrinsic::sincos: case Intrinsic::frexp: { unsigned Opcode; @@ -6986,6 +6987,9 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, case Intrinsic::sincos: Opcode = ISD::FSINCOS; break; + case Intrinsic::modf: + Opcode = ISD::FMODF; + break; case Intrinsic::frexp: Opcode = ISD::FFREXP; break; diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp index f63c8dd3df1c8..7b1a2d640a2bd 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp @@ -219,6 +219,7 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const { case ISD::FCOS: return "fcos"; case ISD::STRICT_FCOS: return "strict_fcos"; case ISD::FSINCOS: return "fsincos"; + case ISD::FMODF: return "fmodf"; case ISD::FTAN: return "ftan"; case ISD::STRICT_FTAN: return "strict_ftan"; case ISD::FASIN: return "fasin"; diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp index 9c56912aa6ba0..1f39ec205c517 100644 --- a/llvm/lib/CodeGen/TargetLoweringBase.cpp +++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp @@ -407,6 +407,11 @@ RTLIB::Libcall RTLIB::getFSINCOS(EVT RetVT) { SINCOS_PPCF128); } +RTLIB::Libcall RTLIB::getMODF(EVT RetVT) { + return getFPLibCall(RetVT, MODF_F32, MODF_F64, MODF_F80, MODF_F128, + MODF_PPCF128); +} + RTLIB::Libcall RTLIB::getOutlineAtomicHelper(const Libcall (&LC)[5][4], AtomicOrdering Order, uint64_t MemSize) { @@ -775,9 +780,9 @@ void TargetLoweringBase::initActions() { setOperationAction({ISD::BITREVERSE, ISD::PARITY}, VT, Expand); // These library functions default to expand. - setOperationAction( - {ISD::FROUND, ISD::FPOWI, ISD::FLDEXP, ISD::FFREXP, ISD::FSINCOS}, VT, - Expand); + setOperationAction({ISD::FROUND, ISD::FPOWI, ISD::FLDEXP, ISD::FFREXP, + ISD::FSINCOS, ISD::FMODF}, + VT, Expand); // These operations default to expand for vector types. if (VT.isVector()) diff --git a/llvm/lib/DebugInfo/DWARF/DWARFDebugLine.cpp b/llvm/lib/DebugInfo/DWARF/DWARFDebugLine.cpp index adcd0aa329420..62bf3d4ecaaf0 100644 --- a/llvm/lib/DebugInfo/DWARF/DWARFDebugLine.cpp +++ b/llvm/lib/DebugInfo/DWARF/DWARFDebugLine.cpp @@ -1274,13 +1274,14 @@ Error DWARFDebugLine::LineTable::parse( // Sort all sequences so that address lookup will work faster. if (!Sequences.empty()) { - llvm::sort(Sequences, Sequence::orderByHighPC); + llvm::stable_sort(Sequences, Sequence::orderByHighPC); // Note: actually, instruction address ranges of sequences should not // overlap (in shared objects and executables). If they do, the address // lookup would still work, though, but result would be ambiguous. // We don't report warning in this case. For example, // sometimes .so compiled from multiple object files contains a few // rudimentary sequences for address ranges [0x0, 0xsomething). 
+ // Address ranges may also overlap when using ICF. } // Terminate the table with a final blank line to clearly delineate it from diff --git a/llvm/lib/ExecutionEngine/Orc/ExecutorProcessControl.cpp b/llvm/lib/ExecutionEngine/Orc/ExecutorProcessControl.cpp index b51fa24be76d1..7b38150ab4b65 100644 --- a/llvm/lib/ExecutionEngine/Orc/ExecutorProcessControl.cpp +++ b/llvm/lib/ExecutionEngine/Orc/ExecutorProcessControl.cpp @@ -9,8 +9,7 @@ #include "llvm/ExecutionEngine/Orc/ExecutorProcessControl.h" #include "llvm/ExecutionEngine/Orc/Core.h" -#include "llvm/ExecutionEngine/Orc/Shared/OrcRTBridge.h" -#include "llvm/ExecutionEngine/Orc/TargetProcess/RegisterEHFrames.h" +#include "llvm/ExecutionEngine/Orc/TargetProcess/DefaultHostBootstrapValues.h" #include "llvm/ExecutionEngine/Orc/TargetProcess/TargetExecutionUtils.h" #include "llvm/Support/Process.h" #include "llvm/TargetParser/Host.h" @@ -49,12 +48,12 @@ SelfExecutorProcessControl::SelfExecutorProcessControl( if (this->TargetTriple.isOSBinFormatMachO()) GlobalManglingPrefix = '_'; - this->BootstrapSymbols[rt::RegisterEHFrameSectionWrapperName] = - ExecutorAddr::fromPtr(&llvm_orc_registerEHFrameSectionWrapper); - this->BootstrapSymbols[rt::DeregisterEHFrameSectionWrapperName] = - ExecutorAddr::fromPtr(&llvm_orc_deregisterEHFrameSectionWrapper); + addDefaultBootstrapValuesForHostProcess(BootstrapMap, BootstrapSymbols); #ifdef __APPLE__ + // FIXME: Don't add an UnwindInfoManager by default -- it's redundant when + // the ORC runtime is loaded. We'll need a way to document this and + // allow clients to choose. this->UnwindInfoMgr = UnwindInfoManager::TryCreate(); if (this->UnwindInfoMgr) this->UnwindInfoMgr->addBootstrapSymbols(this->BootstrapSymbols); diff --git a/llvm/lib/ExecutionEngine/Orc/LLJIT.cpp b/llvm/lib/ExecutionEngine/Orc/LLJIT.cpp index 938fe58ef85cf..dd844ae3a42bc 100644 --- a/llvm/lib/ExecutionEngine/Orc/LLJIT.cpp +++ b/llvm/lib/ExecutionEngine/Orc/LLJIT.cpp @@ -1221,22 +1221,37 @@ Expected setUpGenericLLVMIRPlatform(LLJIT &J) { if (auto *OLL = dyn_cast(&J.getObjLinkingLayer())) { - bool CompactUnwindInfoSupported = false; + bool UseEHFrames = true; // Enable compact-unwind support if possible. if (J.getTargetTriple().isOSDarwin() || J.getTargetTriple().isOSBinFormatMachO()) { - if (auto UIRP = UnwindInfoRegistrationPlugin::Create( - J.getIRCompileLayer(), PlatformJD)) { - CompactUnwindInfoSupported = true; - OLL->addPlugin(std::move(*UIRP)); - LLVM_DEBUG(dbgs() << "Enabled compact-unwind support.\n"); - } else - consumeError(UIRP.takeError()); + + // Check if the bootstrap map says that we should force eh-frames: + // Older libunwinds require this as they don't have a dynamic + // registration API for compact-unwind. + std::optional ForceEHFrames; + if (auto Err = J.getExecutionSession().getBootstrapMapValue( + "darwin-use-ehframes-only", ForceEHFrames)) + return Err; + if (ForceEHFrames.has_value()) + UseEHFrames = *ForceEHFrames; + else + UseEHFrames = false; + + // If UseEHFrames hasn't been set then we're good to use compact-unwind. + if (!UseEHFrames) { + if (auto UIRP = UnwindInfoRegistrationPlugin::Create( + J.getIRCompileLayer(), PlatformJD)) { + OLL->addPlugin(std::move(*UIRP)); + LLVM_DEBUG(dbgs() << "Enabled compact-unwind support.\n"); + } else + return UIRP.takeError(); + } } // Otherwise fall back to standard unwind registration. 
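The decision above boils down to a three-way use of the bootstrap value: absent means compact-unwind registration is safe, present-and-true forces the eh-frame fallback, present-and-false explicitly allows compact-unwind. A minimal standalone sketch of just that decision (independent of the ExecutionSession API; the enum and function names here are invented for illustration):

// Sketch of the unwind-registration choice made in setUpGenericLLVMIRPlatform.
// The executor advertises "darwin-use-ehframes-only" when its libunwind lacks
// a dynamic compact-unwind registration API.
#include <optional>

enum class UnwindMode { CompactUnwind, EHFrames };

UnwindMode chooseUnwindMode(std::optional<bool> ForceEHFrames) {
  // Missing value: the host supports dynamic compact-unwind registration.
  // Present value: honor it (true forces the eh-frame fallback).
  return ForceEHFrames.value_or(false) ? UnwindMode::EHFrames
                                       : UnwindMode::CompactUnwind;
}

DefaultHostBootstrapValues.cpp, further down in this patch, is what populates the value: on Apple platforms it probes the host libunwind for __unw_add_find_dynamic_unwind_sections and only sets the flag when that entry point is missing.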
- if (!CompactUnwindInfoSupported) { + if (UseEHFrames) { auto &ES = J.getExecutionSession(); if (auto EHFrameRegistrar = EPCEHFrameRegistrar::Create(ES)) { OLL->addPlugin(std::make_unique( diff --git a/llvm/lib/ExecutionEngine/Orc/MachOPlatform.cpp b/llvm/lib/ExecutionEngine/Orc/MachOPlatform.cpp index 845990d965b16..d4e341a96f5b1 100644 --- a/llvm/lib/ExecutionEngine/Orc/MachOPlatform.cpp +++ b/llvm/lib/ExecutionEngine/Orc/MachOPlatform.cpp @@ -481,6 +481,15 @@ MachOPlatform::MachOPlatform( ObjLinkingLayer.addPlugin(std::make_unique(*this)); PlatformJD.addGenerator(std::move(OrcRuntimeGenerator)); + { + // Check for force-eh-frame + std::optional ForceEHFrames; + if ((Err = ES.getBootstrapMapValue("darwin-use-ehframes-only", + ForceEHFrames))) + return; + this->ForceEHFrames = ForceEHFrames.has_value() ? *ForceEHFrames : false; + } + BootstrapInfo BI; Bootstrap = &BI; @@ -811,6 +820,12 @@ void MachOPlatform::MachOPlatformPlugin::modifyPassConfig( HeaderAddr = I->second; } + // If we're forcing eh-frame use then discard the compact-unwind section + // immediately to prevent FDEs from being stripped. + if (MP.ForceEHFrames) + if (auto *CUSec = LG.findSectionByName(MachOCompactUnwindSectionName)) + LG.removeSection(*CUSec); + // Point the libunwind dso-base absolute symbol at the header for the // JITDylib. This will prevent us from synthesizing a new header for // every object. diff --git a/llvm/lib/ExecutionEngine/Orc/TargetProcess/CMakeLists.txt b/llvm/lib/ExecutionEngine/Orc/TargetProcess/CMakeLists.txt index 1d29a89d5eb09..9f3abac156adb 100644 --- a/llvm/lib/ExecutionEngine/Orc/TargetProcess/CMakeLists.txt +++ b/llvm/lib/ExecutionEngine/Orc/TargetProcess/CMakeLists.txt @@ -14,6 +14,7 @@ endif() add_llvm_component_library(LLVMOrcTargetProcess ExecutorSharedMemoryMapperService.cpp + DefaultHostBootstrapValues.cpp JITLoaderGDB.cpp JITLoaderPerf.cpp JITLoaderVTune.cpp diff --git a/llvm/lib/ExecutionEngine/Orc/TargetProcess/DefaultHostBootstrapValues.cpp b/llvm/lib/ExecutionEngine/Orc/TargetProcess/DefaultHostBootstrapValues.cpp new file mode 100644 index 0000000000000..c95b7ac5159fe --- /dev/null +++ b/llvm/lib/ExecutionEngine/Orc/TargetProcess/DefaultHostBootstrapValues.cpp @@ -0,0 +1,36 @@ +//===----- DefaultHostBootstrapValues.cpp - Defaults for host process -----===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "llvm/ExecutionEngine/Orc/TargetProcess/DefaultHostBootstrapValues.h" + +#include "llvm/ExecutionEngine/Orc/Shared/OrcRTBridge.h" +#include "llvm/ExecutionEngine/Orc/TargetProcess/RegisterEHFrames.h" + +#ifdef __APPLE__ +#include +#endif // __APPLE__ + +namespace llvm::orc { + +void addDefaultBootstrapValuesForHostProcess( + StringMap> &BootstrapMap, + StringMap &BootstrapSymbols) { + + // FIXME: We probably shouldn't set these on Windows? 
+ BootstrapSymbols[rt::RegisterEHFrameSectionWrapperName] = + ExecutorAddr::fromPtr(&llvm_orc_registerEHFrameSectionWrapper); + BootstrapSymbols[rt::DeregisterEHFrameSectionWrapperName] = + ExecutorAddr::fromPtr(&llvm_orc_deregisterEHFrameSectionWrapper); + +#ifdef __APPLE__ + if (!dlsym(RTLD_DEFAULT, "__unw_add_find_dynamic_unwind_sections")) + BootstrapMap["darwin-use-ehframes-only"].push_back(1); +#endif // __APPLE__ +} + +} // namespace llvm::orc diff --git a/llvm/lib/ExecutionEngine/Orc/TargetProcess/OrcRTBootstrap.cpp b/llvm/lib/ExecutionEngine/Orc/TargetProcess/OrcRTBootstrap.cpp index d88fbbfc86385..c4f201b353d27 100644 --- a/llvm/lib/ExecutionEngine/Orc/TargetProcess/OrcRTBootstrap.cpp +++ b/llvm/lib/ExecutionEngine/Orc/TargetProcess/OrcRTBootstrap.cpp @@ -106,10 +106,6 @@ void addTo(StringMap &M) { ExecutorAddr::fromPtr(&writeBuffersWrapper); M[rt::MemoryWritePointersWrapperName] = ExecutorAddr::fromPtr(&writePointersWrapper); - M[rt::RegisterEHFrameSectionWrapperName] = - ExecutorAddr::fromPtr(&llvm_orc_registerEHFrameSectionWrapper); - M[rt::DeregisterEHFrameSectionWrapperName] = - ExecutorAddr::fromPtr(&llvm_orc_deregisterEHFrameSectionWrapper); M[rt::RunAsMainWrapperName] = ExecutorAddr::fromPtr(&runAsMainWrapper); M[rt::RunAsVoidFunctionWrapperName] = ExecutorAddr::fromPtr(&runAsVoidFunctionWrapper); diff --git a/llvm/lib/MC/CMakeLists.txt b/llvm/lib/MC/CMakeLists.txt index e1d19196c8766..f49f14c848b90 100644 --- a/llvm/lib/MC/CMakeLists.txt +++ b/llvm/lib/MC/CMakeLists.txt @@ -1,6 +1,7 @@ add_llvm_component_library(LLVMMC ConstantPools.cpp DXContainerPSVInfo.cpp + DXContainerRootSignature.cpp ELFObjectWriter.cpp GOFFObjectWriter.cpp MCAsmBackend.cpp diff --git a/llvm/lib/MC/DXContainerRootSignature.cpp b/llvm/lib/MC/DXContainerRootSignature.cpp new file mode 100644 index 0000000000000..000d23f24d241 --- /dev/null +++ b/llvm/lib/MC/DXContainerRootSignature.cpp @@ -0,0 +1,23 @@ +//===- llvm/MC/DXContainerRootSignature.cpp - RootSignature -*- C++ -*-=======// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "llvm/MC/DXContainerRootSignature.h" +#include "llvm/Support/EndianStream.h" + +using namespace llvm; +using namespace llvm::mcdxbc; + +void RootSignatureHeader::write(raw_ostream &OS) { + + support::endian::write(OS, Version, llvm::endianness::little); + support::endian::write(OS, NumParameters, llvm::endianness::little); + support::endian::write(OS, RootParametersOffset, llvm::endianness::little); + support::endian::write(OS, NumStaticSamplers, llvm::endianness::little); + support::endian::write(OS, StaticSamplersOffset, llvm::endianness::little); + support::endian::write(OS, Flags, llvm::endianness::little); +} diff --git a/llvm/lib/MC/ELFObjectWriter.cpp b/llvm/lib/MC/ELFObjectWriter.cpp index 5f586fe19a5bb..68e7f1785fa23 100644 --- a/llvm/lib/MC/ELFObjectWriter.cpp +++ b/llvm/lib/MC/ELFObjectWriter.cpp @@ -71,9 +71,12 @@ STATISTIC(StrtabBytes, "Total size of SHT_STRTAB sections"); STATISTIC(SymtabBytes, "Total size of SHT_SYMTAB sections"); STATISTIC(RelocationBytes, "Total size of relocation sections"); STATISTIC(DynsymBytes, "Total size of SHT_DYNSYM sections"); -STATISTIC(DebugBytes, "Total size of debug info sections"); +STATISTIC( + DebugBytes, + "Total size of debug info sections (not including those written to .dwo)"); STATISTIC(UnwindBytes, "Total size of unwind sections"); STATISTIC(OtherBytes, "Total size of uncategorized sections"); +STATISTIC(DwoBytes, "Total size of sections written to .dwo file"); } // namespace stats @@ -969,7 +972,9 @@ void ELFWriter::writeSectionHeaders(const MCAssembler &Asm) { return Section->getFlags() & Flag; }; - if (Section->getName().starts_with(".debug")) { + if (Mode == DwoOnly) { + stats::DwoBytes += Size; + } else if (Section->getName().starts_with(".debug")) { stats::DebugBytes += Size; } else if (Section->getName().starts_with(".eh_frame")) { stats::UnwindBytes += Size; diff --git a/llvm/lib/ObjCopy/ELF/ELFObjcopy.cpp b/llvm/lib/ObjCopy/ELF/ELFObjcopy.cpp index 5aa0079f3fbc7..9c78f7433ad33 100644 --- a/llvm/lib/ObjCopy/ELF/ELFObjcopy.cpp +++ b/llvm/lib/ObjCopy/ELF/ELFObjcopy.cpp @@ -186,27 +186,28 @@ static std::unique_ptr createWriter(const CommonConfig &Config, } static Error dumpSectionToFile(StringRef SecName, StringRef Filename, - Object &Obj) { + StringRef InputFilename, Object &Obj) { for (auto &Sec : Obj.sections()) { if (Sec.Name == SecName) { if (Sec.Type == SHT_NOBITS) - return createStringError(object_error::parse_failed, - "cannot dump section '%s': it has no contents", - SecName.str().c_str()); + return createFileError(InputFilename, object_error::parse_failed, + "cannot dump section '%s': it has no contents", + SecName.str().c_str()); Expected> BufferOrErr = FileOutputBuffer::create(Filename, Sec.OriginalData.size()); if (!BufferOrErr) - return BufferOrErr.takeError(); + return createFileError(Filename, BufferOrErr.takeError()); std::unique_ptr Buf = std::move(*BufferOrErr); std::copy(Sec.OriginalData.begin(), Sec.OriginalData.end(), Buf->getBufferStart()); if (Error E = Buf->commit()) - return E; + return createFileError(Filename, std::move(E)); return Error::success(); } } - return createStringError(object_error::parse_failed, "section '%s' not found", - SecName.str().c_str()); + + return createFileError(InputFilename, object_error::parse_failed, + "section '%s' not found", SecName.str().c_str()); } Error Object::compressOrDecompressSections(const CommonConfig 
&Config) { @@ -798,7 +799,8 @@ static Error handleArgs(const CommonConfig &Config, const ELFConfig &ELFConfig, StringRef SectionName; StringRef FileName; std::tie(SectionName, FileName) = Flag.split('='); - if (Error E = dumpSectionToFile(SectionName, FileName, Obj)) + if (Error E = + dumpSectionToFile(SectionName, FileName, Config.InputFilename, Obj)) return E; } @@ -807,10 +809,10 @@ static Error handleArgs(const CommonConfig &Config, const ELFConfig &ELFConfig, // us to avoid reporting the inappropriate errors about removing symbols // named in relocations. if (Error E = replaceAndRemoveSections(Config, ELFConfig, Obj)) - return E; + return createFileError(Config.InputFilename, std::move(E)); if (Error E = updateAndRemoveSymbols(Config, ELFConfig, Obj)) - return E; + return createFileError(Config.InputFilename, std::move(E)); if (!Config.SetSectionAlignment.empty()) { for (SectionBase &Sec : Obj.sections()) { @@ -826,8 +828,8 @@ static Error handleArgs(const CommonConfig &Config, const ELFConfig &ELFConfig, if (Config.ChangeSectionLMAValAll > 0 && Seg.PAddr > std::numeric_limits::max() - Config.ChangeSectionLMAValAll) { - return createStringError( - errc::invalid_argument, + return createFileError( + Config.InputFilename, errc::invalid_argument, "address 0x" + Twine::utohexstr(Seg.PAddr) + " cannot be increased by 0x" + Twine::utohexstr(Config.ChangeSectionLMAValAll) + @@ -835,8 +837,8 @@ static Error handleArgs(const CommonConfig &Config, const ELFConfig &ELFConfig, } else if (Config.ChangeSectionLMAValAll < 0 && Seg.PAddr < std::numeric_limits::min() - Config.ChangeSectionLMAValAll) { - return createStringError( - errc::invalid_argument, + return createFileError( + Config.InputFilename, errc::invalid_argument, "address 0x" + Twine::utohexstr(Seg.PAddr) + " cannot be decreased by 0x" + Twine::utohexstr(std::abs(Config.ChangeSectionLMAValAll)) + @@ -849,10 +851,9 @@ static Error handleArgs(const CommonConfig &Config, const ELFConfig &ELFConfig, if (!Config.ChangeSectionAddress.empty()) { if (Obj.Type != ELF::ET_REL) - return createStringError( - object_error::invalid_file_type, + return createFileError( + Config.InputFilename, object_error::invalid_file_type, "cannot change section address in a non-relocatable file"); - StringMap SectionsToUpdateAddress; for (const SectionPatternAddressUpdate &PatternUpdate : make_range(Config.ChangeSectionAddress.rbegin(), @@ -863,8 +864,8 @@ static Error handleArgs(const CommonConfig &Config, const ELFConfig &ELFConfig, .second) { if (PatternUpdate.Update.Kind == AdjustKind::Subtract && Sec.Addr < PatternUpdate.Update.Value) { - return createStringError( - errc::invalid_argument, + return createFileError( + Config.InputFilename, errc::invalid_argument, "address 0x" + Twine::utohexstr(Sec.Addr) + " cannot be decreased by 0x" + Twine::utohexstr(PatternUpdate.Update.Value) + @@ -873,8 +874,8 @@ static Error handleArgs(const CommonConfig &Config, const ELFConfig &ELFConfig, if (PatternUpdate.Update.Kind == AdjustKind::Add && Sec.Addr > std::numeric_limits::max() - PatternUpdate.Update.Value) { - return createStringError( - errc::invalid_argument, + return createFileError( + Config.InputFilename, errc::invalid_argument, "address 0x" + Twine::utohexstr(Sec.Addr) + " cannot be increased by 0x" + Twine::utohexstr(PatternUpdate.Update.Value) + @@ -909,7 +910,7 @@ static Error handleArgs(const CommonConfig &Config, const ELFConfig &ELFConfig, if (!ELFConfig.NotesToRemove.empty()) { if (Error Err = removeNotes(Obj, E, ELFConfig.NotesToRemove, 
Config.ErrorCallback)) - return Err; + return createFileError(Config.InputFilename, std::move(Err)); } for (const NewSectionInfo &AddedSection : Config.AddSection) { @@ -924,7 +925,7 @@ static Error handleArgs(const CommonConfig &Config, const ELFConfig &ELFConfig, return Error::success(); }; if (Error E = handleUserSection(AddedSection, AddSection)) - return E; + return createFileError(Config.InputFilename, std::move(E)); } for (const NewSectionInfo &NewSection : Config.UpdateSection) { @@ -932,7 +933,7 @@ static Error handleArgs(const CommonConfig &Config, const ELFConfig &ELFConfig, return Obj.updateSection(Name, Data); }; if (Error E = handleUserSection(NewSection, UpdateSection)) - return E; + return createFileError(Config.InputFilename, std::move(E)); } if (!Config.AddGnuDebugLink.empty()) @@ -943,7 +944,7 @@ static Error handleArgs(const CommonConfig &Config, const ELFConfig &ELFConfig, // before adding new symbols. if (!Obj.SymbolTable && !Config.SymbolsToAdd.empty()) if (Error E = Obj.addNewSymbolTable()) - return E; + return createFileError(Config.InputFilename, std::move(E)); for (const NewSymbolInfo &SI : Config.SymbolsToAdd) addSymbol(Obj, SI, ELFConfig.NewSymbolVisibility); @@ -955,7 +956,7 @@ static Error handleArgs(const CommonConfig &Config, const ELFConfig &ELFConfig, if (Iter != Config.SetSectionFlags.end()) { const SectionFlagsUpdate &SFU = Iter->second; if (Error E = setSectionFlagsAndType(Sec, SFU.NewFlags, Obj.Machine)) - return E; + return createFileError(Config.InputFilename, std::move(E)); } auto It2 = Config.SetSectionType.find(Sec.Name); if (It2 != Config.SetSectionType.end()) @@ -974,7 +975,7 @@ static Error handleArgs(const CommonConfig &Config, const ELFConfig &ELFConfig, Sec.Name = std::string(SR.NewName); if (SR.NewFlags) { if (Error E = setSectionFlagsAndType(Sec, *SR.NewFlags, Obj.Machine)) - return E; + return createFileError(Config.InputFilename, std::move(E)); } RenamedSections.insert(&Sec); } else if (RelocSec && !(Sec.Flags & SHF_ALLOC)) @@ -1091,7 +1092,7 @@ Error objcopy::elf::executeObjcopyOnBinary(const CommonConfig &Config, : getOutputElfType(In); if (Error E = handleArgs(Config, ELFConfig, OutputElfType, **Obj)) - return createFileError(Config.InputFilename, std::move(E)); + return E; if (Error E = writeOutput(Config, **Obj, Out, OutputElfType)) return createFileError(Config.InputFilename, std::move(E)); diff --git a/llvm/lib/ObjCopy/MachO/MachOObjcopy.cpp b/llvm/lib/ObjCopy/MachO/MachOObjcopy.cpp index a188425b283fa..682edffc84f34 100644 --- a/llvm/lib/ObjCopy/MachO/MachOObjcopy.cpp +++ b/llvm/lib/ObjCopy/MachO/MachOObjcopy.cpp @@ -306,25 +306,25 @@ static Error processLoadCommands(const MachOConfig &MachOConfig, Object &Obj) { } static Error dumpSectionToFile(StringRef SecName, StringRef Filename, - Object &Obj) { + StringRef InputFilename, Object &Obj) { for (LoadCommand &LC : Obj.LoadCommands) for (const std::unique_ptr
&Sec : LC.Sections) { if (Sec->CanonicalName == SecName) { Expected> BufferOrErr = FileOutputBuffer::create(Filename, Sec->Content.size()); if (!BufferOrErr) - return BufferOrErr.takeError(); + return createFileError(Filename, BufferOrErr.takeError()); std::unique_ptr Buf = std::move(*BufferOrErr); llvm::copy(Sec->Content, Buf->getBufferStart()); if (Error E = Buf->commit()) - return E; + return createFileError(Filename, std::move(E)); return Error::success(); } } - return createStringError(object_error::parse_failed, "section '%s' not found", - SecName.str().c_str()); + return createFileError(InputFilename, object_error::parse_failed, + "section '%s' not found", SecName.str().c_str()); } static Error addSection(const NewSectionInfo &NewSection, Object &Obj) { @@ -426,12 +426,13 @@ static Error handleArgs(const CommonConfig &Config, StringRef SectionName; StringRef FileName; std::tie(SectionName, FileName) = Flag.split('='); - if (Error E = dumpSectionToFile(SectionName, FileName, Obj)) + if (Error E = + dumpSectionToFile(SectionName, FileName, Config.InputFilename, Obj)) return E; } if (Error E = removeSections(Config, Obj)) - return E; + return createFileError(Config.InputFilename, std::move(E)); // Mark symbols to determine which symbols are still needed. if (Config.StripAll) @@ -446,20 +447,20 @@ static Error handleArgs(const CommonConfig &Config, for (const NewSectionInfo &NewSection : Config.AddSection) { if (Error E = isValidMachOCannonicalName(NewSection.SectionName)) - return E; + return createFileError(Config.InputFilename, std::move(E)); if (Error E = addSection(NewSection, Obj)) - return E; + return createFileError(Config.InputFilename, std::move(E)); } for (const NewSectionInfo &NewSection : Config.UpdateSection) { if (Error E = isValidMachOCannonicalName(NewSection.SectionName)) - return E; + return createFileError(Config.InputFilename, std::move(E)); if (Error E = updateSection(NewSection, Obj)) - return E; + return createFileError(Config.InputFilename, std::move(E)); } if (Error E = processLoadCommands(MachOConfig, Obj)) - return E; + return createFileError(Config.InputFilename, std::move(E)); return Error::success(); } @@ -479,7 +480,7 @@ Error objcopy::macho::executeObjcopyOnBinary(const CommonConfig &Config, Config.InputFilename.str().c_str()); if (Error E = handleArgs(Config, MachOConfig, **O)) - return createFileError(Config.InputFilename, std::move(E)); + return E; // Page size used for alignment of segment sizes in Mach-O executables and // dynamic libraries. 
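The objcopy changes in this patch consistently wrap failures in createFileError so that diagnostics are prefixed with the file they concern: dumpSectionToFile now reports the output file name for write failures and the input file name for a missing section, and handleArgs attaches Config.InputFilename to the remaining errors instead of relying on a single wrap at the top level. A small sketch of the pattern, assuming only LLVM's Error support headers (the section lookup itself is faked):

// Sketch: build a plain error for the failure, then attach the file name with
// llvm::createFileError so the caller prints "<file>: <message>".
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Errc.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/raw_ostream.h"

static llvm::Error findSection(llvm::StringRef SecName,
                               llvm::StringRef InputFilename) {
  bool Found = false; // stand-in for a real section lookup
  if (!Found)
    return llvm::createFileError(
        InputFilename,
        llvm::createStringError(llvm::errc::invalid_argument,
                                "section '%s' not found",
                                SecName.str().c_str()));
  return llvm::Error::success();
}

int main() {
  if (llvm::Error E = findSection(".note", "input.o"))
    llvm::logAllUnhandledErrors(std::move(E), llvm::errs(), "llvm-objcopy: ");
}

With handleArgs returning already file-qualified errors, executeObjcopyOnBinary no longer re-wraps its result, so each diagnostic carries exactly one file prefix.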
diff --git a/llvm/lib/ObjCopy/wasm/WasmObjcopy.cpp b/llvm/lib/ObjCopy/wasm/WasmObjcopy.cpp index cf3d884bee3bd..57fd0f5ad233c 100644 --- a/llvm/lib/ObjCopy/wasm/WasmObjcopy.cpp +++ b/llvm/lib/ObjCopy/wasm/WasmObjcopy.cpp @@ -38,23 +38,23 @@ static bool isCommentSection(const Section &Sec) { } static Error dumpSectionToFile(StringRef SecName, StringRef Filename, - Object &Obj) { + StringRef InputFilename, Object &Obj) { for (const Section &Sec : Obj.Sections) { if (Sec.Name == SecName) { ArrayRef Contents = Sec.Contents; Expected> BufferOrErr = FileOutputBuffer::create(Filename, Contents.size()); if (!BufferOrErr) - return BufferOrErr.takeError(); + return createFileError(Filename, BufferOrErr.takeError()); std::unique_ptr Buf = std::move(*BufferOrErr); std::copy(Contents.begin(), Contents.end(), Buf->getBufferStart()); if (Error E = Buf->commit()) - return E; + return createFileError(Filename, std::move(E)); return Error::success(); } } - return createStringError(errc::invalid_argument, "section '%s' not found", - SecName.str().c_str()); + return createFileError(Filename, errc::invalid_argument, + "section '%s' not found", SecName.str().c_str()); } static void removeSections(const CommonConfig &Config, Object &Obj) { @@ -115,8 +115,9 @@ static Error handleArgs(const CommonConfig &Config, Object &Obj) { StringRef SecName; StringRef FileName; std::tie(SecName, FileName) = Flag.split("="); - if (Error E = dumpSectionToFile(SecName, FileName, Obj)) - return createFileError(FileName, std::move(E)); + if (Error E = + dumpSectionToFile(SecName, FileName, Config.InputFilename, Obj)) + return E; } removeSections(Config, Obj); diff --git a/llvm/lib/Object/DXContainer.cpp b/llvm/lib/Object/DXContainer.cpp index 3b1a6203a1f8f..f28b096008b2f 100644 --- a/llvm/lib/Object/DXContainer.cpp +++ b/llvm/lib/Object/DXContainer.cpp @@ -10,6 +10,7 @@ #include "llvm/BinaryFormat/DXContainer.h" #include "llvm/Object/Error.h" #include "llvm/Support/Alignment.h" +#include "llvm/Support/Endian.h" #include "llvm/Support/FormatVariadic.h" using namespace llvm; @@ -92,6 +93,15 @@ Error DXContainer::parseHash(StringRef Part) { return Error::success(); } +Error DXContainer::parseRootSignature(StringRef Part) { + if (RootSignature) + return parseFailed("More than one RTS0 part is present in the file"); + RootSignature = DirectX::RootSignature(); + if (Error Err = RootSignature->parse(Part)) + return Err; + return Error::success(); +} + Error DXContainer::parsePSVInfo(StringRef Part) { if (PSVInfo) return parseFailed("More than one PSV0 part is present in the file"); @@ -193,6 +203,10 @@ Error DXContainer::parsePartOffsets() { break; case dxbc::PartType::Unknown: break; + case dxbc::PartType::RTS0: + if (Error Err = parseRootSignature(PartData)) + return Err; + break; } } @@ -228,6 +242,53 @@ void DXContainer::PartIterator::updateIteratorImpl(const uint32_t Offset) { IteratorState.Offset = Offset; } +Error DirectX::RootSignature::parse(StringRef Data) { + const char *Current = Data.begin(); + + // Root Signature headers expects 6 integers to be present. 
+ if (Data.size() < 6 * sizeof(uint32_t)) + return parseFailed( + "Invalid root signature, insufficient space for header."); + + uint32_t VValue = + support::endian::read(Current); + Current += sizeof(uint32_t); + + Expected MaybeVersion = + dxbc::RootSignatureValidations::validateVersion(VValue); + if (Error E = MaybeVersion.takeError()) + return E; + Version = MaybeVersion.get(); + + NumParameters = + support::endian::read(Current); + Current += sizeof(uint32_t); + + RootParametersOffset = + support::endian::read(Current); + Current += sizeof(uint32_t); + + NumStaticSamplers = + support::endian::read(Current); + Current += sizeof(uint32_t); + + StaticSamplersOffset = + support::endian::read(Current); + Current += sizeof(uint32_t); + + uint32_t FValue = + support::endian::read(Current); + Current += sizeof(uint32_t); + + Expected MaybeFlag = + dxbc::RootSignatureValidations::validateRootFlag(FValue); + if (Error E = MaybeFlag.takeError()) + return E; + Flags = MaybeFlag.get(); + + return Error::success(); +} + Error DirectX::PSVRuntimeInfo::parse(uint16_t ShaderKind) { Triple::EnvironmentType ShaderStage = dxbc::getShaderStage(ShaderKind); diff --git a/llvm/lib/ObjectYAML/DWARFEmitter.cpp b/llvm/lib/ObjectYAML/DWARFEmitter.cpp index 421dfe7dfa30e..ec5e08082b0ca 100644 --- a/llvm/lib/ObjectYAML/DWARFEmitter.cpp +++ b/llvm/lib/ObjectYAML/DWARFEmitter.cpp @@ -96,12 +96,11 @@ Error DWARFYAML::emitDebugStr(raw_ostream &OS, const DWARFYAML::Data &DI) { StringRef DWARFYAML::Data::getAbbrevTableContentByIndex(uint64_t Index) const { assert(Index < DebugAbbrev.size() && "Index should be less than the size of DebugAbbrev array"); - auto It = AbbrevTableContents.find(Index); - if (It != AbbrevTableContents.cend()) + auto [It, Inserted] = AbbrevTableContents.try_emplace(Index); + if (!Inserted) return It->second; - std::string AbbrevTableBuffer; - raw_string_ostream OS(AbbrevTableBuffer); + raw_string_ostream OS(It->second); uint64_t AbbrevCode = 0; for (const DWARFYAML::Abbrev &AbbrevDecl : DebugAbbrev[Index].Table) { @@ -123,9 +122,7 @@ StringRef DWARFYAML::Data::getAbbrevTableContentByIndex(uint64_t Index) const { // consisting of a 0 byte for the abbreviation code. OS.write_zeros(1); - AbbrevTableContents.insert({Index, AbbrevTableBuffer}); - - return AbbrevTableContents[Index]; + return It->second; } Error DWARFYAML::emitDebugAbbrev(raw_ostream &OS, const DWARFYAML::Data &DI) { diff --git a/llvm/lib/ObjectYAML/DXContainerEmitter.cpp b/llvm/lib/ObjectYAML/DXContainerEmitter.cpp index 175f1a12f9314..b7d1c6558fa1f 100644 --- a/llvm/lib/ObjectYAML/DXContainerEmitter.cpp +++ b/llvm/lib/ObjectYAML/DXContainerEmitter.cpp @@ -13,6 +13,7 @@ #include "llvm/BinaryFormat/DXContainer.h" #include "llvm/MC/DXContainerPSVInfo.h" +#include "llvm/MC/DXContainerRootSignature.h" #include "llvm/ObjectYAML/ObjectYAML.h" #include "llvm/ObjectYAML/yaml2obj.h" #include "llvm/Support/Errc.h" @@ -261,6 +262,20 @@ void DXContainerWriter::writeParts(raw_ostream &OS) { } case dxbc::PartType::Unknown: break; // Skip any handling for unrecognized parts. 
+ case dxbc::PartType::RTS0: + if (!P.RootSignature.has_value()) + continue; + + mcdxbc::RootSignatureHeader Header; + Header.Flags = P.RootSignature->getEncodedFlags(); + Header.Version = P.RootSignature->Version; + Header.NumParameters = P.RootSignature->NumParameters; + Header.RootParametersOffset = P.RootSignature->RootParametersOffset; + Header.NumStaticSamplers = P.RootSignature->NumStaticSamplers; + Header.StaticSamplersOffset = P.RootSignature->StaticSamplersOffset; + + Header.write(OS); + break; } uint64_t BytesWritten = OS.tell() - DataStart; RollingOffset += BytesWritten; diff --git a/llvm/lib/ObjectYAML/DXContainerYAML.cpp b/llvm/lib/ObjectYAML/DXContainerYAML.cpp index 5dee1221b27c0..0869fd4fa9785 100644 --- a/llvm/lib/ObjectYAML/DXContainerYAML.cpp +++ b/llvm/lib/ObjectYAML/DXContainerYAML.cpp @@ -29,6 +29,27 @@ DXContainerYAML::ShaderFeatureFlags::ShaderFeatureFlags(uint64_t FlagData) { #include "llvm/BinaryFormat/DXContainerConstants.def" } +DXContainerYAML::RootSignatureDesc::RootSignatureDesc( + const object::DirectX::RootSignature &Data) + : Version(Data.getVersion()), NumParameters(Data.getNumParameters()), + RootParametersOffset(Data.getRootParametersOffset()), + NumStaticSamplers(Data.getNumStaticSamplers()), + StaticSamplersOffset(Data.getStaticSamplersOffset()) { + uint32_t Flags = Data.getFlags(); +#define ROOT_ELEMENT_FLAG(Num, Val) \ + Val = (Flags & (uint32_t)dxbc::RootElementFlag::Val) > 0; +#include "llvm/BinaryFormat/DXContainerConstants.def" +} + +uint32_t DXContainerYAML::RootSignatureDesc::getEncodedFlags() { + uint64_t Flag = 0; +#define ROOT_ELEMENT_FLAG(Num, Val) \ + if (Val) \ + Flag |= (uint32_t)dxbc::RootElementFlag::Val; +#include "llvm/BinaryFormat/DXContainerConstants.def" + return Flag; +} + uint64_t DXContainerYAML::ShaderFeatureFlags::getEncodedFlags() { uint64_t Flag = 0; #define SHADER_FEATURE_FLAG(Num, DxilModuleNum, Val, Str) \ @@ -188,6 +209,17 @@ void MappingTraits::mapping( IO.mapRequired("Parameters", S.Parameters); } +void MappingTraits::mapping( + IO &IO, DXContainerYAML::RootSignatureDesc &S) { + IO.mapRequired("Version", S.Version); + IO.mapRequired("NumParameters", S.NumParameters); + IO.mapRequired("RootParametersOffset", S.RootParametersOffset); + IO.mapRequired("NumStaticSamplers", S.NumStaticSamplers); + IO.mapRequired("StaticSamplersOffset", S.StaticSamplersOffset); +#define ROOT_ELEMENT_FLAG(Num, Val) IO.mapOptional(#Val, S.Val, false); +#include "llvm/BinaryFormat/DXContainerConstants.def" +} + void MappingTraits::mapping(IO &IO, DXContainerYAML::Part &P) { IO.mapRequired("Name", P.Name); @@ -197,6 +229,7 @@ void MappingTraits::mapping(IO &IO, IO.mapOptional("Hash", P.Hash); IO.mapOptional("PSVInfo", P.Info); IO.mapOptional("Signature", P.Signature); + IO.mapOptional("RootSignature", P.RootSignature); } void MappingTraits::mapping( diff --git a/llvm/lib/Passes/PassBuilder.cpp b/llvm/lib/Passes/PassBuilder.cpp index 650d23ac1d5ef..e7ba7213a76fe 100644 --- a/llvm/lib/Passes/PassBuilder.cpp +++ b/llvm/lib/Passes/PassBuilder.cpp @@ -119,7 +119,6 @@ #include "llvm/CodeGen/MachinePassManager.h" #include "llvm/CodeGen/MachinePostDominators.h" #include "llvm/CodeGen/MachineRegisterInfo.h" -#include "llvm/CodeGen/MachineScheduler.h" #include "llvm/CodeGen/MachineTraceMetrics.h" #include "llvm/CodeGen/MachineVerifier.h" #include "llvm/CodeGen/OptimizePHIs.h" diff --git a/llvm/lib/Passes/PassBuilderPipelines.cpp b/llvm/lib/Passes/PassBuilderPipelines.cpp index c6cf6cdbe9390..63e70d7e182bd 100644 --- 
a/llvm/lib/Passes/PassBuilderPipelines.cpp +++ b/llvm/lib/Passes/PassBuilderPipelines.cpp @@ -200,9 +200,9 @@ static cl::opt ExtraVectorizerPasses( static cl::opt RunNewGVN("enable-newgvn", cl::init(false), cl::Hidden, cl::desc("Run the NewGVN pass")); -static cl::opt EnableLoopInterchange( - "enable-loopinterchange", cl::init(false), cl::Hidden, - cl::desc("Enable the experimental LoopInterchange Pass")); +static cl::opt + EnableLoopInterchange("enable-loopinterchange", cl::init(false), cl::Hidden, + cl::desc("Enable the LoopInterchange Pass")); static cl::opt EnableUnrollAndJam("enable-unroll-and-jam", cl::init(false), cl::Hidden, @@ -316,6 +316,7 @@ PipelineTuningOptions::PipelineTuningOptions() { LoopVectorization = true; SLPVectorization = false; LoopUnrolling = true; + LoopInterchange = EnableLoopInterchange; ForgetAllSCEVInLoopUnroll = ForgetSCEVInLoopUnroll; LicmMssaOptCap = SetLicmMssaOptCap; LicmMssaNoAccForPromotionCap = SetLicmMssaNoAccForPromotionCap; @@ -485,7 +486,7 @@ PassBuilder::buildO1FunctionSimplificationPipeline(OptimizationLevel Level, LPM2.addPass(LoopDeletionPass()); - if (EnableLoopInterchange) + if (PTO.LoopInterchange) LPM2.addPass(LoopInterchangePass()); // Do not enable unrolling in PreLinkThinLTO phase during sample PGO @@ -676,7 +677,7 @@ PassBuilder::buildFunctionSimplificationPipeline(OptimizationLevel Level, LPM2.addPass(LoopDeletionPass()); - if (EnableLoopInterchange) + if (PTO.LoopInterchange) LPM2.addPass(LoopInterchangePass()); // Do not enable unrolling in PreLinkThinLTO phase during sample PGO @@ -969,7 +970,7 @@ PassBuilder::buildInlinerPipeline(OptimizationLevel Level, // Try to perform OpenMP specific optimizations. This is a (quick!) no-op if // there are no OpenMP runtime calls present in the module. if (Level == OptimizationLevel::O2 || Level == OptimizationLevel::O3) - MainCGPipeline.addPass(OpenMPOptCGSCCPass()); + MainCGPipeline.addPass(OpenMPOptCGSCCPass(Phase)); invokeCGSCCOptimizerLateEPCallbacks(MainCGPipeline, Level); @@ -1137,7 +1138,7 @@ PassBuilder::buildModuleSimplificationPipeline(OptimizationLevel Level, // Try to perform OpenMP specific optimizations on the module. This is a // (quick!) no-op if there are no OpenMP runtime calls present in the module. - MPM.addPass(OpenMPOptPass()); + MPM.addPass(OpenMPOptPass(Phase)); if (AttributorRun & AttributorRunOption::MODULE) MPM.addPass(AttributorPass()); diff --git a/llvm/lib/SandboxIR/Tracker.cpp b/llvm/lib/SandboxIR/Tracker.cpp index 5fa9f181055ca..4fa9e11ae0d4e 100644 --- a/llvm/lib/SandboxIR/Tracker.cpp +++ b/llvm/lib/SandboxIR/Tracker.cpp @@ -347,13 +347,14 @@ void Tracker::save() { void Tracker::revert() { assert(State == TrackerState::Record && "Forgot to save()!"); - State = TrackerState::Disabled; + State = TrackerState::Reverting; for (auto &Change : reverse(Changes)) Change->revert(*this); Changes.clear(); #if !defined(NDEBUG) && defined(EXPENSIVE_CHECKS) SnapshotChecker.expectNoDiff(); #endif + State = TrackerState::Disabled; } void Tracker::accept() { diff --git a/llvm/lib/Support/Unix/Signals.inc b/llvm/lib/Support/Unix/Signals.inc index 2e7b467a14bbe..30e5f40193974 100644 --- a/llvm/lib/Support/Unix/Signals.inc +++ b/llvm/lib/Support/Unix/Signals.inc @@ -80,7 +80,7 @@ using namespace llvm; -static void SignalHandler(int Sig); // defined below. +static void SignalHandler(int Sig, siginfo_t *Info, void *); static void InfoSignalHandler(int Sig); // defined below. 
using SignalHandlerFunctionType = void (*)(); @@ -313,8 +313,8 @@ static void RegisterHandlers() { // Not signal-safe. switch (Kind) { case SignalKind::IsKill: - NewHandler.sa_handler = SignalHandler; - NewHandler.sa_flags = SA_NODEFER | SA_RESETHAND | SA_ONSTACK; + NewHandler.sa_sigaction = SignalHandler; + NewHandler.sa_flags = SA_NODEFER | SA_RESETHAND | SA_ONSTACK | SA_SIGINFO; break; case SignalKind::IsInfo: NewHandler.sa_handler = InfoSignalHandler; @@ -370,7 +370,7 @@ void sys::CleanupOnSignal(uintptr_t Context) { } // The signal handler that runs. -static void SignalHandler(int Sig) { +static void SignalHandler(int Sig, siginfo_t *Info, void *) { // Restore the signal behavior to default, so that the program actually // crashes when we return and the signal reissues. This also ensures that if // we crash in our signal handler that the program will terminate immediately @@ -412,6 +412,11 @@ static void SignalHandler(int Sig) { if (Sig == SIGILL || Sig == SIGFPE || Sig == SIGTRAP) raise(Sig); #endif + + // Signal sent from another process, do not assume that continuing the + // execution would re-raise it. + if (Info->si_pid != getpid()) + raise(Sig); } static void InfoSignalHandler(int Sig) { diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp index 8b8299f9e4911..34464d317beaf 100644 --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -735,19 +735,19 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM, setOperationAction(ISD::FCOPYSIGN, MVT::bf16, Promote); } - for (auto Op : {ISD::FREM, ISD::FPOW, ISD::FPOWI, - ISD::FCOS, ISD::FSIN, ISD::FSINCOS, - ISD::FACOS, ISD::FASIN, ISD::FATAN, - ISD::FATAN2, ISD::FCOSH, ISD::FSINH, - ISD::FTANH, ISD::FTAN, ISD::FEXP, - ISD::FEXP2, ISD::FEXP10, ISD::FLOG, - ISD::FLOG2, ISD::FLOG10, ISD::STRICT_FREM, - ISD::STRICT_FPOW, ISD::STRICT_FPOWI, ISD::STRICT_FCOS, - ISD::STRICT_FSIN, ISD::STRICT_FACOS, ISD::STRICT_FASIN, - ISD::STRICT_FATAN, ISD::STRICT_FATAN2, ISD::STRICT_FCOSH, - ISD::STRICT_FSINH, ISD::STRICT_FTANH, ISD::STRICT_FEXP, - ISD::STRICT_FEXP2, ISD::STRICT_FLOG, ISD::STRICT_FLOG2, - ISD::STRICT_FLOG10, ISD::STRICT_FTAN}) { + for (auto Op : {ISD::FREM, ISD::FPOW, ISD::FPOWI, + ISD::FCOS, ISD::FSIN, ISD::FSINCOS, + ISD::FMODF, ISD::FACOS, ISD::FASIN, + ISD::FATAN, ISD::FATAN2, ISD::FCOSH, + ISD::FSINH, ISD::FTANH, ISD::FTAN, + ISD::FEXP, ISD::FEXP2, ISD::FEXP10, + ISD::FLOG, ISD::FLOG2, ISD::FLOG10, + ISD::STRICT_FREM, ISD::STRICT_FPOW, ISD::STRICT_FPOWI, + ISD::STRICT_FCOS, ISD::STRICT_FSIN, ISD::STRICT_FACOS, + ISD::STRICT_FASIN, ISD::STRICT_FATAN, ISD::STRICT_FATAN2, + ISD::STRICT_FCOSH, ISD::STRICT_FSINH, ISD::STRICT_FTANH, + ISD::STRICT_FEXP, ISD::STRICT_FEXP2, ISD::STRICT_FLOG, + ISD::STRICT_FLOG2, ISD::STRICT_FLOG10, ISD::STRICT_FTAN}) { setOperationAction(Op, MVT::f16, Promote); setOperationAction(Op, MVT::v4f16, Expand); setOperationAction(Op, MVT::v8f16, Expand); @@ -22363,6 +22363,9 @@ static SDValue performZExtDeinterleaveShuffleCombine(SDNode *N, if (!IsDeInterleave) IsUndefDeInterleave = Shuffle->getOperand(1).isUndef() && + all_of( + Shuffle->getMask().slice(ExtOffset, VT.getVectorNumElements() / 2), + [](int M) { return M < 0; }) && ShuffleVectorInst::isDeInterleaveMaskOfFactor( Shuffle->getMask().slice(ExtOffset + VT.getVectorNumElements() / 2, VT.getVectorNumElements() / 2), diff --git a/llvm/lib/Target/AArch64/AArch64Subtarget.cpp b/llvm/lib/Target/AArch64/AArch64Subtarget.cpp index 
68c386585a79a..bb36af8fce5cc 100644 --- a/llvm/lib/Target/AArch64/AArch64Subtarget.cpp +++ b/llvm/lib/Target/AArch64/AArch64Subtarget.cpp @@ -130,7 +130,12 @@ void AArch64Subtarget::initializeProperties(bool HasMinSize) { // this in the future so we can specify it together with the subtarget // features. switch (ARMProcFamily) { - case Others: + case Generic: + // Using TuneCPU=generic we avoid ldapur instructions to line up with the + // cpus that use the AvoidLDAPUR feature. We don't want this to be on + // forever, so it is enabled between armv8.4 and armv8.7/armv9.2. + if (hasV8_4aOps() && !hasV8_8aOps()) + AvoidLDAPUR = true; break; case Carmel: CacheLineSize = 64; diff --git a/llvm/lib/Target/AArch64/AArch64Subtarget.h b/llvm/lib/Target/AArch64/AArch64Subtarget.h index e7757907a6643..c6eb77e3bc3ba 100644 --- a/llvm/lib/Target/AArch64/AArch64Subtarget.h +++ b/llvm/lib/Target/AArch64/AArch64Subtarget.h @@ -38,7 +38,7 @@ class Triple; class AArch64Subtarget final : public AArch64GenSubtargetInfo { public: enum ARMProcFamilyEnum : uint8_t { - Others, + Generic, #define ARM_PROCESSOR_FAMILY(ENUM) ENUM, #include "llvm/TargetParser/AArch64TargetParserDef.inc" #undef ARM_PROCESSOR_FAMILY @@ -46,7 +46,7 @@ class AArch64Subtarget final : public AArch64GenSubtargetInfo { protected: /// ARMProcFamily - ARM processor family: Cortex-A53, Cortex-A57, and others. - ARMProcFamilyEnum ARMProcFamily = Others; + ARMProcFamilyEnum ARMProcFamily = Generic; // Enable 64-bit vectorization in SLP. unsigned MinVectorRegisterBitWidth = 64; diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp index cc2f097573d91..325056c781a53 100644 --- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp +++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp @@ -4282,7 +4282,7 @@ void AArch64TTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE, // If mcpu is omitted, getProcFamily() returns AArch64Subtarget::Others, so by // checking for that case, we can ensure that the default behaviour is // unchanged - if (ST->getProcFamily() != AArch64Subtarget::Others && + if (ST->getProcFamily() != AArch64Subtarget::Generic && !ST->getSchedModel().isOutOfOrder()) { UP.Runtime = true; UP.Partial = true; diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp index fffd30b26dc1d..c6d36fde9730a 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp @@ -69,7 +69,6 @@ #include "llvm/CodeGen/MIRParser/MIParser.h" #include "llvm/CodeGen/MachineCSE.h" #include "llvm/CodeGen/MachineLICM.h" -#include "llvm/CodeGen/MachineScheduler.h" #include "llvm/CodeGen/Passes.h" #include "llvm/CodeGen/RegAllocRegistry.h" #include "llvm/CodeGen/TargetPassConfig.h" @@ -1932,7 +1931,6 @@ AMDGPUCodeGenPassBuilder::AMDGPUCodeGenPassBuilder( GCNTargetMachine &TM, const CGPassBuilderOption &Opts, PassInstrumentationCallbacks *PIC) : CodeGenPassBuilder(TM, Opts, PIC) { - Opt.MISchedPostRA = true; Opt.RequiresCodeGenSCCOrder = true; // Exceptions and StackMaps are not supported, so these passes will never do // anything. 
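The AArch64Subtarget hunk above only turns on the new AvoidLDAPUR tuning for the generic target when the architecture falls in the v8.4–v8.7 window (v8.4 ops available, v8.8 ops not). A minimal standalone sketch of that version-window gating follows; the struct, the numeric version fields, and main are illustrative stand-ins rather than the real AArch64Subtarget API, which keys off subtarget feature bits instead of version numbers.

struct TuningSketch {
  // Illustrative stand-ins for the feature queries used in the patch; the
  // real code checks the HasV8_4aOps / HasV8_8aOps subtarget features.
  unsigned Major = 8, Minor = 5;
  bool AvoidLDAPUR = false;

  bool hasV8_4aOps() const { return Major > 8 || (Major == 8 && Minor >= 4); }
  bool hasV8_8aOps() const { return Major > 8 || (Major == 8 && Minor >= 8); }

  // Mirrors the intent of the Generic case above: enable the workaround
  // only between v8.4 and v8.8.
  void initializeTuning() {
    if (hasV8_4aOps() && !hasV8_8aOps())
      AvoidLDAPUR = true;
  }
};

int main() {
  TuningSketch ST;
  ST.initializeTuning();
  return ST.AvoidLDAPUR ? 0 : 1; // exits 0 for the v8.5 example above
}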
diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp index 6fc57dec6a826..71c720ed09b5f 100644 --- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp +++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp @@ -3516,30 +3516,6 @@ bool SIRegisterInfo::opCanUseInlineConstant(unsigned OpType) const { OpType <= AMDGPU::OPERAND_SRC_LAST; } -bool SIRegisterInfo::shouldRewriteCopySrc( - const TargetRegisterClass *DefRC, - unsigned DefSubReg, - const TargetRegisterClass *SrcRC, - unsigned SrcSubReg) const { - // We want to prefer the smallest register class possible, so we don't want to - // stop and rewrite on anything that looks like a subregister - // extract. Operations mostly don't care about the super register class, so we - // only want to stop on the most basic of copies between the same register - // class. - // - // e.g. if we have something like - // %0 = ... - // %1 = ... - // %2 = REG_SEQUENCE %0, sub0, %1, sub1, %2, sub2 - // %3 = COPY %2, sub0 - // - // We want to look through the COPY to find: - // => %3 = COPY %0 - - // Plain copy. - return getCommonSubClass(DefRC, SrcRC) != nullptr; -} - bool SIRegisterInfo::opCanUseLiteralConstant(unsigned OpType) const { // TODO: 64-bit operands have extending behavior from 32-bit literal. return OpType >= AMDGPU::OPERAND_REG_IMM_FIRST && diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.h b/llvm/lib/Target/AMDGPU/SIRegisterInfo.h index 8e481e3ac2304..a434efb70d052 100644 --- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.h +++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.h @@ -275,11 +275,6 @@ class SIRegisterInfo final : public AMDGPUGenRegisterInfo { const TargetRegisterClass *SubRC, unsigned SubIdx) const; - bool shouldRewriteCopySrc(const TargetRegisterClass *DefRC, - unsigned DefSubReg, - const TargetRegisterClass *SrcRC, - unsigned SrcSubReg) const override; - /// \returns True if operands defined with this operand type can accept /// a literal constant (i.e. any 32-bit immediate). 
bool opCanUseLiteralConstant(unsigned OpType) const; diff --git a/llvm/lib/Target/AMDGPU/VOP3Instructions.td b/llvm/lib/Target/AMDGPU/VOP3Instructions.td index ce73e0ca361d9..afafc2ecccfaf 100644 --- a/llvm/lib/Target/AMDGPU/VOP3Instructions.td +++ b/llvm/lib/Target/AMDGPU/VOP3Instructions.td @@ -212,7 +212,13 @@ defm V_BFE_U32 : VOP3Inst <"v_bfe_u32", VOP3_Profile, AMDGP defm V_BFE_I32 : VOP3Inst <"v_bfe_i32", VOP3_Profile, AMDGPUbfe_i32>; defm V_BFI_B32 : VOP3Inst <"v_bfi_b32", VOP3_Profile, AMDGPUbfi>; defm V_ALIGNBIT_B32 : VOP3Inst <"v_alignbit_b32", VOP3_Profile, fshr>; + +let True16Predicate = NotHasTrue16BitInsts in defm V_ALIGNBYTE_B32 : VOP3Inst <"v_alignbyte_b32", VOP3_Profile, int_amdgcn_alignbyte>; +let True16Predicate = UseRealTrue16Insts in +defm V_ALIGNBYTE_B32_t16 : VOP3Inst <"v_alignbyte_b32_t16", VOP3_Profile_True16>; +let True16Predicate = UseFakeTrue16Insts in +defm V_ALIGNBYTE_B32_fake16 : VOP3Inst <"v_alignbyte_b32_fake16", VOP3_Profile_Fake16>; // XXX - No FPException seems suspect but manual doesn't say it does let mayRaiseFPException = 0 in { @@ -250,6 +256,25 @@ let SchedRW = [WriteDoubleAdd], FPDPRounding = 1 in { } // End SchedRW = [WriteDoubleAdd], FPDPRounding = 1 } // End isReMaterializable = 1 +let True16Predicate = UseFakeTrue16Insts in +def : GCNPat < +(i32 (int_amdgcn_alignbyte (i32 (VOP3OpSelMods i32:$src0, i32:$src0_modifiers)), + (i32 (VOP3OpSelMods i32:$src1, i32:$src1_modifiers)), + (i32 (VOP3OpSelMods i32:$src2, i32:$src2_modifiers)))), +(V_ALIGNBYTE_B32_fake16_e64 i32:$src0_modifiers, VSrc_b32:$src0, + i32:$src1_modifiers, VSrc_b32:$src1, + i32:$src2_modifiers, VGPR_32:$src2) +>; + +let True16Predicate = UseRealTrue16Insts in +def : GCNPat < +(i32 (int_amdgcn_alignbyte (i32 (VOP3OpSelMods i32:$src0, i32:$src0_modifiers)), + (i32 (VOP3OpSelMods i32:$src1, i32:$src1_modifiers)), + (i32 (VOP3OpSelMods i32:$src2, i32:$src2_modifiers)))), +(V_ALIGNBYTE_B32_t16_e64 i32:$src0_modifiers, VSrc_b32:$src0, + i32:$src1_modifiers, VSrc_b32:$src1, + i32:$src2_modifiers, (i16 (EXTRACT_SUBREG VGPR_32:$src2, lo16))) +>; let mayRaiseFPException = 0 in { // Seems suspicious but manual doesn't say it does. 
let SchedRW = [WriteFloatFMA, WriteSALU] in @@ -1690,7 +1715,7 @@ defm V_FMA_F32 : VOP3_Realtriple_gfx11_gfx12<0x213>; defm V_FMA_F64 : VOP3_Real_Base_gfx11_gfx12<0x214>; defm V_LERP_U8 : VOP3_Realtriple_gfx11_gfx12<0x215>; defm V_ALIGNBIT_B32 : VOP3_Realtriple_gfx11_gfx12<0x216>; -defm V_ALIGNBYTE_B32 : VOP3_Realtriple_gfx11_gfx12<0x217>; +defm V_ALIGNBYTE_B32 : VOP3_Realtriple_t16_and_fake16_gfx11_gfx12<0x217, "v_alignbyte_b32">; defm V_MULLIT_F32 : VOP3_Realtriple_gfx11_gfx12<0x218>; defm V_MIN3_F32 : VOP3_Realtriple_gfx11<0x219>; defm V_MIN3_I32 : VOP3_Realtriple_gfx11_gfx12<0x21a>; diff --git a/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp index dad91c6a969e8..325dfb33762a6 100644 --- a/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp +++ b/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp @@ -8652,6 +8652,37 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst, "coprocessor must be configured as GCP"); break; } + + case ARM::VTOSHH: + case ARM::VTOUHH: + case ARM::VTOSLH: + case ARM::VTOULH: + case ARM::VTOSHS: + case ARM::VTOUHS: + case ARM::VTOSLS: + case ARM::VTOULS: + case ARM::VTOSHD: + case ARM::VTOUHD: + case ARM::VTOSLD: + case ARM::VTOULD: + case ARM::VSHTOH: + case ARM::VUHTOH: + case ARM::VSLTOH: + case ARM::VULTOH: + case ARM::VSHTOS: + case ARM::VUHTOS: + case ARM::VSLTOS: + case ARM::VULTOS: + case ARM::VSHTOD: + case ARM::VUHTOD: + case ARM::VSLTOD: + case ARM::VULTOD: { + if (Operands[MnemonicOpsEndInd]->getReg() != + Operands[MnemonicOpsEndInd + 1]->getReg()) + return Error(Operands[MnemonicOpsEndInd]->getStartLoc(), + "source and destination registers must be the same"); + break; + } } return false; diff --git a/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp b/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp index 6e2886a192923..dfaad24e1b877 100644 --- a/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp +++ b/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp @@ -1395,7 +1395,7 @@ static MCAsmBackend *createARMAsmBackend(const Target &T, case Triple::ELF: assert(TheTriple.isOSBinFormatELF() && "using ELF for non-ELF target"); uint8_t OSABI = Options.FDPIC - ? ELF::ELFOSABI_ARM_FDPIC + ? 
static_cast(ELF::ELFOSABI_ARM_FDPIC) : MCELFObjectTargetWriter::getOSABI(TheTriple.getOS()); return new ARMAsmBackendELF(T, STI.getTargetTriple().isThumb(), OSABI, Endian); diff --git a/llvm/lib/Target/Hexagon/HexagonMachineFunctionInfo.h b/llvm/lib/Target/Hexagon/HexagonMachineFunctionInfo.h index cb4a7a8c393fc..c5df02fa3b89c 100644 --- a/llvm/lib/Target/Hexagon/HexagonMachineFunctionInfo.h +++ b/llvm/lib/Target/Hexagon/HexagonMachineFunctionInfo.h @@ -70,12 +70,12 @@ class HexagonMachineFunctionInfo : public MachineFunctionInfo { PacketInfo[MI] |= Hexagon::EndPacket; } bool isStartPacket(const MachineInstr* MI) const { - return (PacketInfo.count(MI) && - (PacketInfo.find(MI)->second & Hexagon::StartPacket)); + auto It = PacketInfo.find(MI); + return It != PacketInfo.end() && (It->second & Hexagon::StartPacket); } bool isEndPacket(const MachineInstr* MI) const { - return (PacketInfo.count(MI) && - (PacketInfo.find(MI)->second & Hexagon::EndPacket)); + auto It = PacketInfo.find(MI); + return It != PacketInfo.end() && (It->second & Hexagon::EndPacket); } void setHasClobberLR(bool v) { HasClobberLR = v; } bool hasClobberLR() const { return HasClobberLR; } diff --git a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td index 06c629c01d9ab..5331f36ad0999 100644 --- a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td +++ b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td @@ -7622,4 +7622,51 @@ multiclass TCGEN05_RELINQ_PERMIT_INTR { defm TCGEN05_RELINQ_CG1: TCGEN05_RELINQ_PERMIT_INTR<"1", int_nvvm_tcgen05_relinq_alloc_permit_cg1>; defm TCGEN05_RELINQ_CG2: TCGEN05_RELINQ_PERMIT_INTR<"2", int_nvvm_tcgen05_relinq_alloc_permit_cg2>; +def tcgen05_wait_ld: NVPTXInst<(outs), (ins), "tcgen05.wait::ld.sync.aligned;", + [(int_nvvm_tcgen05_wait_ld)]>, + Requires<[hasTcgen05Instructions]>; + +def tcgen05_wait_st: NVPTXInst<(outs), (ins), "tcgen05.wait::st.sync.aligned;", + [(int_nvvm_tcgen05_wait_st)]>, + Requires<[hasTcgen05Instructions]>; + +multiclass TCGEN05_COMMIT_INTR { + defvar prefix = "tcgen05.commit.cta_group::" # num; + defvar suffix = ".mbarrier::arrive::one.shared::cluster"; + + defvar intr_suffix = !if(!eq(AS, "shared"), "_shared", "") # "_cg" # num; + defvar Intr = !cast("int_nvvm_tcgen05_commit" # intr_suffix); + defvar IntrMC = !cast("int_nvvm_tcgen05_commit_mc" # intr_suffix); + + def NAME : NVPTXInst<(outs), (ins rc:$mbar), + !strconcat(prefix, suffix, ".b64 [$mbar];"), + [(Intr rc:$mbar)]>, + Requires<[hasTcgen05Instructions]>; + def NAME # _MC : NVPTXInst<(outs), (ins rc:$mbar, Int16Regs:$mc), + !strconcat(prefix, suffix, ".multicast::cluster.b64 [$mbar], $mc;"), + [(IntrMC rc:$mbar, Int16Regs:$mc)]>, + Requires<[hasTcgen05Instructions]>; +} + +defm TCGEN05_COMMIT_CG1 : TCGEN05_COMMIT_INTR; +defm TCGEN05_COMMIT_CG2 : TCGEN05_COMMIT_INTR; +defm TCGEN05_COMMIT_S64_CG1 : TCGEN05_COMMIT_INTR; +defm TCGEN05_COMMIT_S64_CG2 : TCGEN05_COMMIT_INTR; +defm TCGEN05_COMMIT_S32_CG1 : TCGEN05_COMMIT_INTR; +defm TCGEN05_COMMIT_S32_CG2 : TCGEN05_COMMIT_INTR; + } // isConvergent + +let hasSideEffects = 1 in { + +def tcgen05_fence_before_thread_sync: NVPTXInst<(outs), (ins), + "tcgen05.fence::before_thread_sync;", + [(int_nvvm_tcgen05_fence_before_thread_sync)]>, + Requires<[hasTcgen05Instructions]>; + +def tcgen05_fence_after_thread_sync: NVPTXInst<(outs), (ins), + "tcgen05.fence::after_thread_sync;", + [(int_nvvm_tcgen05_fence_after_thread_sync)]>, + Requires<[hasTcgen05Instructions]>; + +} // hasSideEffects diff --git a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp 
b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp index c51c4201ebd18..ac87d72b7595c 100644 --- a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp +++ b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp @@ -130,7 +130,7 @@ class RISCVAsmParser : public MCTargetAsmParser { void emitToStreamer(MCStreamer &S, const MCInst &Inst); // Helper to emit a combination of LUI, ADDI(W), and SLLI instructions that - // synthesize the desired immedate value into the destination register. + // synthesize the desired immediate value into the destination register. void emitLoadImm(MCRegister DestReg, int64_t Value, MCStreamer &Out); // Helper to emit a combination of AUIPC and SecondOpcode. Used to implement @@ -280,7 +280,7 @@ class RISCVAsmParser : public MCTargetAsmParser { std::unique_ptr defaultFRMArgLegacyOp() const; public: - enum RISCVMatchResultTy { + enum RISCVMatchResultTy : unsigned { Match_Dummy = FIRST_TARGET_MATCH_RESULT_TY, #define GET_OPERAND_DIAGNOSTIC_TYPES #include "RISCVGenAsmMatcher.inc" @@ -1116,18 +1116,21 @@ struct RISCVOperand final : public MCParsedAsmOperand { switch (Kind) { case KindTy::Immediate: - OS << *getImm(); + OS << ""; break; case KindTy::FPImmediate: + OS << ""; break; case KindTy::Register: - OS << ""; + OS << "" : ")>"); break; case KindTy::Token: OS << "'" << getToken() << "'"; break; case KindTy::SystemRegister: - OS << "'; + OS << ""; break; case KindTy::VType: OS << "::max(), "operand either must be a bare symbol name or an immediate integer in " "the range"); - case Match_InvalidImmZero: { - SMLoc ErrorLoc = ((RISCVOperand &)*Operands[ErrorInfo]).getStartLoc(); - return Error(ErrorLoc, "immediate must be zero"); - } case Match_InvalidUImmLog2XLen: if (isRV64()) return generateImmOutOfRangeError(Operands, ErrorInfo, 0, (1 << 6) - 1); @@ -1654,47 +1653,10 @@ bool RISCVAsmParser::matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, "operand must be a valid system register " "name or an integer in the range"); } - case Match_InvalidLoadFPImm: { - SMLoc ErrorLoc = ((RISCVOperand &)*Operands[ErrorInfo]).getStartLoc(); - return Error(ErrorLoc, "operand must be a valid floating-point constant"); - } - case Match_InvalidBareSymbol: { - SMLoc ErrorLoc = ((RISCVOperand &)*Operands[ErrorInfo]).getStartLoc(); - return Error(ErrorLoc, "operand must be a bare symbol name"); - } - case Match_InvalidPseudoJumpSymbol: { - SMLoc ErrorLoc = ((RISCVOperand &)*Operands[ErrorInfo]).getStartLoc(); - return Error(ErrorLoc, "operand must be a valid jump target"); - } - case Match_InvalidCallSymbol: { - SMLoc ErrorLoc = ((RISCVOperand &)*Operands[ErrorInfo]).getStartLoc(); - return Error(ErrorLoc, "operand must be a bare symbol name"); - } - case Match_InvalidTPRelAddSymbol: { - SMLoc ErrorLoc = ((RISCVOperand &)*Operands[ErrorInfo]).getStartLoc(); - return Error(ErrorLoc, "operand must be a symbol with %tprel_add modifier"); - } - case Match_InvalidTLSDESCCallSymbol: { - SMLoc ErrorLoc = ((RISCVOperand &)*Operands[ErrorInfo]).getStartLoc(); - return Error(ErrorLoc, - "operand must be a symbol with %tlsdesc_call modifier"); - } - case Match_InvalidRTZArg: { - SMLoc ErrorLoc = ((RISCVOperand &)*Operands[ErrorInfo]).getStartLoc(); - return Error(ErrorLoc, "operand must be 'rtz' floating-point rounding mode"); - } case Match_InvalidVTypeI: { SMLoc ErrorLoc = ((RISCVOperand &)*Operands[ErrorInfo]).getStartLoc(); return generateVTypeError(ErrorLoc); } - case Match_InvalidVMaskRegister: { - SMLoc ErrorLoc = ((RISCVOperand &)*Operands[ErrorInfo]).getStartLoc(); - return Error(ErrorLoc, 
"operand must be v0.t"); - } - case Match_InvalidVMaskCarryInRegister: { - SMLoc ErrorLoc = ((RISCVOperand &)*Operands[ErrorInfo]).getStartLoc(); - return Error(ErrorLoc, "operand must be v0"); - } case Match_InvalidSImm5Plus1: { return generateImmOutOfRangeError(Operands, ErrorInfo, -(1 << 4) + 1, (1 << 4), @@ -1703,26 +1665,14 @@ bool RISCVAsmParser::matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, case Match_InvalidSImm26: return generateImmOutOfRangeError(Operands, ErrorInfo, -(1 << 25), (1 << 25) - 1); - case Match_InvalidRlist: { - SMLoc ErrorLoc = ((RISCVOperand &)*Operands[ErrorInfo]).getStartLoc(); - return Error( - ErrorLoc, - "operand must be {ra [, s0[-sN]]} or {x1 [, x8[-x9][, x18[-xN]]]}"); - } - case Match_InvalidStackAdj: { - SMLoc ErrorLoc = ((RISCVOperand &)*Operands[ErrorInfo]).getStartLoc(); - return Error( - ErrorLoc, - "stack adjustment is invalid for this instruction and register list; " - "refer to Zc spec for a detailed range of stack adjustment"); - } case Match_InvalidRnumArg: { return generateImmOutOfRangeError(Operands, ErrorInfo, 0, 10); } - case Match_InvalidRegReg: { - SMLoc ErrorLoc = ((RISCVOperand &)*Operands[ErrorInfo]).getStartLoc(); - return Error(ErrorLoc, "operands must be register and register"); } + + if (const char *MatchDiag = getMatchKindDiag((RISCVMatchResultTy)Result)) { + SMLoc ErrorLoc = ((RISCVOperand &)*Operands[ErrorInfo]).getStartLoc(); + return Error(ErrorLoc, MatchDiag); } llvm_unreachable("Unknown match type detected!"); @@ -2626,7 +2576,7 @@ ParseStatus RISCVAsmParser::parseZeroOffsetMemOp(OperandVector &Operands) { std::unique_ptr OptionalImmOp; if (getLexer().isNot(AsmToken::LParen)) { - // Parse an Integer token. We do not accept arbritrary constant expressions + // Parse an Integer token. We do not accept arbitrary constant expressions // in the offset field (because they may include parens, which complicates // parsing a lot). int64_t ImmVal; diff --git a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp index d5d422226281b..62fbe55dffba1 100644 --- a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp +++ b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp @@ -621,7 +621,7 @@ static void getOperandsForBranch(Register CondReg, RISCVCC::CondCode &CC, return; } - // We found an ICmp, do some canonicalizations. + // We found an ICmp, do some canonicalization. // Adjust comparisons to use comparison with 0 if possible. if (auto Constant = getIConstantVRegSExtVal(RHS, MRI)) { @@ -735,7 +735,7 @@ bool RISCVInstructionSelector::select(MachineInstr &MI) { return true; } case TargetOpcode::G_FCONSTANT: { - // TODO: Use constant pool for complext constants. + // TODO: Use constant pool for complex constants. // TODO: Optimize +0.0 to use fcvt.d.w for s64 on rv32. 
Register DstReg = MI.getOperand(0).getReg(); const APFloat &FPimm = MI.getOperand(1).getFPImm()->getValueAPF(); diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp index 26725cf7decbe..06ae8e1296e51 100644 --- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp +++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp @@ -175,7 +175,7 @@ static unsigned extractRotateInfo(int64_t Val) { static void generateInstSeqLeadingZeros(int64_t Val, const MCSubtargetInfo &STI, RISCVMatInt::InstSeq &Res) { - assert(Val > 0 && "Expected postive val"); + assert(Val > 0 && "Expected positive val"); unsigned LeadingZeros = llvm::countl_zero((uint64_t)Val); uint64_t ShiftedVal = (uint64_t)Val << LeadingZeros; diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.cpp index 99f57f47835ab..72b3e56c8a72f 100644 --- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.cpp +++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.cpp @@ -21,7 +21,7 @@ using namespace llvm; -// This option controls wether or not we emit ELF attributes for ABI features, +// This option controls whether or not we emit ELF attributes for ABI features, // like RISC-V atomics or X3 usage. static cl::opt RiscvAbiAttr( "riscv-abi-attributes", diff --git a/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp b/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp index b1990409754b0..7dcf2ba2ac405 100644 --- a/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp +++ b/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp @@ -1089,7 +1089,7 @@ static bool lowerRISCVVMachineInstrToMCInst(const MachineInstr *MI, bool hasVLOutput = RISCV::isFaultFirstLoad(*MI); for (unsigned OpNo = 0; OpNo != NumOps; ++OpNo) { const MachineOperand &MO = MI->getOperand(OpNo); - // Skip vl ouput. It should be the second output. + // Skip vl output. It should be the second output. if (hasVLOutput && OpNo == 1) continue; diff --git a/llvm/lib/Target/RISCV/RISCVFeatures.td b/llvm/lib/Target/RISCV/RISCVFeatures.td index f050977c55e19..51aa8d7d307e4 100644 --- a/llvm/lib/Target/RISCV/RISCVFeatures.td +++ b/llvm/lib/Target/RISCV/RISCVFeatures.td @@ -1020,7 +1020,7 @@ def HasStdExtSmctrOrSsctr : Predicate<"Subtarget->hasStdExtSmctrOrSsctr()">, // Vendor extensions //===----------------------------------------------------------------------===// -// Ventana Extenions +// Ventana Extensions def FeatureVendorXVentanaCondOps : RISCVExtension<1, 0, "Ventana Conditional Ops">; @@ -1337,7 +1337,7 @@ def HasVendorXqcilo // LLVM specific features and extensions //===----------------------------------------------------------------------===// -// Feature32Bit exists to mark CPUs that support RV32 to distinquish them from +// Feature32Bit exists to mark CPUs that support RV32 to distinguish them from // tuning CPU names. def Feature32Bit : SubtargetFeature<"32bit", "IsRV32", "true", "Implements RV32">; diff --git a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp index b21f2d4b39e46..6abf45591d78e 100644 --- a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp @@ -1182,7 +1182,7 @@ void RISCVFrameLowering::emitEpilogue(MachineFunction &MF, if (getLibCallID(MF, CSI) != -1) { // tail __riscv_restore_[0-12] instruction is considered as a terminator, - // therefor it is unnecessary to place any CFI instructions after it. Just + // therefore it is unnecessary to place any CFI instructions after it. 
Just // deallocate stack if needed and return. if (StackSize != 0) deallocateStack(MF, MBB, MBBI, DL, StackSize, @@ -1847,11 +1847,16 @@ bool RISCVFrameLowering::assignCalleeSavedSpillSlots( MFI.setStackID(FrameIdx, TargetStackID::ScalableVector); } - // Allocate a fixed object that covers the full push or libcall size. if (RVFI->isPushable(MF)) { - if (int64_t PushSize = RVFI->getRVPushStackSize()) - MFI.CreateFixedSpillStackObject(PushSize, -PushSize); + // Allocate a fixed object that covers all the registers that are pushed. + if (unsigned PushedRegs = RVFI->getRVPushRegs()) { + int64_t PushedRegsBytes = + static_cast(PushedRegs) * (STI.getXLen() / 8); + MFI.CreateFixedSpillStackObject(PushedRegsBytes, -PushedRegsBytes); + } } else if (int LibCallRegs = getLibCallID(MF, CSI) + 1) { + // Allocate a fixed object that covers all of the stack allocated by the + // libcall. int64_t LibCallFrameSize = alignTo((STI.getXLen() / 8) * LibCallRegs, getStackAlign()); MFI.CreateFixedSpillStackObject(LibCallFrameSize, -LibCallFrameSize); diff --git a/llvm/lib/Target/RISCV/RISCVGatherScatterLowering.cpp b/llvm/lib/Target/RISCV/RISCVGatherScatterLowering.cpp index 39c0af7985971..82c0d8d4738a4 100644 --- a/llvm/lib/Target/RISCV/RISCVGatherScatterLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVGatherScatterLowering.cpp @@ -131,7 +131,7 @@ static std::pair matchStridedStart(Value *Start, } // Not a constant, maybe it's a strided constant with a splat added or - // multipled. + // multiplied. auto *BO = dyn_cast(Start); if (!BO || (BO->getOpcode() != Instruction::Add && BO->getOpcode() != Instruction::Or && diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp index 82fb8fb8ccc69..ec2e8f1d50264 100644 --- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp @@ -3499,7 +3499,7 @@ bool RISCVDAGToDAGISel::selectSimm5Shl2(SDValue N, SDValue &Simm5, } // Select VL as a 5 bit immediate or a value that will become a register. This -// allows us to choose betwen VSETIVLI or VSETVLI later. +// allows us to choose between VSETIVLI or VSETVLI later. bool RISCVDAGToDAGISel::selectVLOp(SDValue N, SDValue &VL) { auto *C = dyn_cast(N); if (C && isUInt<5>(C->getZExtValue())) { diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h index 592f517358506..bb786e4b2bb40 100644 --- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h +++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h @@ -278,6 +278,7 @@ struct VLX_VSXPseudo { #define GET_RISCVVSETable_DECL #define GET_RISCVVLXTable_DECL #define GET_RISCVVSXTable_DECL +#include "RISCVGenSearchableTables.inc" } // namespace RISCV } // namespace llvm diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp index d91ba33c23596..13ce566f8def6 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -2077,7 +2077,7 @@ bool RISCVTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm, if (isInt<32>(Val)) return true; - // A constant pool entry may be more aligned thant he load we're trying to + // A constant pool entry may be more aligned than the load we're trying to // replace. If we don't support unaligned scalar mem, prefer the constant // pool. // TODO: Can the caller pass down the alignment? 
@@ -2921,7 +2921,7 @@ static SDValue lowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG, bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT_SAT; if (!DstVT.isVector()) { - // For bf16 or for f16 in absense of Zfh, promote to f32, then saturate + // For bf16 or for f16 in absence of Zfh, promote to f32, then saturate // the result. if ((Src.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfhOrZhinx()) || Src.getValueType() == MVT::bf16) { @@ -3186,7 +3186,7 @@ lowerVectorFTRUNC_FCEIL_FFLOOR_FROUND(SDValue Op, SelectionDAG &DAG, // Expand vector STRICT_FTRUNC, STRICT_FCEIL, STRICT_FFLOOR, STRICT_FROUND // STRICT_FROUNDEVEN and STRICT_FNEARBYINT by converting sNan of the source to -// qNan and coverting the new source to integer and back to FP. +// qNan and converting the new source to integer and back to FP. static SDValue lowerVectorStrictFTRUNC_FCEIL_FFLOOR_FROUND(SDValue Op, SelectionDAG &DAG, const RISCVSubtarget &Subtarget) { @@ -3206,7 +3206,7 @@ lowerVectorStrictFTRUNC_FCEIL_FFLOOR_FROUND(SDValue Op, SelectionDAG &DAG, // Freeze the source since we are increasing the number of uses. Src = DAG.getFreeze(Src); - // Covert sNan to qNan by executing x + x for all unordered elemenet x in Src. + // Convert sNan to qNan by executing x + x for all unordered element x in Src. MVT MaskVT = Mask.getSimpleValueType(); SDValue Unorder = DAG.getNode(RISCVISD::STRICT_FSETCC_VL, DL, DAG.getVTList(MaskVT, MVT::Other), @@ -3724,7 +3724,7 @@ static SDValue lowerBuildVectorOfConstants(SDValue Op, SelectionDAG &DAG, unsigned NumViaIntegerBits = std::clamp(NumElts, 8u, Subtarget.getXLen()); NumViaIntegerBits = std::min(NumViaIntegerBits, Subtarget.getELen()); // If we have to use more than one INSERT_VECTOR_ELT then this - // optimization is likely to increase code size; avoid peforming it in + // optimization is likely to increase code size; avoid performing it in // such a case. We can use a load from a constant pool in this case. if (DAG.shouldOptForSize() && NumElts > NumViaIntegerBits) return SDValue(); @@ -4618,7 +4618,7 @@ static int isElementRotate(int &LoSrc, int &HiSrc, ArrayRef Mask) { int MaskSrc = M < Size ? 0 : 1; // Compute which of the two target values this index should be assigned to. - // This reflects whether the high elements are remaining or the low elemnts + // This reflects whether the high elements are remaining or the low elements // are remaining. int &TargetSrc = StartIdx < 0 ? HiSrc : LoSrc; @@ -8567,7 +8567,7 @@ SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const { SDValue RHS = CondV.getOperand(1); ISD::CondCode CCVal = cast(CondV.getOperand(2))->get(); - // Special case for a select of 2 constants that have a diffence of 1. + // Special case for a select of 2 constants that have a difference of 1. // Normally this is done by DAGCombine, but if the select is introduced by // type legalization or op legalization, we miss it. Restricting to SETLT // case for now because that is what signed saturating add/sub need. @@ -9717,7 +9717,7 @@ static SDValue lowerVectorIntrinsicScalars(SDValue Op, SelectionDAG &DAG, // We need to convert from a scalable VF to a vsetvli with VLMax equal to // (vscale * VF). The vscale and VF are independent of element width. We use // SEW=8 for the vsetvli because it is the only element width that supports all -// fractional LMULs. The LMUL is choosen so that with SEW=8 the VLMax is +// fractional LMULs. The LMUL is chosen so that with SEW=8 the VLMax is // (vscale * VF). Where vscale is defined as VLEN/RVVBitsPerBlock. 
The // InsertVSETVLI pass can fix up the vtype of the vsetvli if a different // SEW and LMUL are better for the surrounding vector instructions. @@ -13203,7 +13203,7 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N, return; if (IsStrict) { SDValue Chain = N->getOperand(0); - // In absense of Zfh, promote f16 to f32, then convert. + // In absence of Zfh, promote f16 to f32, then convert. if (Op0.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfhOrZhinx()) { Op0 = DAG.getNode(ISD::STRICT_FP_EXTEND, DL, {MVT::f32, MVT::Other}, @@ -13220,7 +13220,7 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N, Results.push_back(Res.getValue(1)); return; } - // For bf16, or f16 in absense of Zfh, promote [b]f16 to f32 and then + // For bf16, or f16 in absence of Zfh, promote [b]f16 to f32 and then // convert. if ((Op0.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfhOrZhinx()) || @@ -13263,7 +13263,7 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N, if (!isTypeLegal(Op0VT)) return; - // In absense of Zfh, promote f16 to f32, then convert. + // In absence of Zfh, promote f16 to f32, then convert. if (Op0.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfhOrZhinx()) Op0 = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Op0); @@ -13890,7 +13890,7 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N, static unsigned getVecReduceOpcode(unsigned Opc) { switch (Opc) { default: - llvm_unreachable("Unhandled binary to transfrom reduction"); + llvm_unreachable("Unhandled binary to transform reduction"); case ISD::ADD: return ISD::VECREDUCE_ADD; case ISD::UMAX: @@ -14020,7 +14020,7 @@ static SDValue combineBinOpToReduce(SDNode *N, SelectionDAG &DAG, auto BinOpToRVVReduce = [](unsigned Opc) { switch (Opc) { default: - llvm_unreachable("Unhandled binary to transfrom reduction"); + llvm_unreachable("Unhandled binary to transform reduction"); case ISD::ADD: return RISCVISD::VECREDUCE_ADD_VL; case ISD::UMAX: @@ -15577,7 +15577,7 @@ struct NodeExtensionHelper { bool isSupportedFPExtend(SDNode *Root, MVT NarrowEltVT, const RISCVSubtarget &Subtarget) { - // Any f16 extension will neeed zvfh + // Any f16 extension will need zvfh if (NarrowEltVT == MVT::f16 && !Subtarget.hasVInstructionsF16()) return false; // The only bf16 extension we can do is vfmadd_vl -> vfwmadd_vl with @@ -16326,7 +16326,7 @@ static SDValue performMemPairCombine(SDNode *N, if (Base1 != Base2) continue; - // Check if the offsets match the XTHeadMemPair encoding contraints. + // Check if the offsets match the XTHeadMemPair encoding constraints. bool Valid = false; if (MemVT == MVT::i32) { // Check for adjacent i32 values and a 2-bit index. @@ -16954,7 +16954,7 @@ static SDValue performSRACombine(SDNode *N, SelectionDAG &DAG, } // Invert (and/or (set cc X, Y), (xor Z, 1)) to (or/and (set !cc X, Y)), Z) if -// the result is used as the conditon of a br_cc or select_cc we can invert, +// the result is used as the condition of a br_cc or select_cc we can invert, // inverting the setcc is free, and Z is 0/1. Caller will invert the // br_cc/select_cc. static SDValue tryDemorganOfBooleanCondition(SDValue Cond, SelectionDAG &DAG) { @@ -17015,7 +17015,7 @@ static SDValue tryDemorganOfBooleanCondition(SDValue Cond, SelectionDAG &DAG) { return DAG.getNode(Opc, SDLoc(Cond), VT, Setcc, Xor.getOperand(0)); } -// Perform common combines for BR_CC and SELECT_CC condtions. +// Perform common combines for BR_CC and SELECT_CC conditions. 
static bool combine_CC(SDValue &LHS, SDValue &RHS, SDValue &CC, const SDLoc &DL, SelectionDAG &DAG, const RISCVSubtarget &Subtarget) { ISD::CondCode CCVal = cast(CC)->get(); @@ -18603,7 +18603,7 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N, const int64_t Addend = SimpleVID->Addend; // Note: We don't need to check alignment here since (by assumption - // from the existance of the gather), our offsets must be sufficiently + // from the existence of the gather), our offsets must be sufficiently // aligned. const EVT PtrVT = getPointerTy(DAG.getDataLayout()); @@ -20639,7 +20639,7 @@ SDValue RISCVTargetLowering::LowerFormalArguments( EVT PtrVT = getPointerTy(DAG.getDataLayout()); MVT XLenVT = Subtarget.getXLenVT(); unsigned XLenInBytes = Subtarget.getXLen() / 8; - // Used with vargs to acumulate store chains. + // Used with vargs to accumulate store chains. std::vector OutChains; // Assign locations to all of the incoming arguments. diff --git a/llvm/lib/Target/RISCV/RISCVIndirectBranchTracking.cpp b/llvm/lib/Target/RISCV/RISCVIndirectBranchTracking.cpp index c006fba4af4bc..4660a975b20ae 100644 --- a/llvm/lib/Target/RISCV/RISCVIndirectBranchTracking.cpp +++ b/llvm/lib/Target/RISCV/RISCVIndirectBranchTracking.cpp @@ -7,7 +7,7 @@ //===----------------------------------------------------------------------===// // // The pass adds LPAD (AUIPC with rs1 = X0) machine instructions at the -// beginning of each basic block or function that is referenced by an indrect +// beginning of each basic block or function that is referenced by an indirect // jump/call instruction. // //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp index b56a39d8316d1..4a74906ed3cc3 100644 --- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp +++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp @@ -1069,7 +1069,7 @@ RISCVInsertVSETVLI::computeInfoForInstr(const MachineInstr &MI) const { const MachineOperand &VLOp = MI.getOperand(getVLOpNum(MI)); if (VLOp.isImm()) { int64_t Imm = VLOp.getImm(); - // Conver the VLMax sentintel to X0 register. + // Convert the VLMax sentintel to X0 register. if (Imm == RISCV::VLMaxSentinel) { // If we know the exact VLEN, see if we can use the constant encoding // for the VLMAX instead. This reduces register pressure slightly. diff --git a/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp b/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp index fe593a3cabad7..7df04fc225b0b 100644 --- a/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp +++ b/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp @@ -170,10 +170,10 @@ struct BlockData { // Indicates if the block uses VXRM. Uninitialized means no use. VXRMInfo VXRMUse; - // Indicates the VXRM output from the block. Unitialized means transparent. + // Indicates the VXRM output from the block. Uninitialized means transparent. VXRMInfo VXRMOut; - // Keeps track of the available VXRM value at the start of the basic bloc. + // Keeps track of the available VXRM value at the start of the basic block. VXRMInfo AvailableIn; // Keeps track of the available VXRM value at the end of the basic block. 
@@ -384,8 +384,8 @@ void RISCVInsertWriteVXRM::emitWriteVXRM(MachineBasicBlock &MBB) { PInfo.AvailableOut.getVXRMImm() == BBInfo.AnticipatedIn.getVXRMImm()) continue; - // If the predecessor anticipates this value for all its succesors, - // then a write to VXRM would have already occured before this block is + // If the predecessor anticipates this value for all its successors, + // then a write to VXRM would have already occurred before this block is // executed. if (PInfo.AnticipatedOut.isStatic() && PInfo.AnticipatedOut.getVXRMImm() == @@ -429,7 +429,7 @@ void RISCVInsertWriteVXRM::emitWriteVXRM(MachineBasicBlock &MBB) { // If all our successors anticipate a value, do the insert. // NOTE: It's possible that not all predecessors of our successor provide the // correct value. This can occur on critical edges. If we don't split the - // critical edge we'll also have a write vxrm in the succesor that is + // critical edge we'll also have a write vxrm in the successor that is // redundant with this one. if (PendingInsert || (BBInfo.AnticipatedOut.isStatic() && diff --git a/llvm/lib/Target/RISCV/RISCVInstrFormats.td b/llvm/lib/Target/RISCV/RISCVInstrFormats.td index 013c26c72bfd5..cea28bdce284c 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrFormats.td +++ b/llvm/lib/Target/RISCV/RISCVInstrFormats.td @@ -232,7 +232,7 @@ class RVInstCommon not a vector pseudo // 1 -> default value for vector pseudos. not widening or narrowing. diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp index 12a7af0750813..773319ba908c8 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp +++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp @@ -1516,7 +1516,7 @@ RISCVInstrInfo::optimizeSelect(MachineInstr &MI, SeenMIs.erase(DefMI); // If MI is inside a loop, and DefMI is outside the loop, then kill flags on - // DefMI would be invalid when tranferred inside the loop. Checking for a + // DefMI would be invalid when transferred inside the loop. Checking for a // loop is expensive, but at least remove kill flags if they are in different // BBs. if (DefMI->getParent() != MI.getParent()) diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td index 54fee1ac3130e..fde7dc89dd693 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td @@ -125,6 +125,7 @@ def ImmZeroAsmOperand : AsmOperandClass { let Name = "ImmZero"; let RenderMethod = "addImmOperands"; let DiagnosticType = !strconcat("Invalid", Name); + let DiagnosticString = "immediate must be zero"; } // A parse method for (${gpr}) or 0(${gpr}), where the 0 is be silently ignored. 
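The RISC-V changes in this patch move many operand error messages out of hand-written cases in RISCVAsmParser::matchAndEmitInstruction and into DiagnosticString fields on the TableGen operand classes, so a single getMatchKindDiag lookup can report them. A rough, self-contained analogue of that table-driven lookup is sketched below; the enum, helper, and main are invented for illustration, while the two message strings are the ones that appear in the patch.

#include <cstdio>

// Hypothetical analogue of the generated getMatchKindDiag(): map a
// match-result kind to the fixed DiagnosticString declared on its
// AsmOperandClass, or return nullptr when no string is attached.
enum MatchResultKind { Match_InvalidRlist, Match_InvalidVMaskRegister, Match_Other };

static const char *getMatchKindDiagSketch(MatchResultKind Kind) {
  switch (Kind) {
  case Match_InvalidRlist:
    return "operand must be {ra [, s0[-sN]]} or {x1 [, x8[-x9][, x18[-xN]]]}";
  case Match_InvalidVMaskRegister:
    return "operand must be v0.t";
  default:
    return nullptr; // caller falls back to its generic diagnostics
  }
}

int main() {
  if (const char *Msg = getMatchKindDiagSketch(Match_InvalidRlist))
    std::printf("error: %s\n", Msg);
  return 0;
}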
@@ -323,6 +324,7 @@ def BareSymbol : AsmOperandClass { let Name = "BareSymbol"; let RenderMethod = "addImmOperands"; let DiagnosticType = "InvalidBareSymbol"; + let DiagnosticString = "operand must be a bare symbol name"; let ParserMethod = "parseBareSymbol"; } @@ -335,6 +337,7 @@ def CallSymbol : AsmOperandClass { let Name = "CallSymbol"; let RenderMethod = "addImmOperands"; let DiagnosticType = "InvalidCallSymbol"; + let DiagnosticString = "operand must be a bare symbol name"; let ParserMethod = "parseCallSymbol"; } @@ -347,6 +350,7 @@ def PseudoJumpSymbol : AsmOperandClass { let Name = "PseudoJumpSymbol"; let RenderMethod = "addImmOperands"; let DiagnosticType = "InvalidPseudoJumpSymbol"; + let DiagnosticString = "operand must be a valid jump target"; let ParserMethod = "parsePseudoJumpSymbol"; } @@ -359,6 +363,7 @@ def TPRelAddSymbol : AsmOperandClass { let Name = "TPRelAddSymbol"; let RenderMethod = "addImmOperands"; let DiagnosticType = "InvalidTPRelAddSymbol"; + let DiagnosticString = "operand must be a symbol with %tprel_add modifier"; let ParserMethod = "parseOperandWithModifier"; } @@ -1779,6 +1784,7 @@ def TLSDESCCallSymbol : AsmOperandClass { let Name = "TLSDESCCallSymbol"; let RenderMethod = "addImmOperands"; let DiagnosticType = "InvalidTLSDESCCallSymbol"; + let DiagnosticString = "operand must be a symbol with %tlsdesc_call modifier"; let ParserMethod = "parseOperandWithModifier"; } diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td index 671e493fb3763..fdb2334b131da 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td @@ -48,6 +48,7 @@ def VMaskAsmOperand : AsmOperandClass { let IsOptional = 1; let DefaultMethod = "defaultMaskRegOp"; let DiagnosticType = "InvalidVMaskRegister"; + let DiagnosticString = "operand must be v0.t"; } def VMaskCarryInAsmOperand : AsmOperandClass { @@ -55,6 +56,7 @@ def VMaskCarryInAsmOperand : AsmOperandClass { let RenderMethod = "addRegOperands"; let PredicateMethod = "isV0Reg"; let DiagnosticType = "InvalidVMaskCarryInRegister"; + let DiagnosticString = "operand must be v0"; } def VMaskOp : RegisterOperand { diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td index 268bfe70673a2..fe85d4b074c87 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td @@ -2136,23 +2136,6 @@ multiclass VPseudoBinaryRoundingMode TargetConstraintType = 1, - bit Commutable = 0> { - let VLMul = MInfo.value, isCommutable = Commutable in { - def "_" # MInfo.MX : VPseudoBinaryNoMask; - let ForceTailAgnostic = true in - def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMOutMask, - RISCVMaskedPseudo; - } -} - multiclass VPseudoBinaryEmul TargetConstraintType = 1, - bit Commutable = 0> { - defm _VV : VPseudoBinaryM { + let VLMul = m.value, isCommutable = Commutable in { + def "_" # m.MX : + VPseudoBinaryNoMask; + let ForceTailAgnostic = true in + def "_" # m.MX # "_MASK" : + VPseudoBinaryMOutMask; + TargetConstraintType = 2>, + RISCVMaskedPseudo; + } } -multiclass VPseudoBinaryM_VX TargetConstraintType = 1> { - defm "_VX" : - VPseudoBinaryM; +multiclass VPseudoBinaryM_VV { + defm _VV : VPseudoBinaryM; } -multiclass VPseudoBinaryM_VF TargetConstraintType = 1> { - defm "_V" # f.FX : - VPseudoBinaryM; +multiclass VPseudoBinaryM_VX { + defm _VX : VPseudoBinaryM; } -multiclass VPseudoBinaryM_VI TargetConstraintType = 1> { - defm _VI : VPseudoBinaryM; +multiclass 
VPseudoBinaryM_VF { + defm "_V" # f.FX : VPseudoBinaryM; +} + +multiclass VPseudoBinaryM_VI { + defm _VI : VPseudoBinaryM; } multiclass VPseudoVGTR_VV_VX_VI { @@ -3397,11 +3387,11 @@ multiclass VPseudoVWMAC_VV_VF_BF_RM { multiclass VPseudoVCMPM_VV_VX_VI { foreach m = MxList in { defvar mx = m.MX; - defm "" : VPseudoBinaryM_VV, + defm "" : VPseudoBinaryM_VV, SchedBinary<"WriteVICmpV", "ReadVICmpV", "ReadVICmpV", mx>; - defm "" : VPseudoBinaryM_VX, + defm "" : VPseudoBinaryM_VX, SchedBinary<"WriteVICmpX", "ReadVICmpV", "ReadVICmpX", mx>; - defm "" : VPseudoBinaryM_VI, + defm "" : VPseudoBinaryM_VI, SchedUnary<"WriteVICmpI", "ReadVICmpV", mx>; } } @@ -3409,22 +3399,32 @@ multiclass VPseudoVCMPM_VV_VX_VI { multiclass VPseudoVCMPM_VV_VX { foreach m = MxList in { defvar mx = m.MX; - defm "" : VPseudoBinaryM_VV, + defm "" : VPseudoBinaryM_VV, SchedBinary<"WriteVICmpV", "ReadVICmpV", "ReadVICmpV", mx>; - defm "" : VPseudoBinaryM_VX, + defm "" : VPseudoBinaryM_VX, + SchedBinary<"WriteVICmpX", "ReadVICmpV", "ReadVICmpX", mx>; + } +} + +multiclass VPseudoVCMPM_VX_VI { + foreach m = MxList in { + defvar mx = m.MX; + defm "" : VPseudoBinaryM_VX, SchedBinary<"WriteVICmpX", "ReadVICmpV", "ReadVICmpX", mx>; + defm "" : VPseudoBinaryM_VI, + SchedUnary<"WriteVICmpI", "ReadVICmpV", mx>; } } multiclass VPseudoVCMPM_VV_VF { foreach m = MxListF in { - defm "" : VPseudoBinaryM_VV, + defm "" : VPseudoBinaryM_VV, SchedBinary<"WriteVFCmpV", "ReadVFCmpV", "ReadVFCmpV", m.MX>; } foreach f = FPList in { foreach m = f.MxList in { - defm "" : VPseudoBinaryM_VF, + defm "" : VPseudoBinaryM_VF, SchedBinary<"WriteVFCmpF", "ReadVFCmpV", "ReadVFCmpF", m.MX>; } } @@ -3433,22 +3433,12 @@ multiclass VPseudoVCMPM_VV_VF { multiclass VPseudoVCMPM_VF { foreach f = FPList in { foreach m = f.MxList in { - defm "" : VPseudoBinaryM_VF, + defm "" : VPseudoBinaryM_VF, SchedBinary<"WriteVFCmpF", "ReadVFCmpV", "ReadVFCmpF", m.MX>; } } } -multiclass VPseudoVCMPM_VX_VI { - foreach m = MxList in { - defvar mx = m.MX; - defm "" : VPseudoBinaryM_VX, - SchedBinary<"WriteVICmpX", "ReadVICmpV", "ReadVICmpX", mx>; - defm "" : VPseudoBinaryM_VI, - SchedUnary<"WriteVICmpI", "ReadVICmpV", mx>; - } -} - multiclass VPseudoVRED_VS { foreach m = MxList in { defvar mx = m.MX; diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXCV.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXCV.td index b98934d8c6396..bade4863ad348 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoXCV.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXCV.td @@ -514,6 +514,7 @@ def CVrrAsmOperand : AsmOperandClass { let Name = "RegReg"; let ParserMethod = "parseRegReg"; let DiagnosticType = "InvalidRegReg"; + let DiagnosticString = "operands must be register and register"; } def CVrr : Operand, diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZc.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZc.td index 5cc16765d4ae2..9dfbcf678d6eb 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoZc.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZc.td @@ -39,12 +39,15 @@ def RlistAsmOperand : AsmOperandClass { let Name = "Rlist"; let ParserMethod = "parseReglist"; let DiagnosticType = "InvalidRlist"; + let DiagnosticString = "operand must be {ra [, s0[-sN]]} or {x1 [, x8[-x9][, x18[-xN]]]}"; } def StackAdjAsmOperand : AsmOperandClass { let Name = "StackAdj"; let ParserMethod = "parseZcmpStackAdj"; let DiagnosticType = "InvalidStackAdj"; + let DiagnosticString = "stack adjustment is invalid for this instruction and register list; " + "refer to Zc spec for a detailed range of stack adjustment"; let PredicateMethod = "isSpimm"; 
let RenderMethod = "addSpimmOperands"; } diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZfa.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZfa.td index ab54b45f4de93..a539ca82b7462 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoZfa.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZfa.td @@ -30,6 +30,7 @@ def LoadFPImmOperand : AsmOperandClass { let ParserMethod = "parseFPImm"; let RenderMethod = "addFPImmOperands"; let DiagnosticType = "InvalidLoadFPImm"; + let DiagnosticString = "operand must be a valid floating-point constant"; } def loadfpimm : Operand { @@ -43,6 +44,7 @@ def RTZArg : AsmOperandClass { let Name = "RTZArg"; let RenderMethod = "addFRMArgOperands"; let DiagnosticType = "InvalidRTZArg"; + let DiagnosticString = "operand must be 'rtz' floating-point rounding mode"; let ParserMethod = "parseFRMArg"; } diff --git a/llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp b/llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp index df5501e37f831..5453753fa4579 100644 --- a/llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp +++ b/llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp @@ -17,7 +17,7 @@ // For case 1, if a compressed register is available, then the uncompressed // register is copied to the compressed register and its uses are replaced. // -// For example, storing zero uses the uncompressible zero register: +// For example, storing zero uses the incompressible zero register: // sw zero, 0(a0) # if zero // sw zero, 8(a0) # if zero // sw zero, 4(a0) # if zero @@ -275,7 +275,7 @@ static RegImmPair getRegImmPairPreventingCompression(const MachineInstr &MI) { // rather than used. // // For stores, we can change SrcDest (and Base if SrcDest == Base) but - // cannot resolve an uncompressible offset in this case. + // cannot resolve an incompressible offset in this case. if (isCompressibleStore(MI)) { if (!SrcDestCompressed && (BaseCompressed || SrcDest == Base) && !NewBaseAdjust) @@ -313,7 +313,7 @@ static Register analyzeCompressibleUses(MachineInstr &FirstMI, // If RegImm.Reg is modified by this instruction, then we cannot optimize // past this instruction. If the register is already compressed, then it may // possible to optimize a large offset in the current instruction - this - // will have been detected by the preceeding call to + // will have been detected by the preceding call to // getRegImmPairPreventingCompression. if (MI.modifiesRegister(RegImm.Reg, TRI)) break; @@ -409,7 +409,7 @@ bool RISCVMakeCompressibleOpt::runOnMachineFunction(MachineFunction &Fn) { LLVM_DEBUG(dbgs() << "MBB: " << MBB.getName() << "\n"); for (MachineInstr &MI : MBB) { // Determine if this instruction would otherwise be compressed if not for - // an uncompressible register or offset. + // an incompressible register or offset. RegImmPair RegImm = getRegImmPairPreventingCompression(MI); if (!RegImm.Reg && RegImm.Imm == 0) continue; diff --git a/llvm/lib/Target/RISCV/RISCVMergeBaseOffset.cpp b/llvm/lib/Target/RISCV/RISCVMergeBaseOffset.cpp index a324deb4e48f5..bbbb1e1595982 100644 --- a/llvm/lib/Target/RISCV/RISCVMergeBaseOffset.cpp +++ b/llvm/lib/Target/RISCV/RISCVMergeBaseOffset.cpp @@ -434,8 +434,8 @@ bool RISCVMergeBaseOffsetOpt::foldIntoMemoryOps(MachineInstr &Hi, // Memory constraints have two operands. if (NumOps != 2 || !Flags.isMemKind()) { - // If the register is used by something other than a memory contraint, - // we should not fold. + // If the register is used by something other than a memory + // constraint, we should not fold. 
for (unsigned J = 0; J < NumOps; ++J) { const MachineOperand &MO = UseMI.getOperand(I + 1 + J); if (MO.isReg() && MO.getReg() == DestReg) diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp index 7a99bfd1b2512..12d54313a96ab 100644 --- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp +++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp @@ -286,7 +286,7 @@ void RISCVRegisterInfo::adjustReg(MachineBasicBlock &MBB, // instruction. This saves 1 instruction over the full lui/addi+add fallback // path. We avoid anything which can be done with a single lui as it might // be compressible. Note that the sh1add case is fully covered by the 2x addi - // case just above and is thus ommitted. + // case just above and is thus omitted. if (ST.hasStdExtZba() && (Val & 0xFFF) != 0) { unsigned Opc = 0; if (isShiftedInt<12, 3>(Val)) { diff --git a/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td b/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td index 9f7cd411a4943..f4d2073d3b52d 100644 --- a/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td +++ b/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td @@ -159,7 +159,7 @@ class SiFive7GetReductionCycles { ); } -/// Cycles for ordered reductions take approximatley 6*VL cycles +/// Cycles for ordered reductions take approximately 6*VL cycles class SiFive7GetOrderedReductionCycles { defvar VLEN = 512; // (VLEN * LMUL) / SEW @@ -224,7 +224,7 @@ def SiFive7VS : ProcResource<1>; // Store sequencer // vector unit is ready to dequeue them. The unit dequeues up to one instruction // per cycle, in order, as soon as the sequencer for that type of instruction is // available. This resource is meant to be used for 1 cycle by all vector -// instructions, to model that only one vector instruction may be dequed at a +// instructions, to model that only one vector instruction may be dequeued at a // time. The actual dequeueing into the sequencer is modeled by the VA, VL, and // VS sequencer resources below. Each of them will only accept a single // instruction at a time and remain busy for the number of cycles associated diff --git a/llvm/lib/Target/RISCV/RISCVScheduleV.td b/llvm/lib/Target/RISCV/RISCVScheduleV.td index 6b9f1dd321891..0204ab4c98286 100644 --- a/llvm/lib/Target/RISCV/RISCVScheduleV.td +++ b/llvm/lib/Target/RISCV/RISCVScheduleV.td @@ -70,7 +70,7 @@ multiclass LMULSEWWriteResMXSEW resources, // behavior is aliased to a Variant. The Variant has Latency predLad and // ReleaseAtCycles predCycles if the SchedPredicate Pred is true, otherwise has // Latency noPredLat and ReleaseAtCycles noPredCycles. The WorstCase SchedWrite -// is created similiarly if IsWorstCase is true. +// is created similarly if IsWorstCase is true. 
multiclass LMULWriteResMXVariant resources, int predLat, list predAcquireCycles, diff --git a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp index 3e4949232298e..4a69bdeb76161 100644 --- a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp +++ b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp @@ -78,7 +78,7 @@ static cl::opt EnableRISCVCopyPropagation( static cl::opt EnableRISCVDeadRegisterElimination( "riscv-enable-dead-defs", cl::Hidden, cl::desc("Enable the pass that removes dead" - " definitons and replaces stores to" + " definitions and replaces stores to" " them with stores to x0"), cl::init(true)); diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp index 812592365a41a..b3ddd07902a5c 100644 --- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp +++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp @@ -673,7 +673,7 @@ InstructionCost RISCVTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, // We use 2 for the cost of the mask materialization as this is the true // cost for small masks and most shuffles are small. At worst, this cost // should be a very small constant for the constant pool load. As such, - // we may bias towards large selects slightly more than truely warranted. + // we may bias towards large selects slightly more than truly warranted. return LT.first * (1 + getRISCVInstructionCost({RISCV::VMV_S_X, RISCV::VMERGE_VVM}, LT.second, CostKind)); @@ -2396,7 +2396,7 @@ InstructionCost RISCVTTIImpl::getPointersChainCost( // either GEP instructions, PHIs, bitcasts or constants. When we have same // base, we just calculate cost of each non-Base GEP as an ADD operation if // any their index is a non-const. - // If no known dependecies between the pointers cost is calculated as a sum + // If no known dependencies between the pointers cost is calculated as a sum // of costs of GEP instructions. for (auto [I, V] : enumerate(Ptrs)) { const auto *GEP = dyn_cast(V); @@ -2440,7 +2440,7 @@ void RISCVTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE, if (ST->enableDefaultUnroll()) return BasicTTIImplBase::getUnrollingPreferences(L, SE, UP, ORE); - // Enable Upper bound unrolling universally, not dependant upon the conditions + // Enable Upper bound unrolling universally, not dependent upon the conditions // below. UP.UpperBound = true; diff --git a/llvm/lib/Target/RISCV/RISCVVectorMaskDAGMutation.cpp b/llvm/lib/Target/RISCV/RISCVVectorMaskDAGMutation.cpp index c48a97b12e43f..0bddbacc89e3e 100644 --- a/llvm/lib/Target/RISCV/RISCVVectorMaskDAGMutation.cpp +++ b/llvm/lib/Target/RISCV/RISCVVectorMaskDAGMutation.cpp @@ -12,7 +12,7 @@ // // The reason why we need to do this: // 1. When tracking register pressure, we don't track physical registers. -// 2. We have a RegisterClass for mask reigster (which is `VMV0`), but we don't +// 2. We have a RegisterClass for mask register (which is `VMV0`), but we don't // use it in most RVV pseudos (only used in inline asm constraint and add/sub // with carry instructions). Instead, we use physical register V0 directly // and insert a `$v0 = COPY ...` before the use. 
And, there is a fundamental diff --git a/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp b/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp index 702206b8e0dc5..719abde3d7fa6 100644 --- a/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp @@ -748,7 +748,12 @@ Type *SPIRVEmitIntrinsics::deduceElementTypeHelper( if (II && II->getIntrinsicID() == Intrinsic::spv_resource_getpointer) { auto *ImageType = cast(II->getOperand(0)->getType()); assert(ImageType->getTargetExtName() == "spirv.Image"); - Ty = ImageType->getTypeParameter(0); + (void)ImageType; + if (II->hasOneUse()) { + auto *U = *II->users().begin(); + Ty = cast(U)->getAccessType(); + assert(Ty && "Unable to get type for resource pointer."); + } } else if (Function *CalledF = CI->getCalledFunction()) { std::string DemangledName = getOclOrSpirvBuiltinDemangledName(CalledF->getName()); diff --git a/llvm/lib/Target/X86/GISel/X86InstructionSelector.cpp b/llvm/lib/Target/X86/GISel/X86InstructionSelector.cpp index ee456a11d5844..d7f315d82b832 100644 --- a/llvm/lib/Target/X86/GISel/X86InstructionSelector.cpp +++ b/llvm/lib/Target/X86/GISel/X86InstructionSelector.cpp @@ -1048,6 +1048,13 @@ bool X86InstructionSelector::selectFCmp(MachineInstr &I, break; } + assert((LhsReg.isVirtual() && RhsReg.isVirtual()) && + "Both arguments of FCMP need to be virtual!"); + auto *LhsBank = RBI.getRegBank(LhsReg, MRI, TRI); + [[maybe_unused]] auto *RhsBank = RBI.getRegBank(RhsReg, MRI, TRI); + assert((LhsBank == RhsBank) && + "Both banks assigned to FCMP arguments need to be same!"); + // Compute the opcode for the CMP instruction. unsigned OpCmp; LLT Ty = MRI.getType(LhsReg); @@ -1055,10 +1062,15 @@ bool X86InstructionSelector::selectFCmp(MachineInstr &I, default: return false; case 32: - OpCmp = X86::UCOMISSrr; + OpCmp = LhsBank->getID() == X86::PSRRegBankID ? X86::UCOM_FpIr32 + : X86::UCOMISSrr; break; case 64: - OpCmp = X86::UCOMISDrr; + OpCmp = LhsBank->getID() == X86::PSRRegBankID ? X86::UCOM_FpIr64 + : X86::UCOMISDrr; + break; + case 80: + OpCmp = X86::UCOM_FpIr80; break; } diff --git a/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp b/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp index 88b5ec8cd004f..c73179c4c9466 100644 --- a/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp +++ b/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp @@ -452,10 +452,9 @@ X86LegalizerInfo::X86LegalizerInfo(const X86Subtarget &STI, // fp comparison getActionDefinitionsBuilder(G_FCMP) - .legalIf([=](const LegalityQuery &Query) { - return (HasSSE1 && typePairInSet(0, 1, {{s8, s32}})(Query)) || - (HasSSE2 && typePairInSet(0, 1, {{s8, s64}})(Query)); - }) + .legalFor(HasSSE1 || UseX87, {s8, s32}) + .legalFor(HasSSE2 || UseX87, {s8, s64}) + .legalFor(UseX87, {s8, s80}) .clampScalar(0, s8, s8) .clampScalar(1, s32, HasSSE2 ? 
s64 : s32) .widenScalarToNextPow2(1); diff --git a/llvm/lib/Target/X86/GISel/X86RegisterBankInfo.cpp b/llvm/lib/Target/X86/GISel/X86RegisterBankInfo.cpp index 43c0145ec8e2a..42faf4299c6d5 100644 --- a/llvm/lib/Target/X86/GISel/X86RegisterBankInfo.cpp +++ b/llvm/lib/Target/X86/GISel/X86RegisterBankInfo.cpp @@ -321,8 +321,8 @@ X86RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const { unsigned Size = Ty1.getSizeInBits(); (void)Size; - assert((Size == 32 || Size == 64) && "Unsupported size for G_FCMP"); - + assert((Size == 32 || Size == 64 || Size == 80) && + "Unsupported size for G_FCMP"); auto FpRegBank = getPartialMappingIdx(MI, Ty1, /* isFP= */ true); OpRegBankIdx = {PMI_GPR8, /* Predicate */ PMI_None, FpRegBank, FpRegBank}; diff --git a/llvm/lib/Transforms/IPO/OpenMPOpt.cpp b/llvm/lib/Transforms/IPO/OpenMPOpt.cpp index 1074f5f23ffa1..95c42f2b9bd45 100644 --- a/llvm/lib/Transforms/IPO/OpenMPOpt.cpp +++ b/llvm/lib/Transforms/IPO/OpenMPOpt.cpp @@ -569,7 +569,7 @@ struct OMPInformationCache : public InformationCache { for (RuntimeFunction Fn : Fns) { RuntimeFunctionInfo &RFI = RFIs[Fn]; - if (RFI.Declaration && RFI.Declaration->isDeclaration()) + if (!RFI.Declaration || RFI.Declaration->isDeclaration()) return false; } return true; @@ -5792,6 +5792,7 @@ PreservedAnalyses OpenMPOptPass::run(Module &M, ModuleAnalysisManager &AM) { CallGraphUpdater CGUpdater; bool PostLink = LTOPhase == ThinOrFullLTOPhase::FullLTOPostLink || + LTOPhase == ThinOrFullLTOPhase::ThinLTOPostLink || LTOPhase == ThinOrFullLTOPhase::ThinLTOPreLink; OMPInformationCache InfoCache(M, AG, Allocator, /*CGSCC*/ nullptr, PostLink); @@ -5871,6 +5872,7 @@ PreservedAnalyses OpenMPOptCGSCCPass::run(LazyCallGraph::SCC &C, CGUpdater.initialize(CG, C, AM, UR); bool PostLink = LTOPhase == ThinOrFullLTOPhase::FullLTOPostLink || + LTOPhase == ThinOrFullLTOPhase::ThinLTOPostLink || LTOPhase == ThinOrFullLTOPhase::ThinLTOPreLink; SetVector Functions(SCC.begin(), SCC.end()); OMPInformationCache InfoCache(*(Functions.back()->getParent()), AG, Allocator, diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp index 81c88673d48dc..4616ea6ab5487 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp @@ -610,8 +610,13 @@ static Value *foldLogOpOfMaskedICmps(Value *LHS, Value *RHS, bool IsAnd, APInt NewMask = *ConstB & *ConstD; if (NewMask == *ConstB) return LHS; - if (NewMask == *ConstD) + if (NewMask == *ConstD) { + if (IsLogical) { + if (auto *RHSI = dyn_cast(RHS)) + RHSI->dropPoisonGeneratingFlags(); + } return RHS; + } } if (Mask & AMask_NotAllOnes) { diff --git a/llvm/lib/Transforms/Instrumentation/MemProfiler.cpp b/llvm/lib/Transforms/Instrumentation/MemProfiler.cpp index 91c48338d0320..7d8bc3aa4c589 100644 --- a/llvm/lib/Transforms/Instrumentation/MemProfiler.cpp +++ b/llvm/lib/Transforms/Instrumentation/MemProfiler.cpp @@ -970,6 +970,7 @@ static void readMemprof(Module &M, Function &F, IndexedInstrProfReader *MemProfReader, const TargetLibraryInfo &TLI, std::map &FullStackIdToAllocMatchInfo, + std::set> &MatchedCallSites, DenseMap &UndriftMaps) { auto &Ctx = M.getContext(); // Previously we used getIRPGOFuncName() here. If F is local linkage, @@ -1210,6 +1211,13 @@ readMemprof(Module &M, Function &F, IndexedInstrProfReader *MemProfReader, addCallsiteMetadata(I, InlinedCallStack, Ctx); // Only need to find one with a matching call stack and add a single // callsite metadata. 
+ + // Accumulate call site matching information upon request. + if (ClPrintMemProfMatchInfo) { + std::vector CallStack; + append_range(CallStack, InlinedCallStack); + MatchedCallSites.insert(std::move(CallStack)); + } break; } } @@ -1266,13 +1274,17 @@ PreservedAnalyses MemProfUsePass::run(Module &M, ModuleAnalysisManager &AM) { // it to an allocation in the IR. std::map FullStackIdToAllocMatchInfo; + // Set of the matched call sites, each expressed as a sequence of an inline + // call stack. + std::set> MatchedCallSites; + for (auto &F : M) { if (F.isDeclaration()) continue; const TargetLibraryInfo &TLI = FAM.getResult(F); readMemprof(M, F, MemProfReader.get(), TLI, FullStackIdToAllocMatchInfo, - UndriftMaps); + MatchedCallSites, UndriftMaps); } if (ClPrintMemProfMatchInfo) { @@ -1281,6 +1293,13 @@ PreservedAnalyses MemProfUsePass::run(Module &M, ModuleAnalysisManager &AM) { << " context with id " << Id << " has total profiled size " << Info.TotalSize << (Info.Matched ? " is" : " not") << " matched\n"; + + for (const auto &CallStack : MatchedCallSites) { + errs() << "MemProf callsite match for inline call stack"; + for (uint64_t StackId : CallStack) + errs() << " " << StackId; + errs() << "\n"; + } } return PreservedAnalyses::none(); diff --git a/llvm/lib/Transforms/Scalar/GVNHoist.cpp b/llvm/lib/Transforms/Scalar/GVNHoist.cpp index c6f015112e59d..1c2e1531e47d8 100644 --- a/llvm/lib/Transforms/Scalar/GVNHoist.cpp +++ b/llvm/lib/Transforms/Scalar/GVNHoist.cpp @@ -564,21 +564,20 @@ unsigned int GVNHoist::rank(const Value *V) const { } bool GVNHoist::hasEH(const BasicBlock *BB) { - auto It = BBSideEffects.find(BB); - if (It != BBSideEffects.end()) + auto [It, Inserted] = BBSideEffects.try_emplace(BB); + if (!Inserted) return It->second; if (BB->isEHPad() || BB->hasAddressTaken()) { - BBSideEffects[BB] = true; + It->second = true; return true; } if (BB->getTerminator()->mayThrow()) { - BBSideEffects[BB] = true; + It->second = true; return true; } - BBSideEffects[BB] = false; return false; } diff --git a/llvm/lib/Transforms/Vectorize/CMakeLists.txt b/llvm/lib/Transforms/Vectorize/CMakeLists.txt index e5fabd318b82c..872e055294d55 100644 --- a/llvm/lib/Transforms/Vectorize/CMakeLists.txt +++ b/llvm/lib/Transforms/Vectorize/CMakeLists.txt @@ -9,6 +9,7 @@ add_llvm_component_library(LLVMVectorize SandboxVectorizer/Legality.cpp SandboxVectorizer/Passes/BottomUpVec.cpp SandboxVectorizer/Passes/RegionsFromMetadata.cpp + SandboxVectorizer/Passes/TransactionAcceptOrRevert.cpp SandboxVectorizer/SandboxVectorizer.cpp SandboxVectorizer/SandboxVectorizerPassBuilder.cpp SandboxVectorizer/Scheduler.cpp diff --git a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp index 0b40b733ffe7e..04b392829f0d7 100644 --- a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp +++ b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp @@ -1335,8 +1335,9 @@ void Vectorizer::mergeEquivalenceClasses(EquivalenceClassMap &EQClasses) const { const auto &Key = EC.first; EqClassReducedKey RedKey{std::get<1>(Key), std::get<2>(Key), std::get<3>(Key)}; - RedKeyToUOMap[RedKey].insert(std::get<0>(Key)); - if (RedKeyToUOMap[RedKey].size() > 1) + auto &UOMap = RedKeyToUOMap[RedKey]; + UOMap.insert(std::get<0>(Key)); + if (UOMap.size() > 1) FoundPotentiallyOptimizableEC = true; } if (!FoundPotentiallyOptimizableEC) diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp index 02b79f2053d59..dacee6445072a 100644 --- 
a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp +++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp @@ -1454,9 +1454,10 @@ class LoopVectorizationCostModel { // FIXME: Investigate opportunity for fixed vector factor. // FIXME: support fixed-order recurrences by fixing splice of non VFxUF // penultimate EVL. - bool EVLIsLegal = - UserIC <= 1 && TTI.hasActiveVectorLength(0, nullptr, Align()) && - !EnableVPlanNativePath && Legal->getFixedOrderRecurrences().empty(); + bool EVLIsLegal = UserIC <= 1 && IsScalableVF && + TTI.hasActiveVectorLength(0, nullptr, Align()) && + !EnableVPlanNativePath && + Legal->getFixedOrderRecurrences().empty(); if (!EVLIsLegal) { // If for some reason EVL mode is unsupported, fallback to // DataWithoutLaneMask to try to vectorize the loop with folded tail @@ -4109,7 +4110,8 @@ LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) { // found modulo the vectorization factor is not zero, try to fold the tail // by masking. // FIXME: look for a smaller MaxVF that does divide TC rather than masking. - setTailFoldingStyles(MaxFactors.ScalableVF.isScalable(), UserIC); + bool ContainsScalableVF = MaxFactors.ScalableVF.isNonZero(); + setTailFoldingStyles(ContainsScalableVF, UserIC); if (foldTailByMasking()) { if (getTailFoldingStyle() == TailFoldingStyle::DataWithEVL) { LLVM_DEBUG( @@ -4120,8 +4122,7 @@ LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) { // Tail folded loop using VP intrinsics restricts the VF to be scalable // for now. // TODO: extend it for fixed vectors, if required. - assert(MaxFactors.ScalableVF.isScalable() && - "Expected scalable vector factor."); + assert(ContainsScalableVF && "Expected scalable vector factor."); MaxFactors.FixedVF = ElementCount::getFixed(1); } @@ -4558,11 +4559,10 @@ VectorizationFactor LoopVectorizationPlanner::selectVectorizationFactor() { InstructionCost ExpectedCost = CM.expectedCost(ElementCount::getFixed(1)); LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n"); assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop"); - assert(any_of(VPlans, - [](std::unique_ptr &P) { - return P->hasVF(ElementCount::getFixed(1)); - }) && - "Expected Scalar VF to be a candidate"); + assert( + any_of(VPlans, + [](std::unique_ptr &P) { return P->hasScalarVFOnly(); }) && + "Expected Scalar VF to be a candidate"); const VectorizationFactor ScalarCost(ElementCount::getFixed(1), ExpectedCost, ExpectedCost); @@ -6284,7 +6284,6 @@ void LoopVectorizationCostModel::setVectorizedCallDecision(ElementCount VF) { // Find the cost of vectorizing the call, if we can find a suitable // vector variant of the function. - bool UsesMask = false; VFInfo FuncInfo; Function *VecFunc = nullptr; // Search through any available variants for one we can use at this VF. @@ -6336,7 +6335,6 @@ void LoopVectorizationCostModel::setVectorizedCallDecision(ElementCount VF) { break; } case VFParamKind::GlobalPredicate: - UsesMask = true; break; default: ParamsOk = false; @@ -6353,19 +6351,8 @@ void LoopVectorizationCostModel::setVectorizedCallDecision(ElementCount VF) { break; } - // Add in the cost of synthesizing a mask if one wasn't required. 
- InstructionCost MaskCost = 0; - if (VecFunc && UsesMask && !MaskRequired) - MaskCost = TTI.getShuffleCost( - TargetTransformInfo::SK_Broadcast, - VectorType::get(IntegerType::getInt1Ty( - VecFunc->getFunctionType()->getContext()), - VF), - {}, CostKind); - if (TLI && VecFunc && !CI->isNoBuiltin()) - VectorCost = - TTI.getCallInstrCost(nullptr, RetTy, Tys, CostKind) + MaskCost; + VectorCost = TTI.getCallInstrCost(nullptr, RetTy, Tys, CostKind); // Find the cost of an intrinsic; some targets may have instructions that // perform the operation without needing an actual call. @@ -7685,6 +7672,8 @@ DenseMap LoopVectorizationPlanner::executePlan( VPlanTransforms::runPass(VPlanTransforms::unrollByUF, BestVPlan, BestUF, OrigLoop->getHeader()->getContext()); VPlanTransforms::optimizeForVFAndUF(BestVPlan, BestVF, BestUF, PSE); + VPlanTransforms::simplifyRecipes(BestVPlan, *Legal->getWidestInductionType()); + VPlanTransforms::removeDeadRecipes(BestVPlan); VPlanTransforms::convertToConcreteRecipes(BestVPlan); // Perform the actual loop transformation. @@ -8757,13 +8746,6 @@ bool VPRecipeBuilder::getScaledReductions( if (!CM.TheLoop->contains(RdxExitInstr)) return false; - // TODO: Allow scaling reductions when predicating. The select at - // the end of the loop chooses between the phi value and most recent - // reduction result, both of which have different VFs to the active lane - // mask when scaling. - if (CM.blockNeedsPredicationForAnyReason(RdxExitInstr->getParent())) - return false; - auto *Update = dyn_cast(RdxExitInstr); if (!Update) return false; @@ -8925,8 +8907,19 @@ VPRecipeBuilder::tryToCreatePartialReduction(Instruction *Reduction, isa(BinOpRecipe)) std::swap(BinOp, Accumulator); - return new VPPartialReductionRecipe(Reduction->getOpcode(), BinOp, - Accumulator, Reduction); + unsigned ReductionOpcode = Reduction->getOpcode(); + if (CM.blockNeedsPredicationForAnyReason(Reduction->getParent())) { + assert((ReductionOpcode == Instruction::Add || + ReductionOpcode == Instruction::Sub) && + "Expected an ADD or SUB operation for predicated partial " + "reductions (because the neutral element in the mask is zero)!"); + VPValue *Mask = getBlockInMask(Reduction->getParent()); + VPValue *Zero = + Plan.getOrAddLiveIn(ConstantInt::get(Reduction->getType(), 0)); + BinOp = Builder.createSelect(Mask, BinOp, Zero, Reduction->getDebugLoc()); + } + return new VPPartialReductionRecipe(ReductionOpcode, BinOp, Accumulator, + Reduction); } void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF, @@ -8937,7 +8930,7 @@ void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF, for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFTimes2);) { VFRange SubRange = {VF, MaxVFTimes2}; if (auto Plan = tryToBuildVPlanWithVPRecipes(SubRange)) { - bool HasScalarVF = Plan->hasVF(ElementCount::getFixed(1)); + bool HasScalarVF = Plan->hasScalarVFOnly(); // Now optimize the initial VPlan. if (!HasScalarVF) VPlanTransforms::runPass(VPlanTransforms::truncateToMinimalBitwidths, @@ -9734,7 +9727,11 @@ void LoopVectorizationPlanner::adjustRecipesForReductions( // beginning of the dedicated latch block. auto *OrigExitingVPV = PhiR->getBackedgeValue(); auto *NewExitingVPV = PhiR->getBackedgeValue(); - if (!PhiR->isInLoop() && CM.foldTailByMasking()) { + // Don't output selects for partial reductions because they have an output + // with fewer lanes than the VF. So the operands of the select would have + // different numbers of lanes. Partial reductions mask the input instead. 
+ if (!PhiR->isInLoop() && CM.foldTailByMasking() && + !isa(OrigExitingVPV->getDefiningRecipe())) { VPValue *Cond = RecipeBuilder.getBlockInMask(OrigLoop->getHeader()); assert(OrigExitingVPV->getDefiningRecipe()->getParent() != LatchVPBB && "reduction recipe must be defined before latch"); diff --git a/llvm/lib/Transforms/Vectorize/SandboxVectorizer/DependencyGraph.cpp b/llvm/lib/Transforms/Vectorize/SandboxVectorizer/DependencyGraph.cpp index 2680667afc4de..06a5e3bed7f03 100644 --- a/llvm/lib/Transforms/Vectorize/SandboxVectorizer/DependencyGraph.cpp +++ b/llvm/lib/Transforms/Vectorize/SandboxVectorizer/DependencyGraph.cpp @@ -368,6 +368,9 @@ MemDGNode *DependencyGraph::getMemDGNodeAfter(DGNode *N, bool IncludingN, } void DependencyGraph::notifyCreateInstr(Instruction *I) { + if (Ctx->getTracker().getState() == Tracker::TrackerState::Reverting) + // We don't maintain the DAG while reverting. + return; // Nothing to do if the node is not in the focus range of the DAG. if (!(DAGInterval.contains(I) || DAGInterval.touches(I))) return; @@ -405,6 +408,9 @@ void DependencyGraph::notifyCreateInstr(Instruction *I) { } void DependencyGraph::notifyMoveInstr(Instruction *I, const BBIterator &To) { + if (Ctx->getTracker().getState() == Tracker::TrackerState::Reverting) + // We don't maintain the DAG while reverting. + return; // NOTE: This function runs before `I` moves to its new destination. BasicBlock *BB = To.getNodeParent(); assert(!(To != BB->end() && &*To == I->getNextNode()) && @@ -472,6 +478,9 @@ void DependencyGraph::notifyMoveInstr(Instruction *I, const BBIterator &To) { } void DependencyGraph::notifyEraseInstr(Instruction *I) { + if (Ctx->getTracker().getState() == Tracker::TrackerState::Reverting) + // We don't maintain the DAG while reverting. + return; // Update the MemDGNode chain if this is a memory node. if (auto *MemN = dyn_cast_or_null(getNodeOrNull(I))) { auto *PrevMemN = getMemDGNodeBefore(MemN, /*IncludingN=*/false); diff --git a/llvm/lib/Transforms/Vectorize/SandboxVectorizer/Passes/BottomUpVec.cpp b/llvm/lib/Transforms/Vectorize/SandboxVectorizer/Passes/BottomUpVec.cpp index 6f65657d29790..507d163240127 100644 --- a/llvm/lib/Transforms/Vectorize/SandboxVectorizer/Passes/BottomUpVec.cpp +++ b/llvm/lib/Transforms/Vectorize/SandboxVectorizer/Passes/BottomUpVec.cpp @@ -12,6 +12,7 @@ #include "llvm/SandboxIR/Function.h" #include "llvm/SandboxIR/Instruction.h" #include "llvm/SandboxIR/Module.h" +#include "llvm/SandboxIR/Region.h" #include "llvm/SandboxIR/Utils.h" #include "llvm/Transforms/Vectorize/SandboxVectorizer/SandboxVectorizerPassBuilder.h" #include "llvm/Transforms/Vectorize/SandboxVectorizer/SeedCollector.h" @@ -448,13 +449,24 @@ bool BottomUpVec::runOnFunction(Function &F, const Analyses &A) { assert(SeedSlice.size() >= 2 && "Should have been rejected!"); - // TODO: If vectorization succeeds, run the RegionPassManager on the - // resulting region. - // TODO: Refactor to remove the unnecessary copy to SeedSliceVals. SmallVector SeedSliceVals(SeedSlice.begin(), SeedSlice.end()); - Change |= tryVectorize(SeedSliceVals); + // Create an empty region. Instructions get added to the region + // automatically by the callbacks. + auto &Ctx = F.getContext(); + Region Rgn(Ctx, A.getTTI()); + // Save the state of the IR before we make any changes. The + // transaction gets accepted/reverted by the tr-accept-or-revert pass. + Ctx.save(); + // Try to vectorize starting from the seed slice. 
The returned value + // is true if we found vectorizable code and generated some vector + // code for it. It does not mean that the code is profitable. + bool VecSuccess = tryVectorize(SeedSliceVals); + if (VecSuccess) + // WARNING: All passes should return false, except those that + // accept/revert the state. + Change |= RPM.runOnRegion(Rgn, A); } } } diff --git a/llvm/lib/Transforms/Vectorize/SandboxVectorizer/Passes/PassRegistry.def b/llvm/lib/Transforms/Vectorize/SandboxVectorizer/Passes/PassRegistry.def index 0dc72842f1abe..f3aa12729860f 100644 --- a/llvm/lib/Transforms/Vectorize/SandboxVectorizer/Passes/PassRegistry.def +++ b/llvm/lib/Transforms/Vectorize/SandboxVectorizer/Passes/PassRegistry.def @@ -19,6 +19,8 @@ REGION_PASS("null", ::llvm::sandboxir::NullPass) REGION_PASS("print-instruction-count", ::llvm::sandboxir::PrintInstructionCount) +REGION_PASS("tr-accept", ::llvm::sandboxir::TransactionAlwaysAccept) +REGION_PASS("tr-accept-or-revert", ::llvm::sandboxir::TransactionAcceptOrRevert) #undef REGION_PASS diff --git a/llvm/lib/Transforms/Vectorize/SandboxVectorizer/Passes/TransactionAcceptOrRevert.cpp b/llvm/lib/Transforms/Vectorize/SandboxVectorizer/Passes/TransactionAcceptOrRevert.cpp new file mode 100644 index 0000000000000..874390ba2daae --- /dev/null +++ b/llvm/lib/Transforms/Vectorize/SandboxVectorizer/Passes/TransactionAcceptOrRevert.cpp @@ -0,0 +1,37 @@ +//===- TransactionAcceptOrRevert.cpp - Check cost and accept/revert region ===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "llvm/Transforms/Vectorize/SandboxVectorizer/Passes/TransactionAcceptOrRevert.h" +#include "llvm/Support/CommandLine.h" +#include "llvm/Support/InstructionCost.h" + +namespace llvm { + +static cl::opt CostThreshold("sbvec-cost-threshold", cl::init(0), + cl::Hidden, + cl::desc("Vectorization cost threshold.")); + +namespace sandboxir { + +bool TransactionAcceptOrRevert::runOnRegion(Region &Rgn, const Analyses &A) { + const auto &SB = Rgn.getScoreboard(); + InstructionCost CostAfterMinusBefore = SB.getAfterCost() - SB.getBeforeCost(); + // TODO: Print costs / write to remarks. + auto &Tracker = Rgn.getContext().getTracker(); + if (CostAfterMinusBefore < -CostThreshold) { + bool HasChanges = !Tracker.empty(); + Tracker.accept(); + return HasChanges; + } + // Revert the IR. + Rgn.getContext().getTracker().revert(); + return false; +} + +} // namespace sandboxir +} // namespace llvm diff --git a/llvm/lib/Transforms/Vectorize/SandboxVectorizer/SandboxVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SandboxVectorizer/SandboxVectorizer.cpp index 798a0ad915375..b233d35212f94 100644 --- a/llvm/lib/Transforms/Vectorize/SandboxVectorizer/SandboxVectorizer.cpp +++ b/llvm/lib/Transforms/Vectorize/SandboxVectorizer/SandboxVectorizer.cpp @@ -31,9 +31,10 @@ static cl::opt UserDefinedPassPipeline( SandboxVectorizerPass::SandboxVectorizerPass() : FPM("fpm") { if (UserDefinedPassPipeline == DefaultPipelineMagicStr) { - // TODO: Add region passes to the default pipeline. + // TODO: Add passes to the default pipeline. It currently contains: + // - the bottom-up-vectorizer pass FPM.setPassPipeline( - "bottom-up-vec<>", + "bottom-up-vec", sandboxir::SandboxVectorizerPassBuilder::createFunctionPass); } else { // Create the user-defined pipeline. 
diff --git a/llvm/lib/Transforms/Vectorize/SandboxVectorizer/SandboxVectorizerPassBuilder.cpp b/llvm/lib/Transforms/Vectorize/SandboxVectorizer/SandboxVectorizerPassBuilder.cpp index 5ecf7b2ed0d25..0c1ab55e91a5c 100644 --- a/llvm/lib/Transforms/Vectorize/SandboxVectorizer/SandboxVectorizerPassBuilder.cpp +++ b/llvm/lib/Transforms/Vectorize/SandboxVectorizer/SandboxVectorizerPassBuilder.cpp @@ -4,6 +4,8 @@ #include "llvm/Transforms/Vectorize/SandboxVectorizer/Passes/NullPass.h" #include "llvm/Transforms/Vectorize/SandboxVectorizer/Passes/PrintInstructionCount.h" #include "llvm/Transforms/Vectorize/SandboxVectorizer/Passes/RegionsFromMetadata.h" +#include "llvm/Transforms/Vectorize/SandboxVectorizer/Passes/TransactionAcceptOrRevert.h" +#include "llvm/Transforms/Vectorize/SandboxVectorizer/Passes/TransactionAlwaysAccept.h" namespace llvm::sandboxir { diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h index 9988e03e9fdca..3816e1b61576a 100644 --- a/llvm/lib/Transforms/Vectorize/VPlan.h +++ b/llvm/lib/Transforms/Vectorize/VPlan.h @@ -3669,8 +3669,8 @@ class VPlan { VFs.insert(VF); } - bool hasVF(ElementCount VF) { return VFs.count(VF); } - bool hasScalableVF() { + bool hasVF(ElementCount VF) const { return VFs.count(VF); } + bool hasScalableVF() const { return any_of(VFs, [](ElementCount VF) { return VF.isScalable(); }); } @@ -3680,7 +3680,12 @@ class VPlan { return {VFs.begin(), VFs.end()}; } - bool hasScalarVFOnly() const { return VFs.size() == 1 && VFs[0].isScalar(); } + bool hasScalarVFOnly() const { + bool HasScalarVFOnly = VFs.size() == 1 && VFs[0].isScalar(); + assert(HasScalarVFOnly == hasVF(ElementCount::getFixed(1)) && + "Plan with scalar VF should only have a single VF"); + return HasScalarVFOnly; + } bool hasUF(unsigned UF) const { return UFs.empty() || UFs.contains(UF); } diff --git a/llvm/lib/Transforms/Vectorize/VPlanPatternMatch.h b/llvm/lib/Transforms/Vectorize/VPlanPatternMatch.h index 8c11d93734667..ebc82c01467cf 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanPatternMatch.h +++ b/llvm/lib/Transforms/Vectorize/VPlanPatternMatch.h @@ -155,6 +155,14 @@ struct Recipe_match { if ((!matchRecipeAndOpcode(R) && ...)) return false; + if (!(std::is_same_v || ...) && + isa(R)) { + // Don't match VPWidenEVLRecipe if it is not explicitly part of RecipeTys. + // Otherwise we might match it unexpectedly when trying to match + // VPWidenRecipe, of which VPWidenEVLRecipe is a subclass of. 
+ return false; + } + assert(R->getNumOperands() == std::tuple_size::value && "recipe with matched opcode the expected number of operands"); diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp index b734ddfce788e..bc80c5ea0b1b2 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp @@ -285,6 +285,13 @@ VPPartialReductionRecipe::computeCost(ElementCount VF, VPCostContext &Ctx) const { std::optional Opcode = std::nullopt; VPRecipeBase *BinOpR = getOperand(0)->getDefiningRecipe(); + + // If the partial reduction is predicated, a select will be operand 0 rather + // than the binary op + using namespace llvm::VPlanPatternMatch; + if (match(getOperand(0), m_Select(m_VPValue(), m_VPValue(), m_VPValue()))) + BinOpR = BinOpR->getOperand(1)->getDefiningRecipe(); + if (auto *WidenR = dyn_cast(BinOpR)) Opcode = std::make_optional(WidenR->getOpcode()); @@ -713,6 +720,23 @@ Value *VPInstruction::generate(VPTransformState &State) { InstructionCost VPInstruction::computeCost(ElementCount VF, VPCostContext &Ctx) const { + if (Instruction::isBinaryOp(getOpcode())) { + if (!getUnderlyingValue()) { + // TODO: Compute cost for VPInstructions without underlying values once + // the legacy cost model has been retired. + return 0; + } + + assert(!doesGeneratePerAllLanes() && + "Should only generate a vector value or single scalar, not scalars " + "for all lanes."); + Type *ResTy = Ctx.Types.inferScalarType(this); + if (!vputils::onlyFirstLaneUsed(this)) + ResTy = toVectorTy(ResTy, VF); + + return Ctx.TTI.getArithmeticInstrCost(getOpcode(), ResTy, Ctx.CostKind); + } + switch (getOpcode()) { case VPInstruction::AnyOf: { auto *VecTy = toVectorTy(Ctx.Types.inferScalarType(this), VF); @@ -720,7 +744,10 @@ InstructionCost VPInstruction::computeCost(ElementCount VF, Instruction::Or, cast(VecTy), std::nullopt, Ctx.CostKind); } default: - // TODO: Fill out other opcodes! + // TODO: Compute cost other VPInstructions once the legacy cost model has + // been retired. + assert(!getUnderlyingValue() && + "unexpected VPInstruction witht underlying value"); return 0; } } diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp index 7e9ef46133936..6c917e4eef655 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp @@ -591,7 +591,7 @@ static SmallVector collectUsersRecursively(VPValue *V) { static void legalizeAndOptimizeInductions(VPlan &Plan) { using namespace llvm::VPlanPatternMatch; VPBasicBlock *HeaderVPBB = Plan.getVectorLoopRegion()->getEntryBasicBlock(); - bool HasOnlyVectorVFs = !Plan.hasVF(ElementCount::getFixed(1)); + bool HasOnlyVectorVFs = !Plan.hasScalarVFOnly(); VPBuilder Builder(HeaderVPBB, HeaderVPBB->getFirstNonPhi()); for (VPRecipeBase &Phi : HeaderVPBB->phis()) { auto *PhiR = dyn_cast(&Phi); @@ -964,9 +964,7 @@ static void simplifyRecipe(VPRecipeBase &R, VPTypeAnalysis &TypeInfo) { return R.getVPSingleValue()->replaceAllUsesWith(R.getOperand(1)); } -/// Try to simplify the recipes in \p Plan. Use \p CanonicalIVTy as type for all -/// un-typed live-ins in VPTypeAnalysis. 
-static void simplifyRecipes(VPlan &Plan, Type &CanonicalIVTy) { +void VPlanTransforms::simplifyRecipes(VPlan &Plan, Type &CanonicalIVTy) { ReversePostOrderTraversal> RPOT( Plan.getEntry()); VPTypeAnalysis TypeInfo(&CanonicalIVTy); @@ -1043,7 +1041,6 @@ void VPlanTransforms::optimizeForVFAndUF(VPlan &Plan, ElementCount BestVF, } Term->eraseFromParent(); - VPlanTransforms::removeDeadRecipes(Plan); Plan.setVF(BestVF); Plan.setUF(BestUF); diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.h b/llvm/lib/Transforms/Vectorize/VPlanTransforms.h index 0cd4cf1f22a7d..3dd476a8526d6 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.h +++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.h @@ -163,6 +163,10 @@ struct VPlanTransforms { /// Lower abstract recipes to concrete ones, that can be codegen'd. static void convertToConcreteRecipes(VPlan &Plan); + /// Perform instcombine-like simplifications on recipes in \p Plan. Use \p + /// CanonicalIVTy as type for all un-typed live-ins in VPTypeAnalysis. + static void simplifyRecipes(VPlan &Plan, Type &CanonicalIVTy); + /// If there's a single exit block, optimize its phi recipes that use exiting /// IV values by feeding them precomputed end values instead, possibly taken /// one step backwards. diff --git a/llvm/test/Analysis/ScalarEvolution/infer-via-ranges.ll b/llvm/test/Analysis/ScalarEvolution/infer-via-ranges.ll index 9aa096b952be5..9cf3e06d58a88 100644 --- a/llvm/test/Analysis/ScalarEvolution/infer-via-ranges.ll +++ b/llvm/test/Analysis/ScalarEvolution/infer-via-ranges.ll @@ -1,22 +1,31 @@ -; RUN: opt -passes=indvars -S < %s | FileCheck %s +; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py UTC_ARGS: --version 5 +; RUN: opt -passes='print' \ +; RUN: -scalar-evolution-classify-expressions=0 -disable-output %s 2>&1 | FileCheck %s define void @infer_via_ranges(ptr %arr, i32 %n) { -; CHECK-LABEL: @infer_via_ranges +; CHECK-LABEL: 'infer_via_ranges' +; CHECK-NEXT: Determining loop execution counts for: @infer_via_ranges +; CHECK-NEXT: Loop %loop: backedge-taken count is ((-1 + %n) umin %n) +; CHECK-NEXT: exit count for loop: %n +; CHECK-NEXT: exit count for in.bounds: (-1 + %n) +; CHECK-NEXT: Loop %loop: constant max backedge-taken count is i32 -2147483648 +; CHECK-NEXT: Loop %loop: symbolic max backedge-taken count is ((-1 + %n) umin %n) +; CHECK-NEXT: symbolic max exit count for loop: %n +; CHECK-NEXT: symbolic max exit count for in.bounds: (-1 + %n) +; CHECK-NEXT: Loop %loop: Trip multiple is 1 +; entry: %first.itr.check = icmp sgt i32 %n, 0 %start = sub i32 %n, 1 br i1 %first.itr.check, label %loop, label %exit loop: -; CHECK-LABEL: loop: %idx = phi i32 [ %start, %entry ] , [ %idx.dec, %in.bounds ] %idx.dec = sub i32 %idx, 1 %abc = icmp sge i32 %idx, 0 -; CHECK: br i1 true, label %in.bounds, label %out.of.bounds br i1 %abc, label %in.bounds, label %out.of.bounds in.bounds: -; CHECK-LABEL: in.bounds: %addr = getelementptr i32, ptr %arr, i32 %idx store i32 0, ptr %addr %next = icmp sgt i32 %idx.dec, -1 diff --git a/llvm/test/CodeGen/AArch64/Atomics/aarch64-atomic-load-rcpc_immo.ll b/llvm/test/CodeGen/AArch64/Atomics/aarch64-atomic-load-rcpc_immo.ll index b475e68db411a..02ff12c27fcda 100644 --- a/llvm/test/CodeGen/AArch64/Atomics/aarch64-atomic-load-rcpc_immo.ll +++ b/llvm/test/CodeGen/AArch64/Atomics/aarch64-atomic-load-rcpc_immo.ll @@ -1,12 +1,15 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --filter-out "(?!^\s*lda.*\bsp\b)^\s*.*\bsp\b" --filter 
"^\s*(ld|st[^r]|swp|cas|bl|add|and|eor|orn|orr|sub|mvn|sxt|cmp|ccmp|csel|dmb)" -; RUN: llc %s -o - -verify-machineinstrs -mtriple=aarch64 -mattr=+v8.4a -mattr=+rcpc-immo -global-isel=true -global-isel-abort=2 -O0 | FileCheck %s --check-prefixes=CHECK,GISEL -; RUN: llc %s -o - -verify-machineinstrs -mtriple=aarch64 -mattr=+v8.4a -mattr=+rcpc-immo -global-isel=false -O1 | FileCheck %s --check-prefixes=CHECK,SDAG,SDAG-NOAVOIDLDAPUR +; RUN: llc %s -o - -verify-machineinstrs -mtriple=aarch64 -mattr=+v8.8a -mattr=+rcpc-immo -global-isel=true -global-isel-abort=2 -O0 | FileCheck %s --check-prefixes=CHECK,GISEL +; RUN: llc %s -o - -verify-machineinstrs -mtriple=aarch64 -mattr=+v8.4a -mattr=+rcpc-immo -global-isel=false -O1 | FileCheck %s --check-prefixes=CHECK,SDAG,SDAG-AVOIDLDAPUR ; RUN: llc %s -o - -verify-machineinstrs -mtriple=aarch64 -mattr=+v8.4a -mattr=+rcpc-immo,avoid-ldapur -global-isel=false -O1 | FileCheck %s --check-prefixes=CHECK,SDAG,SDAG-AVOIDLDAPUR ; RUN: llc %s -o - -verify-machineinstrs -mtriple=aarch64 -mcpu=neoverse-v2 -global-isel=false -O1 | FileCheck %s --check-prefixes=CHECK,SDAG,SDAG-AVOIDLDAPUR ; RUN: llc %s -o - -verify-machineinstrs -mtriple=aarch64 -mcpu=neoverse-v3 -global-isel=false -O1 | FileCheck %s --check-prefixes=CHECK,SDAG,SDAG-AVOIDLDAPUR ; RUN: llc %s -o - -verify-machineinstrs -mtriple=aarch64 -mcpu=cortex-x3 -global-isel=false -O1 | FileCheck %s --check-prefixes=CHECK,SDAG,SDAG-AVOIDLDAPUR ; RUN: llc %s -o - -verify-machineinstrs -mtriple=aarch64 -mcpu=cortex-x4 -global-isel=false -O1 | FileCheck %s --check-prefixes=CHECK,SDAG,SDAG-AVOIDLDAPUR ; RUN: llc %s -o - -verify-machineinstrs -mtriple=aarch64 -mcpu=cortex-x925 -global-isel=false -O1 | FileCheck %s --check-prefixes=CHECK,SDAG,SDAG-AVOIDLDAPUR +; RUN: llc %s -o - -verify-machineinstrs -mtriple=aarch64 -mattr=+v9a -global-isel=false -O1 | FileCheck %s --check-prefixes=CHECK,SDAG,SDAG-AVOIDLDAPUR +; RUN: llc %s -o - -verify-machineinstrs -mtriple=aarch64 -mattr=+v8.8a -global-isel=false -O1 | FileCheck %s --check-prefixes=CHECK,SDAG,SDAG-NOAVOIDLDAPUR +; RUN: llc %s -o - -verify-machineinstrs -mtriple=aarch64 -mattr=+v9.3a -global-isel=false -O1 | FileCheck %s --check-prefixes=CHECK,SDAG,SDAG-NOAVOIDLDAPUR define i8 @load_atomic_i8_aligned_unordered(ptr %ptr) { ; CHECK-LABEL: load_atomic_i8_aligned_unordered: diff --git a/llvm/test/CodeGen/AArch64/a55-fuse-address.mir b/llvm/test/CodeGen/AArch64/a55-fuse-address.mir index 3e1b6076f0167..4edff043a7b3e 100644 --- a/llvm/test/CodeGen/AArch64/a55-fuse-address.mir +++ b/llvm/test/CodeGen/AArch64/a55-fuse-address.mir @@ -1,6 +1,5 @@ # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py # RUN: llc -o - %s -mtriple=aarch64 -run-pass=machine-scheduler -verify-machineinstrs | FileCheck %s -# RUN: llc -o - %s -mtriple=aarch64 -passes=machine-scheduler | FileCheck %s --- | target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128" target triple = "aarch64" diff --git a/llvm/test/CodeGen/AArch64/ampere1-sched-add.mir b/llvm/test/CodeGen/AArch64/ampere1-sched-add.mir index 3a33291cbf8e0..e578b5d7f04f3 100644 --- a/llvm/test/CodeGen/AArch64/ampere1-sched-add.mir +++ b/llvm/test/CodeGen/AArch64/ampere1-sched-add.mir @@ -1,6 +1,5 @@ # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 3 # RUN: llc -run-pass=machine-scheduler %s -o - | FileCheck %s -# RUN: llc -passes=machine-scheduler %s -o - | FileCheck %s --- | target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128" 
diff --git a/llvm/test/CodeGen/AArch64/cluster-frame-index.mir b/llvm/test/CodeGen/AArch64/cluster-frame-index.mir index 5d761f10be3b2..37ab9418f4dbd 100644 --- a/llvm/test/CodeGen/AArch64/cluster-frame-index.mir +++ b/llvm/test/CodeGen/AArch64/cluster-frame-index.mir @@ -1,5 +1,4 @@ #RUN: llc -mtriple=aarch64-- -mcpu=cyclone -run-pass machine-scheduler -o - %s | FileCheck %s -#RUN: llc -mtriple=aarch64-- -mcpu=cyclone -passes=machine-scheduler -o - %s | FileCheck %s --- name: merge_stack # CHECK-LABEL: name: merge_stack diff --git a/llvm/test/CodeGen/AArch64/dump-reserved-cycles.mir b/llvm/test/CodeGen/AArch64/dump-reserved-cycles.mir index 5655bfa5d2945..4bf8afff90d4c 100644 --- a/llvm/test/CodeGen/AArch64/dump-reserved-cycles.mir +++ b/llvm/test/CodeGen/AArch64/dump-reserved-cycles.mir @@ -1,15 +1,9 @@ # RUN: llc -mtriple=aarch64-none-linux-gnu -mcpu=cortex-a55 -misched-dump-reserved-cycles=true \ # RUN: -run-pass=machine-scheduler -debug-only=machine-scheduler -o - %s 2>&1 | FileCheck %s -# RUN: llc -mtriple=aarch64-none-linux-gnu -mcpu=cortex-a55 -misched-dump-reserved-cycles=true \ -# RUN: -passes=machine-scheduler -debug-only=machine-scheduler -o - %s 2>&1 | FileCheck %s - # RUN: llc -mtriple=aarch64-none-linux-gnu -mcpu=cortex-a55 -misched-dump-reserved-cycles=false\ # RUN: -run-pass=machine-scheduler -debug-only=machine-scheduler -o - %s 2>&1 | FileCheck %s --check-prefix=NODUMP -# RUN: llc -mtriple=aarch64-none-linux-gnu -mcpu=cortex-a55 -misched-dump-reserved-cycles=false\ -# RUN: -passes=machine-scheduler -debug-only=machine-scheduler -o - %s 2>&1 | FileCheck %s --check-prefix=NODUMP - # REQUIRES: asserts --- name: f diff --git a/llvm/test/CodeGen/AArch64/dump-schedule-trace.mir b/llvm/test/CodeGen/AArch64/dump-schedule-trace.mir index c90d6bd3cb420..bff6d1d71b7c4 100644 --- a/llvm/test/CodeGen/AArch64/dump-schedule-trace.mir +++ b/llvm/test/CodeGen/AArch64/dump-schedule-trace.mir @@ -4,34 +4,17 @@ # RUN: -misched-dump-schedule-trace=true -misched-dump-schedule-trace-col-header-width=21 \ # RUN: 2>&1 | FileCheck %s --check-prefix=TOP --strict-whitespace -# RUN: llc -mtriple=aarch64-none-linux-gnu -mcpu=cortex-a55 \ -# RUN: -passes=machine-scheduler -debug-only=machine-scheduler -o - %s \ -# RUN: -misched-prera-direction=topdown -sched-print-cycles=true \ -# RUN: -misched-dump-schedule-trace=true -misched-dump-schedule-trace-col-header-width=21 \ -# RUN: 2>&1 | FileCheck %s --check-prefix=TOP --strict-whitespace - # RUN: llc -mtriple=aarch64-none-linux-gnu -mcpu=cortex-a55 \ # RUN: -run-pass=machine-scheduler -debug-only=machine-scheduler -o - %s \ # RUN: -misched-prera-direction=bottomup -sched-print-cycles=true \ # RUN: -misched-dump-schedule-trace=true -misched-dump-schedule-trace-col-width=4 \ # RUN: 2>&1 | FileCheck %s --check-prefix=BOTTOM --strict-whitespace -# RUN: llc -mtriple=aarch64-none-linux-gnu -mcpu=cortex-a55 \ -# RUN: -passes=machine-scheduler -debug-only=machine-scheduler -o - %s \ -# RUN: -misched-prera-direction=bottomup -sched-print-cycles=true \ -# RUN: -misched-dump-schedule-trace=true -misched-dump-schedule-trace-col-width=4 \ -# RUN: 2>&1 | FileCheck %s --check-prefix=BOTTOM --strict-whitespace - # RUN: llc -mtriple=aarch64-none-linux-gnu -mcpu=cortex-a55 \ # RUN: -run-pass=machine-scheduler -debug-only=machine-scheduler -o - %s \ # RUN: -sched-print-cycles=true -misched-dump-schedule-trace=true \ # RUN: 2>&1 | FileCheck %s --check-prefix=BIDIRECTIONAL -# RUN: llc -mtriple=aarch64-none-linux-gnu -mcpu=cortex-a55 \ -# RUN: -passes=machine-scheduler 
-debug-only=machine-scheduler -o - %s \ -# RUN: -sched-print-cycles=true -misched-dump-schedule-trace=true \ -# RUN: 2>&1 | FileCheck %s --check-prefix=BIDIRECTIONAL - # REQUIRES: asserts, aarch64-registered-target --- name: f diff --git a/llvm/test/CodeGen/AArch64/force-enable-intervals.mir b/llvm/test/CodeGen/AArch64/force-enable-intervals.mir index 8d47eee1c8e19..a53d4e7480307 100644 --- a/llvm/test/CodeGen/AArch64/force-enable-intervals.mir +++ b/llvm/test/CodeGen/AArch64/force-enable-intervals.mir @@ -3,21 +3,11 @@ # RUN: -run-pass=machine-scheduler -debug-only=machine-scheduler \ # RUN: -o - %s 2>&1 -misched-prera-direction=topdown | FileCheck %s -# RUN: llc -mtriple=aarch64-none-linux-gnu -mcpu=cortex-a55 \ -# RUN: -misched-dump-reserved-cycles=true \ -# RUN: -passes=machine-scheduler -debug-only=machine-scheduler \ -# RUN: -o - %s 2>&1 -misched-prera-direction=topdown | FileCheck %s - # RUN: llc -mtriple=aarch64-none-linux-gnu -mcpu=cortex-a55 \ # RUN: -misched-dump-reserved-cycles=true -sched-model-force-enable-intervals=true \ # RUN: -run-pass=machine-scheduler -debug-only=machine-scheduler \ # RUN: -o - %s 2>&1 -misched-prera-direction=topdown | FileCheck %s --check-prefix=FORCE -# RUN: llc -mtriple=aarch64-none-linux-gnu -mcpu=cortex-a55 \ -# RUN: -misched-dump-reserved-cycles=true -sched-model-force-enable-intervals=true \ -# RUN: -passes=machine-scheduler -debug-only=machine-scheduler \ -# RUN: -o - %s 2>&1 -misched-prera-direction=topdown | FileCheck %s --check-prefix=FORCE - # REQUIRES: asserts, aarch64-registered-target --- name: f diff --git a/llvm/test/CodeGen/AArch64/llvm.modf.ll b/llvm/test/CodeGen/AArch64/llvm.modf.ll new file mode 100644 index 0000000000000..41fe796daca86 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/llvm.modf.ll @@ -0,0 +1,255 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2 +; RUN: llc -mtriple=aarch64-gnu-linux < %s | FileCheck -check-prefixes=CHECK %s + +define { half, half } @test_modf_f16(half %a) { +; CHECK-LABEL: test_modf_f16: +; CHECK: // %bb.0: +; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: .cfi_offset w30, -16 +; CHECK-NEXT: fcvt s0, h0 +; CHECK-NEXT: add x0, sp, #12 +; CHECK-NEXT: bl modff +; CHECK-NEXT: ldr s1, [sp, #12] +; CHECK-NEXT: fcvt h0, s0 +; CHECK-NEXT: fcvt h1, s1 +; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: ret + %result = call { half, half } @llvm.modf.f16(half %a) + ret { half, half } %result +} + +define half @test_modf_f16_only_use_fractional_part(half %a) { +; CHECK-LABEL: test_modf_f16_only_use_fractional_part: +; CHECK: // %bb.0: +; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: .cfi_offset w30, -16 +; CHECK-NEXT: fcvt s0, h0 +; CHECK-NEXT: add x0, sp, #12 +; CHECK-NEXT: bl modff +; CHECK-NEXT: fcvt h0, s0 +; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: ret + %result = call { half, half } @llvm.modf.f16(half %a) + %result.0 = extractvalue { half, half } %result, 0 + ret half %result.0 +} + +define half @test_modf_f16_only_use_integral_part(half %a) { +; CHECK-LABEL: test_modf_f16_only_use_integral_part: +; CHECK: // %bb.0: +; CHECK-NEXT: str x30, [sp, #-16]! 
// 8-byte Folded Spill +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: .cfi_offset w30, -16 +; CHECK-NEXT: fcvt s0, h0 +; CHECK-NEXT: add x0, sp, #12 +; CHECK-NEXT: bl modff +; CHECK-NEXT: ldr s0, [sp, #12] +; CHECK-NEXT: fcvt h0, s0 +; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: ret + %result = call { half, half } @llvm.modf.f16(half %a) + %result.1 = extractvalue { half, half } %result, 1 + ret half %result.1 +} + +define { <2 x half>, <2 x half> } @test_modf_v2f16(<2 x half> %a) { +; CHECK-LABEL: test_modf_v2f16: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #64 +; CHECK-NEXT: str x30, [sp, #48] // 8-byte Folded Spill +; CHECK-NEXT: .cfi_def_cfa_offset 64 +; CHECK-NEXT: .cfi_offset w30, -16 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-NEXT: mov h1, v0.h[1] +; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-NEXT: add x0, sp, #44 +; CHECK-NEXT: fcvt s0, h1 +; CHECK-NEXT: bl modff +; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload +; CHECK-NEXT: fcvt h0, s0 +; CHECK-NEXT: add x0, sp, #40 +; CHECK-NEXT: fcvt s1, h1 +; CHECK-NEXT: str q0, [sp, #16] // 16-byte Folded Spill +; CHECK-NEXT: fmov s0, s1 +; CHECK-NEXT: bl modff +; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload +; CHECK-NEXT: fcvt h2, s0 +; CHECK-NEXT: add x0, sp, #56 +; CHECK-NEXT: mov h1, v1.h[2] +; CHECK-NEXT: fcvt s0, h1 +; CHECK-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload +; CHECK-NEXT: mov v2.h[1], v1.h[0] +; CHECK-NEXT: str q2, [sp, #16] // 16-byte Folded Spill +; CHECK-NEXT: bl modff +; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload +; CHECK-NEXT: fcvt h2, s0 +; CHECK-NEXT: add x0, sp, #60 +; CHECK-NEXT: mov h1, v1.h[3] +; CHECK-NEXT: fcvt s0, h1 +; CHECK-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload +; CHECK-NEXT: mov v1.h[2], v2.h[0] +; CHECK-NEXT: str q1, [sp, #16] // 16-byte Folded Spill +; CHECK-NEXT: bl modff +; CHECK-NEXT: ldp s2, s1, [sp, #40] +; CHECK-NEXT: fcvt h4, s0 +; CHECK-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload +; CHECK-NEXT: ldr x30, [sp, #48] // 8-byte Folded Reload +; CHECK-NEXT: fcvt h3, s1 +; CHECK-NEXT: fcvt h1, s2 +; CHECK-NEXT: ldr s2, [sp, #56] +; CHECK-NEXT: mov v0.h[3], v4.h[0] +; CHECK-NEXT: fcvt h2, s2 +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-NEXT: mov v1.h[1], v3.h[0] +; CHECK-NEXT: ldr s3, [sp, #60] +; CHECK-NEXT: mov v1.h[2], v2.h[0] +; CHECK-NEXT: fcvt h2, s3 +; CHECK-NEXT: mov v1.h[3], v2.h[0] +; CHECK-NEXT: // kill: def $d1 killed $d1 killed $q1 +; CHECK-NEXT: add sp, sp, #64 +; CHECK-NEXT: ret + %result = call { <2 x half>, <2 x half> } @llvm.modf.v2f16(<2 x half> %a) + ret { <2 x half>, <2 x half> } %result +} + +define { float, float } @test_modf_f32(float %a) { +; CHECK-LABEL: test_modf_f32: +; CHECK: // %bb.0: +; CHECK-NEXT: str x30, [sp, #-16]! 
// 8-byte Folded Spill +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: .cfi_offset w30, -16 +; CHECK-NEXT: add x0, sp, #12 +; CHECK-NEXT: bl modff +; CHECK-NEXT: ldr s1, [sp, #12] +; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: ret + %result = call { float, float } @llvm.modf.f32(float %a) + ret { float, float } %result +} + +define { <3 x float>, <3 x float> } @test_modf_v3f32(<3 x float> %a) { +; CHECK-LABEL: test_modf_v3f32: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #80 +; CHECK-NEXT: str x30, [sp, #48] // 8-byte Folded Spill +; CHECK-NEXT: stp x20, x19, [sp, #64] // 16-byte Folded Spill +; CHECK-NEXT: .cfi_def_cfa_offset 80 +; CHECK-NEXT: .cfi_offset w19, -8 +; CHECK-NEXT: .cfi_offset w20, -16 +; CHECK-NEXT: .cfi_offset w30, -32 +; CHECK-NEXT: str q0, [sp, #16] // 16-byte Folded Spill +; CHECK-NEXT: mov s0, v0.s[1] +; CHECK-NEXT: add x0, sp, #56 +; CHECK-NEXT: add x19, sp, #56 +; CHECK-NEXT: bl modff +; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0 +; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-NEXT: add x0, sp, #44 +; CHECK-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload +; CHECK-NEXT: // kill: def $s0 killed $s0 killed $q0 +; CHECK-NEXT: bl modff +; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload +; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0 +; CHECK-NEXT: add x0, sp, #60 +; CHECK-NEXT: add x20, sp, #60 +; CHECK-NEXT: mov v0.s[1], v1.s[0] +; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload +; CHECK-NEXT: mov s0, v0.s[2] +; CHECK-NEXT: bl modff +; CHECK-NEXT: ldr s1, [sp, #44] +; CHECK-NEXT: ldr q2, [sp] // 16-byte Folded Reload +; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0 +; CHECK-NEXT: ldr x30, [sp, #48] // 8-byte Folded Reload +; CHECK-NEXT: ld1 { v1.s }[1], [x19] +; CHECK-NEXT: mov v2.s[2], v0.s[0] +; CHECK-NEXT: ld1 { v1.s }[2], [x20] +; CHECK-NEXT: ldp x20, x19, [sp, #64] // 16-byte Folded Reload +; CHECK-NEXT: mov v0.16b, v2.16b +; CHECK-NEXT: add sp, sp, #80 +; CHECK-NEXT: ret + %result = call { <3 x float>, <3 x float> } @llvm.modf.v3f32(<3 x float> %a) + ret { <3 x float>, <3 x float> } %result +} + +define { <2 x float>, <2 x float> } @test_modf_v2f32(<2 x float> %a) { +; CHECK-LABEL: test_modf_v2f32: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #64 +; CHECK-NEXT: stp x30, x19, [sp, #48] // 16-byte Folded Spill +; CHECK-NEXT: .cfi_def_cfa_offset 64 +; CHECK-NEXT: .cfi_offset w19, -8 +; CHECK-NEXT: .cfi_offset w30, -16 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-NEXT: add x0, sp, #40 +; CHECK-NEXT: add x19, sp, #40 +; CHECK-NEXT: mov s0, v0.s[1] +; CHECK-NEXT: bl modff +; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0 +; CHECK-NEXT: str q0, [sp, #16] // 16-byte Folded Spill +; CHECK-NEXT: add x0, sp, #44 +; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-NEXT: // kill: def $s0 killed $s0 killed $q0 +; CHECK-NEXT: bl modff +; CHECK-NEXT: ldr s1, [sp, #44] +; CHECK-NEXT: ldr q2, [sp, #16] // 16-byte Folded Reload +; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0 +; CHECK-NEXT: ld1 { v1.s }[1], [x19] +; CHECK-NEXT: ldp x30, x19, [sp, #48] // 16-byte Folded Reload +; CHECK-NEXT: mov v0.s[1], v2.s[0] +; CHECK-NEXT: // kill: def $d1 killed $d1 killed $q1 +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-NEXT: add sp, sp, #64 +; CHECK-NEXT: ret + %result = call { <2 x float>, <2 x float> } @llvm.modf.v2f32(<2 x float> %a) + ret { <2 x float>, <2 x float> } %result +} 
+ +define { double, double } @test_modf_f64(double %a) { +; CHECK-LABEL: test_modf_f64: +; CHECK: // %bb.0: +; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: .cfi_offset w30, -16 +; CHECK-NEXT: add x0, sp, #8 +; CHECK-NEXT: bl modf +; CHECK-NEXT: ldr d1, [sp, #8] +; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: ret + %result = call { double, double } @llvm.modf.f64(double %a) + ret { double, double } %result +} + +define { <2 x double>, <2 x double> } @test_modf_v2f64(<2 x double> %a) { +; CHECK-LABEL: test_modf_v2f64: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #64 +; CHECK-NEXT: stp x30, x19, [sp, #48] // 16-byte Folded Spill +; CHECK-NEXT: .cfi_def_cfa_offset 64 +; CHECK-NEXT: .cfi_offset w19, -8 +; CHECK-NEXT: .cfi_offset w30, -16 +; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-NEXT: mov d0, v0.d[1] +; CHECK-NEXT: add x0, sp, #32 +; CHECK-NEXT: add x19, sp, #32 +; CHECK-NEXT: bl modf +; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-NEXT: str q0, [sp, #16] // 16-byte Folded Spill +; CHECK-NEXT: add x0, sp, #40 +; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-NEXT: bl modf +; CHECK-NEXT: ldr d1, [sp, #40] +; CHECK-NEXT: ldr q2, [sp, #16] // 16-byte Folded Reload +; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-NEXT: ld1 { v1.d }[1], [x19] +; CHECK-NEXT: ldp x30, x19, [sp, #48] // 16-byte Folded Reload +; CHECK-NEXT: mov v0.d[1], v2.d[0] +; CHECK-NEXT: add sp, sp, #64 +; CHECK-NEXT: ret + %result = call { <2 x double>, <2 x double> } @llvm.modf.v2f64(<2 x double> %a) + ret { <2 x double>, <2 x double> } %result +} diff --git a/llvm/test/CodeGen/AArch64/machine-scheduler.mir b/llvm/test/CodeGen/AArch64/machine-scheduler.mir index ba2c2b33d8e92..6c0222f4fdd78 100644 --- a/llvm/test/CodeGen/AArch64/machine-scheduler.mir +++ b/llvm/test/CodeGen/AArch64/machine-scheduler.mir @@ -1,5 +1,4 @@ # RUN: llc -mtriple=aarch64-none-linux-gnu -run-pass machine-scheduler -verify-machineinstrs -o - %s | FileCheck %s -# RUN: llc -mtriple=aarch64-none-linux-gnu -passes=machine-scheduler -o - %s | FileCheck %s --- | define i64 @load_imp-def(ptr nocapture %P, i32 %v) { diff --git a/llvm/test/CodeGen/AArch64/macro-fusion-addsub-2reg-const1.mir b/llvm/test/CodeGen/AArch64/macro-fusion-addsub-2reg-const1.mir index 2f0d19fec07d9..8c5a85a4e7a61 100644 --- a/llvm/test/CodeGen/AArch64/macro-fusion-addsub-2reg-const1.mir +++ b/llvm/test/CodeGen/AArch64/macro-fusion-addsub-2reg-const1.mir @@ -1,7 +1,5 @@ # RUN: llc -o - %s -mtriple=aarch64-- -mattr=+fuse-addsub-2reg-const1 -run-pass postmisched | FileCheck %s --check-prefixes=CHECK,FUSION -# RUN: llc -o - %s -mtriple=aarch64-- -mattr=+fuse-addsub-2reg-const1 -passes=postmisched | FileCheck %s --check-prefixes=CHECK,FUSION # RUN: llc -o - %s -mtriple=aarch64-- -mattr=-fuse-addsub-2reg-const1 -run-pass postmisched | FileCheck %s --check-prefixes=CHECK,NOFUSION -# RUN: llc -o - %s -mtriple=aarch64-- -mattr=-fuse-addsub-2reg-const1 -passes=postmisched | FileCheck %s --check-prefixes=CHECK,NOFUSION --- # CHECK-LABEL: name: addsub2reg # CHECK: $w8 = ADDWrr killed renamable $w0, killed renamable $w1 diff --git a/llvm/test/CodeGen/AArch64/macro-fusion-last.mir b/llvm/test/CodeGen/AArch64/macro-fusion-last.mir index affd2bb039e96..14937a4794e96 100644 --- a/llvm/test/CodeGen/AArch64/macro-fusion-last.mir +++ b/llvm/test/CodeGen/AArch64/macro-fusion-last.mir @@ -1,7 +1,5 @@ # RUN: llc -o - 
%s -mtriple=aarch64-- -mattr=+arith-bcc-fusion -run-pass postmisched | FileCheck %s --check-prefixes=CHECK,FUSION -# RUN: llc -o - %s -mtriple=aarch64-- -mattr=+arith-bcc-fusion -passes=postmisched | FileCheck %s --check-prefixes=CHECK,FUSION # RUN: llc -o - %s -mtriple=aarch64-- -mattr=-arith-bcc-fusion -run-pass postmisched | FileCheck %s --check-prefixes=CHECK,NOFUSION -# RUN: llc -o - %s -mtriple=aarch64-- -mattr=-arith-bcc-fusion -passes=postmisched | FileCheck %s --check-prefixes=CHECK,NOFUSION # Make sure the last instruction is correctly macro-fused when scheduling # top-down (post-ra). --- diff --git a/llvm/test/CodeGen/AArch64/misched-branch-targets.mir b/llvm/test/CodeGen/AArch64/misched-branch-targets.mir index 954082631bdbf..40f148438e537 100644 --- a/llvm/test/CodeGen/AArch64/misched-branch-targets.mir +++ b/llvm/test/CodeGen/AArch64/misched-branch-targets.mir @@ -1,9 +1,6 @@ # RUN: llc -o - -run-pass=machine-scheduler -misched=shuffle %s | FileCheck %s # RUN: llc -o - -run-pass=postmisched %s | FileCheck %s -# RUN: llc -o - -passes=machine-scheduler -misched=shuffle %s | FileCheck %s -# RUN: llc -o - -passes=postmisched %s | FileCheck %s - # REQUIRES: asserts # -misched=shuffle is only available with assertions enabled diff --git a/llvm/test/CodeGen/AArch64/misched-bundle.mir b/llvm/test/CodeGen/AArch64/misched-bundle.mir index 8463cb038a3bc..ac6112e8c60ef 100644 --- a/llvm/test/CodeGen/AArch64/misched-bundle.mir +++ b/llvm/test/CodeGen/AArch64/misched-bundle.mir @@ -1,6 +1,5 @@ # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4 # RUN: llc -mtriple=aarch64-none-linux-gnu -mcpu=cortex-a510 -run-pass=machine-scheduler -debug-only=machine-scheduler %s -o - 2>&1 | FileCheck %s -# RUN: llc -mtriple=aarch64-none-linux-gnu -mcpu=cortex-a510 -passes=machine-scheduler -debug-only=machine-scheduler %s -o - 2>&1 | FileCheck %s # REQUIRES: asserts # CHECK: SU(0): renamable $z0 = LD1H renamable $p0, renamable $x1, renamable $x10 :: (load unknown-size, align 1) diff --git a/llvm/test/CodeGen/AArch64/misched-detail-resource-booking-01.mir b/llvm/test/CodeGen/AArch64/misched-detail-resource-booking-01.mir index ca92fa14a3fa8..ea40f9e52dcd6 100644 --- a/llvm/test/CodeGen/AArch64/misched-detail-resource-booking-01.mir +++ b/llvm/test/CodeGen/AArch64/misched-detail-resource-booking-01.mir @@ -6,14 +6,6 @@ # RUN: -misched-dump-schedule-trace=true -misched-dump-schedule-trace-col-header-width=21 \ # RUN: | FileCheck %s -# RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon -mcpu=cortex-a55 %s -o - 2>&1 \ -# RUN: -misched-dump-reserved-cycles=true \ -# RUN: -passes=machine-scheduler -debug-only=machine-scheduler \ -# RUN: -misched-prera-direction=bottomup -sched-print-cycles=true \ -# RUN: -misched-detail-resource-booking=true \ -# RUN: -misched-dump-schedule-trace=true -misched-dump-schedule-trace-col-header-width=21 \ -# RUN: | FileCheck %s - # REQUIRES: asserts, aarch64-registered-target --- | diff --git a/llvm/test/CodeGen/AArch64/misched-detail-resource-booking-02.mir b/llvm/test/CodeGen/AArch64/misched-detail-resource-booking-02.mir index 2b34ca54f1e97..9be91b8a01e86 100644 --- a/llvm/test/CodeGen/AArch64/misched-detail-resource-booking-02.mir +++ b/llvm/test/CodeGen/AArch64/misched-detail-resource-booking-02.mir @@ -5,13 +5,6 @@ # RUN: -misched-dump-schedule-trace=true -misched-dump-schedule-trace-col-width=4 \ # RUN: 2>&1 | FileCheck %s -# RUN: llc -mtriple=aarch64-none-linux-gnu -mcpu=cortex-a55 \ -# RUN: -passes=machine-scheduler 
-debug-only=machine-scheduler -o - %s \ -# RUN: -misched-prera-direction=bottomup -sched-print-cycles=true \ -# RUN: -misched-dump-reserved-cycles=true -misched-detail-resource-booking=true\ -# RUN: -misched-dump-schedule-trace=true -misched-dump-schedule-trace-col-width=4 \ -# RUN: 2>&1 | FileCheck %s - # REQUIRES: asserts, aarch64-registered-target --- name: f diff --git a/llvm/test/CodeGen/AArch64/misched-fusion-arith-logic.mir b/llvm/test/CodeGen/AArch64/misched-fusion-arith-logic.mir index 60c0026d39466..62276779d1423 100644 --- a/llvm/test/CodeGen/AArch64/misched-fusion-arith-logic.mir +++ b/llvm/test/CodeGen/AArch64/misched-fusion-arith-logic.mir @@ -1,7 +1,5 @@ # RUN: llc -o /dev/null 2>&1 %s -mtriple aarch64-unknown -mattr=fuse-arith-logic -run-pass=machine-scheduler -misched-print-dags | FileCheck %s -# RUN: llc -o /dev/null 2>&1 %s -mtriple aarch64-unknown -mattr=fuse-arith-logic -passes=machine-scheduler -misched-print-dags | FileCheck %s # RUN: llc -o /dev/null 2>&1 %s -mtriple aarch64-unknown -mcpu=exynos-m4 -run-pass=machine-scheduler -misched-print-dags | FileCheck %s -# RUN: llc -o /dev/null 2>&1 %s -mtriple aarch64-unknown -mcpu=exynos-m4 -passes=machine-scheduler -misched-print-dags | FileCheck %s # REQUIRES: asserts --- diff --git a/llvm/test/CodeGen/AArch64/misched-fusion-cmp.mir b/llvm/test/CodeGen/AArch64/misched-fusion-cmp.mir index 82498164c6ad5..b0450c5b8c01b 100644 --- a/llvm/test/CodeGen/AArch64/misched-fusion-cmp.mir +++ b/llvm/test/CodeGen/AArch64/misched-fusion-cmp.mir @@ -1,5 +1,4 @@ # RUN: llc -o /dev/null 2>&1 %s -mtriple aarch64-unknown -mcpu=cortex-x1 -run-pass=machine-scheduler -# RUN: llc -o /dev/null 2>&1 %s -mtriple aarch64-unknown -mcpu=cortex-x1 -passes=machine-scheduler # Just ensure this doesn't crash. --- diff --git a/llvm/test/CodeGen/AArch64/misched-fusion-crypto-eor.mir b/llvm/test/CodeGen/AArch64/misched-fusion-crypto-eor.mir index e661353615726..623a8221f5ed2 100644 --- a/llvm/test/CodeGen/AArch64/misched-fusion-crypto-eor.mir +++ b/llvm/test/CodeGen/AArch64/misched-fusion-crypto-eor.mir @@ -1,9 +1,6 @@ # RUN: llc -o /dev/null %s -run-pass=machine-scheduler -mtriple aarch64-- -mattr=-fuse-aes,+crypto -misched-print-dags 2>&1 | FileCheck %s --check-prefixes=CHECK,NOFUSE # RUN: llc -o /dev/null %s -run-pass=machine-scheduler -mtriple aarch64-- -mattr=+fuse-aes,+crypto -misched-print-dags 2>&1 | FileCheck %s --check-prefixes=CHECK,FUSEAES # RUN: llc -o /dev/null %s -run-pass=machine-scheduler -mtriple aarch64-- -mattr=+fuse-aes,+fuse-crypto-eor,+crypto -misched-print-dags 2>&1 | FileCheck %s --check-prefixes=CHECK,FUSEAES,FUSECRYPTO -# RUN: llc -o /dev/null %s -passes=machine-scheduler -mtriple aarch64-- -mattr=-fuse-aes,+crypto -misched-print-dags 2>&1 | FileCheck %s --check-prefixes=CHECK,NOFUSE -# RUN: llc -o /dev/null %s -passes=machine-scheduler -mtriple aarch64-- -mattr=+fuse-aes,+crypto -misched-print-dags 2>&1 | FileCheck %s --check-prefixes=CHECK,FUSEAES -# RUN: llc -o /dev/null %s -passes=machine-scheduler -mtriple aarch64-- -mattr=+fuse-aes,+fuse-crypto-eor,+crypto -misched-print-dags 2>&1 | FileCheck %s --check-prefixes=CHECK,FUSEAES,FUSECRYPTO # REQUIRES: asserts name: func diff --git a/llvm/test/CodeGen/AArch64/misched-move-imm.mir b/llvm/test/CodeGen/AArch64/misched-move-imm.mir index 65608bb5f1a1c..b5ff01b3c5b13 100644 --- a/llvm/test/CodeGen/AArch64/misched-move-imm.mir +++ b/llvm/test/CodeGen/AArch64/misched-move-imm.mir @@ -1,5 +1,4 @@ # RUN: llc -run-pass=machine-scheduler -mtriple=aarch64-linux-gnu -mcpu=neoverse-v2 %s -o 
/dev/null 2>&1 -# RUN: llc -passes=machine-scheduler -mtriple=aarch64-linux-gnu -mcpu=neoverse-v2 %s -o /dev/null 2>&1 # Just ensure this doesn't crash. Ensures in the neoverse-v2 # scheduling model we don't attempt to treat the first input # operand of MOVZXi as an immediate operand. diff --git a/llvm/test/CodeGen/AArch64/misched-predicate-virtreg.mir b/llvm/test/CodeGen/AArch64/misched-predicate-virtreg.mir index 17a6cf7e6faa9..0b14ceeef9a09 100644 --- a/llvm/test/CodeGen/AArch64/misched-predicate-virtreg.mir +++ b/llvm/test/CodeGen/AArch64/misched-predicate-virtreg.mir @@ -1,5 +1,4 @@ # RUN: llc -mcpu=exynos-m5 -mtriple=aarch64 -enable-misched -run-pass=machine-scheduler -debug-only=machine-scheduler %s -o /dev/null 2>&1 | FileCheck %s -# RUN: llc -mcpu=exynos-m5 -mtriple=aarch64 -enable-misched -passes=machine-scheduler -debug-only=machine-scheduler %s -o /dev/null 2>&1 | FileCheck %s # REQUIRES: asserts # CHECK-LABEL: ********** MI Scheduling ********** diff --git a/llvm/test/CodeGen/AArch64/misched-sort-resource-in-trace.mir b/llvm/test/CodeGen/AArch64/misched-sort-resource-in-trace.mir index b652d2463fc12..b04fd89b796ba 100644 --- a/llvm/test/CodeGen/AArch64/misched-sort-resource-in-trace.mir +++ b/llvm/test/CodeGen/AArch64/misched-sort-resource-in-trace.mir @@ -3,21 +3,11 @@ # RUN: -misched-prera-direction=topdown -sched-print-cycles=true \ # RUN: -misched-dump-schedule-trace=true --misched-sort-resources-in-trace=true 2>&1 | FileCheck --check-prefix=SORTED %s -# RUN: llc -mtriple=aarch64-none-linux-gnu -mcpu=exynos-m3 -verify-machineinstrs \ -# RUN: -passes=machine-scheduler -debug-only=machine-scheduler -o - %s \ -# RUN: -misched-prera-direction=topdown -sched-print-cycles=true \ -# RUN: -misched-dump-schedule-trace=true --misched-sort-resources-in-trace=true 2>&1 | FileCheck --check-prefix=SORTED %s - # RUN: llc -mtriple=aarch64-none-linux-gnu -mcpu=exynos-m3 -verify-machineinstrs \ # RUN: -run-pass=machine-scheduler -debug-only=machine-scheduler -o - %s \ # RUN: -misched-prera-direction=topdown -sched-print-cycles=true \ # RUN: -misched-dump-schedule-trace=true --misched-sort-resources-in-trace=false 2>&1 | FileCheck --check-prefix=UNSORTED %s -# RUN: llc -mtriple=aarch64-none-linux-gnu -mcpu=exynos-m3 -verify-machineinstrs \ -# RUN: -passes=machine-scheduler -debug-only=machine-scheduler -o - %s \ -# RUN: -misched-prera-direction=topdown -sched-print-cycles=true \ -# RUN: -misched-dump-schedule-trace=true --misched-sort-resources-in-trace=false 2>&1 | FileCheck --check-prefix=UNSORTED %s - # REQUIRES: asserts, aarch64-registered-target --- name: test diff --git a/llvm/test/CodeGen/AArch64/sched-postidxalias.mir b/llvm/test/CodeGen/AArch64/sched-postidxalias.mir index 02256ca30d842..98ee0fa21b2dd 100644 --- a/llvm/test/CodeGen/AArch64/sched-postidxalias.mir +++ b/llvm/test/CodeGen/AArch64/sched-postidxalias.mir @@ -1,6 +1,5 @@ # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5 # RUN: llc -mtriple=aarch64 -mcpu=cortex-a55 -run-pass=machine-scheduler -debug-only=machine-scheduler -o - %s 2>&1 | FileCheck %s -# RUN: llc -mtriple=aarch64 -mcpu=cortex-a55 -passes=machine-scheduler -debug-only=machine-scheduler -o - %s 2>&1 | FileCheck %s # REQUIRES: asserts # Both the accesses should have an offset of 0 diff --git a/llvm/test/CodeGen/AArch64/sched-print-cycle.mir b/llvm/test/CodeGen/AArch64/sched-print-cycle.mir index d58037e987773..59c51571df74b 100644 --- a/llvm/test/CodeGen/AArch64/sched-print-cycle.mir +++ 
b/llvm/test/CodeGen/AArch64/sched-print-cycle.mir @@ -1,15 +1,9 @@ # RUN: llc -mtriple=arm64-apple-macos -mcpu=apple-m1 -sched-print-cycles=true \ # RUN: -run-pass=machine-scheduler -debug-only=machine-scheduler -o - %s 2>&1 | FileCheck %s -# RUN: llc -mtriple=arm64-apple-macos -mcpu=apple-m1 -sched-print-cycles=true \ -# RUN: -passes=machine-scheduler -debug-only=machine-scheduler -o - %s 2>&1 | FileCheck %s - # RUN: llc -mtriple=arm64-apple-macos -mcpu=apple-m1 -sched-print-cycles=false \ # RUN: -run-pass=machine-scheduler -debug-only=machine-scheduler -o - %s 2>&1 | FileCheck %s --check-prefix=NOCYCLES -# RUN: llc -mtriple=arm64-apple-macos -mcpu=apple-m1 -sched-print-cycles=false \ -# RUN: -passes=machine-scheduler -debug-only=machine-scheduler -o - %s 2>&1 | FileCheck %s --check-prefix=NOCYCLES - # REQUIRES: asserts --- name: mul_mul diff --git a/llvm/test/CodeGen/AArch64/scheduledag-constreg.mir b/llvm/test/CodeGen/AArch64/scheduledag-constreg.mir index 66680af3f856b..65ec43407413f 100644 --- a/llvm/test/CodeGen/AArch64/scheduledag-constreg.mir +++ b/llvm/test/CodeGen/AArch64/scheduledag-constreg.mir @@ -1,5 +1,4 @@ # RUN: llc -o /dev/null %s -mtriple=aarch64-- -run-pass=machine-scheduler -enable-misched -debug-only=machine-scheduler 2>&1 | FileCheck %s -# RUN: llc -o /dev/null %s -mtriple=aarch64-- -passes=machine-scheduler -enable-misched -debug-only=machine-scheduler 2>&1 | FileCheck %s # REQUIRES: asserts --- | define void @func() { ret void } diff --git a/llvm/test/CodeGen/AArch64/sve-aliasing.mir b/llvm/test/CodeGen/AArch64/sve-aliasing.mir index 34a08adc417cf..3b7c9fefa5277 100644 --- a/llvm/test/CodeGen/AArch64/sve-aliasing.mir +++ b/llvm/test/CodeGen/AArch64/sve-aliasing.mir @@ -1,6 +1,5 @@ # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py # RUN: llc -o - %s -mtriple=aarch64 -run-pass=machine-scheduler -verify-machineinstrs | FileCheck %s -# RUN: llc -o - %s -mtriple=aarch64 -passes=machine-scheduler | FileCheck %s --- name: scalable_v16i1 diff --git a/llvm/test/CodeGen/AArch64/veclib-llvm.modf.ll b/llvm/test/CodeGen/AArch64/veclib-llvm.modf.ll new file mode 100644 index 0000000000000..78dbc09a57afd --- /dev/null +++ b/llvm/test/CodeGen/AArch64/veclib-llvm.modf.ll @@ -0,0 +1,144 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -mtriple=aarch64-gnu-linux -mattr=+neon,+sve -vector-library=sleefgnuabi < %s | FileCheck %s -check-prefix=SLEEF +; RUN: llc -mtriple=aarch64-gnu-linux -mattr=+neon,+sve -vector-library=ArmPL < %s | FileCheck %s -check-prefix=ARMPL + +define <4 x float> @test_modf_v4f32(<4 x float> %x, ptr %out_integral) { +; SLEEF-LABEL: test_modf_v4f32: +; SLEEF: // %bb.0: +; SLEEF-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill +; SLEEF-NEXT: .cfi_def_cfa_offset 16 +; SLEEF-NEXT: .cfi_offset w30, -16 +; SLEEF-NEXT: bl _ZGVnN4vl4_modff +; SLEEF-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; SLEEF-NEXT: ret +; +; ARMPL-LABEL: test_modf_v4f32: +; ARMPL: // %bb.0: +; ARMPL-NEXT: str x30, [sp, #-16]! 
// 8-byte Folded Spill +; ARMPL-NEXT: .cfi_def_cfa_offset 16 +; ARMPL-NEXT: .cfi_offset w30, -16 +; ARMPL-NEXT: bl armpl_vmodfq_f32 +; ARMPL-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; ARMPL-NEXT: ret + %result = call { <4 x float>, <4 x float> } @llvm.modf.v4f32(<4 x float> %x) + %result.0 = extractvalue { <4 x float>, <4 x float> } %result, 0 + %result.1 = extractvalue { <4 x float>, <4 x float> } %result, 1 + store <4 x float> %result.1, ptr %out_integral, align 4 + ret <4 x float> %result.0 +} + +define <2 x double> @test_modf_v2f64(<2 x double> %x, ptr %out_integral) { +; SLEEF-LABEL: test_modf_v2f64: +; SLEEF: // %bb.0: +; SLEEF-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill +; SLEEF-NEXT: .cfi_def_cfa_offset 16 +; SLEEF-NEXT: .cfi_offset w30, -16 +; SLEEF-NEXT: bl _ZGVnN2vl8_modf +; SLEEF-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; SLEEF-NEXT: ret +; +; ARMPL-LABEL: test_modf_v2f64: +; ARMPL: // %bb.0: +; ARMPL-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill +; ARMPL-NEXT: .cfi_def_cfa_offset 16 +; ARMPL-NEXT: .cfi_offset w30, -16 +; ARMPL-NEXT: bl armpl_vmodfq_f64 +; ARMPL-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; ARMPL-NEXT: ret + %result = call { <2 x double>, <2 x double> } @llvm.modf.v2f64(<2 x double> %x) + %result.0 = extractvalue { <2 x double>, <2 x double> } %result, 0 + %result.1 = extractvalue { <2 x double>, <2 x double> } %result, 1 + store <2 x double> %result.1, ptr %out_integral, align 8 + ret <2 x double> %result.0 +} + +define <vscale x 4 x float> @test_modf_nxv4f32(<vscale x 4 x float> %x, ptr %out_integral) { +; SLEEF-LABEL: test_modf_nxv4f32: +; SLEEF: // %bb.0: +; SLEEF-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill +; SLEEF-NEXT: .cfi_def_cfa_offset 16 +; SLEEF-NEXT: .cfi_offset w30, -16 +; SLEEF-NEXT: bl _ZGVsNxvl4_modff +; SLEEF-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; SLEEF-NEXT: ret +; +; ARMPL-LABEL: test_modf_nxv4f32: +; ARMPL: // %bb.0: +; ARMPL-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill +; ARMPL-NEXT: .cfi_def_cfa_offset 16 +; ARMPL-NEXT: .cfi_offset w30, -16 +; ARMPL-NEXT: ptrue p0.s +; ARMPL-NEXT: bl armpl_svmodf_f32_x +; ARMPL-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; ARMPL-NEXT: ret + %result = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.modf.nxv4f32(<vscale x 4 x float> %x) + %result.0 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } %result, 0 + %result.1 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } %result, 1 + store <vscale x 4 x float> %result.1, ptr %out_integral, align 4 + ret <vscale x 4 x float> %result.0 +} + +define <vscale x 2 x double> @test_modf_nxv2f64(<vscale x 2 x double> %x, ptr %out_integral) { +; SLEEF-LABEL: test_modf_nxv2f64: +; SLEEF: // %bb.0: +; SLEEF-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill +; SLEEF-NEXT: .cfi_def_cfa_offset 16 +; SLEEF-NEXT: .cfi_offset w30, -16 +; SLEEF-NEXT: bl _ZGVsNxvl8_modf +; SLEEF-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; SLEEF-NEXT: ret +; +; ARMPL-LABEL: test_modf_nxv2f64: +; ARMPL: // %bb.0: +; ARMPL-NEXT: str x30, [sp, #-16]!
// 8-byte Folded Spill +; ARMPL-NEXT: .cfi_def_cfa_offset 16 +; ARMPL-NEXT: .cfi_offset w30, -16 +; ARMPL-NEXT: ptrue p0.d +; ARMPL-NEXT: bl armpl_svmodf_f64_x +; ARMPL-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; ARMPL-NEXT: ret + %result = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.modf.nxv2f64(<vscale x 2 x double> %x) + %result.0 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %result, 0 + %result.1 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %result, 1 + store <vscale x 2 x double> %result.1, ptr %out_integral, align 8 + ret <vscale x 2 x double> %result.0 +} + +define <4 x float> @modf_store_merging_load_before_store(<4 x float> %x, ptr %out_integral) { +; SLEEF-LABEL: modf_store_merging_load_before_store: +; SLEEF: // %bb.0: +; SLEEF-NEXT: sub sp, sp, #32 +; SLEEF-NEXT: str x30, [sp, #16] // 8-byte Folded Spill +; SLEEF-NEXT: .cfi_def_cfa_offset 32 +; SLEEF-NEXT: .cfi_offset w30, -16 +; SLEEF-NEXT: ldr q1, [x0] +; SLEEF-NEXT: str q1, [sp] // 16-byte Folded Spill +; SLEEF-NEXT: bl _ZGVnN4vl4_modff +; SLEEF-NEXT: ldr q1, [sp] // 16-byte Folded Reload +; SLEEF-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload +; SLEEF-NEXT: fadd v0.4s, v1.4s, v0.4s +; SLEEF-NEXT: add sp, sp, #32 +; SLEEF-NEXT: ret +; +; ARMPL-LABEL: modf_store_merging_load_before_store: +; ARMPL: // %bb.0: +; ARMPL-NEXT: sub sp, sp, #32 +; ARMPL-NEXT: str x30, [sp, #16] // 8-byte Folded Spill +; ARMPL-NEXT: .cfi_def_cfa_offset 32 +; ARMPL-NEXT: .cfi_offset w30, -16 +; ARMPL-NEXT: ldr q1, [x0] +; ARMPL-NEXT: str q1, [sp] // 16-byte Folded Spill +; ARMPL-NEXT: bl armpl_vmodfq_f32 +; ARMPL-NEXT: ldr q1, [sp] // 16-byte Folded Reload +; ARMPL-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload +; ARMPL-NEXT: fadd v0.4s, v1.4s, v0.4s +; ARMPL-NEXT: add sp, sp, #32 +; ARMPL-NEXT: ret + %result = call { <4 x float>, <4 x float> } @llvm.modf.v4f32(<4 x float> %x) + %result.0 = extractvalue { <4 x float>, <4 x float> } %result, 0 + %result.1 = extractvalue { <4 x float>, <4 x float> } %result, 1 + %original_intergral = load <4 x float>, ptr %out_integral, align 4 + store <4 x float> %result.1, ptr %out_integral, align 4 + %return = fadd <4 x float> %original_intergral, %result.0 + ret <4 x float> %return +} diff --git a/llvm/test/CodeGen/AArch64/zext-shuffle.ll b/llvm/test/CodeGen/AArch64/zext-shuffle.ll index 2965996ddcb02..20d2071d7fe54 100644 --- a/llvm/test/CodeGen/AArch64/zext-shuffle.ll +++ b/llvm/test/CodeGen/AArch64/zext-shuffle.ll @@ -543,3 +543,146 @@ define <8 x double> @uitofp_load_fadd(ptr %p) { ret <8 x double> %c } +define <4 x i32> @isUndefDeInterleave_b0(<8 x i16> %a, <8 x i16> %b) { +; CHECK-LABEL: isUndefDeInterleave_b0: +; CHECK: // %bb.0: +; CHECK-NEXT: movi v1.2d, #0x00ffff0000ffff +; CHECK-NEXT: uzp1 v0.4s, v0.4s, v0.4s +; CHECK-NEXT: and v0.16b, v0.16b, v1.16b +; CHECK-NEXT: ret + %2 = shufflevector <8 x i16> %a, <8 x i16> poison, <8 x i32> + %s2 = shufflevector <8 x i16> %2, <8 x i16> poison, <4 x i32> + %3 = zext <4 x i16> %s2 to <4 x i32> + ret <4 x i32> %3 +} + +define <4 x i32> @isUndefDeInterleave_b1(<8 x i16> %a) { +; CHECK-LABEL: isUndefDeInterleave_b1: +; CHECK: // %bb.0: +; CHECK-NEXT: uzp1 v0.4s, v0.4s, v0.4s +; CHECK-NEXT: ushr v0.4s, v0.4s, #16 +; CHECK-NEXT: ret + %2 = shufflevector <8 x i16> %a, <8 x i16> poison, <8 x i32> + %s2 = shufflevector <8 x i16> %2, <8 x i16> poison, <4 x i32> + %3 = zext <4 x i16> %s2 to <4 x i32> + ret <4 x i32> %3 +} + +define <4 x i32> @isUndefDeInterleave_b2(<8 x i16> %a) { +; CHECK-LABEL: isUndefDeInterleave_b2: +; CHECK: // %bb.0: +; CHECK-NEXT: movi v1.2d, #0x00ffff0000ffff +; CHECK-NEXT: uzp2 v0.4s, v0.4s, v0.4s +; CHECK-NEXT: and v0.16b, v0.16b, v1.16b +; CHECK-NEXT: ret + %2 =
shufflevector <8 x i16> %a, <8 x i16> poison, <8 x i32> + %s2 = shufflevector <8 x i16> %2, <8 x i16> poison, <4 x i32> + %3 = zext <4 x i16> %s2 to <4 x i32> + ret <4 x i32> %3 +} + +define <4 x i32> @isUndefDeInterleave_b3(<8 x i16> %a) { +; CHECK-LABEL: isUndefDeInterleave_b3: +; CHECK: // %bb.0: +; CHECK-NEXT: uzp2 v0.4s, v0.4s, v0.4s +; CHECK-NEXT: ushr v0.4s, v0.4s, #16 +; CHECK-NEXT: ret + %2 = shufflevector <8 x i16> %a, <8 x i16> poison, <8 x i32> + %s2 = shufflevector <8 x i16> %2, <8 x i16> poison, <4 x i32> + %3 = zext <4 x i16> %s2 to <4 x i32> + ret <4 x i32> %3 +} + +define <4 x i32> @isUndefDeInterleave_t0(<8 x i16> %a, <8 x i16> %b) { +; CHECK-LABEL: isUndefDeInterleave_t0: +; CHECK: // %bb.0: +; CHECK-NEXT: movi v1.2d, #0x00ffff0000ffff +; CHECK-NEXT: uzp1 v0.4s, v0.4s, v0.4s +; CHECK-NEXT: and v0.16b, v0.16b, v1.16b +; CHECK-NEXT: ret + %2 = shufflevector <8 x i16> %a, <8 x i16> poison, <8 x i32> + %s2 = shufflevector <8 x i16> %2, <8 x i16> poison, <4 x i32> + %3 = zext <4 x i16> %s2 to <4 x i32> + ret <4 x i32> %3 +} + +define <4 x i32> @isUndefDeInterleave_t1(<8 x i16> %a) { +; CHECK-LABEL: isUndefDeInterleave_t1: +; CHECK: // %bb.0: +; CHECK-NEXT: uzp1 v0.4s, v0.4s, v0.4s +; CHECK-NEXT: ushr v0.4s, v0.4s, #16 +; CHECK-NEXT: ret + %2 = shufflevector <8 x i16> %a, <8 x i16> poison, <8 x i32> + %s2 = shufflevector <8 x i16> %2, <8 x i16> poison, <4 x i32> + %3 = zext <4 x i16> %s2 to <4 x i32> + ret <4 x i32> %3 +} + +define <4 x i32> @isUndefDeInterleave_t2(<8 x i16> %a) { +; CHECK-LABEL: isUndefDeInterleave_t2: +; CHECK: // %bb.0: +; CHECK-NEXT: movi v1.2d, #0x00ffff0000ffff +; CHECK-NEXT: uzp2 v0.4s, v0.4s, v0.4s +; CHECK-NEXT: and v0.16b, v0.16b, v1.16b +; CHECK-NEXT: ret + %2 = shufflevector <8 x i16> %a, <8 x i16> poison, <8 x i32> + %s2 = shufflevector <8 x i16> %2, <8 x i16> poison, <4 x i32> + %3 = zext <4 x i16> %s2 to <4 x i32> + ret <4 x i32> %3 +} + +define <4 x i32> @isUndefDeInterleave_t3(<8 x i16> %a) { +; CHECK-LABEL: isUndefDeInterleave_t3: +; CHECK: // %bb.0: +; CHECK-NEXT: uzp2 v0.4s, v0.4s, v0.4s +; CHECK-NEXT: ushr v0.4s, v0.4s, #16 +; CHECK-NEXT: ret + %2 = shufflevector <8 x i16> %a, <8 x i16> poison, <8 x i32> + %s2 = shufflevector <8 x i16> %2, <8 x i16> poison, <4 x i32> + %3 = zext <4 x i16> %s2 to <4 x i32> + ret <4 x i32> %3 +} + +define <4 x i32> @isUndefDeInterleave_b0_bad(<8 x i16> %a, <8 x i16> %b) { +; CHECK-LABEL: isUndefDeInterleave_b0_bad: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI40_0 +; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI40_0] +; CHECK-NEXT: tbl v0.16b, { v0.16b }, v1.16b +; CHECK-NEXT: ushll v0.4s, v0.4h, #0 +; CHECK-NEXT: ret + %2 = shufflevector <8 x i16> %a, <8 x i16> poison, <8 x i32> + %s2 = shufflevector <8 x i16> %2, <8 x i16> poison, <4 x i32> + %3 = zext <4 x i16> %s2 to <4 x i32> + ret <4 x i32> %3 +} + +define <4 x i32> @isUndefDeInterleave_t1_bad(<8 x i16> %a) { +; CHECK-LABEL: isUndefDeInterleave_t1_bad: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI41_0 +; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI41_0] +; CHECK-NEXT: tbl v0.16b, { v0.16b }, v1.16b +; CHECK-NEXT: ushll2 v0.4s, v0.8h, #0 +; CHECK-NEXT: ret + %2 = shufflevector <8 x i16> %a, <8 x i16> poison, <8 x i32> + %s2 = shufflevector <8 x i16> %2, <8 x i16> poison, <4 x i32> + %3 = zext <4 x i16> %s2 to <4 x i32> + ret <4 x i32> %3 +} + +define i16 @undeftop(<8 x i16> %0) { +; CHECK-LABEL: undeftop: +; CHECK: // %bb.0: +; CHECK-NEXT: dup v0.8h, v0.h[4] +; CHECK-NEXT: uaddl v0.4s, v0.4h, v0.4h +; CHECK-NEXT: xtn v0.4h, v0.4s +; CHECK-NEXT: umov w0, v0.h[0] 
+; CHECK-NEXT: ret + %2 = shufflevector <8 x i16> %0, <8 x i16> zeroinitializer, <8 x i32> + %3 = zext <8 x i16> %2 to <8 x i64> + %new0 = add <8 x i64> %3, %3 + %last = trunc <8 x i64> %new0 to <8 x i16> + %4 = extractelement <8 x i16> %last, i32 0 + ret i16 %4 +} diff --git a/llvm/test/CodeGen/AMDGPU/at-least-one-def-value-assert.mir b/llvm/test/CodeGen/AMDGPU/at-least-one-def-value-assert.mir index 1c4093b2feb9b..82ee173e12256 100644 --- a/llvm/test/CodeGen/AMDGPU/at-least-one-def-value-assert.mir +++ b/llvm/test/CodeGen/AMDGPU/at-least-one-def-value-assert.mir @@ -1,7 +1,5 @@ # RUN: not --crash llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx906 -verify-machineinstrs -run-pass=machine-scheduler -verify-misched -o /dev/null %s 2>&1 | FileCheck %s -# RUN: not --crash llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx906 -passes=machine-scheduler -verify-misched -o /dev/null %s 2>&1 | FileCheck %s - # CHECK: *** Bad machine code: No live subrange at use *** # CHECK-NEXT: - function: at_least_one_value_should_be_defined_by_this_mask # CHECK-NEXT: - basic block: %bb.0 diff --git a/llvm/test/CodeGen/AMDGPU/cluster-flat-loads.mir b/llvm/test/CodeGen/AMDGPU/cluster-flat-loads.mir index 1ae544f3c074a..0d84dc0bdc53e 100644 --- a/llvm/test/CodeGen/AMDGPU/cluster-flat-loads.mir +++ b/llvm/test/CodeGen/AMDGPU/cluster-flat-loads.mir @@ -1,5 +1,4 @@ # RUN: llc -mtriple=amdgcn -mcpu=gfx900 -run-pass machine-scheduler %s -o - | FileCheck -check-prefix=GCN %s -# RUN: llc -mtriple=amdgcn -mcpu=gfx900 -passes=machine-scheduler %s -o - | FileCheck -check-prefix=GCN %s # GCN-LABEL: name: cluster_flat_loads # GCN: FLAT_LOAD_DWORD %0, 0 diff --git a/llvm/test/CodeGen/AMDGPU/copy-illegal-type.ll b/llvm/test/CodeGen/AMDGPU/copy-illegal-type.ll index c6c0b9cf8f027..cc2f775ff22bc 100644 --- a/llvm/test/CodeGen/AMDGPU/copy-illegal-type.ll +++ b/llvm/test/CodeGen/AMDGPU/copy-illegal-type.ll @@ -163,33 +163,33 @@ define amdgpu_kernel void @test_copy_v4i8_x3(ptr addrspace(1) %out0, ptr addrspa define amdgpu_kernel void @test_copy_v4i8_x4(ptr addrspace(1) %out0, ptr addrspace(1) %out1, ptr addrspace(1) %out2, ptr addrspace(1) %out3, ptr addrspace(1) %in) nounwind { ; SI-LABEL: test_copy_v4i8_x4: ; SI: ; %bb.0: -; SI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x11 -; SI-NEXT: s_mov_b32 s3, 0xf000 -; SI-NEXT: s_mov_b32 s10, 0 -; SI-NEXT: s_mov_b32 s11, s3 +; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x11 +; SI-NEXT: s_mov_b32 s11, 0xf000 +; SI-NEXT: s_mov_b32 s2, 0 +; SI-NEXT: s_mov_b32 s3, s11 ; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0 ; SI-NEXT: v_mov_b32_e32 v1, 0 ; SI-NEXT: s_waitcnt lgkmcnt(0) -; SI-NEXT: buffer_load_dword v0, v[0:1], s[8:11], 0 addr64 -; SI-NEXT: s_load_dwordx8 s[4:11], s[4:5], 0x9 -; SI-NEXT: s_mov_b32 s2, -1 -; SI-NEXT: s_mov_b32 s14, s2 -; SI-NEXT: s_mov_b32 s15, s3 -; SI-NEXT: s_mov_b32 s18, s2 +; SI-NEXT: buffer_load_dword v0, v[0:1], s[0:3], 0 addr64 +; SI-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9 +; SI-NEXT: s_mov_b32 s10, -1 +; SI-NEXT: s_mov_b32 s14, s10 +; SI-NEXT: s_mov_b32 s15, s11 +; SI-NEXT: s_mov_b32 s18, s10 ; SI-NEXT: s_waitcnt lgkmcnt(0) -; SI-NEXT: s_mov_b32 s0, s4 -; SI-NEXT: s_mov_b32 s1, s5 -; SI-NEXT: s_mov_b32 s19, s3 -; SI-NEXT: s_mov_b32 s22, s2 -; SI-NEXT: s_mov_b32 s23, s3 -; SI-NEXT: s_mov_b32 s12, s6 -; SI-NEXT: s_mov_b32 s13, s7 -; SI-NEXT: s_mov_b32 s16, s8 -; SI-NEXT: s_mov_b32 s17, s9 -; SI-NEXT: s_mov_b32 s20, s10 -; SI-NEXT: s_mov_b32 s21, s11 +; SI-NEXT: s_mov_b32 s8, s0 +; SI-NEXT: s_mov_b32 s9, s1 +; SI-NEXT: s_mov_b32 s19, s11 +; SI-NEXT: s_mov_b32 s22, s10 +; SI-NEXT: s_mov_b32 s23, s11 
+; SI-NEXT: s_mov_b32 s12, s2 +; SI-NEXT: s_mov_b32 s13, s3 +; SI-NEXT: s_mov_b32 s16, s4 +; SI-NEXT: s_mov_b32 s17, s5 +; SI-NEXT: s_mov_b32 s20, s6 +; SI-NEXT: s_mov_b32 s21, s7 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0 +; SI-NEXT: buffer_store_dword v0, off, s[8:11], 0 ; SI-NEXT: buffer_store_dword v0, off, s[12:15], 0 ; SI-NEXT: buffer_store_dword v0, off, s[16:19], 0 ; SI-NEXT: buffer_store_dword v0, off, s[20:23], 0 diff --git a/llvm/test/CodeGen/AMDGPU/ctpop64.ll b/llvm/test/CodeGen/AMDGPU/ctpop64.ll index 3504546801c93..2258f6a7b5483 100644 --- a/llvm/test/CodeGen/AMDGPU/ctpop64.ll +++ b/llvm/test/CodeGen/AMDGPU/ctpop64.ll @@ -334,58 +334,58 @@ define amdgpu_kernel void @v_ctpop_v4i64(ptr addrspace(1) noalias %out, ptr addr define amdgpu_kernel void @ctpop_i64_in_br(ptr addrspace(1) %out, ptr addrspace(1) %in, i64 %ctpop_arg, i32 %cond) { ; SI-LABEL: ctpop_i64_in_br: ; SI: ; %bb.0: ; %entry -; SI-NEXT: s_load_dword s8, s[4:5], 0xf +; SI-NEXT: s_load_dword s6, s[4:5], 0xf ; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; SI-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0xd +; SI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd ; SI-NEXT: s_waitcnt lgkmcnt(0) -; SI-NEXT: s_cmp_lg_u32 s8, 0 +; SI-NEXT: s_cmp_lg_u32 s6, 0 ; SI-NEXT: s_cbranch_scc0 .LBB7_4 ; SI-NEXT: ; %bb.1: ; %else -; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x2 +; SI-NEXT: s_load_dwordx2 s[6:7], s[2:3], 0x2 ; SI-NEXT: s_mov_b64 s[2:3], 0 ; SI-NEXT: s_andn2_b64 vcc, exec, s[2:3] ; SI-NEXT: s_waitcnt lgkmcnt(0) ; SI-NEXT: s_mov_b64 vcc, vcc ; SI-NEXT: s_cbranch_vccnz .LBB7_3 ; SI-NEXT: .LBB7_2: ; %if -; SI-NEXT: s_bcnt1_i32_b64 s4, s[6:7] -; SI-NEXT: s_mov_b32 s5, 0 +; SI-NEXT: s_bcnt1_i32_b64 s6, s[4:5] +; SI-NEXT: s_mov_b32 s7, 0 ; SI-NEXT: .LBB7_3: ; %endif -; SI-NEXT: v_mov_b32_e32 v0, s4 +; SI-NEXT: v_mov_b32_e32 v0, s6 ; SI-NEXT: s_mov_b32 s3, 0xf000 ; SI-NEXT: s_mov_b32 s2, -1 -; SI-NEXT: v_mov_b32_e32 v1, s5 +; SI-NEXT: v_mov_b32_e32 v1, s7 ; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; SI-NEXT: s_endpgm ; SI-NEXT: .LBB7_4: -; SI-NEXT: ; implicit-def: $sgpr4_sgpr5 +; SI-NEXT: ; implicit-def: $sgpr6_sgpr7 ; SI-NEXT: s_branch .LBB7_2 ; ; VI-LABEL: ctpop_i64_in_br: ; VI: ; %bb.0: ; %entry -; VI-NEXT: s_load_dword s8, s[4:5], 0x3c +; VI-NEXT: s_load_dword s6, s[4:5], 0x3c ; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 -; VI-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34 +; VI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34 ; VI-NEXT: s_waitcnt lgkmcnt(0) -; VI-NEXT: s_cmp_lg_u32 s8, 0 +; VI-NEXT: s_cmp_lg_u32 s6, 0 ; VI-NEXT: s_cbranch_scc0 .LBB7_4 ; VI-NEXT: ; %bb.1: ; %else -; VI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x8 +; VI-NEXT: s_load_dwordx2 s[6:7], s[2:3], 0x8 ; VI-NEXT: s_cbranch_execnz .LBB7_3 ; VI-NEXT: .LBB7_2: ; %if ; VI-NEXT: s_waitcnt lgkmcnt(0) -; VI-NEXT: s_bcnt1_i32_b64 s4, s[6:7] -; VI-NEXT: s_mov_b32 s5, 0 +; VI-NEXT: s_bcnt1_i32_b64 s6, s[4:5] +; VI-NEXT: s_mov_b32 s7, 0 ; VI-NEXT: .LBB7_3: ; %endif ; VI-NEXT: s_waitcnt lgkmcnt(0) -; VI-NEXT: v_mov_b32_e32 v0, s4 +; VI-NEXT: v_mov_b32_e32 v0, s6 ; VI-NEXT: s_mov_b32 s3, 0xf000 ; VI-NEXT: s_mov_b32 s2, -1 -; VI-NEXT: v_mov_b32_e32 v1, s5 +; VI-NEXT: v_mov_b32_e32 v1, s7 ; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; VI-NEXT: s_endpgm ; VI-NEXT: .LBB7_4: -; VI-NEXT: ; implicit-def: $sgpr4_sgpr5 +; VI-NEXT: ; implicit-def: $sgpr6_sgpr7 ; VI-NEXT: s_branch .LBB7_2 entry: %tmp0 = icmp eq i32 %cond, 0 diff --git a/llvm/test/CodeGen/AMDGPU/dbg-value-ends-sched-region.mir 
b/llvm/test/CodeGen/AMDGPU/dbg-value-ends-sched-region.mir index b38dc4d21c10c..4945c7020ca18 100644 --- a/llvm/test/CodeGen/AMDGPU/dbg-value-ends-sched-region.mir +++ b/llvm/test/CodeGen/AMDGPU/dbg-value-ends-sched-region.mir @@ -1,6 +1,5 @@ # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py # RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx906 -verify-machineinstrs -run-pass=machine-scheduler -o - %s | FileCheck %s -# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx906 -passes=machine-scheduler -o - %s | FileCheck %s # The DBG_VALUE in bb.5 ends a scheduling region, and its uses should # not be tracked like a normal instruction. diff --git a/llvm/test/CodeGen/AMDGPU/debug-value-scheduler-crash.mir b/llvm/test/CodeGen/AMDGPU/debug-value-scheduler-crash.mir index 156979d6d06a5..8a1c68b3f6615 100644 --- a/llvm/test/CodeGen/AMDGPU/debug-value-scheduler-crash.mir +++ b/llvm/test/CodeGen/AMDGPU/debug-value-scheduler-crash.mir @@ -1,6 +1,5 @@ # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py # RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -run-pass=machine-scheduler -verify-machineinstrs %s -o - | FileCheck %s -# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes=machine-scheduler %s -o - | FileCheck %s --- | declare void @llvm.dbg.value(metadata, metadata, metadata) #0 diff --git a/llvm/test/CodeGen/AMDGPU/debug-value-scheduler-liveins.mir b/llvm/test/CodeGen/AMDGPU/debug-value-scheduler-liveins.mir index d415346b49b28..19071be7ebde4 100644 --- a/llvm/test/CodeGen/AMDGPU/debug-value-scheduler-liveins.mir +++ b/llvm/test/CodeGen/AMDGPU/debug-value-scheduler-liveins.mir @@ -1,5 +1,4 @@ # RUN: llc -mtriple=amdgcn -mcpu=gfx908 -run-pass=machine-scheduler %s -o - -debug-only=machine-scheduler 2>&1 | FileCheck %s -# RUN: llc -mtriple=amdgcn -mcpu=gfx908 -passes=machine-scheduler %s -o - -debug-only=machine-scheduler 2>&1 | FileCheck %s # REQUIRES: asserts # CHECK: ********** MI Scheduling ********** diff --git a/llvm/test/CodeGen/AMDGPU/debug-value-scheduler.mir b/llvm/test/CodeGen/AMDGPU/debug-value-scheduler.mir index 170672dc4af64..4f15e0ef68977 100644 --- a/llvm/test/CodeGen/AMDGPU/debug-value-scheduler.mir +++ b/llvm/test/CodeGen/AMDGPU/debug-value-scheduler.mir @@ -1,5 +1,4 @@ # RUN: llc -mtriple=amdgcn -mcpu=gfx908 -run-pass=machine-scheduler %s -o - -debug-only=machine-scheduler 2>&1 | FileCheck %s -# RUN: llc -mtriple=amdgcn -mcpu=gfx908 -passes=machine-scheduler %s -o - -debug-only=machine-scheduler 2>&1 | FileCheck %s # REQUIRES: asserts # CHECK: All regions recorded, starting actual scheduling. 
diff --git a/llvm/test/CodeGen/AMDGPU/flat-load-clustering.mir b/llvm/test/CodeGen/AMDGPU/flat-load-clustering.mir index 204912b4d4881..962d49df8509e 100644 --- a/llvm/test/CodeGen/AMDGPU/flat-load-clustering.mir +++ b/llvm/test/CodeGen/AMDGPU/flat-load-clustering.mir @@ -1,5 +1,4 @@ # RUN: llc -mtriple=amdgcn -mcpu=tonga -verify-machineinstrs -run-pass machine-scheduler -o - %s | FileCheck -check-prefix=GCN %s -# RUN: llc -mtriple=amdgcn -mcpu=tonga -passes=machine-scheduler -o - %s | FileCheck -check-prefix=GCN %s # GCN-LABEL: name: flat_load_clustering # GCN: FLAT_LOAD_DWORD diff --git a/llvm/test/CodeGen/AMDGPU/high-RP-reschedule.mir b/llvm/test/CodeGen/AMDGPU/high-RP-reschedule.mir index 78f21ef6610f2..d57450baea911 100644 --- a/llvm/test/CodeGen/AMDGPU/high-RP-reschedule.mir +++ b/llvm/test/CodeGen/AMDGPU/high-RP-reschedule.mir @@ -1,8 +1,6 @@ # REQUIRES: asserts -# RUN: llc -mtriple=amdgcn -mcpu=gfx908 -verify-misched -run-pass=machine-scheduler -debug-only=machine-scheduler -o - %s 2>&1 | FileCheck -check-prefix=GCN %s -# RUN: llc -mtriple=amdgcn -mcpu=gfx908 -verify-misched -passes=machine-scheduler -debug-only=machine-scheduler -o - %s 2>&1 | FileCheck -check-prefix=GCN %s -# RUN: llc -mtriple=amdgcn -mcpu=gfx908 -verify-misched -run-pass=machine-scheduler -amdgpu-use-amdgpu-trackers=1 -debug-only=machine-scheduler -o - %s 2>&1 | FileCheck -check-prefix=GCN-GCNTRACKER %s -# RUN: llc -mtriple=amdgcn -mcpu=gfx908 -verify-misched -passes=machine-scheduler -amdgpu-use-amdgpu-trackers=1 -debug-only=machine-scheduler -o - %s 2>&1 | FileCheck -check-prefix=GCN-GCNTRACKER %s +# RUN: llc -mtriple=amdgcn -mcpu=gfx908 -verify-misched -run-pass=machine-scheduler -verify-misched -debug-only=machine-scheduler -o - %s 2>&1 | FileCheck -check-prefix=GCN %s +# RUN: llc -mtriple=amdgcn -mcpu=gfx908 -verify-misched -run-pass=machine-scheduler -amdgpu-use-amdgpu-trackers=1 -verify-misched -debug-only=machine-scheduler -o - %s 2>&1 | FileCheck -check-prefix=GCN-GCNTRACKER %s --- | define amdgpu_kernel void @high-RP-reschedule() { ret void } diff --git a/llvm/test/CodeGen/AMDGPU/idot2.ll b/llvm/test/CodeGen/AMDGPU/idot2.ll index cd85c301e16d5..b443e654350c5 100644 --- a/llvm/test/CodeGen/AMDGPU/idot2.ll +++ b/llvm/test/CodeGen/AMDGPU/idot2.ll @@ -151,20 +151,20 @@ entry: define amdgpu_kernel void @udot2_MulMul(ptr addrspace(1) %src1, ; GFX7-LABEL: udot2_MulMul: ; GFX7: ; %bb.0: ; %entry -; GFX7-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x9 -; GFX7-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0xd -; GFX7-NEXT: s_mov_b32 s3, 0xf000 -; GFX7-NEXT: s_mov_b32 s6, 0 -; GFX7-NEXT: s_mov_b32 s7, s3 +; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; GFX7-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd +; GFX7-NEXT: s_mov_b32 s7, 0xf000 +; GFX7-NEXT: s_mov_b32 s10, 0 +; GFX7-NEXT: s_mov_b32 s11, s7 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: s_mov_b64 s[4:5], s[8:9] +; GFX7-NEXT: s_mov_b64 s[8:9], s[0:1] ; GFX7-NEXT: v_lshlrev_b32_e32 v0, 2, v0 ; GFX7-NEXT: v_mov_b32_e32 v1, 0 -; GFX7-NEXT: buffer_load_dword v2, v[0:1], s[4:7], 0 addr64 -; GFX7-NEXT: s_mov_b64 s[4:5], s[10:11] -; GFX7-NEXT: buffer_load_dword v0, v[0:1], s[4:7], 0 addr64 -; GFX7-NEXT: s_load_dword s4, s[0:1], 0x0 -; GFX7-NEXT: s_mov_b32 s2, -1 +; GFX7-NEXT: buffer_load_dword v2, v[0:1], s[8:11], 0 addr64 +; GFX7-NEXT: s_mov_b64 s[8:9], s[2:3] +; GFX7-NEXT: buffer_load_dword v0, v[0:1], s[8:11], 0 addr64 +; GFX7-NEXT: s_load_dword s0, s[4:5], 0x0 +; GFX7-NEXT: s_mov_b32 s6, -1 ; GFX7-NEXT: s_waitcnt vmcnt(1) ; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v2 ; 
GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v2 @@ -174,8 +174,8 @@ define amdgpu_kernel void @udot2_MulMul(ptr addrspace(1) %src1, ; GFX7-NEXT: v_mul_u32_u24_e32 v0, v0, v2 ; GFX7-NEXT: v_mad_u32_u24 v0, v3, v1, v0 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_add_i32_e32 v0, vcc, s4, v0 -; GFX7-NEXT: buffer_store_dword v0, off, s[0:3], 0 +; GFX7-NEXT: v_add_i32_e32 v0, vcc, s0, v0 +; GFX7-NEXT: buffer_store_dword v0, off, s[4:7], 0 ; GFX7-NEXT: s_endpgm ; ; GFX8-LABEL: udot2_MulMul: @@ -1698,20 +1698,20 @@ entry: define amdgpu_kernel void @udot2_MultipleUses_add1(ptr addrspace(1) %src1, ; GFX7-LABEL: udot2_MultipleUses_add1: ; GFX7: ; %bb.0: ; %entry -; GFX7-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x9 -; GFX7-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0xd -; GFX7-NEXT: s_mov_b32 s3, 0xf000 -; GFX7-NEXT: s_mov_b32 s6, 0 -; GFX7-NEXT: s_mov_b32 s7, s3 +; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; GFX7-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd +; GFX7-NEXT: s_mov_b32 s7, 0xf000 +; GFX7-NEXT: s_mov_b32 s10, 0 +; GFX7-NEXT: s_mov_b32 s11, s7 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: s_mov_b64 s[4:5], s[8:9] +; GFX7-NEXT: s_mov_b64 s[8:9], s[0:1] ; GFX7-NEXT: v_lshlrev_b32_e32 v0, 2, v0 ; GFX7-NEXT: v_mov_b32_e32 v1, 0 -; GFX7-NEXT: buffer_load_dword v2, v[0:1], s[4:7], 0 addr64 -; GFX7-NEXT: s_mov_b64 s[4:5], s[10:11] -; GFX7-NEXT: buffer_load_dword v0, v[0:1], s[4:7], 0 addr64 -; GFX7-NEXT: s_load_dword s4, s[0:1], 0x0 -; GFX7-NEXT: s_mov_b32 s2, -1 +; GFX7-NEXT: buffer_load_dword v2, v[0:1], s[8:11], 0 addr64 +; GFX7-NEXT: s_mov_b64 s[8:9], s[2:3] +; GFX7-NEXT: buffer_load_dword v0, v[0:1], s[8:11], 0 addr64 +; GFX7-NEXT: s_load_dword s0, s[4:5], 0x0 +; GFX7-NEXT: s_mov_b32 s6, -1 ; GFX7-NEXT: s_waitcnt vmcnt(1) ; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v2 ; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v2 @@ -1719,10 +1719,10 @@ define amdgpu_kernel void @udot2_MultipleUses_add1(ptr addrspace(1) %src1, ; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v0 ; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_mad_u32_u24 v1, v3, v1, s4 +; GFX7-NEXT: v_mad_u32_u24 v1, v3, v1, s0 ; GFX7-NEXT: v_mad_u32_u24 v0, v0, v2, v1 ; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v1 -; GFX7-NEXT: buffer_store_dword v0, off, s[0:3], 0 +; GFX7-NEXT: buffer_store_dword v0, off, s[4:7], 0 ; GFX7-NEXT: s_endpgm ; ; GFX8-LABEL: udot2_MultipleUses_add1: @@ -1851,20 +1851,20 @@ entry: define amdgpu_kernel void @idot2_MultipleUses_add1(ptr addrspace(1) %src1, ; GFX7-LABEL: idot2_MultipleUses_add1: ; GFX7: ; %bb.0: ; %entry -; GFX7-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x9 -; GFX7-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0xd -; GFX7-NEXT: s_mov_b32 s3, 0xf000 -; GFX7-NEXT: s_mov_b32 s6, 0 -; GFX7-NEXT: s_mov_b32 s7, s3 +; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; GFX7-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd +; GFX7-NEXT: s_mov_b32 s7, 0xf000 +; GFX7-NEXT: s_mov_b32 s10, 0 +; GFX7-NEXT: s_mov_b32 s11, s7 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: s_mov_b64 s[4:5], s[8:9] +; GFX7-NEXT: s_mov_b64 s[8:9], s[0:1] ; GFX7-NEXT: v_lshlrev_b32_e32 v0, 2, v0 ; GFX7-NEXT: v_mov_b32_e32 v1, 0 -; GFX7-NEXT: buffer_load_dword v2, v[0:1], s[4:7], 0 addr64 -; GFX7-NEXT: s_mov_b64 s[4:5], s[10:11] -; GFX7-NEXT: buffer_load_dword v0, v[0:1], s[4:7], 0 addr64 -; GFX7-NEXT: s_load_dword s4, s[0:1], 0x0 -; GFX7-NEXT: s_mov_b32 s2, -1 +; GFX7-NEXT: buffer_load_dword v2, v[0:1], s[8:11], 0 addr64 +; GFX7-NEXT: s_mov_b64 s[8:9], s[2:3] +; GFX7-NEXT: buffer_load_dword v0, v[0:1], s[8:11], 0 addr64 +; GFX7-NEXT: s_load_dword 
s0, s[4:5], 0x0 +; GFX7-NEXT: s_mov_b32 s6, -1 ; GFX7-NEXT: s_waitcnt vmcnt(1) ; GFX7-NEXT: v_bfe_i32 v1, v2, 0, 16 ; GFX7-NEXT: v_ashrrev_i32_e32 v2, 16, v2 @@ -1872,10 +1872,10 @@ define amdgpu_kernel void @idot2_MultipleUses_add1(ptr addrspace(1) %src1, ; GFX7-NEXT: v_bfe_i32 v3, v0, 0, 16 ; GFX7-NEXT: v_ashrrev_i32_e32 v0, 16, v0 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_mad_i32_i24 v0, v0, v2, s4 +; GFX7-NEXT: v_mad_i32_i24 v0, v0, v2, s0 ; GFX7-NEXT: v_mad_i32_i24 v1, v3, v1, v0 ; GFX7-NEXT: v_add_i32_e32 v0, vcc, v1, v0 -; GFX7-NEXT: buffer_store_dword v0, off, s[0:3], 0 +; GFX7-NEXT: buffer_store_dword v0, off, s[4:7], 0 ; GFX7-NEXT: s_endpgm ; ; GFX8-LABEL: idot2_MultipleUses_add1: @@ -2004,20 +2004,20 @@ entry: define amdgpu_kernel void @udot2_MultipleUses_mul1(ptr addrspace(1) %src1, ; GFX7-LABEL: udot2_MultipleUses_mul1: ; GFX7: ; %bb.0: ; %entry -; GFX7-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x9 -; GFX7-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0xd -; GFX7-NEXT: s_mov_b32 s3, 0xf000 -; GFX7-NEXT: s_mov_b32 s6, 0 -; GFX7-NEXT: s_mov_b32 s7, s3 +; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; GFX7-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd +; GFX7-NEXT: s_mov_b32 s7, 0xf000 +; GFX7-NEXT: s_mov_b32 s10, 0 +; GFX7-NEXT: s_mov_b32 s11, s7 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: s_mov_b64 s[4:5], s[8:9] +; GFX7-NEXT: s_mov_b64 s[8:9], s[0:1] ; GFX7-NEXT: v_lshlrev_b32_e32 v0, 2, v0 ; GFX7-NEXT: v_mov_b32_e32 v1, 0 -; GFX7-NEXT: buffer_load_dword v2, v[0:1], s[4:7], 0 addr64 -; GFX7-NEXT: s_mov_b64 s[4:5], s[10:11] -; GFX7-NEXT: buffer_load_dword v0, v[0:1], s[4:7], 0 addr64 -; GFX7-NEXT: s_load_dword s4, s[0:1], 0x0 -; GFX7-NEXT: s_mov_b32 s2, -1 +; GFX7-NEXT: buffer_load_dword v2, v[0:1], s[8:11], 0 addr64 +; GFX7-NEXT: s_mov_b64 s[8:9], s[2:3] +; GFX7-NEXT: buffer_load_dword v0, v[0:1], s[8:11], 0 addr64 +; GFX7-NEXT: s_load_dword s0, s[4:5], 0x0 +; GFX7-NEXT: s_mov_b32 s6, -1 ; GFX7-NEXT: s_waitcnt vmcnt(1) ; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v2 ; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v2 @@ -2025,10 +2025,10 @@ define amdgpu_kernel void @udot2_MultipleUses_mul1(ptr addrspace(1) %src1, ; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v0 ; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_mad_u32_u24 v4, v0, v2, s4 +; GFX7-NEXT: v_mad_u32_u24 v4, v0, v2, s0 ; GFX7-NEXT: v_mad_u32_u24 v1, v3, v1, v4 ; GFX7-NEXT: v_mad_u32_u24 v0, v0, v2, v1 -; GFX7-NEXT: buffer_store_dword v0, off, s[0:3], 0 +; GFX7-NEXT: buffer_store_dword v0, off, s[4:7], 0 ; GFX7-NEXT: s_endpgm ; ; GFX8-LABEL: udot2_MultipleUses_mul1: @@ -2163,20 +2163,20 @@ entry: define amdgpu_kernel void @idot2_MultipleUses_mul1(ptr addrspace(1) %src1, ; GFX7-LABEL: idot2_MultipleUses_mul1: ; GFX7: ; %bb.0: ; %entry -; GFX7-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x9 -; GFX7-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0xd -; GFX7-NEXT: s_mov_b32 s3, 0xf000 -; GFX7-NEXT: s_mov_b32 s6, 0 -; GFX7-NEXT: s_mov_b32 s7, s3 +; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; GFX7-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd +; GFX7-NEXT: s_mov_b32 s7, 0xf000 +; GFX7-NEXT: s_mov_b32 s10, 0 +; GFX7-NEXT: s_mov_b32 s11, s7 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: s_mov_b64 s[4:5], s[8:9] +; GFX7-NEXT: s_mov_b64 s[8:9], s[0:1] ; GFX7-NEXT: v_lshlrev_b32_e32 v0, 2, v0 ; GFX7-NEXT: v_mov_b32_e32 v1, 0 -; GFX7-NEXT: buffer_load_dword v2, v[0:1], s[4:7], 0 addr64 -; GFX7-NEXT: s_mov_b64 s[4:5], s[10:11] -; GFX7-NEXT: buffer_load_dword v0, v[0:1], s[4:7], 0 addr64 -; GFX7-NEXT: s_load_dword s4, s[0:1], 0x0 
-; GFX7-NEXT: s_mov_b32 s2, -1 +; GFX7-NEXT: buffer_load_dword v2, v[0:1], s[8:11], 0 addr64 +; GFX7-NEXT: s_mov_b64 s[8:9], s[2:3] +; GFX7-NEXT: buffer_load_dword v0, v[0:1], s[8:11], 0 addr64 +; GFX7-NEXT: s_load_dword s0, s[4:5], 0x0 +; GFX7-NEXT: s_mov_b32 s6, -1 ; GFX7-NEXT: s_waitcnt vmcnt(1) ; GFX7-NEXT: v_bfe_i32 v1, v2, 0, 16 ; GFX7-NEXT: v_ashrrev_i32_e32 v2, 16, v2 @@ -2184,10 +2184,10 @@ define amdgpu_kernel void @idot2_MultipleUses_mul1(ptr addrspace(1) %src1, ; GFX7-NEXT: v_bfe_i32 v3, v0, 0, 16 ; GFX7-NEXT: v_ashrrev_i32_e32 v0, 16, v0 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_mad_i32_i24 v4, v3, v1, s4 +; GFX7-NEXT: v_mad_i32_i24 v4, v3, v1, s0 ; GFX7-NEXT: v_mad_i32_i24 v0, v0, v2, v4 ; GFX7-NEXT: v_mad_i32_i24 v0, v3, v1, v0 -; GFX7-NEXT: buffer_store_dword v0, off, s[0:3], 0 +; GFX7-NEXT: buffer_store_dword v0, off, s[4:7], 0 ; GFX7-NEXT: s_endpgm ; ; GFX8-LABEL: idot2_MultipleUses_mul1: @@ -2322,31 +2322,31 @@ entry: define amdgpu_kernel void @udot2_MultipleUses_mul2(ptr addrspace(1) %src1, ; GFX7-LABEL: udot2_MultipleUses_mul2: ; GFX7: ; %bb.0: ; %entry -; GFX7-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x9 -; GFX7-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0xd -; GFX7-NEXT: s_mov_b32 s3, 0xf000 -; GFX7-NEXT: s_mov_b32 s6, 0 -; GFX7-NEXT: s_mov_b32 s7, s3 +; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; GFX7-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd +; GFX7-NEXT: s_mov_b32 s7, 0xf000 +; GFX7-NEXT: s_mov_b32 s10, 0 +; GFX7-NEXT: s_mov_b32 s11, s7 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: s_mov_b64 s[4:5], s[8:9] +; GFX7-NEXT: s_mov_b64 s[8:9], s[0:1] ; GFX7-NEXT: v_lshlrev_b32_e32 v0, 2, v0 ; GFX7-NEXT: v_mov_b32_e32 v1, 0 -; GFX7-NEXT: buffer_load_dword v2, v[0:1], s[4:7], 0 addr64 -; GFX7-NEXT: s_mov_b64 s[4:5], s[10:11] -; GFX7-NEXT: buffer_load_dword v0, v[0:1], s[4:7], 0 addr64 -; GFX7-NEXT: s_load_dword s4, s[0:1], 0x0 -; GFX7-NEXT: s_mov_b32 s2, -1 +; GFX7-NEXT: buffer_load_dword v2, v[0:1], s[8:11], 0 addr64 +; GFX7-NEXT: s_mov_b64 s[8:9], s[2:3] +; GFX7-NEXT: buffer_load_dword v0, v[0:1], s[8:11], 0 addr64 +; GFX7-NEXT: s_load_dword s0, s[4:5], 0x0 +; GFX7-NEXT: s_mov_b32 s6, -1 ; GFX7-NEXT: s_waitcnt vmcnt(1) ; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v2 ; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v2 ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v0 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_mad_u32_u24 v4, v3, v1, s4 +; GFX7-NEXT: v_mad_u32_u24 v4, v3, v1, s0 ; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX7-NEXT: v_mad_u32_u24 v1, v3, v1, v4 ; GFX7-NEXT: v_mad_u32_u24 v0, v0, v2, v1 -; GFX7-NEXT: buffer_store_dword v0, off, s[0:3], 0 +; GFX7-NEXT: buffer_store_dword v0, off, s[4:7], 0 ; GFX7-NEXT: s_endpgm ; ; GFX8-LABEL: udot2_MultipleUses_mul2: @@ -2479,20 +2479,20 @@ entry: define amdgpu_kernel void @idot2_MultipleUses_mul2(ptr addrspace(1) %src1, ; GFX7-LABEL: idot2_MultipleUses_mul2: ; GFX7: ; %bb.0: ; %entry -; GFX7-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x9 -; GFX7-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0xd -; GFX7-NEXT: s_mov_b32 s3, 0xf000 -; GFX7-NEXT: s_mov_b32 s6, 0 -; GFX7-NEXT: s_mov_b32 s7, s3 +; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; GFX7-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd +; GFX7-NEXT: s_mov_b32 s7, 0xf000 +; GFX7-NEXT: s_mov_b32 s10, 0 +; GFX7-NEXT: s_mov_b32 s11, s7 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: s_mov_b64 s[4:5], s[8:9] +; GFX7-NEXT: s_mov_b64 s[8:9], s[0:1] ; GFX7-NEXT: v_lshlrev_b32_e32 v0, 2, v0 ; GFX7-NEXT: v_mov_b32_e32 v1, 0 -; GFX7-NEXT: buffer_load_dword v2, v[0:1], s[4:7], 0 
addr64 -; GFX7-NEXT: s_mov_b64 s[4:5], s[10:11] -; GFX7-NEXT: buffer_load_dword v0, v[0:1], s[4:7], 0 addr64 -; GFX7-NEXT: s_load_dword s4, s[0:1], 0x0 -; GFX7-NEXT: s_mov_b32 s2, -1 +; GFX7-NEXT: buffer_load_dword v2, v[0:1], s[8:11], 0 addr64 +; GFX7-NEXT: s_mov_b64 s[8:9], s[2:3] +; GFX7-NEXT: buffer_load_dword v0, v[0:1], s[8:11], 0 addr64 +; GFX7-NEXT: s_load_dword s0, s[4:5], 0x0 +; GFX7-NEXT: s_mov_b32 s6, -1 ; GFX7-NEXT: s_waitcnt vmcnt(1) ; GFX7-NEXT: v_bfe_i32 v1, v2, 0, 16 ; GFX7-NEXT: v_ashrrev_i32_e32 v2, 16, v2 @@ -2500,10 +2500,10 @@ define amdgpu_kernel void @idot2_MultipleUses_mul2(ptr addrspace(1) %src1, ; GFX7-NEXT: v_bfe_i32 v3, v0, 0, 16 ; GFX7-NEXT: v_ashrrev_i32_e32 v0, 16, v0 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_mad_i32_i24 v4, v0, v2, s4 +; GFX7-NEXT: v_mad_i32_i24 v4, v0, v2, s0 ; GFX7-NEXT: v_mad_i32_i24 v0, v0, v2, v4 ; GFX7-NEXT: v_mad_i32_i24 v0, v3, v1, v0 -; GFX7-NEXT: buffer_store_dword v0, off, s[0:3], 0 +; GFX7-NEXT: buffer_store_dword v0, off, s[4:7], 0 ; GFX7-NEXT: s_endpgm ; ; GFX8-LABEL: idot2_MultipleUses_mul2: diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.alignbyte.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.alignbyte.ll index 8b16fef915a79..07421afde7622 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.alignbyte.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.alignbyte.ll @@ -1,14 +1,128 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 ; RUN: llc -mtriple=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s +; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX11-TRUE16 %s +; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX11-FAKE16 %s declare i32 @llvm.amdgcn.alignbyte(i32, i32, i32) #0 -; GCN-LABEL: {{^}}v_alignbyte_b32: -; GCN: v_alignbyte_b32 {{[vs][0-9]+}}, {{[vs][0-9]+}}, {{[vs][0-9]+}} define amdgpu_kernel void @v_alignbyte_b32(ptr addrspace(1) %out, i32 %src1, i32 %src2, i32 %src3) #1 { +; GCN-LABEL: v_alignbyte_b32: +; GCN: ; %bb.0: +; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0xb +; GCN-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x9 +; GCN-NEXT: s_mov_b32 s7, 0xf000 +; GCN-NEXT: s_mov_b32 s6, -1 +; GCN-NEXT: s_waitcnt lgkmcnt(0) +; GCN-NEXT: v_mov_b32_e32 v0, s1 +; GCN-NEXT: v_mov_b32_e32 v1, s2 +; GCN-NEXT: v_alignbyte_b32 v0, s0, v0, v1 +; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GCN-NEXT: s_endpgm +; +; GFX11-TRUE16-LABEL: v_alignbyte_b32: +; GFX11-TRUE16: ; %bb.0: +; GFX11-TRUE16-NEXT: s_clause 0x1 +; GFX11-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x2c +; GFX11-TRUE16-NEXT: s_load_b64 s[4:5], s[4:5], 0x24 +; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, 0 +; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, s2 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_alignbyte_b32 v0, s0, s1, v0.l +; GFX11-TRUE16-NEXT: global_store_b32 v1, v0, s[4:5] +; GFX11-TRUE16-NEXT: s_endpgm +; +; GFX11-FAKE16-LABEL: v_alignbyte_b32: +; GFX11-FAKE16: ; %bb.0: +; GFX11-FAKE16-NEXT: s_clause 0x1 +; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x2c +; GFX11-FAKE16-NEXT: s_load_b64 s[4:5], s[4:5], 0x24 +; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s2 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-FAKE16-NEXT: v_alignbyte_b32 v0, s0, s1, v0 +; GFX11-FAKE16-NEXT: global_store_b32 v1, v0, s[4:5] +; 
GFX11-FAKE16-NEXT: s_endpgm %val = call i32 @llvm.amdgcn.alignbyte(i32 %src1, i32 %src2, i32 %src3) #0 store i32 %val, ptr addrspace(1) %out ret void } +define amdgpu_kernel void @v_alignbyte_b32_2(ptr addrspace(1) %out, ptr addrspace(1) %src1, ptr addrspace(1) %src2, i32 %src3) #1 { +; GCN-LABEL: v_alignbyte_b32_2: +; GCN: ; %bb.0: +; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; GCN-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0xd +; GCN-NEXT: s_load_dword s16, s[4:5], 0xf +; GCN-NEXT: s_mov_b32 s7, 0xf000 +; GCN-NEXT: s_mov_b32 s14, 0 +; GCN-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GCN-NEXT: v_mov_b32_e32 v1, 0 +; GCN-NEXT: s_mov_b32 s15, s7 +; GCN-NEXT: s_mov_b64 s[10:11], s[14:15] +; GCN-NEXT: s_waitcnt lgkmcnt(0) +; GCN-NEXT: s_mov_b64 s[12:13], s[2:3] +; GCN-NEXT: buffer_load_dword v2, v[0:1], s[12:15], 0 addr64 glc +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: buffer_load_dword v0, v[0:1], s[8:11], 0 addr64 glc +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: s_mov_b32 s6, -1 +; GCN-NEXT: s_mov_b32 s4, s0 +; GCN-NEXT: s_mov_b32 s5, s1 +; GCN-NEXT: v_alignbyte_b32 v0, v2, v0, s16 +; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GCN-NEXT: s_endpgm +; +; GFX11-TRUE16-LABEL: v_alignbyte_b32_2: +; GFX11-TRUE16: ; %bb.0: +; GFX11-TRUE16-NEXT: s_clause 0x1 +; GFX11-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-TRUE16-NEXT: s_load_b64 s[6:7], s[4:5], 0x34 +; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, 0 :: v_dual_and_b32 v0, 0x3ff, v0 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-TRUE16-NEXT: global_load_b32 v1, v0, s[2:3] glc dlc +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-TRUE16-NEXT: global_load_b32 v2, v0, s[6:7] glc dlc +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-TRUE16-NEXT: s_load_b32 s2, s[4:5], 0x3c +; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, s2 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_alignbyte_b32 v0, v1, v2, v0.l +; GFX11-TRUE16-NEXT: global_store_b32 v3, v0, s[0:1] +; GFX11-TRUE16-NEXT: s_endpgm +; +; GFX11-FAKE16-LABEL: v_alignbyte_b32_2: +; GFX11-FAKE16: ; %bb.0: +; GFX11-FAKE16-NEXT: s_clause 0x1 +; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-FAKE16-NEXT: s_load_b64 s[6:7], s[4:5], 0x34 +; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, 0 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-FAKE16-NEXT: global_load_b32 v1, v0, s[2:3] glc dlc +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-FAKE16-NEXT: global_load_b32 v0, v0, s[6:7] glc dlc +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-FAKE16-NEXT: s_load_b32 s2, s[4:5], 0x3c +; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_alignbyte_b32 v0, v1, v0, s2 +; GFX11-FAKE16-NEXT: global_store_b32 v2, v0, s[0:1] +; GFX11-FAKE16-NEXT: s_endpgm + %tid = call i32 @llvm.amdgcn.workitem.id.x() + %a.gep = getelementptr inbounds i32, ptr addrspace(1) %src1, i32 %tid + %b.gep = getelementptr inbounds i32, ptr addrspace(1) %src2, i32 %tid + %a.val = load volatile i32, ptr addrspace(1) %a.gep + %b.val = load volatile i32, ptr addrspace(1) %b.gep + + %val = call i32 @llvm.amdgcn.alignbyte(i32 %a.val, i32 %b.val, i32 %src3) #0 + store i32 %val, ptr addrspace(1) %out + ret void +} + attributes #0 = { nounwind readnone } attributes #1 = { nounwind } diff --git 
a/llvm/test/CodeGen/AMDGPU/load-global-i32.ll b/llvm/test/CodeGen/AMDGPU/load-global-i32.ll index 0573de4a7f2d1..fe693b4af67f3 100644 --- a/llvm/test/CodeGen/AMDGPU/load-global-i32.ll +++ b/llvm/test/CodeGen/AMDGPU/load-global-i32.ll @@ -2373,21 +2373,21 @@ define amdgpu_kernel void @global_sextload_v16i32_to_v16i64(ptr addrspace(1) %ou ; GCNX3-HSA-NEXT: s_waitcnt lgkmcnt(0) ; GCNX3-HSA-NEXT: v_mov_b32_e32 v0, s2 ; GCNX3-HSA-NEXT: v_mov_b32_e32 v1, s3 -; GCNX3-HSA-NEXT: flat_load_dwordx4 v[12:15], v[0:1] ; GCNX3-HSA-NEXT: s_add_u32 s4, s2, 48 +; GCNX3-HSA-NEXT: flat_load_dwordx4 v[8:11], v[0:1] ; GCNX3-HSA-NEXT: s_addc_u32 s5, s3, 0 ; GCNX3-HSA-NEXT: v_mov_b32_e32 v0, s4 ; GCNX3-HSA-NEXT: v_mov_b32_e32 v1, s5 ; GCNX3-HSA-NEXT: s_add_u32 s4, s2, 32 -; GCNX3-HSA-NEXT: flat_load_dwordx4 v[8:11], v[0:1] ; GCNX3-HSA-NEXT: s_addc_u32 s5, s3, 0 -; GCNX3-HSA-NEXT: v_mov_b32_e32 v0, s4 -; GCNX3-HSA-NEXT: v_mov_b32_e32 v1, s5 ; GCNX3-HSA-NEXT: s_add_u32 s2, s2, 16 -; GCNX3-HSA-NEXT: flat_load_dwordx4 v[4:7], v[0:1] ; GCNX3-HSA-NEXT: s_addc_u32 s3, s3, 0 -; GCNX3-HSA-NEXT: v_mov_b32_e32 v0, s2 -; GCNX3-HSA-NEXT: v_mov_b32_e32 v1, s3 +; GCNX3-HSA-NEXT: v_mov_b32_e32 v5, s3 +; GCNX3-HSA-NEXT: v_mov_b32_e32 v4, s2 +; GCNX3-HSA-NEXT: flat_load_dwordx4 v[12:15], v[0:1] +; GCNX3-HSA-NEXT: flat_load_dwordx4 v[4:7], v[4:5] +; GCNX3-HSA-NEXT: v_mov_b32_e32 v0, s4 +; GCNX3-HSA-NEXT: v_mov_b32_e32 v1, s5 ; GCNX3-HSA-NEXT: flat_load_dwordx4 v[0:3], v[0:1] ; GCNX3-HSA-NEXT: s_add_u32 s2, s0, 16 ; GCNX3-HSA-NEXT: s_addc_u32 s3, s1, 0 @@ -2406,10 +2406,10 @@ define amdgpu_kernel void @global_sextload_v16i32_to_v16i64(ptr addrspace(1) %ou ; GCNX3-HSA-NEXT: v_mov_b32_e32 v20, s0 ; GCNX3-HSA-NEXT: s_addc_u32 s3, s1, 0 ; GCNX3-HSA-NEXT: s_waitcnt vmcnt(3) -; GCNX3-HSA-NEXT: v_ashrrev_i32_e32 v19, 31, v13 -; GCNX3-HSA-NEXT: v_ashrrev_i32_e32 v17, 31, v12 -; GCNX3-HSA-NEXT: v_mov_b32_e32 v16, v12 -; GCNX3-HSA-NEXT: v_mov_b32_e32 v18, v13 +; GCNX3-HSA-NEXT: v_ashrrev_i32_e32 v19, 31, v9 +; GCNX3-HSA-NEXT: v_ashrrev_i32_e32 v17, 31, v8 +; GCNX3-HSA-NEXT: v_mov_b32_e32 v16, v8 +; GCNX3-HSA-NEXT: v_mov_b32_e32 v18, v9 ; GCNX3-HSA-NEXT: flat_store_dwordx4 v[20:21], v[16:19] ; GCNX3-HSA-NEXT: v_mov_b32_e32 v21, s3 ; GCNX3-HSA-NEXT: v_mov_b32_e32 v20, s2 @@ -2418,51 +2418,50 @@ define amdgpu_kernel void @global_sextload_v16i32_to_v16i64(ptr addrspace(1) %ou ; GCNX3-HSA-NEXT: v_mov_b32_e32 v29, s3 ; GCNX3-HSA-NEXT: v_mov_b32_e32 v28, s2 ; GCNX3-HSA-NEXT: s_add_u32 s2, s0, 32 -; GCNX3-HSA-NEXT: v_ashrrev_i32_e32 v19, 31, v15 -; GCNX3-HSA-NEXT: v_ashrrev_i32_e32 v17, 31, v14 -; GCNX3-HSA-NEXT: v_mov_b32_e32 v16, v14 -; GCNX3-HSA-NEXT: v_mov_b32_e32 v18, v15 +; GCNX3-HSA-NEXT: v_ashrrev_i32_e32 v19, 31, v11 +; GCNX3-HSA-NEXT: v_ashrrev_i32_e32 v17, 31, v10 +; GCNX3-HSA-NEXT: v_mov_b32_e32 v16, v10 +; GCNX3-HSA-NEXT: v_mov_b32_e32 v18, v11 ; GCNX3-HSA-NEXT: s_addc_u32 s3, s1, 0 ; GCNX3-HSA-NEXT: flat_store_dwordx4 v[22:23], v[16:19] ; GCNX3-HSA-NEXT: s_waitcnt vmcnt(4) -; GCNX3-HSA-NEXT: v_ashrrev_i32_e32 v15, 31, v11 -; GCNX3-HSA-NEXT: v_ashrrev_i32_e32 v13, 31, v10 -; GCNX3-HSA-NEXT: v_ashrrev_i32_e32 v19, 31, v9 -; GCNX3-HSA-NEXT: v_ashrrev_i32_e32 v17, 31, v8 -; GCNX3-HSA-NEXT: v_mov_b32_e32 v16, v8 -; GCNX3-HSA-NEXT: v_mov_b32_e32 v18, v9 -; GCNX3-HSA-NEXT: v_mov_b32_e32 v12, v10 -; GCNX3-HSA-NEXT: v_mov_b32_e32 v14, v11 +; GCNX3-HSA-NEXT: v_ashrrev_i32_e32 v11, 31, v15 +; GCNX3-HSA-NEXT: v_ashrrev_i32_e32 v9, 31, v14 +; GCNX3-HSA-NEXT: v_ashrrev_i32_e32 v19, 31, v13 +; GCNX3-HSA-NEXT: v_ashrrev_i32_e32 v17, 31, v12 +; 
GCNX3-HSA-NEXT: v_mov_b32_e32 v16, v12 +; GCNX3-HSA-NEXT: v_mov_b32_e32 v18, v13 +; GCNX3-HSA-NEXT: v_mov_b32_e32 v8, v14 +; GCNX3-HSA-NEXT: v_mov_b32_e32 v10, v15 ; GCNX3-HSA-NEXT: s_add_u32 s0, s0, 48 ; GCNX3-HSA-NEXT: v_mov_b32_e32 v23, s3 ; GCNX3-HSA-NEXT: s_addc_u32 s1, s1, 0 ; GCNX3-HSA-NEXT: flat_store_dwordx4 v[24:25], v[16:19] -; GCNX3-HSA-NEXT: flat_store_dwordx4 v[26:27], v[12:15] -; GCNX3-HSA-NEXT: s_waitcnt vmcnt(5) -; GCNX3-HSA-NEXT: v_ashrrev_i32_e32 v11, 31, v7 -; GCNX3-HSA-NEXT: v_ashrrev_i32_e32 v9, 31, v6 -; GCNX3-HSA-NEXT: v_ashrrev_i32_e32 v15, 31, v5 -; GCNX3-HSA-NEXT: v_ashrrev_i32_e32 v13, 31, v4 -; GCNX3-HSA-NEXT: v_mov_b32_e32 v12, v4 -; GCNX3-HSA-NEXT: v_mov_b32_e32 v14, v5 -; GCNX3-HSA-NEXT: v_mov_b32_e32 v8, v6 -; GCNX3-HSA-NEXT: v_mov_b32_e32 v10, v7 +; GCNX3-HSA-NEXT: flat_store_dwordx4 v[26:27], v[8:11] +; GCNX3-HSA-NEXT: s_waitcnt vmcnt(4) +; GCNX3-HSA-NEXT: v_ashrrev_i32_e32 v15, 31, v1 +; GCNX3-HSA-NEXT: v_ashrrev_i32_e32 v11, 31, v3 +; GCNX3-HSA-NEXT: v_ashrrev_i32_e32 v9, 31, v2 +; GCNX3-HSA-NEXT: v_ashrrev_i32_e32 v13, 31, v0 +; GCNX3-HSA-NEXT: v_mov_b32_e32 v12, v0 +; GCNX3-HSA-NEXT: v_mov_b32_e32 v14, v1 +; GCNX3-HSA-NEXT: v_mov_b32_e32 v8, v2 +; GCNX3-HSA-NEXT: v_mov_b32_e32 v10, v3 ; GCNX3-HSA-NEXT: v_mov_b32_e32 v22, s2 ; GCNX3-HSA-NEXT: v_mov_b32_e32 v31, s1 ; GCNX3-HSA-NEXT: flat_store_dwordx4 v[20:21], v[12:15] ; GCNX3-HSA-NEXT: flat_store_dwordx4 v[28:29], v[8:11] ; GCNX3-HSA-NEXT: v_mov_b32_e32 v30, s0 -; GCNX3-HSA-NEXT: s_waitcnt vmcnt(6) -; GCNX3-HSA-NEXT: v_ashrrev_i32_e32 v11, 31, v1 -; GCNX3-HSA-NEXT: v_ashrrev_i32_e32 v9, 31, v0 -; GCNX3-HSA-NEXT: v_mov_b32_e32 v8, v0 -; GCNX3-HSA-NEXT: v_mov_b32_e32 v10, v1 -; GCNX3-HSA-NEXT: v_ashrrev_i32_e32 v7, 31, v3 -; GCNX3-HSA-NEXT: v_ashrrev_i32_e32 v5, 31, v2 -; GCNX3-HSA-NEXT: v_mov_b32_e32 v4, v2 -; GCNX3-HSA-NEXT: v_mov_b32_e32 v6, v3 +; GCNX3-HSA-NEXT: v_ashrrev_i32_e32 v11, 31, v5 +; GCNX3-HSA-NEXT: v_ashrrev_i32_e32 v9, 31, v4 +; GCNX3-HSA-NEXT: v_mov_b32_e32 v8, v4 +; GCNX3-HSA-NEXT: v_mov_b32_e32 v10, v5 +; GCNX3-HSA-NEXT: v_ashrrev_i32_e32 v3, 31, v7 +; GCNX3-HSA-NEXT: v_ashrrev_i32_e32 v1, 31, v6 +; GCNX3-HSA-NEXT: v_mov_b32_e32 v0, v6 +; GCNX3-HSA-NEXT: v_mov_b32_e32 v2, v7 ; GCNX3-HSA-NEXT: flat_store_dwordx4 v[22:23], v[8:11] -; GCNX3-HSA-NEXT: flat_store_dwordx4 v[30:31], v[4:7] +; GCNX3-HSA-NEXT: flat_store_dwordx4 v[30:31], v[0:3] ; GCNX3-HSA-NEXT: s_endpgm ; ; GCNX3-NOHSA-LABEL: global_sextload_v16i32_to_v16i64: diff --git a/llvm/test/CodeGen/AMDGPU/machine-scheduler-sink-trivial-remats-debug.mir b/llvm/test/CodeGen/AMDGPU/machine-scheduler-sink-trivial-remats-debug.mir index 5dc6d2ee8f695..e32de1e42aac4 100644 --- a/llvm/test/CodeGen/AMDGPU/machine-scheduler-sink-trivial-remats-debug.mir +++ b/llvm/test/CodeGen/AMDGPU/machine-scheduler-sink-trivial-remats-debug.mir @@ -1,5 +1,4 @@ # RUN: llc -mtriple=amdgcn -mcpu=gfx908 -run-pass machine-scheduler -amdgpu-disable-unclustered-high-rp-reschedule -verify-machineinstrs %s -o - -debug-only=machine-scheduler 2>&1 | FileCheck -check-prefix=DEBUG %s -# RUN: llc -mtriple=amdgcn -mcpu=gfx908 -passes=machine-scheduler -amdgpu-disable-unclustered-high-rp-reschedule %s -o - -debug-only=machine-scheduler 2>&1 | FileCheck -check-prefix=DEBUG %s # REQUIRES: asserts --- | diff --git a/llvm/test/CodeGen/AMDGPU/machine-scheduler-sink-trivial-remats.mir b/llvm/test/CodeGen/AMDGPU/machine-scheduler-sink-trivial-remats.mir index 9991cb1837e01..fb65d80c46e06 100644 --- a/llvm/test/CodeGen/AMDGPU/machine-scheduler-sink-trivial-remats.mir +++ 
b/llvm/test/CodeGen/AMDGPU/machine-scheduler-sink-trivial-remats.mir @@ -1,6 +1,5 @@ # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py # RUN: llc -mtriple=amdgcn -mcpu=gfx908 -run-pass=machine-scheduler -amdgpu-disable-unclustered-high-rp-reschedule -verify-machineinstrs %s -o - | FileCheck -check-prefix=GFX908 %s -# RUN: llc -mtriple=amdgcn -mcpu=gfx908 -passes=machine-scheduler -amdgpu-disable-unclustered-high-rp-reschedule %s -o - | FileCheck -check-prefix=GFX908 %s --- name: test_occ_10_max_occ_no_sink diff --git a/llvm/test/CodeGen/AMDGPU/macro-fusion-cluster-vcc-uses.mir b/llvm/test/CodeGen/AMDGPU/macro-fusion-cluster-vcc-uses.mir index ffc86dc5eee6f..2aa430400e49a 100644 --- a/llvm/test/CodeGen/AMDGPU/macro-fusion-cluster-vcc-uses.mir +++ b/llvm/test/CodeGen/AMDGPU/macro-fusion-cluster-vcc-uses.mir @@ -1,5 +1,4 @@ # RUN: llc -mtriple=amdgcn -mcpu=tahiti -verify-machineinstrs -run-pass machine-scheduler -o - %s | FileCheck -check-prefix=GCN %s -# RUN: llc -mtriple=amdgcn -mcpu=tahiti -passes=machine-scheduler -o - %s | FileCheck -check-prefix=GCN %s # GCN-LABEL: name: cluster_add_addc # GCN: S_NOP 0, implicit-def $vcc diff --git a/llvm/test/CodeGen/AMDGPU/peephole-opt-fold-reg-sequence-subreg.mir b/llvm/test/CodeGen/AMDGPU/peephole-opt-fold-reg-sequence-subreg.mir index d32163bb69235..057769372c041 100644 --- a/llvm/test/CodeGen/AMDGPU/peephole-opt-fold-reg-sequence-subreg.mir +++ b/llvm/test/CodeGen/AMDGPU/peephole-opt-fold-reg-sequence-subreg.mir @@ -14,7 +14,7 @@ body: | ; CHECK-NEXT: [[COPY:%[0-9]+]]:vreg_64_align2 = COPY $vgpr0_vgpr1 ; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]].sub0, %subreg.sub0, [[V_MOV_B32_e32_]], %subreg.sub1 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE]].sub0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY [[COPY]].sub0 ; CHECK-NEXT: S_ENDPGM 0, implicit [[COPY1]] %0:vreg_64_align2 = COPY $vgpr0_vgpr1 %1:vgpr_32 = V_MOV_B32_e32 0, implicit $exec @@ -60,7 +60,7 @@ body: | ; CHECK-NEXT: [[COPY:%[0-9]+]]:vreg_64_align2 = COPY $vgpr0_vgpr1 ; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[V_MOV_B32_e32_]], %subreg.sub1, [[COPY]].sub0, %subreg.sub0 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE]].sub0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY [[COPY]].sub0 ; CHECK-NEXT: S_ENDPGM 0, implicit [[COPY1]] %0:vreg_64_align2 = COPY $vgpr0_vgpr1 %1:vgpr_32 = V_MOV_B32_e32 0, implicit $exec @@ -106,7 +106,7 @@ body: | ; CHECK-NEXT: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr1_vgpr2 ; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY]].sub0, %subreg.sub0, [[V_MOV_B32_e32_]], %subreg.sub1, [[COPY]].sub1, %subreg.sub2 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE]].sub0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY [[COPY]].sub0 ; CHECK-NEXT: S_ENDPGM 0, implicit [[COPY1]] %0:vreg_64 = COPY $vgpr1_vgpr2 %1:vgpr_32 = V_MOV_B32_e32 0, implicit $exec @@ -129,7 +129,7 @@ body: | ; CHECK-NEXT: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr1_vgpr2 ; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY]].sub0, %subreg.sub0, [[V_MOV_B32_e32_]], %subreg.sub1, [[COPY]].sub1, %subreg.sub2 - ; 
CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE]].sub0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY [[COPY]].sub0 ; CHECK-NEXT: S_ENDPGM 0, implicit [[COPY1]] %0:vreg_64 = COPY $vgpr1_vgpr2 %1:vgpr_32 = V_MOV_B32_e32 0, implicit $exec diff --git a/llvm/test/CodeGen/AMDGPU/peephole-opt-regseq-removal.mir b/llvm/test/CodeGen/AMDGPU/peephole-opt-regseq-removal.mir index c5e51aa1b4fe8..e1ff42125ce9a 100644 --- a/llvm/test/CodeGen/AMDGPU/peephole-opt-regseq-removal.mir +++ b/llvm/test/CodeGen/AMDGPU/peephole-opt-regseq-removal.mir @@ -23,8 +23,8 @@ body: | ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 ; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[REG_SEQUENCE]].sub1, %subreg.sub0, [[REG_SEQUENCE]].sub0, %subreg.sub1 - ; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY1]] - ; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[COPY]] + ; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE]].sub1 + ; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE]].sub0 ; GCN-NEXT: KILL [[COPY3]], implicit [[COPY2]] %0:vgpr_32 = COPY $vgpr0 %1:vgpr_32 = COPY $vgpr1 diff --git a/llvm/test/CodeGen/AMDGPU/sched-assert-dead-def-subreg-use-other-subreg.mir b/llvm/test/CodeGen/AMDGPU/sched-assert-dead-def-subreg-use-other-subreg.mir index c90975959c3f4..c933fb0de5864 100644 --- a/llvm/test/CodeGen/AMDGPU/sched-assert-dead-def-subreg-use-other-subreg.mir +++ b/llvm/test/CodeGen/AMDGPU/sched-assert-dead-def-subreg-use-other-subreg.mir @@ -1,6 +1,5 @@ # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py -# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx906 -verify-machineinstrs -run-pass=machine-scheduler -verify-misched -o - %s | FileCheck %s -# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx906 -passes=machine-scheduler -verify-misched -o - %s | FileCheck %s +# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx906 -verify-machineinstrs -run-pass=machine-scheduler -verify-misched -o - %s | FileCheck %s # This would assert that a dead def should have no uses, but the dead # def and use have different subreg indices. diff --git a/llvm/test/CodeGen/AMDGPU/sched-assert-onlydbg-value-empty-region.mir b/llvm/test/CodeGen/AMDGPU/sched-assert-onlydbg-value-empty-region.mir index 2cd78062ccbd7..add7825a224ed 100644 --- a/llvm/test/CodeGen/AMDGPU/sched-assert-onlydbg-value-empty-region.mir +++ b/llvm/test/CodeGen/AMDGPU/sched-assert-onlydbg-value-empty-region.mir @@ -1,6 +1,5 @@ # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py # RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -run-pass=machine-scheduler -verify-machineinstrs %s -o - | FileCheck %s -# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes=machine-scheduler %s -o - | FileCheck %s # The sequence of DBG_VALUEs forms a scheduling region with 0 real # instructions. 
The RegPressure tracker would end up skipping over any diff --git a/llvm/test/CodeGen/AMDGPU/sched-barrier-hang-weak-dep.mir b/llvm/test/CodeGen/AMDGPU/sched-barrier-hang-weak-dep.mir index f797b01d49bf8..3fdb0c7c0885b 100644 --- a/llvm/test/CodeGen/AMDGPU/sched-barrier-hang-weak-dep.mir +++ b/llvm/test/CodeGen/AMDGPU/sched-barrier-hang-weak-dep.mir @@ -1,6 +1,5 @@ # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py # RUN: llc -mtriple=amdgcn -mcpu=gfx908 -run-pass=machine-scheduler -verify-misched -o - %s | FileCheck %s -# RUN: llc -mtriple=amdgcn -mcpu=gfx908 -passes=machine-scheduler -verify-misched -o - %s | FileCheck %s # This would hang after removing edges from the SCHED_BARRIER since the number # of Preds/Succs would be left in an inconsistent state. diff --git a/llvm/test/CodeGen/AMDGPU/sched-crash-dbg-value.mir b/llvm/test/CodeGen/AMDGPU/sched-crash-dbg-value.mir index 3254f5e45e4f4..09037709d51d8 100644 --- a/llvm/test/CodeGen/AMDGPU/sched-crash-dbg-value.mir +++ b/llvm/test/CodeGen/AMDGPU/sched-crash-dbg-value.mir @@ -1,5 +1,4 @@ # RUN: llc -mtriple=amdgcn-amd-amdhsa -verify-machineinstrs -run-pass=machine-scheduler -o - %s | FileCheck %s -# RUN: llc -mtriple=amdgcn-amd-amdhsa -passes=machine-scheduler -o - %s | FileCheck %s --- | %struct.widget.0 = type { float, i32, i32 } diff --git a/llvm/test/CodeGen/AMDGPU/sched-handleMoveUp-subreg-def-across-subreg-def.mir b/llvm/test/CodeGen/AMDGPU/sched-handleMoveUp-subreg-def-across-subreg-def.mir index 3ca61d26e8e42..6796391aba675 100644 --- a/llvm/test/CodeGen/AMDGPU/sched-handleMoveUp-subreg-def-across-subreg-def.mir +++ b/llvm/test/CodeGen/AMDGPU/sched-handleMoveUp-subreg-def-across-subreg-def.mir @@ -1,6 +1,5 @@ # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py # RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx906 -verify-machineinstrs -verify-misched -run-pass=machine-scheduler -o - %s | FileCheck %s -# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx906 -verify-misched -passes=machine-scheduler -o - %s | FileCheck %s --- name: handleMoveUp_incorrect_interval diff --git a/llvm/test/CodeGen/AMDGPU/schedule-barrier-fpmode.mir b/llvm/test/CodeGen/AMDGPU/schedule-barrier-fpmode.mir index 099cfc4f1dd54..0b1fd441256d8 100644 --- a/llvm/test/CodeGen/AMDGPU/schedule-barrier-fpmode.mir +++ b/llvm/test/CodeGen/AMDGPU/schedule-barrier-fpmode.mir @@ -1,8 +1,6 @@ # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py # RUN: llc -mtriple=amdgcn -mcpu=gfx1010 -run-pass=machine-scheduler -o - %s | FileCheck %s -# RUN: llc -mtriple=amdgcn -mcpu=gfx1010 -passes=machine-scheduler -o - %s | FileCheck %s # RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -run-pass=machine-scheduler -o - %s | FileCheck %s -# RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -passes=machine-scheduler -o - %s | FileCheck %s # Make sure FP mode is not a hard scheduling boundary --- diff --git a/llvm/test/CodeGen/AMDGPU/schedule-barrier.mir b/llvm/test/CodeGen/AMDGPU/schedule-barrier.mir index 88e11c9ce3d1d..e67036f0bbbea 100644 --- a/llvm/test/CodeGen/AMDGPU/schedule-barrier.mir +++ b/llvm/test/CodeGen/AMDGPU/schedule-barrier.mir @@ -1,6 +1,5 @@ # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py # RUN: llc -mtriple=amdgcn -mcpu=gfx900 -run-pass=machine-scheduler -o - %s | FileCheck %s -# RUN: llc -mtriple=amdgcn -mcpu=gfx900 -passes=machine-scheduler -o - %s | FileCheck %s --- # Check that the high latency loads are both scheduled first, before the diff --git 
a/llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll b/llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll index e2bcf3f6a2e2c..3a872a6080952 100644 --- a/llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll +++ b/llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll @@ -9779,111 +9779,118 @@ define amdgpu_kernel void @test_limited_sgpr(ptr addrspace(1) %out, ptr addrspac ; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3] ; GFX6-NEXT: v_lshlrev_b32_e32 v5, 8, v0 ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:240 -; GFX6-NEXT: buffer_load_dwordx4 v[7:10], v[5:6], s[4:7], 0 addr64 offset:224 -; GFX6-NEXT: s_mov_b32 s2, 0x86a00 -; GFX6-NEXT: s_mov_b64 s[8:9], exec -; GFX6-NEXT: s_waitcnt vmcnt(0) -; GFX6-NEXT: buffer_store_dword v7, off, s[40:43], s2 ; 4-byte Folded Spill -; GFX6-NEXT: buffer_store_dword v8, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill -; GFX6-NEXT: buffer_store_dword v9, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill -; GFX6-NEXT: buffer_store_dword v10, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: buffer_load_dwordx4 v[7:10], v[5:6], s[4:7], 0 addr64 offset:208 ; GFX6-NEXT: s_mov_b32 s2, 0x86600 +; GFX6-NEXT: s_mov_b64 s[8:9], exec ; GFX6-NEXT: s_waitcnt vmcnt(0) -; GFX6-NEXT: buffer_store_dword v7, off, s[40:43], s2 ; 4-byte Folded Spill -; GFX6-NEXT: buffer_store_dword v8, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill -; GFX6-NEXT: buffer_store_dword v9, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill -; GFX6-NEXT: buffer_store_dword v10, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill +; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill +; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill +; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill +; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill ; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: buffer_load_dwordx4 v[7:10], v[5:6], s[4:7], 0 addr64 offset:192 +; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:224 ; GFX6-NEXT: s_mov_b32 s2, 0x86200 ; GFX6-NEXT: s_waitcnt vmcnt(0) -; GFX6-NEXT: buffer_store_dword v7, off, s[40:43], s2 ; 4-byte Folded Spill -; GFX6-NEXT: buffer_store_dword v8, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill -; GFX6-NEXT: buffer_store_dword v9, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill -; GFX6-NEXT: buffer_store_dword v10, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill +; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill +; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill +; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill +; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill ; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: buffer_load_dwordx4 v[7:10], v[5:6], s[4:7], 0 addr64 offset:176 +; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:208 ; GFX6-NEXT: s_mov_b32 s2, 0x85e00 ; GFX6-NEXT: s_waitcnt vmcnt(0) -; GFX6-NEXT: buffer_store_dword v7, off, s[40:43], s2 ; 4-byte Folded Spill -; GFX6-NEXT: buffer_store_dword v8, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill -; GFX6-NEXT: buffer_store_dword v9, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill -; GFX6-NEXT: buffer_store_dword v10, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill +; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill +; GFX6-NEXT: 
buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill +; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill +; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill ; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: buffer_load_dwordx4 v[7:10], v[5:6], s[4:7], 0 addr64 offset:160 +; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:192 ; GFX6-NEXT: s_mov_b32 s2, 0x85a00 ; GFX6-NEXT: s_waitcnt vmcnt(0) -; GFX6-NEXT: buffer_store_dword v7, off, s[40:43], s2 ; 4-byte Folded Spill -; GFX6-NEXT: buffer_store_dword v8, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill -; GFX6-NEXT: buffer_store_dword v9, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill -; GFX6-NEXT: buffer_store_dword v10, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill +; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill +; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill +; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill +; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill ; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: buffer_load_dwordx4 v[7:10], v[5:6], s[4:7], 0 addr64 offset:144 +; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:176 ; GFX6-NEXT: s_mov_b32 s2, 0x85600 ; GFX6-NEXT: s_waitcnt vmcnt(0) -; GFX6-NEXT: buffer_store_dword v7, off, s[40:43], s2 ; 4-byte Folded Spill -; GFX6-NEXT: buffer_store_dword v8, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill -; GFX6-NEXT: buffer_store_dword v9, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill -; GFX6-NEXT: buffer_store_dword v10, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill +; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill +; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill +; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill +; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill ; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: buffer_load_dwordx4 v[7:10], v[5:6], s[4:7], 0 addr64 offset:128 +; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:160 ; GFX6-NEXT: s_mov_b32 s2, 0x85200 ; GFX6-NEXT: s_waitcnt vmcnt(0) -; GFX6-NEXT: buffer_store_dword v7, off, s[40:43], s2 ; 4-byte Folded Spill -; GFX6-NEXT: buffer_store_dword v8, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill -; GFX6-NEXT: buffer_store_dword v9, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill -; GFX6-NEXT: buffer_store_dword v10, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill +; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill +; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill +; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill +; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill ; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: buffer_load_dwordx4 v[7:10], v[5:6], s[4:7], 0 addr64 offset:112 +; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:144 ; GFX6-NEXT: s_mov_b32 s2, 0x84e00 ; GFX6-NEXT: s_waitcnt vmcnt(0) -; GFX6-NEXT: buffer_store_dword v7, off, s[40:43], s2 ; 4-byte Folded Spill -; GFX6-NEXT: buffer_store_dword v8, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill -; GFX6-NEXT: buffer_store_dword v9, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill -; GFX6-NEXT: 
buffer_store_dword v10, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill +; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill +; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill +; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill +; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill ; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: buffer_load_dwordx4 v[7:10], v[5:6], s[4:7], 0 addr64 offset:96 +; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:128 ; GFX6-NEXT: s_mov_b32 s2, 0x84a00 ; GFX6-NEXT: s_waitcnt vmcnt(0) -; GFX6-NEXT: buffer_store_dword v7, off, s[40:43], s2 ; 4-byte Folded Spill -; GFX6-NEXT: buffer_store_dword v8, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill -; GFX6-NEXT: buffer_store_dword v9, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill -; GFX6-NEXT: buffer_store_dword v10, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill +; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill +; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill +; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill +; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill ; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: buffer_load_dwordx4 v[7:10], v[5:6], s[4:7], 0 addr64 offset:80 +; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:112 ; GFX6-NEXT: s_mov_b32 s2, 0x84600 ; GFX6-NEXT: s_waitcnt vmcnt(0) -; GFX6-NEXT: buffer_store_dword v7, off, s[40:43], s2 ; 4-byte Folded Spill -; GFX6-NEXT: buffer_store_dword v8, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill -; GFX6-NEXT: buffer_store_dword v9, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill -; GFX6-NEXT: buffer_store_dword v10, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill +; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill +; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill +; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill +; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill ; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: buffer_load_dwordx4 v[7:10], v[5:6], s[4:7], 0 addr64 offset:64 +; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:96 ; GFX6-NEXT: s_mov_b32 s2, 0x84200 ; GFX6-NEXT: s_waitcnt vmcnt(0) +; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill +; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill +; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill +; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill +; GFX6-NEXT: s_waitcnt expcnt(0) +; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:80 +; GFX6-NEXT: s_mov_b32 s2, 0x83e00 +; GFX6-NEXT: s_waitcnt vmcnt(0) +; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill +; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill +; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill +; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill +; GFX6-NEXT: s_waitcnt expcnt(0) +; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:64 +; GFX6-NEXT: s_mov_b32 s2, 0x83a00 +; GFX6-NEXT: s_waitcnt vmcnt(0) 
+; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill +; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill +; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill +; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill +; GFX6-NEXT: s_waitcnt expcnt(0) +; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 +; GFX6-NEXT: buffer_load_dwordx4 v[7:10], v[5:6], s[4:7], 0 addr64 offset:16 +; GFX6-NEXT: s_mov_b32 s2, 0x83200 +; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v7, off, s[40:43], s2 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v8, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v9, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v10, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill ; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: buffer_load_dwordx4 v[8:11], v[5:6], s[4:7], 0 addr64 -; GFX6-NEXT: buffer_load_dwordx4 v[12:15], v[5:6], s[4:7], 0 addr64 offset:16 +; GFX6-NEXT: buffer_load_dwordx4 v[7:10], v[5:6], s[4:7], 0 addr64 offset:32 ; GFX6-NEXT: s_mov_b32 s2, 0x83600 ; GFX6-NEXT: s_waitcnt vmcnt(0) -; GFX6-NEXT: buffer_store_dword v12, off, s[40:43], s2 ; 4-byte Folded Spill -; GFX6-NEXT: buffer_store_dword v13, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill -; GFX6-NEXT: buffer_store_dword v14, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill -; GFX6-NEXT: buffer_store_dword v15, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: buffer_load_dwordx4 v[12:15], v[5:6], s[4:7], 0 addr64 offset:32 -; GFX6-NEXT: s_mov_b32 s2, 0x83a00 -; GFX6-NEXT: s_waitcnt vmcnt(0) -; GFX6-NEXT: buffer_store_dword v12, off, s[40:43], s2 ; 4-byte Folded Spill -; GFX6-NEXT: buffer_store_dword v13, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill -; GFX6-NEXT: buffer_store_dword v14, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill -; GFX6-NEXT: buffer_store_dword v15, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill +; GFX6-NEXT: buffer_store_dword v7, off, s[40:43], s2 ; 4-byte Folded Spill +; GFX6-NEXT: buffer_store_dword v8, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill +; GFX6-NEXT: buffer_store_dword v9, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill +; GFX6-NEXT: buffer_store_dword v10, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill ; GFX6-NEXT: s_mov_b64 s[2:3], s[6:7] ; GFX6-NEXT: s_mov_b64 exec, 15 ; GFX6-NEXT: buffer_store_dword v4, off, s[40:43], 0 @@ -9898,16 +9905,17 @@ define amdgpu_kernel void @test_limited_sgpr(ptr addrspace(1) %out, ptr addrspac ; GFX6-NEXT: buffer_load_dword v4, off, s[40:43], 0 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: s_mov_b64 exec, s[8:9] -; GFX6-NEXT: buffer_load_dwordx4 v[12:15], v[5:6], s[4:7], 0 addr64 offset:48 -; GFX6-NEXT: s_mov_b32 s0, 0x83e00 -; GFX6-NEXT: v_lshlrev_b32_e32 v4, 13, v8 +; GFX6-NEXT: buffer_load_dwordx4 v[7:10], v[5:6], s[4:7], 0 addr64 offset:48 +; GFX6-NEXT: s_mov_b32 s0, 0x86a00 +; GFX6-NEXT: v_lshlrev_b32_e32 v4, 13, v0 ; GFX6-NEXT: v_add_i32_e32 v4, vcc, 16, v4 -; GFX6-NEXT: v_mov_b32_e32 v7, 1 ; GFX6-NEXT: s_waitcnt vmcnt(0) -; GFX6-NEXT: buffer_store_dword v12, off, s[40:43], s0 ; 4-byte Folded Spill -; GFX6-NEXT: buffer_store_dword v13, off, s[40:43], s0 offset:4 ; 4-byte Folded Spill -; GFX6-NEXT: buffer_store_dword v14, off, s[40:43], s0 offset:8 ; 4-byte Folded Spill -; GFX6-NEXT: buffer_store_dword v15, off, s[40:43], s0 offset:12 ; 4-byte Folded Spill 
+; GFX6-NEXT: buffer_store_dword v7, off, s[40:43], s0 ; 4-byte Folded Spill +; GFX6-NEXT: buffer_store_dword v8, off, s[40:43], s0 offset:4 ; 4-byte Folded Spill +; GFX6-NEXT: buffer_store_dword v9, off, s[40:43], s0 offset:8 ; 4-byte Folded Spill +; GFX6-NEXT: buffer_store_dword v10, off, s[40:43], s0 offset:12 ; 4-byte Folded Spill +; GFX6-NEXT: s_waitcnt expcnt(3) +; GFX6-NEXT: v_mov_b32_e32 v7, 1 ; GFX6-NEXT: s_mov_b64 s[0:1], exec ; GFX6-NEXT: buffer_store_dword v7, v4, s[40:43], 0 offen ; GFX6-NEXT: ;;#ASMSTART @@ -9930,7 +9938,7 @@ define amdgpu_kernel void @test_limited_sgpr(ptr addrspace(1) %out, ptr addrspac ; GFX6-NEXT: buffer_load_dword v4, off, s[40:43], 0 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: s_mov_b64 exec, s[0:1] -; GFX6-NEXT: s_mov_b32 s6, 0x83200 +; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 ; GFX6-NEXT: ;;#ASMSTART ; GFX6-NEXT: ; def s[8:15] ; GFX6-NEXT: ;;#ASMEND @@ -9949,11 +9957,6 @@ define amdgpu_kernel void @test_limited_sgpr(ptr addrspace(1) %out, ptr addrspac ; GFX6-NEXT: ;;#ASMSTART ; GFX6-NEXT: ; def s33 ; GFX6-NEXT: ;;#ASMEND -; GFX6-NEXT: buffer_store_dword v8, off, s[40:43], s6 ; 4-byte Folded Spill -; GFX6-NEXT: buffer_store_dword v9, off, s[40:43], s6 offset:4 ; 4-byte Folded Spill -; GFX6-NEXT: buffer_store_dword v10, off, s[40:43], s6 offset:8 ; 4-byte Folded Spill -; GFX6-NEXT: buffer_store_dword v11, off, s[40:43], s6 offset:12 ; 4-byte Folded Spill -; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, 0, v8 ; GFX6-NEXT: s_and_saveexec_b64 s[6:7], vcc ; GFX6-NEXT: s_mov_b64 vcc, s[6:7] ; GFX6-NEXT: s_cbranch_execz .LBB1_2 @@ -10184,127 +10187,126 @@ define amdgpu_kernel void @test_limited_sgpr(ptr addrspace(1) %out, ptr addrspac ; GFX6-NEXT: buffer_load_dword v4, off, s[40:43], 0 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: s_mov_b64 exec, s[4:5] -; GFX6-NEXT: s_mov_b64 s[38:39], s[2:3] -; GFX6-NEXT: buffer_store_dwordx4 v[0:3], v[5:6], s[36:39], 0 addr64 offset:240 -; GFX6-NEXT: s_mov_b32 s0, 0x86a00 -; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: buffer_load_dword v0, off, s[40:43], s0 ; 4-byte Folded Reload -; GFX6-NEXT: buffer_load_dword v1, off, s[40:43], s0 offset:4 ; 4-byte Folded Reload -; GFX6-NEXT: buffer_load_dword v2, off, s[40:43], s0 offset:8 ; 4-byte Folded Reload -; GFX6-NEXT: buffer_load_dword v3, off, s[40:43], s0 offset:12 ; 4-byte Folded Reload ; GFX6-NEXT: s_mov_b32 s0, 0x86600 -; GFX6-NEXT: s_waitcnt vmcnt(0) -; GFX6-NEXT: buffer_store_dwordx4 v[0:3], v[5:6], s[36:39], 0 addr64 offset:224 -; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: buffer_load_dword v0, off, s[40:43], s0 ; 4-byte Folded Reload -; GFX6-NEXT: buffer_load_dword v1, off, s[40:43], s0 offset:4 ; 4-byte Folded Reload -; GFX6-NEXT: buffer_load_dword v2, off, s[40:43], s0 offset:8 ; 4-byte Folded Reload -; GFX6-NEXT: buffer_load_dword v3, off, s[40:43], s0 offset:12 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v7, off, s[40:43], s0 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v8, off, s[40:43], s0 offset:4 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v9, off, s[40:43], s0 offset:8 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v10, off, s[40:43], s0 offset:12 ; 4-byte Folded Reload +; GFX6-NEXT: s_mov_b64 s[38:39], s[2:3] ; GFX6-NEXT: s_mov_b32 s0, 0x86200 ; GFX6-NEXT: s_waitcnt vmcnt(0) -; GFX6-NEXT: buffer_store_dwordx4 v[0:3], v[5:6], s[36:39], 0 addr64 offset:208 +; GFX6-NEXT: buffer_store_dwordx4 v[7:10], v[5:6], s[36:39], 0 addr64 offset:240 ; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: buffer_load_dword v0, off, s[40:43], s0 ; 4-byte 
Folded Reload -; GFX6-NEXT: buffer_load_dword v1, off, s[40:43], s0 offset:4 ; 4-byte Folded Reload -; GFX6-NEXT: buffer_load_dword v2, off, s[40:43], s0 offset:8 ; 4-byte Folded Reload -; GFX6-NEXT: buffer_load_dword v3, off, s[40:43], s0 offset:12 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v7, off, s[40:43], s0 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v8, off, s[40:43], s0 offset:4 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v9, off, s[40:43], s0 offset:8 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v10, off, s[40:43], s0 offset:12 ; 4-byte Folded Reload ; GFX6-NEXT: s_mov_b32 s0, 0x85e00 ; GFX6-NEXT: s_waitcnt vmcnt(0) -; GFX6-NEXT: buffer_store_dwordx4 v[0:3], v[5:6], s[36:39], 0 addr64 offset:192 +; GFX6-NEXT: buffer_store_dwordx4 v[7:10], v[5:6], s[36:39], 0 addr64 offset:224 ; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: buffer_load_dword v0, off, s[40:43], s0 ; 4-byte Folded Reload -; GFX6-NEXT: buffer_load_dword v1, off, s[40:43], s0 offset:4 ; 4-byte Folded Reload -; GFX6-NEXT: buffer_load_dword v2, off, s[40:43], s0 offset:8 ; 4-byte Folded Reload -; GFX6-NEXT: buffer_load_dword v3, off, s[40:43], s0 offset:12 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v7, off, s[40:43], s0 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v8, off, s[40:43], s0 offset:4 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v9, off, s[40:43], s0 offset:8 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v10, off, s[40:43], s0 offset:12 ; 4-byte Folded Reload ; GFX6-NEXT: s_mov_b32 s0, 0x85a00 ; GFX6-NEXT: s_waitcnt vmcnt(0) -; GFX6-NEXT: buffer_store_dwordx4 v[0:3], v[5:6], s[36:39], 0 addr64 offset:176 +; GFX6-NEXT: buffer_store_dwordx4 v[7:10], v[5:6], s[36:39], 0 addr64 offset:208 ; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: buffer_load_dword v0, off, s[40:43], s0 ; 4-byte Folded Reload -; GFX6-NEXT: buffer_load_dword v1, off, s[40:43], s0 offset:4 ; 4-byte Folded Reload -; GFX6-NEXT: buffer_load_dword v2, off, s[40:43], s0 offset:8 ; 4-byte Folded Reload -; GFX6-NEXT: buffer_load_dword v3, off, s[40:43], s0 offset:12 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v7, off, s[40:43], s0 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v8, off, s[40:43], s0 offset:4 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v9, off, s[40:43], s0 offset:8 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v10, off, s[40:43], s0 offset:12 ; 4-byte Folded Reload ; GFX6-NEXT: s_mov_b32 s0, 0x85600 ; GFX6-NEXT: s_waitcnt vmcnt(0) -; GFX6-NEXT: buffer_store_dwordx4 v[0:3], v[5:6], s[36:39], 0 addr64 offset:160 +; GFX6-NEXT: buffer_store_dwordx4 v[7:10], v[5:6], s[36:39], 0 addr64 offset:192 ; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: buffer_load_dword v0, off, s[40:43], s0 ; 4-byte Folded Reload -; GFX6-NEXT: buffer_load_dword v1, off, s[40:43], s0 offset:4 ; 4-byte Folded Reload -; GFX6-NEXT: buffer_load_dword v2, off, s[40:43], s0 offset:8 ; 4-byte Folded Reload -; GFX6-NEXT: buffer_load_dword v3, off, s[40:43], s0 offset:12 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v7, off, s[40:43], s0 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v8, off, s[40:43], s0 offset:4 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v9, off, s[40:43], s0 offset:8 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v10, off, s[40:43], s0 offset:12 ; 4-byte Folded Reload ; GFX6-NEXT: s_mov_b32 s0, 0x85200 ; GFX6-NEXT: s_waitcnt vmcnt(0) -; GFX6-NEXT: buffer_store_dwordx4 v[0:3], v[5:6], s[36:39], 0 addr64 
offset:144 +; GFX6-NEXT: buffer_store_dwordx4 v[7:10], v[5:6], s[36:39], 0 addr64 offset:176 ; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: buffer_load_dword v0, off, s[40:43], s0 ; 4-byte Folded Reload -; GFX6-NEXT: buffer_load_dword v1, off, s[40:43], s0 offset:4 ; 4-byte Folded Reload -; GFX6-NEXT: buffer_load_dword v2, off, s[40:43], s0 offset:8 ; 4-byte Folded Reload -; GFX6-NEXT: buffer_load_dword v3, off, s[40:43], s0 offset:12 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v7, off, s[40:43], s0 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v8, off, s[40:43], s0 offset:4 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v9, off, s[40:43], s0 offset:8 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v10, off, s[40:43], s0 offset:12 ; 4-byte Folded Reload ; GFX6-NEXT: s_mov_b32 s0, 0x84e00 ; GFX6-NEXT: s_waitcnt vmcnt(0) -; GFX6-NEXT: buffer_store_dwordx4 v[0:3], v[5:6], s[36:39], 0 addr64 offset:128 +; GFX6-NEXT: buffer_store_dwordx4 v[7:10], v[5:6], s[36:39], 0 addr64 offset:160 ; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: buffer_load_dword v0, off, s[40:43], s0 ; 4-byte Folded Reload -; GFX6-NEXT: buffer_load_dword v1, off, s[40:43], s0 offset:4 ; 4-byte Folded Reload -; GFX6-NEXT: buffer_load_dword v2, off, s[40:43], s0 offset:8 ; 4-byte Folded Reload -; GFX6-NEXT: buffer_load_dword v3, off, s[40:43], s0 offset:12 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v7, off, s[40:43], s0 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v8, off, s[40:43], s0 offset:4 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v9, off, s[40:43], s0 offset:8 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v10, off, s[40:43], s0 offset:12 ; 4-byte Folded Reload ; GFX6-NEXT: s_mov_b32 s0, 0x84a00 ; GFX6-NEXT: s_waitcnt vmcnt(0) -; GFX6-NEXT: buffer_store_dwordx4 v[0:3], v[5:6], s[36:39], 0 addr64 offset:112 +; GFX6-NEXT: buffer_store_dwordx4 v[7:10], v[5:6], s[36:39], 0 addr64 offset:144 ; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: buffer_load_dword v0, off, s[40:43], s0 ; 4-byte Folded Reload -; GFX6-NEXT: buffer_load_dword v1, off, s[40:43], s0 offset:4 ; 4-byte Folded Reload -; GFX6-NEXT: buffer_load_dword v2, off, s[40:43], s0 offset:8 ; 4-byte Folded Reload -; GFX6-NEXT: buffer_load_dword v3, off, s[40:43], s0 offset:12 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v7, off, s[40:43], s0 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v8, off, s[40:43], s0 offset:4 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v9, off, s[40:43], s0 offset:8 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v10, off, s[40:43], s0 offset:12 ; 4-byte Folded Reload ; GFX6-NEXT: s_mov_b32 s0, 0x84600 ; GFX6-NEXT: s_waitcnt vmcnt(0) -; GFX6-NEXT: buffer_store_dwordx4 v[0:3], v[5:6], s[36:39], 0 addr64 offset:96 +; GFX6-NEXT: buffer_store_dwordx4 v[7:10], v[5:6], s[36:39], 0 addr64 offset:128 ; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: buffer_load_dword v0, off, s[40:43], s0 ; 4-byte Folded Reload -; GFX6-NEXT: buffer_load_dword v1, off, s[40:43], s0 offset:4 ; 4-byte Folded Reload -; GFX6-NEXT: buffer_load_dword v2, off, s[40:43], s0 offset:8 ; 4-byte Folded Reload -; GFX6-NEXT: buffer_load_dword v3, off, s[40:43], s0 offset:12 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v7, off, s[40:43], s0 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v8, off, s[40:43], s0 offset:4 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v9, off, s[40:43], s0 offset:8 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v10, 
off, s[40:43], s0 offset:12 ; 4-byte Folded Reload ; GFX6-NEXT: s_mov_b32 s0, 0x84200 ; GFX6-NEXT: s_waitcnt vmcnt(0) -; GFX6-NEXT: buffer_store_dwordx4 v[0:3], v[5:6], s[36:39], 0 addr64 offset:80 +; GFX6-NEXT: buffer_store_dwordx4 v[7:10], v[5:6], s[36:39], 0 addr64 offset:112 ; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: buffer_load_dword v0, off, s[40:43], s0 ; 4-byte Folded Reload -; GFX6-NEXT: buffer_load_dword v1, off, s[40:43], s0 offset:4 ; 4-byte Folded Reload -; GFX6-NEXT: buffer_load_dword v2, off, s[40:43], s0 offset:8 ; 4-byte Folded Reload -; GFX6-NEXT: buffer_load_dword v3, off, s[40:43], s0 offset:12 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v7, off, s[40:43], s0 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v8, off, s[40:43], s0 offset:4 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v9, off, s[40:43], s0 offset:8 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v10, off, s[40:43], s0 offset:12 ; 4-byte Folded Reload ; GFX6-NEXT: s_mov_b32 s0, 0x83e00 ; GFX6-NEXT: s_waitcnt vmcnt(0) -; GFX6-NEXT: buffer_store_dwordx4 v[0:3], v[5:6], s[36:39], 0 addr64 offset:64 +; GFX6-NEXT: buffer_store_dwordx4 v[7:10], v[5:6], s[36:39], 0 addr64 offset:96 ; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: buffer_load_dword v0, off, s[40:43], s0 ; 4-byte Folded Reload -; GFX6-NEXT: buffer_load_dword v1, off, s[40:43], s0 offset:4 ; 4-byte Folded Reload -; GFX6-NEXT: buffer_load_dword v2, off, s[40:43], s0 offset:8 ; 4-byte Folded Reload -; GFX6-NEXT: buffer_load_dword v3, off, s[40:43], s0 offset:12 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v7, off, s[40:43], s0 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v8, off, s[40:43], s0 offset:4 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v9, off, s[40:43], s0 offset:8 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v10, off, s[40:43], s0 offset:12 ; 4-byte Folded Reload ; GFX6-NEXT: s_mov_b32 s0, 0x83a00 ; GFX6-NEXT: s_waitcnt vmcnt(0) -; GFX6-NEXT: buffer_store_dwordx4 v[0:3], v[5:6], s[36:39], 0 addr64 offset:48 +; GFX6-NEXT: buffer_store_dwordx4 v[7:10], v[5:6], s[36:39], 0 addr64 offset:80 +; GFX6-NEXT: s_waitcnt expcnt(0) +; GFX6-NEXT: buffer_load_dword v7, off, s[40:43], s0 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v8, off, s[40:43], s0 offset:4 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v9, off, s[40:43], s0 offset:8 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v10, off, s[40:43], s0 offset:12 ; 4-byte Folded Reload +; GFX6-NEXT: s_mov_b32 s0, 0x86a00 +; GFX6-NEXT: s_waitcnt vmcnt(0) +; GFX6-NEXT: buffer_store_dwordx4 v[7:10], v[5:6], s[36:39], 0 addr64 offset:64 ; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: buffer_load_dword v0, off, s[40:43], s0 ; 4-byte Folded Reload -; GFX6-NEXT: buffer_load_dword v1, off, s[40:43], s0 offset:4 ; 4-byte Folded Reload -; GFX6-NEXT: buffer_load_dword v2, off, s[40:43], s0 offset:8 ; 4-byte Folded Reload -; GFX6-NEXT: buffer_load_dword v3, off, s[40:43], s0 offset:12 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v7, off, s[40:43], s0 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v8, off, s[40:43], s0 offset:4 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v9, off, s[40:43], s0 offset:8 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v10, off, s[40:43], s0 offset:12 ; 4-byte Folded Reload ; GFX6-NEXT: s_mov_b32 s0, 0x83600 ; GFX6-NEXT: s_waitcnt vmcnt(0) -; GFX6-NEXT: buffer_store_dwordx4 v[0:3], v[5:6], s[36:39], 0 addr64 offset:32 +; GFX6-NEXT: 
buffer_store_dwordx4 v[7:10], v[5:6], s[36:39], 0 addr64 offset:48 ; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: buffer_load_dword v0, off, s[40:43], s0 ; 4-byte Folded Reload -; GFX6-NEXT: buffer_load_dword v1, off, s[40:43], s0 offset:4 ; 4-byte Folded Reload -; GFX6-NEXT: buffer_load_dword v2, off, s[40:43], s0 offset:8 ; 4-byte Folded Reload -; GFX6-NEXT: buffer_load_dword v3, off, s[40:43], s0 offset:12 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v7, off, s[40:43], s0 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v8, off, s[40:43], s0 offset:4 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v9, off, s[40:43], s0 offset:8 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v10, off, s[40:43], s0 offset:12 ; 4-byte Folded Reload ; GFX6-NEXT: s_mov_b32 s0, 0x83200 ; GFX6-NEXT: s_waitcnt vmcnt(0) -; GFX6-NEXT: buffer_store_dwordx4 v[0:3], v[5:6], s[36:39], 0 addr64 offset:16 +; GFX6-NEXT: buffer_store_dwordx4 v[7:10], v[5:6], s[36:39], 0 addr64 offset:32 ; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: buffer_load_dword v0, off, s[40:43], s0 ; 4-byte Folded Reload -; GFX6-NEXT: buffer_load_dword v1, off, s[40:43], s0 offset:4 ; 4-byte Folded Reload -; GFX6-NEXT: buffer_load_dword v2, off, s[40:43], s0 offset:8 ; 4-byte Folded Reload -; GFX6-NEXT: buffer_load_dword v3, off, s[40:43], s0 offset:12 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v7, off, s[40:43], s0 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v8, off, s[40:43], s0 offset:4 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v9, off, s[40:43], s0 offset:8 ; 4-byte Folded Reload +; GFX6-NEXT: buffer_load_dword v10, off, s[40:43], s0 offset:12 ; 4-byte Folded Reload ; GFX6-NEXT: s_waitcnt vmcnt(0) +; GFX6-NEXT: buffer_store_dwordx4 v[7:10], v[5:6], s[36:39], 0 addr64 offset:16 ; GFX6-NEXT: buffer_store_dwordx4 v[0:3], v[5:6], s[36:39], 0 addr64 ; GFX6-NEXT: s_endpgm ; @@ -10322,59 +10324,60 @@ define amdgpu_kernel void @test_limited_sgpr(ptr addrspace(1) %out, ptr addrspac ; GFX9-FLATSCR-NEXT: v_mov_b32_e32 v4, 16 ; GFX9-FLATSCR-NEXT: s_waitcnt vmcnt(0) ; GFX9-FLATSCR-NEXT: scratch_store_dwordx4 off, v[0:3], s0 ; 16-byte Folded Spill -; GFX9-FLATSCR-NEXT: global_load_dwordx4 v[0:3], v5, s[38:39] offset:224 -; GFX9-FLATSCR-NEXT: s_movk_i32 s0, 0x2040 -; GFX9-FLATSCR-NEXT: s_waitcnt vmcnt(0) -; GFX9-FLATSCR-NEXT: scratch_store_dwordx4 off, v[0:3], s0 ; 16-byte Folded Spill +; GFX9-FLATSCR-NEXT: global_load_dwordx4 v[11:14], v5, s[38:39] offset:224 ; GFX9-FLATSCR-NEXT: global_load_dwordx4 v[0:3], v5, s[38:39] offset:208 ; GFX9-FLATSCR-NEXT: s_movk_i32 s0, 0x2030 ; GFX9-FLATSCR-NEXT: s_waitcnt vmcnt(0) ; GFX9-FLATSCR-NEXT: scratch_store_dwordx4 off, v[0:3], s0 ; 16-byte Folded Spill -; GFX9-FLATSCR-NEXT: global_load_dwordx4 v[19:22], v5, s[38:39] offset:192 +; GFX9-FLATSCR-NEXT: global_load_dwordx4 v[0:3], v5, s[38:39] offset:192 +; GFX9-FLATSCR-NEXT: s_movk_i32 s0, 0x2020 +; GFX9-FLATSCR-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLATSCR-NEXT: scratch_store_dwordx4 off, v[0:3], s0 ; 16-byte Folded Spill ; GFX9-FLATSCR-NEXT: global_load_dwordx4 v[15:18], v5, s[38:39] offset:176 -; GFX9-FLATSCR-NEXT: global_load_dwordx4 v[11:14], v5, s[38:39] offset:160 +; GFX9-FLATSCR-NEXT: global_load_dwordx4 v[0:3], v5, s[38:39] offset:160 +; GFX9-FLATSCR-NEXT: s_movk_i32 s0, 0x2040 +; GFX9-FLATSCR-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLATSCR-NEXT: scratch_store_dwordx4 off, v[0:3], s0 ; 16-byte Folded Spill ; GFX9-FLATSCR-NEXT: global_load_dwordx4 v[0:3], v5, s[38:39] offset:144 -; GFX9-FLATSCR-NEXT: 
s_movk_i32 s0, 0x2010 ; GFX9-FLATSCR-NEXT: global_load_dwordx4 v[6:9], v5, s[38:39] offset:128 +; GFX9-FLATSCR-NEXT: s_movk_i32 s0, 0x2010 +; GFX9-FLATSCR-NEXT: global_load_dwordx4 v[19:22], v5, s[38:39] ; GFX9-FLATSCR-NEXT: s_waitcnt vmcnt(1) -; GFX9-FLATSCR-NEXT: scratch_store_dwordx4 off, v[0:3], s0 ; 16-byte Folded Spill -; GFX9-FLATSCR-NEXT: global_load_dwordx4 v[0:3], v5, s[38:39] offset:112 -; GFX9-FLATSCR-NEXT: s_movk_i32 s0, 0x2020 -; GFX9-FLATSCR-NEXT: s_waitcnt vmcnt(2) ; GFX9-FLATSCR-NEXT: scratch_store_dwordx4 off, v[6:9], s0 ; 16-byte Folded Spill +; GFX9-FLATSCR-NEXT: global_load_dwordx4 v[6:9], v5, s[38:39] offset:112 ; GFX9-FLATSCR-NEXT: s_movk_i32 s0, 0x20c0 -; GFX9-FLATSCR-NEXT: v_mov_b32_e32 v6, 1 -; GFX9-FLATSCR-NEXT: global_load_dwordx4 v[7:10], v5, s[38:39] ; GFX9-FLATSCR-NEXT: s_waitcnt vmcnt(2) -; GFX9-FLATSCR-NEXT: scratch_store_dwordx4 off, v[0:3], s0 ; 16-byte Folded Spill -; GFX9-FLATSCR-NEXT: global_load_dwordx4 v[0:3], v5, s[38:39] offset:96 +; GFX9-FLATSCR-NEXT: v_lshl_add_u32 v4, v19, 13, v4 +; GFX9-FLATSCR-NEXT: v_cmp_eq_u32_e32 vcc, 0, v19 +; GFX9-FLATSCR-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLATSCR-NEXT: scratch_store_dwordx4 off, v[6:9], s0 ; 16-byte Folded Spill +; GFX9-FLATSCR-NEXT: global_load_dwordx4 v[6:9], v5, s[38:39] offset:96 ; GFX9-FLATSCR-NEXT: s_movk_i32 s0, 0x20b0 -; GFX9-FLATSCR-NEXT: s_waitcnt vmcnt(2) -; GFX9-FLATSCR-NEXT: v_lshl_add_u32 v4, v7, 13, v4 -; GFX9-FLATSCR-NEXT: v_cmp_eq_u32_e32 vcc, 0, v7 -; GFX9-FLATSCR-NEXT: scratch_store_dword v4, v6, off -; GFX9-FLATSCR-NEXT: s_waitcnt vmcnt(1) -; GFX9-FLATSCR-NEXT: scratch_store_dwordx4 off, v[0:3], s0 ; 16-byte Folded Spill -; GFX9-FLATSCR-NEXT: global_load_dwordx4 v[0:3], v5, s[38:39] offset:80 +; GFX9-FLATSCR-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLATSCR-NEXT: scratch_store_dwordx4 off, v[6:9], s0 ; 16-byte Folded Spill +; GFX9-FLATSCR-NEXT: global_load_dwordx4 v[6:9], v5, s[38:39] offset:80 ; GFX9-FLATSCR-NEXT: s_movk_i32 s0, 0x20a0 ; GFX9-FLATSCR-NEXT: s_waitcnt vmcnt(0) -; GFX9-FLATSCR-NEXT: scratch_store_dwordx4 off, v[0:3], s0 ; 16-byte Folded Spill -; GFX9-FLATSCR-NEXT: global_load_dwordx4 v[0:3], v5, s[38:39] offset:64 +; GFX9-FLATSCR-NEXT: scratch_store_dwordx4 off, v[6:9], s0 ; 16-byte Folded Spill +; GFX9-FLATSCR-NEXT: global_load_dwordx4 v[6:9], v5, s[38:39] offset:64 ; GFX9-FLATSCR-NEXT: s_movk_i32 s0, 0x2090 ; GFX9-FLATSCR-NEXT: s_waitcnt vmcnt(0) -; GFX9-FLATSCR-NEXT: scratch_store_dwordx4 off, v[0:3], s0 ; 16-byte Folded Spill -; GFX9-FLATSCR-NEXT: global_load_dwordx4 v[0:3], v5, s[38:39] offset:48 +; GFX9-FLATSCR-NEXT: scratch_store_dwordx4 off, v[6:9], s0 ; 16-byte Folded Spill +; GFX9-FLATSCR-NEXT: global_load_dwordx4 v[6:9], v5, s[38:39] offset:48 ; GFX9-FLATSCR-NEXT: s_movk_i32 s0, 0x2080 ; GFX9-FLATSCR-NEXT: s_waitcnt vmcnt(0) -; GFX9-FLATSCR-NEXT: scratch_store_dwordx4 off, v[0:3], s0 ; 16-byte Folded Spill -; GFX9-FLATSCR-NEXT: global_load_dwordx4 v[0:3], v5, s[38:39] offset:32 +; GFX9-FLATSCR-NEXT: scratch_store_dwordx4 off, v[6:9], s0 ; 16-byte Folded Spill +; GFX9-FLATSCR-NEXT: global_load_dwordx4 v[6:9], v5, s[38:39] offset:32 ; GFX9-FLATSCR-NEXT: s_movk_i32 s0, 0x2070 ; GFX9-FLATSCR-NEXT: s_waitcnt vmcnt(0) -; GFX9-FLATSCR-NEXT: scratch_store_dwordx4 off, v[0:3], s0 ; 16-byte Folded Spill -; GFX9-FLATSCR-NEXT: global_load_dwordx4 v[0:3], v5, s[38:39] offset:16 +; GFX9-FLATSCR-NEXT: scratch_store_dwordx4 off, v[6:9], s0 ; 16-byte Folded Spill +; GFX9-FLATSCR-NEXT: global_load_dwordx4 v[6:9], v5, s[38:39] offset:16 ; GFX9-FLATSCR-NEXT: s_movk_i32 s0, 0x2060 ; 
GFX9-FLATSCR-NEXT: s_waitcnt vmcnt(0) -; GFX9-FLATSCR-NEXT: scratch_store_dwordx4 off, v[0:3], s0 ; 16-byte Folded Spill +; GFX9-FLATSCR-NEXT: scratch_store_dwordx4 off, v[6:9], s0 ; 16-byte Folded Spill +; GFX9-FLATSCR-NEXT: s_nop 0 +; GFX9-FLATSCR-NEXT: v_mov_b32_e32 v6, 1 +; GFX9-FLATSCR-NEXT: scratch_store_dword v4, v6, off ; GFX9-FLATSCR-NEXT: ;;#ASMSTART ; GFX9-FLATSCR-NEXT: ; def s[0:7] ; GFX9-FLATSCR-NEXT: ;;#ASMEND @@ -10403,27 +10406,26 @@ define amdgpu_kernel void @test_limited_sgpr(ptr addrspace(1) %out, ptr addrspac ; GFX9-FLATSCR-NEXT: ; use s[0:7],s[8:15],s[16:23],s[24:31],s[40:43],s[38:39] ; GFX9-FLATSCR-NEXT: ;;#ASMEND ; GFX9-FLATSCR-NEXT: s_movk_i32 s0, 0x20d0 -; GFX9-FLATSCR-NEXT: scratch_store_dwordx4 off, v[15:18], s0 ; 16-byte Folded Spill +; GFX9-FLATSCR-NEXT: scratch_store_dwordx4 off, v[0:3], s0 ; 16-byte Folded Spill ; GFX9-FLATSCR-NEXT: s_movk_i32 s0, 0x20e0 -; GFX9-FLATSCR-NEXT: v_mov_b32_e32 v0, v11 -; GFX9-FLATSCR-NEXT: scratch_store_dwordx4 off, v[19:22], s0 ; 16-byte Folded Spill +; GFX9-FLATSCR-NEXT: scratch_store_dwordx4 off, v[15:18], s0 ; 16-byte Folded Spill ; GFX9-FLATSCR-NEXT: s_movk_i32 s0, 0x20f0 -; GFX9-FLATSCR-NEXT: v_mov_b32_e32 v1, v12 -; GFX9-FLATSCR-NEXT: v_mov_b32_e32 v2, v13 -; GFX9-FLATSCR-NEXT: v_mov_b32_e32 v3, v14 -; GFX9-FLATSCR-NEXT: scratch_store_dwordx4 off, v[7:10], s0 ; 16-byte Folded Spill -; GFX9-FLATSCR-NEXT: s_nop 0 +; GFX9-FLATSCR-NEXT: v_mov_b32_e32 v0, v19 +; GFX9-FLATSCR-NEXT: scratch_store_dwordx4 off, v[11:14], s0 ; 16-byte Folded Spill +; GFX9-FLATSCR-NEXT: v_mov_b32_e32 v1, v20 +; GFX9-FLATSCR-NEXT: v_mov_b32_e32 v2, v21 +; GFX9-FLATSCR-NEXT: v_mov_b32_e32 v3, v22 ; GFX9-FLATSCR-NEXT: ;;#ASMSTART ; GFX9-FLATSCR-NEXT: ;;#ASMEND -; GFX9-FLATSCR-NEXT: scratch_load_dwordx4 v[7:10], off, s0 ; 16-byte Folded Reload +; GFX9-FLATSCR-NEXT: scratch_load_dwordx4 v[11:14], off, s0 ; 16-byte Folded Reload ; GFX9-FLATSCR-NEXT: s_movk_i32 s0, 0x20e0 -; GFX9-FLATSCR-NEXT: scratch_load_dwordx4 v[19:22], off, s0 ; 16-byte Folded Reload -; GFX9-FLATSCR-NEXT: s_movk_i32 s0, 0x20d0 +; GFX9-FLATSCR-NEXT: v_mov_b32_e32 v22, v3 ; GFX9-FLATSCR-NEXT: scratch_load_dwordx4 v[15:18], off, s0 ; 16-byte Folded Reload -; GFX9-FLATSCR-NEXT: v_mov_b32_e32 v14, v3 -; GFX9-FLATSCR-NEXT: v_mov_b32_e32 v13, v2 -; GFX9-FLATSCR-NEXT: v_mov_b32_e32 v12, v1 -; GFX9-FLATSCR-NEXT: v_mov_b32_e32 v11, v0 +; GFX9-FLATSCR-NEXT: s_movk_i32 s0, 0x20d0 +; GFX9-FLATSCR-NEXT: v_mov_b32_e32 v21, v2 +; GFX9-FLATSCR-NEXT: v_mov_b32_e32 v20, v1 +; GFX9-FLATSCR-NEXT: v_mov_b32_e32 v19, v0 +; GFX9-FLATSCR-NEXT: scratch_load_dwordx4 v[0:3], off, s0 ; 16-byte Folded Reload ; GFX9-FLATSCR-NEXT: ;;#ASMSTART ; GFX9-FLATSCR-NEXT: ;;#ASMEND ; GFX9-FLATSCR-NEXT: ;;#ASMSTART @@ -10439,53 +10441,53 @@ define amdgpu_kernel void @test_limited_sgpr(ptr addrspace(1) %out, ptr addrspac ; GFX9-FLATSCR-NEXT: .LBB1_2: ; %ret ; GFX9-FLATSCR-NEXT: s_or_b64 exec, exec, s[34:35] ; GFX9-FLATSCR-NEXT: s_movk_i32 s0, 0x20c0 -; GFX9-FLATSCR-NEXT: scratch_load_dwordx4 v[0:3], off, s0 ; 16-byte Folded Reload +; GFX9-FLATSCR-NEXT: scratch_load_dwordx4 v[6:9], off, s0 ; 16-byte Folded Reload ; GFX9-FLATSCR-NEXT: s_movk_i32 s0, 0x20b0 ; GFX9-FLATSCR-NEXT: s_waitcnt vmcnt(0) -; GFX9-FLATSCR-NEXT: global_store_dwordx4 v5, v[0:3], s[36:37] offset:112 -; GFX9-FLATSCR-NEXT: scratch_load_dwordx4 v[0:3], off, s0 ; 16-byte Folded Reload +; GFX9-FLATSCR-NEXT: global_store_dwordx4 v5, v[6:9], s[36:37] offset:112 +; GFX9-FLATSCR-NEXT: scratch_load_dwordx4 v[6:9], off, s0 ; 16-byte Folded Reload ; GFX9-FLATSCR-NEXT: 
s_movk_i32 s0, 0x20a0 ; GFX9-FLATSCR-NEXT: s_waitcnt vmcnt(0) -; GFX9-FLATSCR-NEXT: global_store_dwordx4 v5, v[0:3], s[36:37] offset:96 -; GFX9-FLATSCR-NEXT: scratch_load_dwordx4 v[0:3], off, s0 ; 16-byte Folded Reload +; GFX9-FLATSCR-NEXT: global_store_dwordx4 v5, v[6:9], s[36:37] offset:96 +; GFX9-FLATSCR-NEXT: scratch_load_dwordx4 v[6:9], off, s0 ; 16-byte Folded Reload ; GFX9-FLATSCR-NEXT: s_movk_i32 s0, 0x2090 ; GFX9-FLATSCR-NEXT: s_waitcnt vmcnt(0) -; GFX9-FLATSCR-NEXT: global_store_dwordx4 v5, v[0:3], s[36:37] offset:80 -; GFX9-FLATSCR-NEXT: scratch_load_dwordx4 v[0:3], off, s0 ; 16-byte Folded Reload +; GFX9-FLATSCR-NEXT: global_store_dwordx4 v5, v[6:9], s[36:37] offset:80 +; GFX9-FLATSCR-NEXT: scratch_load_dwordx4 v[6:9], off, s0 ; 16-byte Folded Reload ; GFX9-FLATSCR-NEXT: s_movk_i32 s0, 0x2080 ; GFX9-FLATSCR-NEXT: s_waitcnt vmcnt(0) -; GFX9-FLATSCR-NEXT: global_store_dwordx4 v5, v[0:3], s[36:37] offset:64 -; GFX9-FLATSCR-NEXT: scratch_load_dwordx4 v[0:3], off, s0 ; 16-byte Folded Reload +; GFX9-FLATSCR-NEXT: global_store_dwordx4 v5, v[6:9], s[36:37] offset:64 +; GFX9-FLATSCR-NEXT: scratch_load_dwordx4 v[6:9], off, s0 ; 16-byte Folded Reload ; GFX9-FLATSCR-NEXT: s_movk_i32 s0, 0x2070 ; GFX9-FLATSCR-NEXT: s_waitcnt vmcnt(0) -; GFX9-FLATSCR-NEXT: global_store_dwordx4 v5, v[0:3], s[36:37] offset:48 -; GFX9-FLATSCR-NEXT: scratch_load_dwordx4 v[0:3], off, s0 ; 16-byte Folded Reload +; GFX9-FLATSCR-NEXT: global_store_dwordx4 v5, v[6:9], s[36:37] offset:48 +; GFX9-FLATSCR-NEXT: scratch_load_dwordx4 v[6:9], off, s0 ; 16-byte Folded Reload ; GFX9-FLATSCR-NEXT: s_movk_i32 s0, 0x2060 ; GFX9-FLATSCR-NEXT: s_waitcnt vmcnt(0) -; GFX9-FLATSCR-NEXT: global_store_dwordx4 v5, v[0:3], s[36:37] offset:32 -; GFX9-FLATSCR-NEXT: scratch_load_dwordx4 v[0:3], off, s0 ; 16-byte Folded Reload -; GFX9-FLATSCR-NEXT: s_movk_i32 s0, 0x2050 -; GFX9-FLATSCR-NEXT: s_waitcnt vmcnt(0) -; GFX9-FLATSCR-NEXT: global_store_dwordx4 v5, v[0:3], s[36:37] offset:16 -; GFX9-FLATSCR-NEXT: global_store_dwordx4 v5, v[7:10], s[36:37] +; GFX9-FLATSCR-NEXT: global_store_dwordx4 v5, v[6:9], s[36:37] offset:32 ; GFX9-FLATSCR-NEXT: scratch_load_dwordx4 v[6:9], off, s0 ; 16-byte Folded Reload -; GFX9-FLATSCR-NEXT: s_movk_i32 s0, 0x2040 +; GFX9-FLATSCR-NEXT: s_movk_i32 s0, 0x2050 ; GFX9-FLATSCR-NEXT: s_waitcnt vmcnt(0) -; GFX9-FLATSCR-NEXT: global_store_dwordx4 v5, v[6:9], s[36:37] offset:240 -; GFX9-FLATSCR-NEXT: scratch_load_dwordx4 v[6:9], off, s0 ; 16-byte Folded Reload +; GFX9-FLATSCR-NEXT: global_store_dwordx4 v5, v[6:9], s[36:37] offset:16 +; GFX9-FLATSCR-NEXT: global_store_dwordx4 v5, v[19:22], s[36:37] +; GFX9-FLATSCR-NEXT: scratch_load_dwordx4 v[19:22], off, s0 ; 16-byte Folded Reload ; GFX9-FLATSCR-NEXT: s_movk_i32 s0, 0x2030 ; GFX9-FLATSCR-NEXT: s_waitcnt vmcnt(0) -; GFX9-FLATSCR-NEXT: global_store_dwordx4 v5, v[6:9], s[36:37] offset:224 -; GFX9-FLATSCR-NEXT: scratch_load_dwordx4 v[6:9], off, s0 ; 16-byte Folded Reload -; GFX9-FLATSCR-NEXT: s_movk_i32 s0, 0x2010 +; GFX9-FLATSCR-NEXT: global_store_dwordx4 v5, v[19:22], s[36:37] offset:240 +; GFX9-FLATSCR-NEXT: global_store_dwordx4 v5, v[11:14], s[36:37] offset:224 +; GFX9-FLATSCR-NEXT: scratch_load_dwordx4 v[11:14], off, s0 ; 16-byte Folded Reload +; GFX9-FLATSCR-NEXT: s_movk_i32 s0, 0x2020 +; GFX9-FLATSCR-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLATSCR-NEXT: global_store_dwordx4 v5, v[11:14], s[36:37] offset:208 +; GFX9-FLATSCR-NEXT: scratch_load_dwordx4 v[11:14], off, s0 ; 16-byte Folded Reload +; GFX9-FLATSCR-NEXT: s_movk_i32 s0, 0x2040 ; GFX9-FLATSCR-NEXT: s_waitcnt vmcnt(0) 
-; GFX9-FLATSCR-NEXT: global_store_dwordx4 v5, v[6:9], s[36:37] offset:208 -; GFX9-FLATSCR-NEXT: global_store_dwordx4 v5, v[19:22], s[36:37] offset:192 +; GFX9-FLATSCR-NEXT: global_store_dwordx4 v5, v[11:14], s[36:37] offset:192 ; GFX9-FLATSCR-NEXT: global_store_dwordx4 v5, v[15:18], s[36:37] offset:176 -; GFX9-FLATSCR-NEXT: global_store_dwordx4 v5, v[11:14], s[36:37] offset:160 -; GFX9-FLATSCR-NEXT: scratch_load_dwordx4 v[0:3], off, s0 ; 16-byte Folded Reload -; GFX9-FLATSCR-NEXT: s_movk_i32 s0, 0x2020 +; GFX9-FLATSCR-NEXT: scratch_load_dwordx4 v[11:14], off, s0 ; 16-byte Folded Reload +; GFX9-FLATSCR-NEXT: s_movk_i32 s0, 0x2010 ; GFX9-FLATSCR-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLATSCR-NEXT: global_store_dwordx4 v5, v[11:14], s[36:37] offset:160 ; GFX9-FLATSCR-NEXT: global_store_dwordx4 v5, v[0:3], s[36:37] offset:144 ; GFX9-FLATSCR-NEXT: scratch_load_dwordx4 v[0:3], off, s0 ; 16-byte Folded Reload ; GFX9-FLATSCR-NEXT: s_waitcnt vmcnt(0) diff --git a/llvm/test/CodeGen/AMDGPU/sreg-xnull-regclass-bitwidth.mir b/llvm/test/CodeGen/AMDGPU/sreg-xnull-regclass-bitwidth.mir index 3091fe85fa8bc..d8d4f5d0220c9 100644 --- a/llvm/test/CodeGen/AMDGPU/sreg-xnull-regclass-bitwidth.mir +++ b/llvm/test/CodeGen/AMDGPU/sreg-xnull-regclass-bitwidth.mir @@ -1,6 +1,5 @@ # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5 # RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -run-pass=postmisched -o - %s | FileCheck %s -# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -passes=postmisched -o - %s | FileCheck %s --- name: test_xnull_256 body: | diff --git a/llvm/test/CodeGen/ARM/cortex-m7-wideops.mir b/llvm/test/CodeGen/ARM/cortex-m7-wideops.mir index 1bee32f4c90cd..0a47b87b422dd 100644 --- a/llvm/test/CodeGen/ARM/cortex-m7-wideops.mir +++ b/llvm/test/CodeGen/ARM/cortex-m7-wideops.mir @@ -1,6 +1,5 @@ # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py # RUN: llc -mtriple arm-arm-eabi -mcpu=cortex-m7 -verify-machineinstrs -run-pass=postmisched %s -o - | FileCheck %s -# RUN: llc -mtriple arm-arm-eabi -mcpu=cortex-m7 -passes=postmisched %s -o - | FileCheck %s --- name: test_groups alignment: 2 diff --git a/llvm/test/CodeGen/ARM/misched-branch-targets.mir b/llvm/test/CodeGen/ARM/misched-branch-targets.mir index 610344f844001..d828d9e516273 100644 --- a/llvm/test/CodeGen/ARM/misched-branch-targets.mir +++ b/llvm/test/CodeGen/ARM/misched-branch-targets.mir @@ -1,7 +1,5 @@ # RUN: llc -o - -run-pass=machine-scheduler -misched=shuffle %s | FileCheck %s -# RUN: llc -o - -passes=machine-scheduler -misched=shuffle %s | FileCheck %s # RUN: llc -o - -run-pass=postmisched %s | FileCheck %s -# RUN: llc -o - -passes=postmisched %s | FileCheck %s # REQUIRES: asserts # -misched=shuffle is only available with assertions enabled diff --git a/llvm/test/CodeGen/Hexagon/rdf-copy-clobber.mir b/llvm/test/CodeGen/Hexagon/rdf-copy-clobber.mir new file mode 100644 index 0000000000000..e0676a143eefe --- /dev/null +++ b/llvm/test/CodeGen/Hexagon/rdf-copy-clobber.mir @@ -0,0 +1,143 @@ +# RUN: llc -march=hexagon -run-pass=hexagon-rdf-opt -hexagon-rdf-dump -verify-machineinstrs -o /dev/null %s 2>&1 | FileCheck %s + +# Check that RDF graph has a phi node for R28 register in bb.3 and bb.4 +# R28 is clobbered by memcpy call. 
The clobbering def must be present in bb.4's IDF +# This phi node should prevent $r27 from being replaced by $r28 by RDF copy propagation + +#CHECK-LABEL: Starting copy propagation on: foo + +#CHECK-LABEL: --- %bb.3 --- +#CHECK: p{{[0-9]+}}: phi [+d{{[0-9]+}} + +#CHECK-LABEL: --- %bb.4 --- +#CHECK: p{{[0-9]+}}: phi [+d{{[0-9]+}} + +#CHECK-LABEL: After Hexagon RDF optimizations +#CHECK-LABEL: bb.3: +#CHECK: renamable $r0 = A2_add renamable $r27 + +--- | + define internal fastcc void @foo() unnamed_addr { + entry: + ret void + } + + declare void @llvm.memcpy.p0.p0.i32(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i32, i1 immarg) + +--- +name: foo +alignment: 16 +exposesReturnsTwice: false +legalized: false +regBankSelected: false +selected: false +failedISel: false +tracksRegLiveness: true +hasWinCFI: false +callsEHReturn: false +callsUnwindInit: false +hasEHCatchret: false +hasEHScopes: false +hasEHFunclets: false +isOutlined: false +debugInstrRef: false +failsVerification: false +tracksDebugUserValues: true +registers: [] +liveins: + - { reg: '$d0', virtual-reg: '' } + - { reg: '$d3', virtual-reg: '' } + - { reg: '$r23', virtual-reg: '' } +frameInfo: + isFrameAddressTaken: false + isReturnAddressTaken: false + hasStackMap: false + hasPatchPoint: false + stackSize: 0 + offsetAdjustment: 0 + maxAlignment: 8 + adjustsStack: true + hasCalls: true + stackProtector: '' + functionContext: '' + maxCallFrameSize: 4294967295 + cvBytesOfCalleeSavedRegisters: 0 + hasOpaqueSPAdjustment: false + hasVAStart: false + hasMustTailInVarArgFunc: false + hasTailCall: false + isCalleeSavedInfoValid: false + localFrameSize: 0 + savePoint: '' + restorePoint: '' +fixedStack: + - { id: 0, type: default, offset: 40, size: 8, alignment: 8, stack-id: default, + isImmutable: true, isAliased: false, callee-saved-register: '', callee-saved-restored: true, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } +stack: + - { id: 0, name: '', type: spill-slot, offset: 0, size: 8, alignment: 8, + stack-id: default, callee-saved-register: '', callee-saved-restored: true, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } + - { id: 1, name: '', type: spill-slot, offset: 0, size: 8, alignment: 8, + stack-id: default, callee-saved-register: '', callee-saved-restored: true, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } + - { id: 2, name: '', type: spill-slot, offset: 0, size: 8, alignment: 8, + stack-id: default, callee-saved-register: '', callee-saved-restored: true, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } + - { id: 3, name: '', type: spill-slot, offset: 0, size: 8, alignment: 8, + stack-id: default, callee-saved-register: '', callee-saved-restored: true, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } +entry_values: [] +callSites: [] +debugValueSubstitutions: [] +constants: [] +machineFunctionInfo: {} +body: | + bb.0.entry: + successors: %bb.1 + liveins: $d0, $d3, $r23 + + J2_jump %bb.1, implicit-def dead $pc + + bb.1: + successors: %bb.2 + liveins: $d0:0x0000000000000003, $d3:0x0000000000000003, $r23 + + renamable $r28 = L2_loadri_io %fixed-stack.0, 0 :: (load (s32) from %fixed-stack.0) + renamable $r27 = COPY killed renamable $r28 + + bb.2: + successors: %bb.3 + liveins: $d0:0x0000000000000003, $d3:0x0000000000000003, $r23, $r27 + + renamable $d10 = L2_loadrd_io %stack.0, 0 :: (load (s64) from %stack.0) + renamable $d11 = L2_loadrd_io %stack.1, 0 :: 
(load (s64) from %stack.1) + + bb.3: + successors: %bb.4, %bb.3 + liveins: $d0:0x0000000000000003, $d3:0x0000000000000003, $d10:0x0000000000000003, $d11:0x0000000000000002, $r23, $r27 + + ADJCALLSTACKDOWN 0, 0, implicit-def $r29, implicit-def dead $r30, implicit $r31, implicit $r30, implicit $r29 + renamable $r1 = A2_add renamable $r23, killed renamable $r0 + $r2 = COPY renamable $r22 + renamable $r0 = A2_add renamable $r27, killed renamable $r6 + J2_call &memcpy, hexagoncsr, implicit-def dead $pc, implicit-def dead $r31, implicit $r29, implicit $r0, implicit $r1, implicit $r2, implicit-def $r29, implicit-def dead $r0 + renamable $p0 = C2_cmpgtp renamable $d11, renamable $d10 + ADJCALLSTACKUP 0, 0, implicit-def dead $r29, implicit-def dead $r30, implicit-def dead $r31, implicit $r29 + J2_jumpt killed renamable $p0, %bb.3, implicit-def dead $pc + J2_jump %bb.4, implicit-def dead $pc + + bb.4: + successors: %bb.5, %bb.2 + liveins: $d10:0x0000000000000003, $d11:0x0000000000000002, $r23, $r27 + + renamable $d0 = L2_loadrd_io %stack.2, 0 :: (load (s64) from %stack.2) + renamable $d3 = L2_loadrd_io %stack.3, 0 :: (load (s64) from %stack.3) + renamable $p0 = C2_cmpgtp killed renamable $d0, killed renamable $d3 + J2_jumpt killed renamable $p0, %bb.2, implicit-def dead $pc + J2_jump %bb.5, implicit-def dead $pc + + bb.5: + PS_jmpret $r31, implicit-def dead $pc + +... diff --git a/llvm/test/CodeGen/Hexagon/rdf-phi-clobber.mir b/llvm/test/CodeGen/Hexagon/rdf-phi-clobber.mir new file mode 100644 index 0000000000000..d49cc3403d644 --- /dev/null +++ b/llvm/test/CodeGen/Hexagon/rdf-phi-clobber.mir @@ -0,0 +1,102 @@ +# RUN: llc -march=hexagon -run-pass=hexagon-rdf-opt \ +# RUN: -hexagon-rdf-dump -verify-machineinstrs -o /dev/null %s 2>&1 \ +# RUN: | FileCheck %s + +# Check that phi nodes that only have clobbering reaching defs are not created +# during graph construction. Check that there are no phi nodes for HVX registers + +#CHECK-LABEL: --- %bb.1 --- +#CHECK-NOT: p{{[0-9]+}}: phi [+d{{[0-9]+}} + +--- | + @.str.3 = private unnamed_addr constant [2 x i8] c"%d", align 8 + @.str.4 = private unnamed_addr constant [2 x i8] c"%d", align 8 + + define internal fastcc void @foo() unnamed_addr { + entry: + ret void + } + + declare dso_local noundef i32 @printf(ptr nocapture noundef readonly, ...) 
local_unnamed_addr + +--- +name: foo +alignment: 16 +exposesReturnsTwice: false +legalized: false +regBankSelected: false +selected: false +failedISel: false +tracksRegLiveness: true +hasWinCFI: false +callsEHReturn: false +callsUnwindInit: false +hasEHCatchret: false +hasEHScopes: false +hasEHFunclets: false +isOutlined: false +debugInstrRef: false +failsVerification: false +tracksDebugUserValues: true +registers: [] +liveins: + - { reg: '$d0', virtual-reg: '' } + - { reg: '$d3', virtual-reg: '' } + - { reg: '$r23', virtual-reg: '' } +frameInfo: + isFrameAddressTaken: false + isReturnAddressTaken: false + hasStackMap: false + hasPatchPoint: false + stackSize: 0 + offsetAdjustment: 0 + maxAlignment: 8 + adjustsStack: true + hasCalls: true + stackProtector: '' + functionContext: '' + maxCallFrameSize: 4294967295 + cvBytesOfCalleeSavedRegisters: 0 + hasOpaqueSPAdjustment: false + hasVAStart: false + hasMustTailInVarArgFunc: false + hasTailCall: false + isCalleeSavedInfoValid: false + localFrameSize: 0 + savePoint: '' + restorePoint: '' +entry_values: [] +callSites: [] +debugValueSubstitutions: [] +constants: [] +machineFunctionInfo: {} +body: | + bb.0.entry: + successors: %bb.1 + liveins: $r25, $r26, $d11 + + renamable $r16 = A2_tfrsi 0 + S2_storerd_io $r29, 0, renamable $d11 :: (store (s64) into stack) + $r0 = A2_tfrsi @.str.3 + J2_call @printf, hexagoncsr, implicit-def dead $pc, implicit-def dead $r31, implicit $r29, implicit $r0, implicit-def $r29, implicit-def dead $r0 + J2_jump %bb.1, implicit-def dead $pc + + bb.1: + successors: %bb.2, %bb.1 + liveins: $r16, $r25, $r26 + + S2_storeri_io $r29, 0, killed renamable $r25 :: (store (s32) into stack) + $r0 = A2_tfrsi @.str.4 + S2_storeri_io $r29, 8, killed renamable $r26 :: (store (s64) into stack + 8) + J2_call @printf, hexagoncsr, implicit-def dead $pc, implicit-def dead $r31, implicit $r29, implicit $r0, implicit-def $r29, implicit-def dead $r0 + renamable $p0 = C2_cmpgti renamable $r16, 4 + renamable $r16 = nsw A2_addi killed renamable $r16, 1 + J2_jumpf killed renamable $p0, %bb.2, implicit-def dead $pc + J2_jump %bb.1, implicit-def dead $pc + + bb.2: + liveins: $r16, $r25, $r26 + + PS_jmpret $r31, implicit-def dead $pc + +... 
diff --git a/llvm/test/CodeGen/NVPTX/tcgen05-commit.ll b/llvm/test/CodeGen/NVPTX/tcgen05-commit.ll new file mode 100644 index 0000000000000..6e0ec6bcf4465 --- /dev/null +++ b/llvm/test/CodeGen/NVPTX/tcgen05-commit.ll @@ -0,0 +1,135 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc < %s -march=nvptx64 -mcpu=sm_100a -mattr=+ptx86 | FileCheck --check-prefixes=CHECK_PTX64 %s +; RUN: llc < %s -march=nvptx64 -mcpu=sm_100a -mattr=+ptx86 --nvptx-short-ptr | FileCheck --check-prefixes=CHECK_PTX64_SHARED32 %s +; RUN: %if ptxas-12.8 %{ llc < %s -march=nvptx64 -mcpu=sm_100a -mattr=+ptx86 | %ptxas-verify -arch=sm_100a %} +; RUN: %if ptxas-12.8 %{ llc < %s -march=nvptx64 -mcpu=sm_100a -mattr=+ptx86 --nvptx-short-ptr | %ptxas-verify -arch=sm_100a %} + +declare void @llvm.nvvm.tcgen05.commit.cg1(ptr %bar_addr) +declare void @llvm.nvvm.tcgen05.commit.cg2(ptr %bar_addr) +declare void @llvm.nvvm.tcgen05.commit.shared.cg1(ptr addrspace(3) %bar_addr) +declare void @llvm.nvvm.tcgen05.commit.shared.cg2(ptr addrspace(3) %bar_addr) + +; CHECK-LABEL: test_tcgen05_commit +define void @test_tcgen05_commit(ptr %bar_addr) { +; CHECK_PTX64-LABEL: test_tcgen05_commit( +; CHECK_PTX64: { +; CHECK_PTX64-NEXT: .reg .b64 %rd<2>; +; CHECK_PTX64-EMPTY: +; CHECK_PTX64-NEXT: // %bb.0: +; CHECK_PTX64-NEXT: ld.param.u64 %rd1, [test_tcgen05_commit_param_0]; +; CHECK_PTX64-NEXT: tcgen05.commit.cta_group::1.mbarrier::arrive::one.shared::cluster.b64 [%rd1]; +; CHECK_PTX64-NEXT: tcgen05.commit.cta_group::2.mbarrier::arrive::one.shared::cluster.b64 [%rd1]; +; CHECK_PTX64-NEXT: ret; +; +; CHECK_PTX64_SHARED32-LABEL: test_tcgen05_commit( +; CHECK_PTX64_SHARED32: { +; CHECK_PTX64_SHARED32-NEXT: .reg .b64 %rd<2>; +; CHECK_PTX64_SHARED32-EMPTY: +; CHECK_PTX64_SHARED32-NEXT: // %bb.0: +; CHECK_PTX64_SHARED32-NEXT: ld.param.u64 %rd1, [test_tcgen05_commit_param_0]; +; CHECK_PTX64_SHARED32-NEXT: tcgen05.commit.cta_group::1.mbarrier::arrive::one.shared::cluster.b64 [%rd1]; +; CHECK_PTX64_SHARED32-NEXT: tcgen05.commit.cta_group::2.mbarrier::arrive::one.shared::cluster.b64 [%rd1]; +; CHECK_PTX64_SHARED32-NEXT: ret; + call void @llvm.nvvm.tcgen05.commit.cg1(ptr %bar_addr) + + call void @llvm.nvvm.tcgen05.commit.cg2(ptr %bar_addr) + + ret void +} + +; CHECK-LABEL: test_tcgen05_commit_shared +define void @test_tcgen05_commit_shared(ptr addrspace(3) %bar_addr) { +; CHECK_PTX64-LABEL: test_tcgen05_commit_shared( +; CHECK_PTX64: { +; CHECK_PTX64-NEXT: .reg .b64 %rd<2>; +; CHECK_PTX64-EMPTY: +; CHECK_PTX64-NEXT: // %bb.0: +; CHECK_PTX64-NEXT: ld.param.u64 %rd1, [test_tcgen05_commit_shared_param_0]; +; CHECK_PTX64-NEXT: tcgen05.commit.cta_group::1.mbarrier::arrive::one.shared::cluster.b64 [%rd1]; +; CHECK_PTX64-NEXT: tcgen05.commit.cta_group::2.mbarrier::arrive::one.shared::cluster.b64 [%rd1]; +; CHECK_PTX64-NEXT: ret; +; +; CHECK_PTX64_SHARED32-LABEL: test_tcgen05_commit_shared( +; CHECK_PTX64_SHARED32: { +; CHECK_PTX64_SHARED32-NEXT: .reg .b32 %r<2>; +; CHECK_PTX64_SHARED32-EMPTY: +; CHECK_PTX64_SHARED32-NEXT: // %bb.0: +; CHECK_PTX64_SHARED32-NEXT: ld.param.u32 %r1, [test_tcgen05_commit_shared_param_0]; +; CHECK_PTX64_SHARED32-NEXT: tcgen05.commit.cta_group::1.mbarrier::arrive::one.shared::cluster.b64 [%r1]; +; CHECK_PTX64_SHARED32-NEXT: tcgen05.commit.cta_group::2.mbarrier::arrive::one.shared::cluster.b64 [%r1]; +; CHECK_PTX64_SHARED32-NEXT: ret; + call void @llvm.nvvm.tcgen05.commit.shared.cg1(ptr addrspace(3) %bar_addr) + + call void @llvm.nvvm.tcgen05.commit.shared.cg2(ptr addrspace(3) 
%bar_addr) + + ret void +} + +declare void @llvm.nvvm.tcgen05.commit.mc.cg1(ptr %bar_addr, i16 %cta_mask) +declare void @llvm.nvvm.tcgen05.commit.mc.cg2(ptr %bar_addr, i16 %cta_mask) +declare void @llvm.nvvm.tcgen05.commit.mc.shared.cg1(ptr addrspace(3) %bar_addr, i16 %cta_mask) +declare void @llvm.nvvm.tcgen05.commit.mc.shared.cg2(ptr addrspace(3) %bar_addr, i16 %cta_mask) + +; CHECK-LABEL: test_tcgen05_commit_mc +define void @test_tcgen05_commit_mc(ptr %bar_addr, i16 %cta_mask) { +; CHECK_PTX64-LABEL: test_tcgen05_commit_mc( +; CHECK_PTX64: { +; CHECK_PTX64-NEXT: .reg .b16 %rs<2>; +; CHECK_PTX64-NEXT: .reg .b64 %rd<2>; +; CHECK_PTX64-EMPTY: +; CHECK_PTX64-NEXT: // %bb.0: +; CHECK_PTX64-NEXT: ld.param.u64 %rd1, [test_tcgen05_commit_mc_param_0]; +; CHECK_PTX64-NEXT: ld.param.u16 %rs1, [test_tcgen05_commit_mc_param_1]; +; CHECK_PTX64-NEXT: tcgen05.commit.cta_group::1.mbarrier::arrive::one.shared::cluster.multicast::cluster.b64 [%rd1], %rs1; +; CHECK_PTX64-NEXT: tcgen05.commit.cta_group::2.mbarrier::arrive::one.shared::cluster.multicast::cluster.b64 [%rd1], %rs1; +; CHECK_PTX64-NEXT: ret; +; +; CHECK_PTX64_SHARED32-LABEL: test_tcgen05_commit_mc( +; CHECK_PTX64_SHARED32: { +; CHECK_PTX64_SHARED32-NEXT: .reg .b16 %rs<2>; +; CHECK_PTX64_SHARED32-NEXT: .reg .b64 %rd<2>; +; CHECK_PTX64_SHARED32-EMPTY: +; CHECK_PTX64_SHARED32-NEXT: // %bb.0: +; CHECK_PTX64_SHARED32-NEXT: ld.param.u64 %rd1, [test_tcgen05_commit_mc_param_0]; +; CHECK_PTX64_SHARED32-NEXT: ld.param.u16 %rs1, [test_tcgen05_commit_mc_param_1]; +; CHECK_PTX64_SHARED32-NEXT: tcgen05.commit.cta_group::1.mbarrier::arrive::one.shared::cluster.multicast::cluster.b64 [%rd1], %rs1; +; CHECK_PTX64_SHARED32-NEXT: tcgen05.commit.cta_group::2.mbarrier::arrive::one.shared::cluster.multicast::cluster.b64 [%rd1], %rs1; +; CHECK_PTX64_SHARED32-NEXT: ret; + call void @llvm.nvvm.tcgen05.commit.mc.cg1(ptr %bar_addr, i16 %cta_mask) + + call void @llvm.nvvm.tcgen05.commit.mc.cg2(ptr %bar_addr, i16 %cta_mask) + + ret void +} + +; CHECK-LABEL: test_tcgen05_commit_mc_shared +define void @test_tcgen05_commit_mc_shared(ptr addrspace(3) %bar_addr, i16 %cta_mask) { +; CHECK_PTX64-LABEL: test_tcgen05_commit_mc_shared( +; CHECK_PTX64: { +; CHECK_PTX64-NEXT: .reg .b16 %rs<2>; +; CHECK_PTX64-NEXT: .reg .b64 %rd<2>; +; CHECK_PTX64-EMPTY: +; CHECK_PTX64-NEXT: // %bb.0: +; CHECK_PTX64-NEXT: ld.param.u64 %rd1, [test_tcgen05_commit_mc_shared_param_0]; +; CHECK_PTX64-NEXT: ld.param.u16 %rs1, [test_tcgen05_commit_mc_shared_param_1]; +; CHECK_PTX64-NEXT: tcgen05.commit.cta_group::1.mbarrier::arrive::one.shared::cluster.multicast::cluster.b64 [%rd1], %rs1; +; CHECK_PTX64-NEXT: tcgen05.commit.cta_group::2.mbarrier::arrive::one.shared::cluster.multicast::cluster.b64 [%rd1], %rs1; +; CHECK_PTX64-NEXT: ret; +; +; CHECK_PTX64_SHARED32-LABEL: test_tcgen05_commit_mc_shared( +; CHECK_PTX64_SHARED32: { +; CHECK_PTX64_SHARED32-NEXT: .reg .b16 %rs<2>; +; CHECK_PTX64_SHARED32-NEXT: .reg .b32 %r<2>; +; CHECK_PTX64_SHARED32-EMPTY: +; CHECK_PTX64_SHARED32-NEXT: // %bb.0: +; CHECK_PTX64_SHARED32-NEXT: ld.param.u32 %r1, [test_tcgen05_commit_mc_shared_param_0]; +; CHECK_PTX64_SHARED32-NEXT: ld.param.u16 %rs1, [test_tcgen05_commit_mc_shared_param_1]; +; CHECK_PTX64_SHARED32-NEXT: tcgen05.commit.cta_group::1.mbarrier::arrive::one.shared::cluster.multicast::cluster.b64 [%r1], %rs1; +; CHECK_PTX64_SHARED32-NEXT: tcgen05.commit.cta_group::2.mbarrier::arrive::one.shared::cluster.multicast::cluster.b64 [%r1], %rs1; +; CHECK_PTX64_SHARED32-NEXT: ret; + call void 
@llvm.nvvm.tcgen05.commit.mc.shared.cg1(ptr addrspace(3) %bar_addr, i16 %cta_mask) + + call void @llvm.nvvm.tcgen05.commit.mc.shared.cg2(ptr addrspace(3) %bar_addr, i16 %cta_mask) + + ret void +} diff --git a/llvm/test/CodeGen/NVPTX/tcgen05-fence.ll b/llvm/test/CodeGen/NVPTX/tcgen05-fence.ll new file mode 100644 index 0000000000000..07c62671d2fbd --- /dev/null +++ b/llvm/test/CodeGen/NVPTX/tcgen05-fence.ll @@ -0,0 +1,42 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc < %s -march=nvptx64 -mcpu=sm_100a -mattr=+ptx86 | FileCheck --check-prefixes=CHECK %s +; RUN: %if ptxas-12.8 %{ llc < %s -march=nvptx64 -mcpu=sm_100a -mattr=+ptx86 | %ptxas-verify -arch=sm_100a %} + +declare void @llvm.nvvm.tcgen05.fence.before.thread.sync() +declare void @llvm.nvvm.tcgen05.fence.after.thread.sync() +declare void @llvm.nvvm.tcgen05.wait.ld() +declare void @llvm.nvvm.tcgen05.wait.st() + +; CHECK-LABEL: test_tcgen05_fence +define void @test_tcgen05_fence() { +; CHECK-LABEL: test_tcgen05_fence( +; CHECK: { +; CHECK-EMPTY: +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: tcgen05.fence::before_thread_sync; +; CHECK-NEXT: tcgen05.fence::after_thread_sync; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.fence.before.thread.sync() + + call void @llvm.nvvm.tcgen05.fence.after.thread.sync() + + ret void +} + +; CHECK-LABEL: test_tcgen05_wait +define void @test_tcgen05_wait() { +; CHECK-LABEL: test_tcgen05_wait( +; CHECK: { +; CHECK-EMPTY: +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: tcgen05.wait::ld.sync.aligned; +; CHECK-NEXT: tcgen05.wait::st.sync.aligned; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.wait.ld() + + call void @llvm.nvvm.tcgen05.wait.st() + + ret void +} diff --git a/llvm/test/CodeGen/PowerPC/topdepthreduce-postra.mir b/llvm/test/CodeGen/PowerPC/topdepthreduce-postra.mir index 8bdbe288d98e6..627e553475480 100644 --- a/llvm/test/CodeGen/PowerPC/topdepthreduce-postra.mir +++ b/llvm/test/CodeGen/PowerPC/topdepthreduce-postra.mir @@ -1,6 +1,5 @@ # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py # RUN: llc -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr9 -run-pass=postmisched -o - %s | FileCheck %s -# RUN: llc -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr9 -passes=postmisched -o - %s | FileCheck %s --- # Check that postmisched's TopDepthReduce heuristic moves the MULLD later # because of the dependency on x5 diff --git a/llvm/test/CodeGen/RISCV/callee-saved-gprs.ll b/llvm/test/CodeGen/RISCV/callee-saved-gprs.ll index 874cf897470e7..f9f1ba60a8ac0 100644 --- a/llvm/test/CodeGen/RISCV/callee-saved-gprs.ll +++ b/llvm/test/CodeGen/RISCV/callee-saved-gprs.ll @@ -419,8 +419,8 @@ define void @callee() { ; ; RV32IZCMP-LABEL: callee: ; RV32IZCMP: # %bb.0: -; RV32IZCMP-NEXT: cm.push {ra, s0-s11}, -96 -; RV32IZCMP-NEXT: .cfi_def_cfa_offset 96 +; RV32IZCMP-NEXT: cm.push {ra, s0-s11}, -80 +; RV32IZCMP-NEXT: .cfi_def_cfa_offset 80 ; RV32IZCMP-NEXT: .cfi_offset ra, -52 ; RV32IZCMP-NEXT: .cfi_offset s0, -48 ; RV32IZCMP-NEXT: .cfi_offset s1, -44 @@ -436,18 +436,18 @@ define void @callee() { ; RV32IZCMP-NEXT: .cfi_offset s11, -4 ; RV32IZCMP-NEXT: lui t0, %hi(var) ; RV32IZCMP-NEXT: lw a0, %lo(var)(t0) -; RV32IZCMP-NEXT: sw a0, 28(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: lw a0, %lo(var+4)(t0) ; RV32IZCMP-NEXT: sw a0, 24(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: lw a0, %lo(var+8)(t0) +; RV32IZCMP-NEXT: lw a0, %lo(var+4)(t0) ; RV32IZCMP-NEXT: sw a0, 20(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: lw 
a0, %lo(var+12)(t0) +; RV32IZCMP-NEXT: lw a0, %lo(var+8)(t0) ; RV32IZCMP-NEXT: sw a0, 16(sp) # 4-byte Folded Spill +; RV32IZCMP-NEXT: lw a0, %lo(var+12)(t0) +; RV32IZCMP-NEXT: sw a0, 12(sp) # 4-byte Folded Spill ; RV32IZCMP-NEXT: addi a5, t0, %lo(var) ; RV32IZCMP-NEXT: lw a0, 16(a5) -; RV32IZCMP-NEXT: sw a0, 12(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: lw a0, 20(a5) ; RV32IZCMP-NEXT: sw a0, 8(sp) # 4-byte Folded Spill +; RV32IZCMP-NEXT: lw a0, 20(a5) +; RV32IZCMP-NEXT: sw a0, 4(sp) # 4-byte Folded Spill ; RV32IZCMP-NEXT: lw t4, 24(a5) ; RV32IZCMP-NEXT: lw t5, 28(a5) ; RV32IZCMP-NEXT: lw t6, 32(a5) @@ -500,19 +500,19 @@ define void @callee() { ; RV32IZCMP-NEXT: sw t6, 32(a5) ; RV32IZCMP-NEXT: sw t5, 28(a5) ; RV32IZCMP-NEXT: sw t4, 24(a5) -; RV32IZCMP-NEXT: lw a0, 8(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw a0, 4(sp) # 4-byte Folded Reload ; RV32IZCMP-NEXT: sw a0, 20(a5) -; RV32IZCMP-NEXT: lw a0, 12(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw a0, 8(sp) # 4-byte Folded Reload ; RV32IZCMP-NEXT: sw a0, 16(a5) -; RV32IZCMP-NEXT: lw a0, 16(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw a0, 12(sp) # 4-byte Folded Reload ; RV32IZCMP-NEXT: sw a0, %lo(var+12)(t0) -; RV32IZCMP-NEXT: lw a0, 20(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw a0, 16(sp) # 4-byte Folded Reload ; RV32IZCMP-NEXT: sw a0, %lo(var+8)(t0) -; RV32IZCMP-NEXT: lw a0, 24(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw a0, 20(sp) # 4-byte Folded Reload ; RV32IZCMP-NEXT: sw a0, %lo(var+4)(t0) -; RV32IZCMP-NEXT: lw a0, 28(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw a0, 24(sp) # 4-byte Folded Reload ; RV32IZCMP-NEXT: sw a0, %lo(var)(t0) -; RV32IZCMP-NEXT: cm.popret {ra, s0-s11}, 96 +; RV32IZCMP-NEXT: cm.popret {ra, s0-s11}, 80 ; ; RV32IZCMP-WITH-FP-LABEL: callee: ; RV32IZCMP-WITH-FP: # %bb.0: @@ -1055,18 +1055,18 @@ define void @callee() { ; RV64IZCMP-NEXT: .cfi_offset s11, -8 ; RV64IZCMP-NEXT: lui t0, %hi(var) ; RV64IZCMP-NEXT: lw a0, %lo(var)(t0) -; RV64IZCMP-NEXT: sd a0, 40(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd a0, 48(sp) # 8-byte Folded Spill ; RV64IZCMP-NEXT: lw a0, %lo(var+4)(t0) -; RV64IZCMP-NEXT: sd a0, 32(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd a0, 40(sp) # 8-byte Folded Spill ; RV64IZCMP-NEXT: lw a0, %lo(var+8)(t0) -; RV64IZCMP-NEXT: sd a0, 24(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd a0, 32(sp) # 8-byte Folded Spill ; RV64IZCMP-NEXT: lw a0, %lo(var+12)(t0) -; RV64IZCMP-NEXT: sd a0, 16(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd a0, 24(sp) # 8-byte Folded Spill ; RV64IZCMP-NEXT: addi a5, t0, %lo(var) ; RV64IZCMP-NEXT: lw a0, 16(a5) -; RV64IZCMP-NEXT: sd a0, 8(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd a0, 16(sp) # 8-byte Folded Spill ; RV64IZCMP-NEXT: lw a0, 20(a5) -; RV64IZCMP-NEXT: sd a0, 0(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd a0, 8(sp) # 8-byte Folded Spill ; RV64IZCMP-NEXT: lw t4, 24(a5) ; RV64IZCMP-NEXT: lw t5, 28(a5) ; RV64IZCMP-NEXT: lw t6, 32(a5) @@ -1119,17 +1119,17 @@ define void @callee() { ; RV64IZCMP-NEXT: sw t6, 32(a5) ; RV64IZCMP-NEXT: sw t5, 28(a5) ; RV64IZCMP-NEXT: sw t4, 24(a5) -; RV64IZCMP-NEXT: ld a0, 0(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: sw a0, 20(a5) ; RV64IZCMP-NEXT: ld a0, 8(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: sw a0, 16(a5) +; RV64IZCMP-NEXT: sw a0, 20(a5) ; RV64IZCMP-NEXT: ld a0, 16(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: sw a0, %lo(var+12)(t0) +; RV64IZCMP-NEXT: sw a0, 16(a5) ; RV64IZCMP-NEXT: ld a0, 24(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: sw a0, %lo(var+8)(t0) +; RV64IZCMP-NEXT: sw a0, 
%lo(var+12)(t0) ; RV64IZCMP-NEXT: ld a0, 32(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: sw a0, %lo(var+4)(t0) +; RV64IZCMP-NEXT: sw a0, %lo(var+8)(t0) ; RV64IZCMP-NEXT: ld a0, 40(sp) # 8-byte Folded Reload +; RV64IZCMP-NEXT: sw a0, %lo(var+4)(t0) +; RV64IZCMP-NEXT: ld a0, 48(sp) # 8-byte Folded Reload ; RV64IZCMP-NEXT: sw a0, %lo(var)(t0) ; RV64IZCMP-NEXT: cm.popret {ra, s0-s11}, 160 ; @@ -1798,54 +1798,54 @@ define void @caller() { ; RV32IZCMP-NEXT: .cfi_offset s9, -12 ; RV32IZCMP-NEXT: .cfi_offset s10, -8 ; RV32IZCMP-NEXT: .cfi_offset s11, -4 -; RV32IZCMP-NEXT: addi sp, sp, -48 -; RV32IZCMP-NEXT: .cfi_def_cfa_offset 160 +; RV32IZCMP-NEXT: addi sp, sp, -32 +; RV32IZCMP-NEXT: .cfi_def_cfa_offset 144 ; RV32IZCMP-NEXT: lui s0, %hi(var) ; RV32IZCMP-NEXT: lw a0, %lo(var)(s0) -; RV32IZCMP-NEXT: sw a0, 92(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: lw a0, %lo(var+4)(s0) ; RV32IZCMP-NEXT: sw a0, 88(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: lw a0, %lo(var+8)(s0) +; RV32IZCMP-NEXT: lw a0, %lo(var+4)(s0) ; RV32IZCMP-NEXT: sw a0, 84(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: lw a0, %lo(var+12)(s0) +; RV32IZCMP-NEXT: lw a0, %lo(var+8)(s0) ; RV32IZCMP-NEXT: sw a0, 80(sp) # 4-byte Folded Spill +; RV32IZCMP-NEXT: lw a0, %lo(var+12)(s0) +; RV32IZCMP-NEXT: sw a0, 76(sp) # 4-byte Folded Spill ; RV32IZCMP-NEXT: addi s1, s0, %lo(var) ; RV32IZCMP-NEXT: lw a0, 16(s1) -; RV32IZCMP-NEXT: sw a0, 76(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: lw a0, 20(s1) ; RV32IZCMP-NEXT: sw a0, 72(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: lw a0, 24(s1) +; RV32IZCMP-NEXT: lw a0, 20(s1) ; RV32IZCMP-NEXT: sw a0, 68(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: lw a0, 28(s1) +; RV32IZCMP-NEXT: lw a0, 24(s1) ; RV32IZCMP-NEXT: sw a0, 64(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: lw a0, 32(s1) +; RV32IZCMP-NEXT: lw a0, 28(s1) ; RV32IZCMP-NEXT: sw a0, 60(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: lw a0, 36(s1) +; RV32IZCMP-NEXT: lw a0, 32(s1) ; RV32IZCMP-NEXT: sw a0, 56(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: lw a0, 40(s1) +; RV32IZCMP-NEXT: lw a0, 36(s1) ; RV32IZCMP-NEXT: sw a0, 52(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: lw a0, 44(s1) +; RV32IZCMP-NEXT: lw a0, 40(s1) ; RV32IZCMP-NEXT: sw a0, 48(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: lw a0, 48(s1) +; RV32IZCMP-NEXT: lw a0, 44(s1) ; RV32IZCMP-NEXT: sw a0, 44(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: lw a0, 52(s1) +; RV32IZCMP-NEXT: lw a0, 48(s1) ; RV32IZCMP-NEXT: sw a0, 40(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: lw a0, 56(s1) +; RV32IZCMP-NEXT: lw a0, 52(s1) ; RV32IZCMP-NEXT: sw a0, 36(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: lw a0, 60(s1) +; RV32IZCMP-NEXT: lw a0, 56(s1) ; RV32IZCMP-NEXT: sw a0, 32(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: lw a0, 64(s1) +; RV32IZCMP-NEXT: lw a0, 60(s1) ; RV32IZCMP-NEXT: sw a0, 28(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: lw a0, 68(s1) +; RV32IZCMP-NEXT: lw a0, 64(s1) ; RV32IZCMP-NEXT: sw a0, 24(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: lw a0, 72(s1) +; RV32IZCMP-NEXT: lw a0, 68(s1) ; RV32IZCMP-NEXT: sw a0, 20(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: lw a0, 76(s1) +; RV32IZCMP-NEXT: lw a0, 72(s1) ; RV32IZCMP-NEXT: sw a0, 16(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: lw a0, 80(s1) +; RV32IZCMP-NEXT: lw a0, 76(s1) ; RV32IZCMP-NEXT: sw a0, 12(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: lw a0, 84(s1) +; RV32IZCMP-NEXT: lw a0, 80(s1) ; RV32IZCMP-NEXT: sw a0, 8(sp) # 4-byte Folded Spill +; RV32IZCMP-NEXT: lw a0, 84(s1) +; RV32IZCMP-NEXT: sw a0, 4(sp) # 4-byte Folded Spill ; RV32IZCMP-NEXT: lw s4, 
88(s1) ; RV32IZCMP-NEXT: lw s5, 92(s1) ; RV32IZCMP-NEXT: lw s6, 96(s1) @@ -1867,51 +1867,51 @@ define void @caller() { ; RV32IZCMP-NEXT: sw s6, 96(s1) ; RV32IZCMP-NEXT: sw s5, 92(s1) ; RV32IZCMP-NEXT: sw s4, 88(s1) -; RV32IZCMP-NEXT: lw a0, 8(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw a0, 4(sp) # 4-byte Folded Reload ; RV32IZCMP-NEXT: sw a0, 84(s1) -; RV32IZCMP-NEXT: lw a0, 12(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw a0, 8(sp) # 4-byte Folded Reload ; RV32IZCMP-NEXT: sw a0, 80(s1) -; RV32IZCMP-NEXT: lw a0, 16(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw a0, 12(sp) # 4-byte Folded Reload ; RV32IZCMP-NEXT: sw a0, 76(s1) -; RV32IZCMP-NEXT: lw a0, 20(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw a0, 16(sp) # 4-byte Folded Reload ; RV32IZCMP-NEXT: sw a0, 72(s1) -; RV32IZCMP-NEXT: lw a0, 24(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw a0, 20(sp) # 4-byte Folded Reload ; RV32IZCMP-NEXT: sw a0, 68(s1) -; RV32IZCMP-NEXT: lw a0, 28(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw a0, 24(sp) # 4-byte Folded Reload ; RV32IZCMP-NEXT: sw a0, 64(s1) -; RV32IZCMP-NEXT: lw a0, 32(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw a0, 28(sp) # 4-byte Folded Reload ; RV32IZCMP-NEXT: sw a0, 60(s1) -; RV32IZCMP-NEXT: lw a0, 36(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw a0, 32(sp) # 4-byte Folded Reload ; RV32IZCMP-NEXT: sw a0, 56(s1) -; RV32IZCMP-NEXT: lw a0, 40(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw a0, 36(sp) # 4-byte Folded Reload ; RV32IZCMP-NEXT: sw a0, 52(s1) -; RV32IZCMP-NEXT: lw a0, 44(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw a0, 40(sp) # 4-byte Folded Reload ; RV32IZCMP-NEXT: sw a0, 48(s1) -; RV32IZCMP-NEXT: lw a0, 48(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw a0, 44(sp) # 4-byte Folded Reload ; RV32IZCMP-NEXT: sw a0, 44(s1) -; RV32IZCMP-NEXT: lw a0, 52(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw a0, 48(sp) # 4-byte Folded Reload ; RV32IZCMP-NEXT: sw a0, 40(s1) -; RV32IZCMP-NEXT: lw a0, 56(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw a0, 52(sp) # 4-byte Folded Reload ; RV32IZCMP-NEXT: sw a0, 36(s1) -; RV32IZCMP-NEXT: lw a0, 60(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw a0, 56(sp) # 4-byte Folded Reload ; RV32IZCMP-NEXT: sw a0, 32(s1) -; RV32IZCMP-NEXT: lw a0, 64(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw a0, 60(sp) # 4-byte Folded Reload ; RV32IZCMP-NEXT: sw a0, 28(s1) -; RV32IZCMP-NEXT: lw a0, 68(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw a0, 64(sp) # 4-byte Folded Reload ; RV32IZCMP-NEXT: sw a0, 24(s1) -; RV32IZCMP-NEXT: lw a0, 72(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw a0, 68(sp) # 4-byte Folded Reload ; RV32IZCMP-NEXT: sw a0, 20(s1) -; RV32IZCMP-NEXT: lw a0, 76(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw a0, 72(sp) # 4-byte Folded Reload ; RV32IZCMP-NEXT: sw a0, 16(s1) -; RV32IZCMP-NEXT: lw a0, 80(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw a0, 76(sp) # 4-byte Folded Reload ; RV32IZCMP-NEXT: sw a0, %lo(var+12)(s0) -; RV32IZCMP-NEXT: lw a0, 84(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw a0, 80(sp) # 4-byte Folded Reload ; RV32IZCMP-NEXT: sw a0, %lo(var+8)(s0) -; RV32IZCMP-NEXT: lw a0, 88(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw a0, 84(sp) # 4-byte Folded Reload ; RV32IZCMP-NEXT: sw a0, %lo(var+4)(s0) -; RV32IZCMP-NEXT: lw a0, 92(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw a0, 88(sp) # 4-byte Folded Reload ; RV32IZCMP-NEXT: sw a0, %lo(var)(s0) -; RV32IZCMP-NEXT: addi sp, sp, 48 +; RV32IZCMP-NEXT: addi sp, sp, 32 ; RV32IZCMP-NEXT: .cfi_def_cfa_offset 112 ; RV32IZCMP-NEXT: cm.popret 
{ra, s0-s11}, 112 ; @@ -2609,50 +2609,50 @@ define void @caller() { ; RV64IZCMP-NEXT: .cfi_def_cfa_offset 288 ; RV64IZCMP-NEXT: lui s0, %hi(var) ; RV64IZCMP-NEXT: lw a0, %lo(var)(s0) -; RV64IZCMP-NEXT: sd a0, 168(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd a0, 176(sp) # 8-byte Folded Spill ; RV64IZCMP-NEXT: lw a0, %lo(var+4)(s0) -; RV64IZCMP-NEXT: sd a0, 160(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd a0, 168(sp) # 8-byte Folded Spill ; RV64IZCMP-NEXT: lw a0, %lo(var+8)(s0) -; RV64IZCMP-NEXT: sd a0, 152(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd a0, 160(sp) # 8-byte Folded Spill ; RV64IZCMP-NEXT: lw a0, %lo(var+12)(s0) -; RV64IZCMP-NEXT: sd a0, 144(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd a0, 152(sp) # 8-byte Folded Spill ; RV64IZCMP-NEXT: addi s1, s0, %lo(var) ; RV64IZCMP-NEXT: lw a0, 16(s1) -; RV64IZCMP-NEXT: sd a0, 136(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd a0, 144(sp) # 8-byte Folded Spill ; RV64IZCMP-NEXT: lw a0, 20(s1) -; RV64IZCMP-NEXT: sd a0, 128(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd a0, 136(sp) # 8-byte Folded Spill ; RV64IZCMP-NEXT: lw a0, 24(s1) -; RV64IZCMP-NEXT: sd a0, 120(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd a0, 128(sp) # 8-byte Folded Spill ; RV64IZCMP-NEXT: lw a0, 28(s1) -; RV64IZCMP-NEXT: sd a0, 112(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd a0, 120(sp) # 8-byte Folded Spill ; RV64IZCMP-NEXT: lw a0, 32(s1) -; RV64IZCMP-NEXT: sd a0, 104(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd a0, 112(sp) # 8-byte Folded Spill ; RV64IZCMP-NEXT: lw a0, 36(s1) -; RV64IZCMP-NEXT: sd a0, 96(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd a0, 104(sp) # 8-byte Folded Spill ; RV64IZCMP-NEXT: lw a0, 40(s1) -; RV64IZCMP-NEXT: sd a0, 88(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd a0, 96(sp) # 8-byte Folded Spill ; RV64IZCMP-NEXT: lw a0, 44(s1) -; RV64IZCMP-NEXT: sd a0, 80(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd a0, 88(sp) # 8-byte Folded Spill ; RV64IZCMP-NEXT: lw a0, 48(s1) -; RV64IZCMP-NEXT: sd a0, 72(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd a0, 80(sp) # 8-byte Folded Spill ; RV64IZCMP-NEXT: lw a0, 52(s1) -; RV64IZCMP-NEXT: sd a0, 64(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd a0, 72(sp) # 8-byte Folded Spill ; RV64IZCMP-NEXT: lw a0, 56(s1) -; RV64IZCMP-NEXT: sd a0, 56(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd a0, 64(sp) # 8-byte Folded Spill ; RV64IZCMP-NEXT: lw a0, 60(s1) -; RV64IZCMP-NEXT: sd a0, 48(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd a0, 56(sp) # 8-byte Folded Spill ; RV64IZCMP-NEXT: lw a0, 64(s1) -; RV64IZCMP-NEXT: sd a0, 40(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd a0, 48(sp) # 8-byte Folded Spill ; RV64IZCMP-NEXT: lw a0, 68(s1) -; RV64IZCMP-NEXT: sd a0, 32(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd a0, 40(sp) # 8-byte Folded Spill ; RV64IZCMP-NEXT: lw a0, 72(s1) -; RV64IZCMP-NEXT: sd a0, 24(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd a0, 32(sp) # 8-byte Folded Spill ; RV64IZCMP-NEXT: lw a0, 76(s1) -; RV64IZCMP-NEXT: sd a0, 16(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd a0, 24(sp) # 8-byte Folded Spill ; RV64IZCMP-NEXT: lw a0, 80(s1) -; RV64IZCMP-NEXT: sd a0, 8(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd a0, 16(sp) # 8-byte Folded Spill ; RV64IZCMP-NEXT: lw a0, 84(s1) -; RV64IZCMP-NEXT: sd a0, 0(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd a0, 8(sp) # 8-byte Folded Spill ; RV64IZCMP-NEXT: lw s4, 88(s1) ; RV64IZCMP-NEXT: lw s5, 92(s1) ; RV64IZCMP-NEXT: lw s6, 96(s1) @@ -2674,49 +2674,49 @@ define void @caller() { ; RV64IZCMP-NEXT: sw s6, 96(s1) ; RV64IZCMP-NEXT: sw 
s5, 92(s1) ; RV64IZCMP-NEXT: sw s4, 88(s1) -; RV64IZCMP-NEXT: ld a0, 0(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: sw a0, 84(s1) ; RV64IZCMP-NEXT: ld a0, 8(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: sw a0, 80(s1) +; RV64IZCMP-NEXT: sw a0, 84(s1) ; RV64IZCMP-NEXT: ld a0, 16(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: sw a0, 76(s1) +; RV64IZCMP-NEXT: sw a0, 80(s1) ; RV64IZCMP-NEXT: ld a0, 24(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: sw a0, 72(s1) +; RV64IZCMP-NEXT: sw a0, 76(s1) ; RV64IZCMP-NEXT: ld a0, 32(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: sw a0, 68(s1) +; RV64IZCMP-NEXT: sw a0, 72(s1) ; RV64IZCMP-NEXT: ld a0, 40(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: sw a0, 64(s1) +; RV64IZCMP-NEXT: sw a0, 68(s1) ; RV64IZCMP-NEXT: ld a0, 48(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: sw a0, 60(s1) +; RV64IZCMP-NEXT: sw a0, 64(s1) ; RV64IZCMP-NEXT: ld a0, 56(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: sw a0, 56(s1) +; RV64IZCMP-NEXT: sw a0, 60(s1) ; RV64IZCMP-NEXT: ld a0, 64(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: sw a0, 52(s1) +; RV64IZCMP-NEXT: sw a0, 56(s1) ; RV64IZCMP-NEXT: ld a0, 72(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: sw a0, 48(s1) +; RV64IZCMP-NEXT: sw a0, 52(s1) ; RV64IZCMP-NEXT: ld a0, 80(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: sw a0, 44(s1) +; RV64IZCMP-NEXT: sw a0, 48(s1) ; RV64IZCMP-NEXT: ld a0, 88(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: sw a0, 40(s1) +; RV64IZCMP-NEXT: sw a0, 44(s1) ; RV64IZCMP-NEXT: ld a0, 96(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: sw a0, 36(s1) +; RV64IZCMP-NEXT: sw a0, 40(s1) ; RV64IZCMP-NEXT: ld a0, 104(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: sw a0, 32(s1) +; RV64IZCMP-NEXT: sw a0, 36(s1) ; RV64IZCMP-NEXT: ld a0, 112(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: sw a0, 28(s1) +; RV64IZCMP-NEXT: sw a0, 32(s1) ; RV64IZCMP-NEXT: ld a0, 120(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: sw a0, 24(s1) +; RV64IZCMP-NEXT: sw a0, 28(s1) ; RV64IZCMP-NEXT: ld a0, 128(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: sw a0, 20(s1) +; RV64IZCMP-NEXT: sw a0, 24(s1) ; RV64IZCMP-NEXT: ld a0, 136(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: sw a0, 16(s1) +; RV64IZCMP-NEXT: sw a0, 20(s1) ; RV64IZCMP-NEXT: ld a0, 144(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: sw a0, %lo(var+12)(s0) +; RV64IZCMP-NEXT: sw a0, 16(s1) ; RV64IZCMP-NEXT: ld a0, 152(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: sw a0, %lo(var+8)(s0) +; RV64IZCMP-NEXT: sw a0, %lo(var+12)(s0) ; RV64IZCMP-NEXT: ld a0, 160(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: sw a0, %lo(var+4)(s0) +; RV64IZCMP-NEXT: sw a0, %lo(var+8)(s0) ; RV64IZCMP-NEXT: ld a0, 168(sp) # 8-byte Folded Reload +; RV64IZCMP-NEXT: sw a0, %lo(var+4)(s0) +; RV64IZCMP-NEXT: ld a0, 176(sp) # 8-byte Folded Reload ; RV64IZCMP-NEXT: sw a0, %lo(var)(s0) ; RV64IZCMP-NEXT: addi sp, sp, 128 ; RV64IZCMP-NEXT: .cfi_def_cfa_offset 160 diff --git a/llvm/test/CodeGen/RISCV/misched-postra-direction.mir b/llvm/test/CodeGen/RISCV/misched-postra-direction.mir index e4b934c3036ae..2cca042bebee6 100644 --- a/llvm/test/CodeGen/RISCV/misched-postra-direction.mir +++ b/llvm/test/CodeGen/RISCV/misched-postra-direction.mir @@ -11,19 +11,6 @@ # RUN: -misched-dump-schedule-trace -misched-postra-direction=bidirectional \ # RUN: -o - %s 2>&1 | FileCheck --check-prefix=BIDIRECTIONAL %s -# RUN: llc -mtriple=riscv64 -mcpu=sifive-x280 -passes=postmisched \ -# RUN: -enable-post-misched -debug-only=machine-scheduler \ -# RUN: -misched-dump-schedule-trace -misched-postra-direction=topdown \ -# RUN: -o - %s 2>&1 | FileCheck 
--check-prefix=TOPDOWN %s -# RUN: llc -mtriple=riscv64 -mcpu=sifive-x280 -passes=postmisched \ -# RUN: -enable-post-misched -debug-only=machine-scheduler \ -# RUN: -misched-dump-schedule-trace -misched-postra-direction=bottomup \ -# RUN: -o - %s 2>&1 | FileCheck --check-prefix=BOTTOMUP %s -# RUN: llc -mtriple=riscv64 -mcpu=sifive-x280 -passes=postmisched \ -# RUN: -enable-post-misched -debug-only=machine-scheduler \ -# RUN: -misched-dump-schedule-trace -misched-postra-direction=bidirectional \ -# RUN: -o - %s 2>&1 | FileCheck --check-prefix=BIDIRECTIONAL %s - # REQUIRES: asserts --- diff --git a/llvm/test/CodeGen/RISCV/push-pop-popret.ll b/llvm/test/CodeGen/RISCV/push-pop-popret.ll index 5a3b67adfaab1..1fbdaa76dfb68 100644 --- a/llvm/test/CodeGen/RISCV/push-pop-popret.ll +++ b/llvm/test/CodeGen/RISCV/push-pop-popret.ll @@ -24,7 +24,7 @@ define i32 @foo() { ; RV32IZCMP-NEXT: .cfi_offset ra, -4 ; RV32IZCMP-NEXT: addi sp, sp, -464 ; RV32IZCMP-NEXT: .cfi_def_cfa_offset 528 -; RV32IZCMP-NEXT: mv a0, sp +; RV32IZCMP-NEXT: addi a0, sp, 12 ; RV32IZCMP-NEXT: call test ; RV32IZCMP-NEXT: addi sp, sp, 464 ; RV32IZCMP-NEXT: .cfi_def_cfa_offset 64 @@ -37,7 +37,7 @@ define i32 @foo() { ; RV64IZCMP-NEXT: .cfi_offset ra, -8 ; RV64IZCMP-NEXT: addi sp, sp, -464 ; RV64IZCMP-NEXT: .cfi_def_cfa_offset 528 -; RV64IZCMP-NEXT: mv a0, sp +; RV64IZCMP-NEXT: addi a0, sp, 8 ; RV64IZCMP-NEXT: call test ; RV64IZCMP-NEXT: addi sp, sp, 464 ; RV64IZCMP-NEXT: .cfi_def_cfa_offset 64 @@ -50,7 +50,7 @@ define i32 @foo() { ; RV32IZCMP-SR-NEXT: .cfi_offset ra, -4 ; RV32IZCMP-SR-NEXT: addi sp, sp, -464 ; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 528 -; RV32IZCMP-SR-NEXT: mv a0, sp +; RV32IZCMP-SR-NEXT: addi a0, sp, 12 ; RV32IZCMP-SR-NEXT: call test ; RV32IZCMP-SR-NEXT: addi sp, sp, 464 ; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 64 @@ -63,7 +63,7 @@ define i32 @foo() { ; RV64IZCMP-SR-NEXT: .cfi_offset ra, -8 ; RV64IZCMP-SR-NEXT: addi sp, sp, -464 ; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 528 -; RV64IZCMP-SR-NEXT: mv a0, sp +; RV64IZCMP-SR-NEXT: addi a0, sp, 8 ; RV64IZCMP-SR-NEXT: call test ; RV64IZCMP-SR-NEXT: addi sp, sp, 464 ; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 64 @@ -1775,54 +1775,52 @@ define void @foo_with_irq() "interrupt"="user" { ; RV32IZCMP-NEXT: cm.push {ra}, -64 ; RV32IZCMP-NEXT: .cfi_def_cfa_offset 64 ; RV32IZCMP-NEXT: .cfi_offset ra, -4 -; RV32IZCMP-NEXT: addi sp, sp, -16 -; RV32IZCMP-NEXT: .cfi_def_cfa_offset 80 -; RV32IZCMP-NEXT: sw t0, 60(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: sw t1, 56(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: sw t2, 52(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: sw a0, 48(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: sw a1, 44(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: sw a2, 40(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: sw a3, 36(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: sw a4, 32(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: sw a5, 28(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: sw a6, 24(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: sw a7, 20(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: sw t3, 16(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: sw t4, 12(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: sw t5, 8(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: sw t6, 4(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: .cfi_offset t0, -20 -; RV32IZCMP-NEXT: .cfi_offset t1, -24 -; RV32IZCMP-NEXT: .cfi_offset t2, -28 -; RV32IZCMP-NEXT: .cfi_offset a0, -32 -; RV32IZCMP-NEXT: .cfi_offset a1, -36 -; RV32IZCMP-NEXT: .cfi_offset a2, -40 -; RV32IZCMP-NEXT: .cfi_offset a3, -44 -; RV32IZCMP-NEXT: .cfi_offset 
a4, -48 -; RV32IZCMP-NEXT: .cfi_offset a5, -52 -; RV32IZCMP-NEXT: .cfi_offset a6, -56 -; RV32IZCMP-NEXT: .cfi_offset a7, -60 -; RV32IZCMP-NEXT: .cfi_offset t3, -64 -; RV32IZCMP-NEXT: .cfi_offset t4, -68 -; RV32IZCMP-NEXT: .cfi_offset t5, -72 -; RV32IZCMP-NEXT: .cfi_offset t6, -76 +; RV32IZCMP-NEXT: sw t0, 56(sp) # 4-byte Folded Spill +; RV32IZCMP-NEXT: sw t1, 52(sp) # 4-byte Folded Spill +; RV32IZCMP-NEXT: sw t2, 48(sp) # 4-byte Folded Spill +; RV32IZCMP-NEXT: sw a0, 44(sp) # 4-byte Folded Spill +; RV32IZCMP-NEXT: sw a1, 40(sp) # 4-byte Folded Spill +; RV32IZCMP-NEXT: sw a2, 36(sp) # 4-byte Folded Spill +; RV32IZCMP-NEXT: sw a3, 32(sp) # 4-byte Folded Spill +; RV32IZCMP-NEXT: sw a4, 28(sp) # 4-byte Folded Spill +; RV32IZCMP-NEXT: sw a5, 24(sp) # 4-byte Folded Spill +; RV32IZCMP-NEXT: sw a6, 20(sp) # 4-byte Folded Spill +; RV32IZCMP-NEXT: sw a7, 16(sp) # 4-byte Folded Spill +; RV32IZCMP-NEXT: sw t3, 12(sp) # 4-byte Folded Spill +; RV32IZCMP-NEXT: sw t4, 8(sp) # 4-byte Folded Spill +; RV32IZCMP-NEXT: sw t5, 4(sp) # 4-byte Folded Spill +; RV32IZCMP-NEXT: sw t6, 0(sp) # 4-byte Folded Spill +; RV32IZCMP-NEXT: .cfi_offset t0, -8 +; RV32IZCMP-NEXT: .cfi_offset t1, -12 +; RV32IZCMP-NEXT: .cfi_offset t2, -16 +; RV32IZCMP-NEXT: .cfi_offset a0, -20 +; RV32IZCMP-NEXT: .cfi_offset a1, -24 +; RV32IZCMP-NEXT: .cfi_offset a2, -28 +; RV32IZCMP-NEXT: .cfi_offset a3, -32 +; RV32IZCMP-NEXT: .cfi_offset a4, -36 +; RV32IZCMP-NEXT: .cfi_offset a5, -40 +; RV32IZCMP-NEXT: .cfi_offset a6, -44 +; RV32IZCMP-NEXT: .cfi_offset a7, -48 +; RV32IZCMP-NEXT: .cfi_offset t3, -52 +; RV32IZCMP-NEXT: .cfi_offset t4, -56 +; RV32IZCMP-NEXT: .cfi_offset t5, -60 +; RV32IZCMP-NEXT: .cfi_offset t6, -64 ; RV32IZCMP-NEXT: call foo_test_irq -; RV32IZCMP-NEXT: lw t0, 60(sp) # 4-byte Folded Reload -; RV32IZCMP-NEXT: lw t1, 56(sp) # 4-byte Folded Reload -; RV32IZCMP-NEXT: lw t2, 52(sp) # 4-byte Folded Reload -; RV32IZCMP-NEXT: lw a0, 48(sp) # 4-byte Folded Reload -; RV32IZCMP-NEXT: lw a1, 44(sp) # 4-byte Folded Reload -; RV32IZCMP-NEXT: lw a2, 40(sp) # 4-byte Folded Reload -; RV32IZCMP-NEXT: lw a3, 36(sp) # 4-byte Folded Reload -; RV32IZCMP-NEXT: lw a4, 32(sp) # 4-byte Folded Reload -; RV32IZCMP-NEXT: lw a5, 28(sp) # 4-byte Folded Reload -; RV32IZCMP-NEXT: lw a6, 24(sp) # 4-byte Folded Reload -; RV32IZCMP-NEXT: lw a7, 20(sp) # 4-byte Folded Reload -; RV32IZCMP-NEXT: lw t3, 16(sp) # 4-byte Folded Reload -; RV32IZCMP-NEXT: lw t4, 12(sp) # 4-byte Folded Reload -; RV32IZCMP-NEXT: lw t5, 8(sp) # 4-byte Folded Reload -; RV32IZCMP-NEXT: lw t6, 4(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw t0, 56(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw t1, 52(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw t2, 48(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw a0, 44(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw a1, 40(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw a2, 36(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw a3, 32(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw a4, 28(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw a5, 24(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw a6, 20(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw a7, 16(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw t3, 12(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw t4, 8(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw t5, 4(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw t6, 0(sp) # 4-byte Folded Reload ; RV32IZCMP-NEXT: .cfi_restore t0 ; RV32IZCMP-NEXT: .cfi_restore t1 ; RV32IZCMP-NEXT: .cfi_restore t2 @@ -1838,8 +1836,6 @@ define void @foo_with_irq() 
"interrupt"="user" { ; RV32IZCMP-NEXT: .cfi_restore t4 ; RV32IZCMP-NEXT: .cfi_restore t5 ; RV32IZCMP-NEXT: .cfi_restore t6 -; RV32IZCMP-NEXT: addi sp, sp, 16 -; RV32IZCMP-NEXT: .cfi_def_cfa_offset 64 ; RV32IZCMP-NEXT: cm.pop {ra}, 64 ; RV32IZCMP-NEXT: .cfi_restore ra ; RV32IZCMP-NEXT: .cfi_def_cfa_offset 0 @@ -1850,54 +1846,54 @@ define void @foo_with_irq() "interrupt"="user" { ; RV64IZCMP-NEXT: cm.push {ra}, -64 ; RV64IZCMP-NEXT: .cfi_def_cfa_offset 64 ; RV64IZCMP-NEXT: .cfi_offset ra, -8 -; RV64IZCMP-NEXT: addi sp, sp, -80 -; RV64IZCMP-NEXT: .cfi_def_cfa_offset 144 -; RV64IZCMP-NEXT: sd t0, 120(sp) # 8-byte Folded Spill -; RV64IZCMP-NEXT: sd t1, 112(sp) # 8-byte Folded Spill -; RV64IZCMP-NEXT: sd t2, 104(sp) # 8-byte Folded Spill -; RV64IZCMP-NEXT: sd a0, 96(sp) # 8-byte Folded Spill -; RV64IZCMP-NEXT: sd a1, 88(sp) # 8-byte Folded Spill -; RV64IZCMP-NEXT: sd a2, 80(sp) # 8-byte Folded Spill -; RV64IZCMP-NEXT: sd a3, 72(sp) # 8-byte Folded Spill -; RV64IZCMP-NEXT: sd a4, 64(sp) # 8-byte Folded Spill -; RV64IZCMP-NEXT: sd a5, 56(sp) # 8-byte Folded Spill -; RV64IZCMP-NEXT: sd a6, 48(sp) # 8-byte Folded Spill -; RV64IZCMP-NEXT: sd a7, 40(sp) # 8-byte Folded Spill -; RV64IZCMP-NEXT: sd t3, 32(sp) # 8-byte Folded Spill -; RV64IZCMP-NEXT: sd t4, 24(sp) # 8-byte Folded Spill -; RV64IZCMP-NEXT: sd t5, 16(sp) # 8-byte Folded Spill -; RV64IZCMP-NEXT: sd t6, 8(sp) # 8-byte Folded Spill -; RV64IZCMP-NEXT: .cfi_offset t0, -24 -; RV64IZCMP-NEXT: .cfi_offset t1, -32 -; RV64IZCMP-NEXT: .cfi_offset t2, -40 -; RV64IZCMP-NEXT: .cfi_offset a0, -48 -; RV64IZCMP-NEXT: .cfi_offset a1, -56 -; RV64IZCMP-NEXT: .cfi_offset a2, -64 -; RV64IZCMP-NEXT: .cfi_offset a3, -72 -; RV64IZCMP-NEXT: .cfi_offset a4, -80 -; RV64IZCMP-NEXT: .cfi_offset a5, -88 -; RV64IZCMP-NEXT: .cfi_offset a6, -96 -; RV64IZCMP-NEXT: .cfi_offset a7, -104 -; RV64IZCMP-NEXT: .cfi_offset t3, -112 -; RV64IZCMP-NEXT: .cfi_offset t4, -120 -; RV64IZCMP-NEXT: .cfi_offset t5, -128 -; RV64IZCMP-NEXT: .cfi_offset t6, -136 +; RV64IZCMP-NEXT: addi sp, sp, -64 +; RV64IZCMP-NEXT: .cfi_def_cfa_offset 128 +; RV64IZCMP-NEXT: sd t0, 112(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd t1, 104(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd t2, 96(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd a0, 88(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd a1, 80(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd a2, 72(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd a3, 64(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd a4, 56(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd a5, 48(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd a6, 40(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd a7, 32(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd t3, 24(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd t4, 16(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd t5, 8(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd t6, 0(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: .cfi_offset t0, -16 +; RV64IZCMP-NEXT: .cfi_offset t1, -24 +; RV64IZCMP-NEXT: .cfi_offset t2, -32 +; RV64IZCMP-NEXT: .cfi_offset a0, -40 +; RV64IZCMP-NEXT: .cfi_offset a1, -48 +; RV64IZCMP-NEXT: .cfi_offset a2, -56 +; RV64IZCMP-NEXT: .cfi_offset a3, -64 +; RV64IZCMP-NEXT: .cfi_offset a4, -72 +; RV64IZCMP-NEXT: .cfi_offset a5, -80 +; RV64IZCMP-NEXT: .cfi_offset a6, -88 +; RV64IZCMP-NEXT: .cfi_offset a7, -96 +; RV64IZCMP-NEXT: .cfi_offset t3, -104 +; RV64IZCMP-NEXT: .cfi_offset t4, -112 +; RV64IZCMP-NEXT: .cfi_offset t5, -120 +; RV64IZCMP-NEXT: .cfi_offset t6, -128 ; RV64IZCMP-NEXT: call foo_test_irq -; RV64IZCMP-NEXT: ld t0, 120(sp) 
# 8-byte Folded Reload -; RV64IZCMP-NEXT: ld t1, 112(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: ld t2, 104(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: ld a0, 96(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: ld a1, 88(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: ld a2, 80(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: ld a3, 72(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: ld a4, 64(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: ld a5, 56(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: ld a6, 48(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: ld a7, 40(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: ld t3, 32(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: ld t4, 24(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: ld t5, 16(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: ld t6, 8(sp) # 8-byte Folded Reload +; RV64IZCMP-NEXT: ld t0, 112(sp) # 8-byte Folded Reload +; RV64IZCMP-NEXT: ld t1, 104(sp) # 8-byte Folded Reload +; RV64IZCMP-NEXT: ld t2, 96(sp) # 8-byte Folded Reload +; RV64IZCMP-NEXT: ld a0, 88(sp) # 8-byte Folded Reload +; RV64IZCMP-NEXT: ld a1, 80(sp) # 8-byte Folded Reload +; RV64IZCMP-NEXT: ld a2, 72(sp) # 8-byte Folded Reload +; RV64IZCMP-NEXT: ld a3, 64(sp) # 8-byte Folded Reload +; RV64IZCMP-NEXT: ld a4, 56(sp) # 8-byte Folded Reload +; RV64IZCMP-NEXT: ld a5, 48(sp) # 8-byte Folded Reload +; RV64IZCMP-NEXT: ld a6, 40(sp) # 8-byte Folded Reload +; RV64IZCMP-NEXT: ld a7, 32(sp) # 8-byte Folded Reload +; RV64IZCMP-NEXT: ld t3, 24(sp) # 8-byte Folded Reload +; RV64IZCMP-NEXT: ld t4, 16(sp) # 8-byte Folded Reload +; RV64IZCMP-NEXT: ld t5, 8(sp) # 8-byte Folded Reload +; RV64IZCMP-NEXT: ld t6, 0(sp) # 8-byte Folded Reload ; RV64IZCMP-NEXT: .cfi_restore t0 ; RV64IZCMP-NEXT: .cfi_restore t1 ; RV64IZCMP-NEXT: .cfi_restore t2 @@ -1913,7 +1909,7 @@ define void @foo_with_irq() "interrupt"="user" { ; RV64IZCMP-NEXT: .cfi_restore t4 ; RV64IZCMP-NEXT: .cfi_restore t5 ; RV64IZCMP-NEXT: .cfi_restore t6 -; RV64IZCMP-NEXT: addi sp, sp, 80 +; RV64IZCMP-NEXT: addi sp, sp, 64 ; RV64IZCMP-NEXT: .cfi_def_cfa_offset 64 ; RV64IZCMP-NEXT: cm.pop {ra}, 64 ; RV64IZCMP-NEXT: .cfi_restore ra @@ -1925,54 +1921,52 @@ define void @foo_with_irq() "interrupt"="user" { ; RV32IZCMP-SR-NEXT: cm.push {ra}, -64 ; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 64 ; RV32IZCMP-SR-NEXT: .cfi_offset ra, -4 -; RV32IZCMP-SR-NEXT: addi sp, sp, -16 -; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 80 -; RV32IZCMP-SR-NEXT: sw t0, 60(sp) # 4-byte Folded Spill -; RV32IZCMP-SR-NEXT: sw t1, 56(sp) # 4-byte Folded Spill -; RV32IZCMP-SR-NEXT: sw t2, 52(sp) # 4-byte Folded Spill -; RV32IZCMP-SR-NEXT: sw a0, 48(sp) # 4-byte Folded Spill -; RV32IZCMP-SR-NEXT: sw a1, 44(sp) # 4-byte Folded Spill -; RV32IZCMP-SR-NEXT: sw a2, 40(sp) # 4-byte Folded Spill -; RV32IZCMP-SR-NEXT: sw a3, 36(sp) # 4-byte Folded Spill -; RV32IZCMP-SR-NEXT: sw a4, 32(sp) # 4-byte Folded Spill -; RV32IZCMP-SR-NEXT: sw a5, 28(sp) # 4-byte Folded Spill -; RV32IZCMP-SR-NEXT: sw a6, 24(sp) # 4-byte Folded Spill -; RV32IZCMP-SR-NEXT: sw a7, 20(sp) # 4-byte Folded Spill -; RV32IZCMP-SR-NEXT: sw t3, 16(sp) # 4-byte Folded Spill -; RV32IZCMP-SR-NEXT: sw t4, 12(sp) # 4-byte Folded Spill -; RV32IZCMP-SR-NEXT: sw t5, 8(sp) # 4-byte Folded Spill -; RV32IZCMP-SR-NEXT: sw t6, 4(sp) # 4-byte Folded Spill -; RV32IZCMP-SR-NEXT: .cfi_offset t0, -20 -; RV32IZCMP-SR-NEXT: .cfi_offset t1, -24 -; RV32IZCMP-SR-NEXT: .cfi_offset t2, -28 -; RV32IZCMP-SR-NEXT: .cfi_offset a0, -32 -; RV32IZCMP-SR-NEXT: .cfi_offset a1, -36 -; RV32IZCMP-SR-NEXT: .cfi_offset a2, -40 -; RV32IZCMP-SR-NEXT: .cfi_offset a3, -44 -; 
RV32IZCMP-SR-NEXT: .cfi_offset a4, -48 -; RV32IZCMP-SR-NEXT: .cfi_offset a5, -52 -; RV32IZCMP-SR-NEXT: .cfi_offset a6, -56 -; RV32IZCMP-SR-NEXT: .cfi_offset a7, -60 -; RV32IZCMP-SR-NEXT: .cfi_offset t3, -64 -; RV32IZCMP-SR-NEXT: .cfi_offset t4, -68 -; RV32IZCMP-SR-NEXT: .cfi_offset t5, -72 -; RV32IZCMP-SR-NEXT: .cfi_offset t6, -76 +; RV32IZCMP-SR-NEXT: sw t0, 56(sp) # 4-byte Folded Spill +; RV32IZCMP-SR-NEXT: sw t1, 52(sp) # 4-byte Folded Spill +; RV32IZCMP-SR-NEXT: sw t2, 48(sp) # 4-byte Folded Spill +; RV32IZCMP-SR-NEXT: sw a0, 44(sp) # 4-byte Folded Spill +; RV32IZCMP-SR-NEXT: sw a1, 40(sp) # 4-byte Folded Spill +; RV32IZCMP-SR-NEXT: sw a2, 36(sp) # 4-byte Folded Spill +; RV32IZCMP-SR-NEXT: sw a3, 32(sp) # 4-byte Folded Spill +; RV32IZCMP-SR-NEXT: sw a4, 28(sp) # 4-byte Folded Spill +; RV32IZCMP-SR-NEXT: sw a5, 24(sp) # 4-byte Folded Spill +; RV32IZCMP-SR-NEXT: sw a6, 20(sp) # 4-byte Folded Spill +; RV32IZCMP-SR-NEXT: sw a7, 16(sp) # 4-byte Folded Spill +; RV32IZCMP-SR-NEXT: sw t3, 12(sp) # 4-byte Folded Spill +; RV32IZCMP-SR-NEXT: sw t4, 8(sp) # 4-byte Folded Spill +; RV32IZCMP-SR-NEXT: sw t5, 4(sp) # 4-byte Folded Spill +; RV32IZCMP-SR-NEXT: sw t6, 0(sp) # 4-byte Folded Spill +; RV32IZCMP-SR-NEXT: .cfi_offset t0, -8 +; RV32IZCMP-SR-NEXT: .cfi_offset t1, -12 +; RV32IZCMP-SR-NEXT: .cfi_offset t2, -16 +; RV32IZCMP-SR-NEXT: .cfi_offset a0, -20 +; RV32IZCMP-SR-NEXT: .cfi_offset a1, -24 +; RV32IZCMP-SR-NEXT: .cfi_offset a2, -28 +; RV32IZCMP-SR-NEXT: .cfi_offset a3, -32 +; RV32IZCMP-SR-NEXT: .cfi_offset a4, -36 +; RV32IZCMP-SR-NEXT: .cfi_offset a5, -40 +; RV32IZCMP-SR-NEXT: .cfi_offset a6, -44 +; RV32IZCMP-SR-NEXT: .cfi_offset a7, -48 +; RV32IZCMP-SR-NEXT: .cfi_offset t3, -52 +; RV32IZCMP-SR-NEXT: .cfi_offset t4, -56 +; RV32IZCMP-SR-NEXT: .cfi_offset t5, -60 +; RV32IZCMP-SR-NEXT: .cfi_offset t6, -64 ; RV32IZCMP-SR-NEXT: call foo_test_irq -; RV32IZCMP-SR-NEXT: lw t0, 60(sp) # 4-byte Folded Reload -; RV32IZCMP-SR-NEXT: lw t1, 56(sp) # 4-byte Folded Reload -; RV32IZCMP-SR-NEXT: lw t2, 52(sp) # 4-byte Folded Reload -; RV32IZCMP-SR-NEXT: lw a0, 48(sp) # 4-byte Folded Reload -; RV32IZCMP-SR-NEXT: lw a1, 44(sp) # 4-byte Folded Reload -; RV32IZCMP-SR-NEXT: lw a2, 40(sp) # 4-byte Folded Reload -; RV32IZCMP-SR-NEXT: lw a3, 36(sp) # 4-byte Folded Reload -; RV32IZCMP-SR-NEXT: lw a4, 32(sp) # 4-byte Folded Reload -; RV32IZCMP-SR-NEXT: lw a5, 28(sp) # 4-byte Folded Reload -; RV32IZCMP-SR-NEXT: lw a6, 24(sp) # 4-byte Folded Reload -; RV32IZCMP-SR-NEXT: lw a7, 20(sp) # 4-byte Folded Reload -; RV32IZCMP-SR-NEXT: lw t3, 16(sp) # 4-byte Folded Reload -; RV32IZCMP-SR-NEXT: lw t4, 12(sp) # 4-byte Folded Reload -; RV32IZCMP-SR-NEXT: lw t5, 8(sp) # 4-byte Folded Reload -; RV32IZCMP-SR-NEXT: lw t6, 4(sp) # 4-byte Folded Reload +; RV32IZCMP-SR-NEXT: lw t0, 56(sp) # 4-byte Folded Reload +; RV32IZCMP-SR-NEXT: lw t1, 52(sp) # 4-byte Folded Reload +; RV32IZCMP-SR-NEXT: lw t2, 48(sp) # 4-byte Folded Reload +; RV32IZCMP-SR-NEXT: lw a0, 44(sp) # 4-byte Folded Reload +; RV32IZCMP-SR-NEXT: lw a1, 40(sp) # 4-byte Folded Reload +; RV32IZCMP-SR-NEXT: lw a2, 36(sp) # 4-byte Folded Reload +; RV32IZCMP-SR-NEXT: lw a3, 32(sp) # 4-byte Folded Reload +; RV32IZCMP-SR-NEXT: lw a4, 28(sp) # 4-byte Folded Reload +; RV32IZCMP-SR-NEXT: lw a5, 24(sp) # 4-byte Folded Reload +; RV32IZCMP-SR-NEXT: lw a6, 20(sp) # 4-byte Folded Reload +; RV32IZCMP-SR-NEXT: lw a7, 16(sp) # 4-byte Folded Reload +; RV32IZCMP-SR-NEXT: lw t3, 12(sp) # 4-byte Folded Reload +; RV32IZCMP-SR-NEXT: lw t4, 8(sp) # 4-byte Folded Reload +; RV32IZCMP-SR-NEXT: lw t5, 4(sp) # 
4-byte Folded Reload +; RV32IZCMP-SR-NEXT: lw t6, 0(sp) # 4-byte Folded Reload ; RV32IZCMP-SR-NEXT: .cfi_restore t0 ; RV32IZCMP-SR-NEXT: .cfi_restore t1 ; RV32IZCMP-SR-NEXT: .cfi_restore t2 @@ -1988,8 +1982,6 @@ define void @foo_with_irq() "interrupt"="user" { ; RV32IZCMP-SR-NEXT: .cfi_restore t4 ; RV32IZCMP-SR-NEXT: .cfi_restore t5 ; RV32IZCMP-SR-NEXT: .cfi_restore t6 -; RV32IZCMP-SR-NEXT: addi sp, sp, 16 -; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 64 ; RV32IZCMP-SR-NEXT: cm.pop {ra}, 64 ; RV32IZCMP-SR-NEXT: .cfi_restore ra ; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 0 @@ -2000,54 +1992,54 @@ define void @foo_with_irq() "interrupt"="user" { ; RV64IZCMP-SR-NEXT: cm.push {ra}, -64 ; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 64 ; RV64IZCMP-SR-NEXT: .cfi_offset ra, -8 -; RV64IZCMP-SR-NEXT: addi sp, sp, -80 -; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 144 -; RV64IZCMP-SR-NEXT: sd t0, 120(sp) # 8-byte Folded Spill -; RV64IZCMP-SR-NEXT: sd t1, 112(sp) # 8-byte Folded Spill -; RV64IZCMP-SR-NEXT: sd t2, 104(sp) # 8-byte Folded Spill -; RV64IZCMP-SR-NEXT: sd a0, 96(sp) # 8-byte Folded Spill -; RV64IZCMP-SR-NEXT: sd a1, 88(sp) # 8-byte Folded Spill -; RV64IZCMP-SR-NEXT: sd a2, 80(sp) # 8-byte Folded Spill -; RV64IZCMP-SR-NEXT: sd a3, 72(sp) # 8-byte Folded Spill -; RV64IZCMP-SR-NEXT: sd a4, 64(sp) # 8-byte Folded Spill -; RV64IZCMP-SR-NEXT: sd a5, 56(sp) # 8-byte Folded Spill -; RV64IZCMP-SR-NEXT: sd a6, 48(sp) # 8-byte Folded Spill -; RV64IZCMP-SR-NEXT: sd a7, 40(sp) # 8-byte Folded Spill -; RV64IZCMP-SR-NEXT: sd t3, 32(sp) # 8-byte Folded Spill -; RV64IZCMP-SR-NEXT: sd t4, 24(sp) # 8-byte Folded Spill -; RV64IZCMP-SR-NEXT: sd t5, 16(sp) # 8-byte Folded Spill -; RV64IZCMP-SR-NEXT: sd t6, 8(sp) # 8-byte Folded Spill -; RV64IZCMP-SR-NEXT: .cfi_offset t0, -24 -; RV64IZCMP-SR-NEXT: .cfi_offset t1, -32 -; RV64IZCMP-SR-NEXT: .cfi_offset t2, -40 -; RV64IZCMP-SR-NEXT: .cfi_offset a0, -48 -; RV64IZCMP-SR-NEXT: .cfi_offset a1, -56 -; RV64IZCMP-SR-NEXT: .cfi_offset a2, -64 -; RV64IZCMP-SR-NEXT: .cfi_offset a3, -72 -; RV64IZCMP-SR-NEXT: .cfi_offset a4, -80 -; RV64IZCMP-SR-NEXT: .cfi_offset a5, -88 -; RV64IZCMP-SR-NEXT: .cfi_offset a6, -96 -; RV64IZCMP-SR-NEXT: .cfi_offset a7, -104 -; RV64IZCMP-SR-NEXT: .cfi_offset t3, -112 -; RV64IZCMP-SR-NEXT: .cfi_offset t4, -120 -; RV64IZCMP-SR-NEXT: .cfi_offset t5, -128 -; RV64IZCMP-SR-NEXT: .cfi_offset t6, -136 +; RV64IZCMP-SR-NEXT: addi sp, sp, -64 +; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 128 +; RV64IZCMP-SR-NEXT: sd t0, 112(sp) # 8-byte Folded Spill +; RV64IZCMP-SR-NEXT: sd t1, 104(sp) # 8-byte Folded Spill +; RV64IZCMP-SR-NEXT: sd t2, 96(sp) # 8-byte Folded Spill +; RV64IZCMP-SR-NEXT: sd a0, 88(sp) # 8-byte Folded Spill +; RV64IZCMP-SR-NEXT: sd a1, 80(sp) # 8-byte Folded Spill +; RV64IZCMP-SR-NEXT: sd a2, 72(sp) # 8-byte Folded Spill +; RV64IZCMP-SR-NEXT: sd a3, 64(sp) # 8-byte Folded Spill +; RV64IZCMP-SR-NEXT: sd a4, 56(sp) # 8-byte Folded Spill +; RV64IZCMP-SR-NEXT: sd a5, 48(sp) # 8-byte Folded Spill +; RV64IZCMP-SR-NEXT: sd a6, 40(sp) # 8-byte Folded Spill +; RV64IZCMP-SR-NEXT: sd a7, 32(sp) # 8-byte Folded Spill +; RV64IZCMP-SR-NEXT: sd t3, 24(sp) # 8-byte Folded Spill +; RV64IZCMP-SR-NEXT: sd t4, 16(sp) # 8-byte Folded Spill +; RV64IZCMP-SR-NEXT: sd t5, 8(sp) # 8-byte Folded Spill +; RV64IZCMP-SR-NEXT: sd t6, 0(sp) # 8-byte Folded Spill +; RV64IZCMP-SR-NEXT: .cfi_offset t0, -16 +; RV64IZCMP-SR-NEXT: .cfi_offset t1, -24 +; RV64IZCMP-SR-NEXT: .cfi_offset t2, -32 +; RV64IZCMP-SR-NEXT: .cfi_offset a0, -40 +; RV64IZCMP-SR-NEXT: .cfi_offset a1, -48 +; RV64IZCMP-SR-NEXT: 
.cfi_offset a2, -56 +; RV64IZCMP-SR-NEXT: .cfi_offset a3, -64 +; RV64IZCMP-SR-NEXT: .cfi_offset a4, -72 +; RV64IZCMP-SR-NEXT: .cfi_offset a5, -80 +; RV64IZCMP-SR-NEXT: .cfi_offset a6, -88 +; RV64IZCMP-SR-NEXT: .cfi_offset a7, -96 +; RV64IZCMP-SR-NEXT: .cfi_offset t3, -104 +; RV64IZCMP-SR-NEXT: .cfi_offset t4, -112 +; RV64IZCMP-SR-NEXT: .cfi_offset t5, -120 +; RV64IZCMP-SR-NEXT: .cfi_offset t6, -128 ; RV64IZCMP-SR-NEXT: call foo_test_irq -; RV64IZCMP-SR-NEXT: ld t0, 120(sp) # 8-byte Folded Reload -; RV64IZCMP-SR-NEXT: ld t1, 112(sp) # 8-byte Folded Reload -; RV64IZCMP-SR-NEXT: ld t2, 104(sp) # 8-byte Folded Reload -; RV64IZCMP-SR-NEXT: ld a0, 96(sp) # 8-byte Folded Reload -; RV64IZCMP-SR-NEXT: ld a1, 88(sp) # 8-byte Folded Reload -; RV64IZCMP-SR-NEXT: ld a2, 80(sp) # 8-byte Folded Reload -; RV64IZCMP-SR-NEXT: ld a3, 72(sp) # 8-byte Folded Reload -; RV64IZCMP-SR-NEXT: ld a4, 64(sp) # 8-byte Folded Reload -; RV64IZCMP-SR-NEXT: ld a5, 56(sp) # 8-byte Folded Reload -; RV64IZCMP-SR-NEXT: ld a6, 48(sp) # 8-byte Folded Reload -; RV64IZCMP-SR-NEXT: ld a7, 40(sp) # 8-byte Folded Reload -; RV64IZCMP-SR-NEXT: ld t3, 32(sp) # 8-byte Folded Reload -; RV64IZCMP-SR-NEXT: ld t4, 24(sp) # 8-byte Folded Reload -; RV64IZCMP-SR-NEXT: ld t5, 16(sp) # 8-byte Folded Reload -; RV64IZCMP-SR-NEXT: ld t6, 8(sp) # 8-byte Folded Reload +; RV64IZCMP-SR-NEXT: ld t0, 112(sp) # 8-byte Folded Reload +; RV64IZCMP-SR-NEXT: ld t1, 104(sp) # 8-byte Folded Reload +; RV64IZCMP-SR-NEXT: ld t2, 96(sp) # 8-byte Folded Reload +; RV64IZCMP-SR-NEXT: ld a0, 88(sp) # 8-byte Folded Reload +; RV64IZCMP-SR-NEXT: ld a1, 80(sp) # 8-byte Folded Reload +; RV64IZCMP-SR-NEXT: ld a2, 72(sp) # 8-byte Folded Reload +; RV64IZCMP-SR-NEXT: ld a3, 64(sp) # 8-byte Folded Reload +; RV64IZCMP-SR-NEXT: ld a4, 56(sp) # 8-byte Folded Reload +; RV64IZCMP-SR-NEXT: ld a5, 48(sp) # 8-byte Folded Reload +; RV64IZCMP-SR-NEXT: ld a6, 40(sp) # 8-byte Folded Reload +; RV64IZCMP-SR-NEXT: ld a7, 32(sp) # 8-byte Folded Reload +; RV64IZCMP-SR-NEXT: ld t3, 24(sp) # 8-byte Folded Reload +; RV64IZCMP-SR-NEXT: ld t4, 16(sp) # 8-byte Folded Reload +; RV64IZCMP-SR-NEXT: ld t5, 8(sp) # 8-byte Folded Reload +; RV64IZCMP-SR-NEXT: ld t6, 0(sp) # 8-byte Folded Reload ; RV64IZCMP-SR-NEXT: .cfi_restore t0 ; RV64IZCMP-SR-NEXT: .cfi_restore t1 ; RV64IZCMP-SR-NEXT: .cfi_restore t2 @@ -2063,7 +2055,7 @@ define void @foo_with_irq() "interrupt"="user" { ; RV64IZCMP-SR-NEXT: .cfi_restore t4 ; RV64IZCMP-SR-NEXT: .cfi_restore t5 ; RV64IZCMP-SR-NEXT: .cfi_restore t6 -; RV64IZCMP-SR-NEXT: addi sp, sp, 80 +; RV64IZCMP-SR-NEXT: addi sp, sp, 64 ; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 64 ; RV64IZCMP-SR-NEXT: cm.pop {ra}, 64 ; RV64IZCMP-SR-NEXT: .cfi_restore ra @@ -2299,52 +2291,52 @@ define void @callee_with_irq() "interrupt"="user" { ; RV32IZCMP-NEXT: .cfi_offset s9, -12 ; RV32IZCMP-NEXT: .cfi_offset s10, -8 ; RV32IZCMP-NEXT: .cfi_offset s11, -4 -; RV32IZCMP-NEXT: addi sp, sp, -48 -; RV32IZCMP-NEXT: .cfi_def_cfa_offset 160 -; RV32IZCMP-NEXT: sw t0, 92(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: sw t1, 88(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: sw t2, 84(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: sw a0, 80(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: sw a1, 76(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: sw a2, 72(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: sw a3, 68(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: sw a4, 64(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: sw a5, 60(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: sw a6, 56(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: sw a7, 52(sp) # 
4-byte Folded Spill -; RV32IZCMP-NEXT: sw t3, 48(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: sw t4, 44(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: sw t5, 40(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: sw t6, 36(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: .cfi_offset t0, -68 -; RV32IZCMP-NEXT: .cfi_offset t1, -72 -; RV32IZCMP-NEXT: .cfi_offset t2, -76 -; RV32IZCMP-NEXT: .cfi_offset a0, -80 -; RV32IZCMP-NEXT: .cfi_offset a1, -84 -; RV32IZCMP-NEXT: .cfi_offset a2, -88 -; RV32IZCMP-NEXT: .cfi_offset a3, -92 -; RV32IZCMP-NEXT: .cfi_offset a4, -96 -; RV32IZCMP-NEXT: .cfi_offset a5, -100 -; RV32IZCMP-NEXT: .cfi_offset a6, -104 -; RV32IZCMP-NEXT: .cfi_offset a7, -108 -; RV32IZCMP-NEXT: .cfi_offset t3, -112 -; RV32IZCMP-NEXT: .cfi_offset t4, -116 -; RV32IZCMP-NEXT: .cfi_offset t5, -120 -; RV32IZCMP-NEXT: .cfi_offset t6, -124 +; RV32IZCMP-NEXT: addi sp, sp, -32 +; RV32IZCMP-NEXT: .cfi_def_cfa_offset 144 +; RV32IZCMP-NEXT: sw t0, 88(sp) # 4-byte Folded Spill +; RV32IZCMP-NEXT: sw t1, 84(sp) # 4-byte Folded Spill +; RV32IZCMP-NEXT: sw t2, 80(sp) # 4-byte Folded Spill +; RV32IZCMP-NEXT: sw a0, 76(sp) # 4-byte Folded Spill +; RV32IZCMP-NEXT: sw a1, 72(sp) # 4-byte Folded Spill +; RV32IZCMP-NEXT: sw a2, 68(sp) # 4-byte Folded Spill +; RV32IZCMP-NEXT: sw a3, 64(sp) # 4-byte Folded Spill +; RV32IZCMP-NEXT: sw a4, 60(sp) # 4-byte Folded Spill +; RV32IZCMP-NEXT: sw a5, 56(sp) # 4-byte Folded Spill +; RV32IZCMP-NEXT: sw a6, 52(sp) # 4-byte Folded Spill +; RV32IZCMP-NEXT: sw a7, 48(sp) # 4-byte Folded Spill +; RV32IZCMP-NEXT: sw t3, 44(sp) # 4-byte Folded Spill +; RV32IZCMP-NEXT: sw t4, 40(sp) # 4-byte Folded Spill +; RV32IZCMP-NEXT: sw t5, 36(sp) # 4-byte Folded Spill +; RV32IZCMP-NEXT: sw t6, 32(sp) # 4-byte Folded Spill +; RV32IZCMP-NEXT: .cfi_offset t0, -56 +; RV32IZCMP-NEXT: .cfi_offset t1, -60 +; RV32IZCMP-NEXT: .cfi_offset t2, -64 +; RV32IZCMP-NEXT: .cfi_offset a0, -68 +; RV32IZCMP-NEXT: .cfi_offset a1, -72 +; RV32IZCMP-NEXT: .cfi_offset a2, -76 +; RV32IZCMP-NEXT: .cfi_offset a3, -80 +; RV32IZCMP-NEXT: .cfi_offset a4, -84 +; RV32IZCMP-NEXT: .cfi_offset a5, -88 +; RV32IZCMP-NEXT: .cfi_offset a6, -92 +; RV32IZCMP-NEXT: .cfi_offset a7, -96 +; RV32IZCMP-NEXT: .cfi_offset t3, -100 +; RV32IZCMP-NEXT: .cfi_offset t4, -104 +; RV32IZCMP-NEXT: .cfi_offset t5, -108 +; RV32IZCMP-NEXT: .cfi_offset t6, -112 ; RV32IZCMP-NEXT: lui t0, %hi(var_test_irq) ; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq)(t0) -; RV32IZCMP-NEXT: sw a0, 32(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq+4)(t0) ; RV32IZCMP-NEXT: sw a0, 28(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq+8)(t0) +; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq+4)(t0) ; RV32IZCMP-NEXT: sw a0, 24(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq+12)(t0) +; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq+8)(t0) ; RV32IZCMP-NEXT: sw a0, 20(sp) # 4-byte Folded Spill +; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq+12)(t0) +; RV32IZCMP-NEXT: sw a0, 16(sp) # 4-byte Folded Spill ; RV32IZCMP-NEXT: addi a5, t0, %lo(var_test_irq) ; RV32IZCMP-NEXT: lw a0, 16(a5) -; RV32IZCMP-NEXT: sw a0, 16(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: lw a0, 20(a5) ; RV32IZCMP-NEXT: sw a0, 12(sp) # 4-byte Folded Spill +; RV32IZCMP-NEXT: lw a0, 20(a5) +; RV32IZCMP-NEXT: sw a0, 8(sp) # 4-byte Folded Spill ; RV32IZCMP-NEXT: lw t4, 24(a5) ; RV32IZCMP-NEXT: lw t5, 28(a5) ; RV32IZCMP-NEXT: lw t6, 32(a5) @@ -2397,33 +2389,33 @@ define void @callee_with_irq() "interrupt"="user" { ; RV32IZCMP-NEXT: sw t6, 32(a5) ; RV32IZCMP-NEXT: sw t5, 28(a5) ; RV32IZCMP-NEXT: 
sw t4, 24(a5) -; RV32IZCMP-NEXT: lw a0, 12(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw a0, 8(sp) # 4-byte Folded Reload ; RV32IZCMP-NEXT: sw a0, 20(a5) -; RV32IZCMP-NEXT: lw a0, 16(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw a0, 12(sp) # 4-byte Folded Reload ; RV32IZCMP-NEXT: sw a0, 16(a5) -; RV32IZCMP-NEXT: lw a0, 20(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw a0, 16(sp) # 4-byte Folded Reload ; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq+12)(t0) -; RV32IZCMP-NEXT: lw a0, 24(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw a0, 20(sp) # 4-byte Folded Reload ; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq+8)(t0) -; RV32IZCMP-NEXT: lw a0, 28(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw a0, 24(sp) # 4-byte Folded Reload ; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq+4)(t0) -; RV32IZCMP-NEXT: lw a0, 32(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw a0, 28(sp) # 4-byte Folded Reload ; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq)(t0) -; RV32IZCMP-NEXT: lw t0, 92(sp) # 4-byte Folded Reload -; RV32IZCMP-NEXT: lw t1, 88(sp) # 4-byte Folded Reload -; RV32IZCMP-NEXT: lw t2, 84(sp) # 4-byte Folded Reload -; RV32IZCMP-NEXT: lw a0, 80(sp) # 4-byte Folded Reload -; RV32IZCMP-NEXT: lw a1, 76(sp) # 4-byte Folded Reload -; RV32IZCMP-NEXT: lw a2, 72(sp) # 4-byte Folded Reload -; RV32IZCMP-NEXT: lw a3, 68(sp) # 4-byte Folded Reload -; RV32IZCMP-NEXT: lw a4, 64(sp) # 4-byte Folded Reload -; RV32IZCMP-NEXT: lw a5, 60(sp) # 4-byte Folded Reload -; RV32IZCMP-NEXT: lw a6, 56(sp) # 4-byte Folded Reload -; RV32IZCMP-NEXT: lw a7, 52(sp) # 4-byte Folded Reload -; RV32IZCMP-NEXT: lw t3, 48(sp) # 4-byte Folded Reload -; RV32IZCMP-NEXT: lw t4, 44(sp) # 4-byte Folded Reload -; RV32IZCMP-NEXT: lw t5, 40(sp) # 4-byte Folded Reload -; RV32IZCMP-NEXT: lw t6, 36(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw t0, 88(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw t1, 84(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw t2, 80(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw a0, 76(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw a1, 72(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw a2, 68(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw a3, 64(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw a4, 60(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw a5, 56(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw a6, 52(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw a7, 48(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw t3, 44(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw t4, 40(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw t5, 36(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw t6, 32(sp) # 4-byte Folded Reload ; RV32IZCMP-NEXT: .cfi_restore t0 ; RV32IZCMP-NEXT: .cfi_restore t1 ; RV32IZCMP-NEXT: .cfi_restore t2 @@ -2439,7 +2431,7 @@ define void @callee_with_irq() "interrupt"="user" { ; RV32IZCMP-NEXT: .cfi_restore t4 ; RV32IZCMP-NEXT: .cfi_restore t5 ; RV32IZCMP-NEXT: .cfi_restore t6 -; RV32IZCMP-NEXT: addi sp, sp, 48 +; RV32IZCMP-NEXT: addi sp, sp, 32 ; RV32IZCMP-NEXT: .cfi_def_cfa_offset 112 ; RV32IZCMP-NEXT: cm.pop {ra, s0-s11}, 112 ; RV32IZCMP-NEXT: .cfi_restore ra @@ -2475,52 +2467,52 @@ define void @callee_with_irq() "interrupt"="user" { ; RV64IZCMP-NEXT: .cfi_offset s9, -24 ; RV64IZCMP-NEXT: .cfi_offset s10, -16 ; RV64IZCMP-NEXT: .cfi_offset s11, -8 -; RV64IZCMP-NEXT: addi sp, sp, -128 -; RV64IZCMP-NEXT: .cfi_def_cfa_offset 288 -; RV64IZCMP-NEXT: sd t0, 168(sp) # 8-byte Folded Spill -; RV64IZCMP-NEXT: sd t1, 160(sp) # 8-byte Folded Spill -; RV64IZCMP-NEXT: sd t2, 152(sp) # 8-byte Folded Spill -; RV64IZCMP-NEXT: sd 
a0, 144(sp) # 8-byte Folded Spill -; RV64IZCMP-NEXT: sd a1, 136(sp) # 8-byte Folded Spill -; RV64IZCMP-NEXT: sd a2, 128(sp) # 8-byte Folded Spill -; RV64IZCMP-NEXT: sd a3, 120(sp) # 8-byte Folded Spill -; RV64IZCMP-NEXT: sd a4, 112(sp) # 8-byte Folded Spill -; RV64IZCMP-NEXT: sd a5, 104(sp) # 8-byte Folded Spill -; RV64IZCMP-NEXT: sd a6, 96(sp) # 8-byte Folded Spill -; RV64IZCMP-NEXT: sd a7, 88(sp) # 8-byte Folded Spill -; RV64IZCMP-NEXT: sd t3, 80(sp) # 8-byte Folded Spill -; RV64IZCMP-NEXT: sd t4, 72(sp) # 8-byte Folded Spill -; RV64IZCMP-NEXT: sd t5, 64(sp) # 8-byte Folded Spill -; RV64IZCMP-NEXT: sd t6, 56(sp) # 8-byte Folded Spill -; RV64IZCMP-NEXT: .cfi_offset t0, -120 -; RV64IZCMP-NEXT: .cfi_offset t1, -128 -; RV64IZCMP-NEXT: .cfi_offset t2, -136 -; RV64IZCMP-NEXT: .cfi_offset a0, -144 -; RV64IZCMP-NEXT: .cfi_offset a1, -152 -; RV64IZCMP-NEXT: .cfi_offset a2, -160 -; RV64IZCMP-NEXT: .cfi_offset a3, -168 -; RV64IZCMP-NEXT: .cfi_offset a4, -176 -; RV64IZCMP-NEXT: .cfi_offset a5, -184 -; RV64IZCMP-NEXT: .cfi_offset a6, -192 -; RV64IZCMP-NEXT: .cfi_offset a7, -200 -; RV64IZCMP-NEXT: .cfi_offset t3, -208 -; RV64IZCMP-NEXT: .cfi_offset t4, -216 -; RV64IZCMP-NEXT: .cfi_offset t5, -224 -; RV64IZCMP-NEXT: .cfi_offset t6, -232 +; RV64IZCMP-NEXT: addi sp, sp, -112 +; RV64IZCMP-NEXT: .cfi_def_cfa_offset 272 +; RV64IZCMP-NEXT: sd t0, 160(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd t1, 152(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd t2, 144(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd a0, 136(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd a1, 128(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd a2, 120(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd a3, 112(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd a4, 104(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd a5, 96(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd a6, 88(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd a7, 80(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd t3, 72(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd t4, 64(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd t5, 56(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd t6, 48(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: .cfi_offset t0, -112 +; RV64IZCMP-NEXT: .cfi_offset t1, -120 +; RV64IZCMP-NEXT: .cfi_offset t2, -128 +; RV64IZCMP-NEXT: .cfi_offset a0, -136 +; RV64IZCMP-NEXT: .cfi_offset a1, -144 +; RV64IZCMP-NEXT: .cfi_offset a2, -152 +; RV64IZCMP-NEXT: .cfi_offset a3, -160 +; RV64IZCMP-NEXT: .cfi_offset a4, -168 +; RV64IZCMP-NEXT: .cfi_offset a5, -176 +; RV64IZCMP-NEXT: .cfi_offset a6, -184 +; RV64IZCMP-NEXT: .cfi_offset a7, -192 +; RV64IZCMP-NEXT: .cfi_offset t3, -200 +; RV64IZCMP-NEXT: .cfi_offset t4, -208 +; RV64IZCMP-NEXT: .cfi_offset t5, -216 +; RV64IZCMP-NEXT: .cfi_offset t6, -224 ; RV64IZCMP-NEXT: lui t0, %hi(var_test_irq) ; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq)(t0) -; RV64IZCMP-NEXT: sd a0, 48(sp) # 8-byte Folded Spill -; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq+4)(t0) ; RV64IZCMP-NEXT: sd a0, 40(sp) # 8-byte Folded Spill -; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq+8)(t0) +; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq+4)(t0) ; RV64IZCMP-NEXT: sd a0, 32(sp) # 8-byte Folded Spill -; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq+12)(t0) +; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq+8)(t0) ; RV64IZCMP-NEXT: sd a0, 24(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq+12)(t0) +; RV64IZCMP-NEXT: sd a0, 16(sp) # 8-byte Folded Spill ; RV64IZCMP-NEXT: addi a5, t0, %lo(var_test_irq) ; RV64IZCMP-NEXT: lw a0, 16(a5) -; RV64IZCMP-NEXT: sd a0, 16(sp) # 8-byte Folded 
Spill -; RV64IZCMP-NEXT: lw a0, 20(a5) ; RV64IZCMP-NEXT: sd a0, 8(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: lw a0, 20(a5) +; RV64IZCMP-NEXT: sd a0, 0(sp) # 8-byte Folded Spill ; RV64IZCMP-NEXT: lw t4, 24(a5) ; RV64IZCMP-NEXT: lw t5, 28(a5) ; RV64IZCMP-NEXT: lw t6, 32(a5) @@ -2573,33 +2565,33 @@ define void @callee_with_irq() "interrupt"="user" { ; RV64IZCMP-NEXT: sw t6, 32(a5) ; RV64IZCMP-NEXT: sw t5, 28(a5) ; RV64IZCMP-NEXT: sw t4, 24(a5) -; RV64IZCMP-NEXT: ld a0, 8(sp) # 8-byte Folded Reload +; RV64IZCMP-NEXT: ld a0, 0(sp) # 8-byte Folded Reload ; RV64IZCMP-NEXT: sw a0, 20(a5) -; RV64IZCMP-NEXT: ld a0, 16(sp) # 8-byte Folded Reload +; RV64IZCMP-NEXT: ld a0, 8(sp) # 8-byte Folded Reload ; RV64IZCMP-NEXT: sw a0, 16(a5) -; RV64IZCMP-NEXT: ld a0, 24(sp) # 8-byte Folded Reload +; RV64IZCMP-NEXT: ld a0, 16(sp) # 8-byte Folded Reload ; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq+12)(t0) -; RV64IZCMP-NEXT: ld a0, 32(sp) # 8-byte Folded Reload +; RV64IZCMP-NEXT: ld a0, 24(sp) # 8-byte Folded Reload ; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq+8)(t0) -; RV64IZCMP-NEXT: ld a0, 40(sp) # 8-byte Folded Reload +; RV64IZCMP-NEXT: ld a0, 32(sp) # 8-byte Folded Reload ; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq+4)(t0) -; RV64IZCMP-NEXT: ld a0, 48(sp) # 8-byte Folded Reload +; RV64IZCMP-NEXT: ld a0, 40(sp) # 8-byte Folded Reload ; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq)(t0) -; RV64IZCMP-NEXT: ld t0, 168(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: ld t1, 160(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: ld t2, 152(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: ld a0, 144(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: ld a1, 136(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: ld a2, 128(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: ld a3, 120(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: ld a4, 112(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: ld a5, 104(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: ld a6, 96(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: ld a7, 88(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: ld t3, 80(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: ld t4, 72(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: ld t5, 64(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: ld t6, 56(sp) # 8-byte Folded Reload +; RV64IZCMP-NEXT: ld t0, 160(sp) # 8-byte Folded Reload +; RV64IZCMP-NEXT: ld t1, 152(sp) # 8-byte Folded Reload +; RV64IZCMP-NEXT: ld t2, 144(sp) # 8-byte Folded Reload +; RV64IZCMP-NEXT: ld a0, 136(sp) # 8-byte Folded Reload +; RV64IZCMP-NEXT: ld a1, 128(sp) # 8-byte Folded Reload +; RV64IZCMP-NEXT: ld a2, 120(sp) # 8-byte Folded Reload +; RV64IZCMP-NEXT: ld a3, 112(sp) # 8-byte Folded Reload +; RV64IZCMP-NEXT: ld a4, 104(sp) # 8-byte Folded Reload +; RV64IZCMP-NEXT: ld a5, 96(sp) # 8-byte Folded Reload +; RV64IZCMP-NEXT: ld a6, 88(sp) # 8-byte Folded Reload +; RV64IZCMP-NEXT: ld a7, 80(sp) # 8-byte Folded Reload +; RV64IZCMP-NEXT: ld t3, 72(sp) # 8-byte Folded Reload +; RV64IZCMP-NEXT: ld t4, 64(sp) # 8-byte Folded Reload +; RV64IZCMP-NEXT: ld t5, 56(sp) # 8-byte Folded Reload +; RV64IZCMP-NEXT: ld t6, 48(sp) # 8-byte Folded Reload ; RV64IZCMP-NEXT: .cfi_restore t0 ; RV64IZCMP-NEXT: .cfi_restore t1 ; RV64IZCMP-NEXT: .cfi_restore t2 @@ -2615,7 +2607,7 @@ define void @callee_with_irq() "interrupt"="user" { ; RV64IZCMP-NEXT: .cfi_restore t4 ; RV64IZCMP-NEXT: .cfi_restore t5 ; RV64IZCMP-NEXT: .cfi_restore t6 -; RV64IZCMP-NEXT: addi sp, sp, 128 +; RV64IZCMP-NEXT: addi sp, sp, 112 ; RV64IZCMP-NEXT: .cfi_def_cfa_offset 160 ; RV64IZCMP-NEXT: cm.pop {ra, s0-s11}, 160 ; RV64IZCMP-NEXT: .cfi_restore ra @@ -2651,52 
+2643,52 @@ define void @callee_with_irq() "interrupt"="user" { ; RV32IZCMP-SR-NEXT: .cfi_offset s9, -12 ; RV32IZCMP-SR-NEXT: .cfi_offset s10, -8 ; RV32IZCMP-SR-NEXT: .cfi_offset s11, -4 -; RV32IZCMP-SR-NEXT: addi sp, sp, -48 -; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 160 -; RV32IZCMP-SR-NEXT: sw t0, 92(sp) # 4-byte Folded Spill -; RV32IZCMP-SR-NEXT: sw t1, 88(sp) # 4-byte Folded Spill -; RV32IZCMP-SR-NEXT: sw t2, 84(sp) # 4-byte Folded Spill -; RV32IZCMP-SR-NEXT: sw a0, 80(sp) # 4-byte Folded Spill -; RV32IZCMP-SR-NEXT: sw a1, 76(sp) # 4-byte Folded Spill -; RV32IZCMP-SR-NEXT: sw a2, 72(sp) # 4-byte Folded Spill -; RV32IZCMP-SR-NEXT: sw a3, 68(sp) # 4-byte Folded Spill -; RV32IZCMP-SR-NEXT: sw a4, 64(sp) # 4-byte Folded Spill -; RV32IZCMP-SR-NEXT: sw a5, 60(sp) # 4-byte Folded Spill -; RV32IZCMP-SR-NEXT: sw a6, 56(sp) # 4-byte Folded Spill -; RV32IZCMP-SR-NEXT: sw a7, 52(sp) # 4-byte Folded Spill -; RV32IZCMP-SR-NEXT: sw t3, 48(sp) # 4-byte Folded Spill -; RV32IZCMP-SR-NEXT: sw t4, 44(sp) # 4-byte Folded Spill -; RV32IZCMP-SR-NEXT: sw t5, 40(sp) # 4-byte Folded Spill -; RV32IZCMP-SR-NEXT: sw t6, 36(sp) # 4-byte Folded Spill -; RV32IZCMP-SR-NEXT: .cfi_offset t0, -68 -; RV32IZCMP-SR-NEXT: .cfi_offset t1, -72 -; RV32IZCMP-SR-NEXT: .cfi_offset t2, -76 -; RV32IZCMP-SR-NEXT: .cfi_offset a0, -80 -; RV32IZCMP-SR-NEXT: .cfi_offset a1, -84 -; RV32IZCMP-SR-NEXT: .cfi_offset a2, -88 -; RV32IZCMP-SR-NEXT: .cfi_offset a3, -92 -; RV32IZCMP-SR-NEXT: .cfi_offset a4, -96 -; RV32IZCMP-SR-NEXT: .cfi_offset a5, -100 -; RV32IZCMP-SR-NEXT: .cfi_offset a6, -104 -; RV32IZCMP-SR-NEXT: .cfi_offset a7, -108 -; RV32IZCMP-SR-NEXT: .cfi_offset t3, -112 -; RV32IZCMP-SR-NEXT: .cfi_offset t4, -116 -; RV32IZCMP-SR-NEXT: .cfi_offset t5, -120 -; RV32IZCMP-SR-NEXT: .cfi_offset t6, -124 +; RV32IZCMP-SR-NEXT: addi sp, sp, -32 +; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 144 +; RV32IZCMP-SR-NEXT: sw t0, 88(sp) # 4-byte Folded Spill +; RV32IZCMP-SR-NEXT: sw t1, 84(sp) # 4-byte Folded Spill +; RV32IZCMP-SR-NEXT: sw t2, 80(sp) # 4-byte Folded Spill +; RV32IZCMP-SR-NEXT: sw a0, 76(sp) # 4-byte Folded Spill +; RV32IZCMP-SR-NEXT: sw a1, 72(sp) # 4-byte Folded Spill +; RV32IZCMP-SR-NEXT: sw a2, 68(sp) # 4-byte Folded Spill +; RV32IZCMP-SR-NEXT: sw a3, 64(sp) # 4-byte Folded Spill +; RV32IZCMP-SR-NEXT: sw a4, 60(sp) # 4-byte Folded Spill +; RV32IZCMP-SR-NEXT: sw a5, 56(sp) # 4-byte Folded Spill +; RV32IZCMP-SR-NEXT: sw a6, 52(sp) # 4-byte Folded Spill +; RV32IZCMP-SR-NEXT: sw a7, 48(sp) # 4-byte Folded Spill +; RV32IZCMP-SR-NEXT: sw t3, 44(sp) # 4-byte Folded Spill +; RV32IZCMP-SR-NEXT: sw t4, 40(sp) # 4-byte Folded Spill +; RV32IZCMP-SR-NEXT: sw t5, 36(sp) # 4-byte Folded Spill +; RV32IZCMP-SR-NEXT: sw t6, 32(sp) # 4-byte Folded Spill +; RV32IZCMP-SR-NEXT: .cfi_offset t0, -56 +; RV32IZCMP-SR-NEXT: .cfi_offset t1, -60 +; RV32IZCMP-SR-NEXT: .cfi_offset t2, -64 +; RV32IZCMP-SR-NEXT: .cfi_offset a0, -68 +; RV32IZCMP-SR-NEXT: .cfi_offset a1, -72 +; RV32IZCMP-SR-NEXT: .cfi_offset a2, -76 +; RV32IZCMP-SR-NEXT: .cfi_offset a3, -80 +; RV32IZCMP-SR-NEXT: .cfi_offset a4, -84 +; RV32IZCMP-SR-NEXT: .cfi_offset a5, -88 +; RV32IZCMP-SR-NEXT: .cfi_offset a6, -92 +; RV32IZCMP-SR-NEXT: .cfi_offset a7, -96 +; RV32IZCMP-SR-NEXT: .cfi_offset t3, -100 +; RV32IZCMP-SR-NEXT: .cfi_offset t4, -104 +; RV32IZCMP-SR-NEXT: .cfi_offset t5, -108 +; RV32IZCMP-SR-NEXT: .cfi_offset t6, -112 ; RV32IZCMP-SR-NEXT: lui t0, %hi(var_test_irq) ; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq)(t0) -; RV32IZCMP-SR-NEXT: sw a0, 32(sp) # 4-byte Folded Spill -; RV32IZCMP-SR-NEXT: lw a0, 
%lo(var_test_irq+4)(t0) ; RV32IZCMP-SR-NEXT: sw a0, 28(sp) # 4-byte Folded Spill -; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+8)(t0) +; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+4)(t0) ; RV32IZCMP-SR-NEXT: sw a0, 24(sp) # 4-byte Folded Spill -; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+12)(t0) +; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+8)(t0) ; RV32IZCMP-SR-NEXT: sw a0, 20(sp) # 4-byte Folded Spill +; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+12)(t0) +; RV32IZCMP-SR-NEXT: sw a0, 16(sp) # 4-byte Folded Spill ; RV32IZCMP-SR-NEXT: addi a5, t0, %lo(var_test_irq) ; RV32IZCMP-SR-NEXT: lw a0, 16(a5) -; RV32IZCMP-SR-NEXT: sw a0, 16(sp) # 4-byte Folded Spill -; RV32IZCMP-SR-NEXT: lw a0, 20(a5) ; RV32IZCMP-SR-NEXT: sw a0, 12(sp) # 4-byte Folded Spill +; RV32IZCMP-SR-NEXT: lw a0, 20(a5) +; RV32IZCMP-SR-NEXT: sw a0, 8(sp) # 4-byte Folded Spill ; RV32IZCMP-SR-NEXT: lw t4, 24(a5) ; RV32IZCMP-SR-NEXT: lw t5, 28(a5) ; RV32IZCMP-SR-NEXT: lw t6, 32(a5) @@ -2749,33 +2741,33 @@ define void @callee_with_irq() "interrupt"="user" { ; RV32IZCMP-SR-NEXT: sw t6, 32(a5) ; RV32IZCMP-SR-NEXT: sw t5, 28(a5) ; RV32IZCMP-SR-NEXT: sw t4, 24(a5) -; RV32IZCMP-SR-NEXT: lw a0, 12(sp) # 4-byte Folded Reload +; RV32IZCMP-SR-NEXT: lw a0, 8(sp) # 4-byte Folded Reload ; RV32IZCMP-SR-NEXT: sw a0, 20(a5) -; RV32IZCMP-SR-NEXT: lw a0, 16(sp) # 4-byte Folded Reload +; RV32IZCMP-SR-NEXT: lw a0, 12(sp) # 4-byte Folded Reload ; RV32IZCMP-SR-NEXT: sw a0, 16(a5) -; RV32IZCMP-SR-NEXT: lw a0, 20(sp) # 4-byte Folded Reload +; RV32IZCMP-SR-NEXT: lw a0, 16(sp) # 4-byte Folded Reload ; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+12)(t0) -; RV32IZCMP-SR-NEXT: lw a0, 24(sp) # 4-byte Folded Reload +; RV32IZCMP-SR-NEXT: lw a0, 20(sp) # 4-byte Folded Reload ; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+8)(t0) -; RV32IZCMP-SR-NEXT: lw a0, 28(sp) # 4-byte Folded Reload +; RV32IZCMP-SR-NEXT: lw a0, 24(sp) # 4-byte Folded Reload ; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+4)(t0) -; RV32IZCMP-SR-NEXT: lw a0, 32(sp) # 4-byte Folded Reload +; RV32IZCMP-SR-NEXT: lw a0, 28(sp) # 4-byte Folded Reload ; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq)(t0) -; RV32IZCMP-SR-NEXT: lw t0, 92(sp) # 4-byte Folded Reload -; RV32IZCMP-SR-NEXT: lw t1, 88(sp) # 4-byte Folded Reload -; RV32IZCMP-SR-NEXT: lw t2, 84(sp) # 4-byte Folded Reload -; RV32IZCMP-SR-NEXT: lw a0, 80(sp) # 4-byte Folded Reload -; RV32IZCMP-SR-NEXT: lw a1, 76(sp) # 4-byte Folded Reload -; RV32IZCMP-SR-NEXT: lw a2, 72(sp) # 4-byte Folded Reload -; RV32IZCMP-SR-NEXT: lw a3, 68(sp) # 4-byte Folded Reload -; RV32IZCMP-SR-NEXT: lw a4, 64(sp) # 4-byte Folded Reload -; RV32IZCMP-SR-NEXT: lw a5, 60(sp) # 4-byte Folded Reload -; RV32IZCMP-SR-NEXT: lw a6, 56(sp) # 4-byte Folded Reload -; RV32IZCMP-SR-NEXT: lw a7, 52(sp) # 4-byte Folded Reload -; RV32IZCMP-SR-NEXT: lw t3, 48(sp) # 4-byte Folded Reload -; RV32IZCMP-SR-NEXT: lw t4, 44(sp) # 4-byte Folded Reload -; RV32IZCMP-SR-NEXT: lw t5, 40(sp) # 4-byte Folded Reload -; RV32IZCMP-SR-NEXT: lw t6, 36(sp) # 4-byte Folded Reload +; RV32IZCMP-SR-NEXT: lw t0, 88(sp) # 4-byte Folded Reload +; RV32IZCMP-SR-NEXT: lw t1, 84(sp) # 4-byte Folded Reload +; RV32IZCMP-SR-NEXT: lw t2, 80(sp) # 4-byte Folded Reload +; RV32IZCMP-SR-NEXT: lw a0, 76(sp) # 4-byte Folded Reload +; RV32IZCMP-SR-NEXT: lw a1, 72(sp) # 4-byte Folded Reload +; RV32IZCMP-SR-NEXT: lw a2, 68(sp) # 4-byte Folded Reload +; RV32IZCMP-SR-NEXT: lw a3, 64(sp) # 4-byte Folded Reload +; RV32IZCMP-SR-NEXT: lw a4, 60(sp) # 4-byte Folded Reload +; RV32IZCMP-SR-NEXT: lw a5, 56(sp) # 4-byte Folded Reload +; 
RV32IZCMP-SR-NEXT: lw a6, 52(sp) # 4-byte Folded Reload +; RV32IZCMP-SR-NEXT: lw a7, 48(sp) # 4-byte Folded Reload +; RV32IZCMP-SR-NEXT: lw t3, 44(sp) # 4-byte Folded Reload +; RV32IZCMP-SR-NEXT: lw t4, 40(sp) # 4-byte Folded Reload +; RV32IZCMP-SR-NEXT: lw t5, 36(sp) # 4-byte Folded Reload +; RV32IZCMP-SR-NEXT: lw t6, 32(sp) # 4-byte Folded Reload ; RV32IZCMP-SR-NEXT: .cfi_restore t0 ; RV32IZCMP-SR-NEXT: .cfi_restore t1 ; RV32IZCMP-SR-NEXT: .cfi_restore t2 @@ -2791,7 +2783,7 @@ define void @callee_with_irq() "interrupt"="user" { ; RV32IZCMP-SR-NEXT: .cfi_restore t4 ; RV32IZCMP-SR-NEXT: .cfi_restore t5 ; RV32IZCMP-SR-NEXT: .cfi_restore t6 -; RV32IZCMP-SR-NEXT: addi sp, sp, 48 +; RV32IZCMP-SR-NEXT: addi sp, sp, 32 ; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 112 ; RV32IZCMP-SR-NEXT: cm.pop {ra, s0-s11}, 112 ; RV32IZCMP-SR-NEXT: .cfi_restore ra @@ -2827,52 +2819,52 @@ define void @callee_with_irq() "interrupt"="user" { ; RV64IZCMP-SR-NEXT: .cfi_offset s9, -24 ; RV64IZCMP-SR-NEXT: .cfi_offset s10, -16 ; RV64IZCMP-SR-NEXT: .cfi_offset s11, -8 -; RV64IZCMP-SR-NEXT: addi sp, sp, -128 -; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 288 -; RV64IZCMP-SR-NEXT: sd t0, 168(sp) # 8-byte Folded Spill -; RV64IZCMP-SR-NEXT: sd t1, 160(sp) # 8-byte Folded Spill -; RV64IZCMP-SR-NEXT: sd t2, 152(sp) # 8-byte Folded Spill -; RV64IZCMP-SR-NEXT: sd a0, 144(sp) # 8-byte Folded Spill -; RV64IZCMP-SR-NEXT: sd a1, 136(sp) # 8-byte Folded Spill -; RV64IZCMP-SR-NEXT: sd a2, 128(sp) # 8-byte Folded Spill -; RV64IZCMP-SR-NEXT: sd a3, 120(sp) # 8-byte Folded Spill -; RV64IZCMP-SR-NEXT: sd a4, 112(sp) # 8-byte Folded Spill -; RV64IZCMP-SR-NEXT: sd a5, 104(sp) # 8-byte Folded Spill -; RV64IZCMP-SR-NEXT: sd a6, 96(sp) # 8-byte Folded Spill -; RV64IZCMP-SR-NEXT: sd a7, 88(sp) # 8-byte Folded Spill -; RV64IZCMP-SR-NEXT: sd t3, 80(sp) # 8-byte Folded Spill -; RV64IZCMP-SR-NEXT: sd t4, 72(sp) # 8-byte Folded Spill -; RV64IZCMP-SR-NEXT: sd t5, 64(sp) # 8-byte Folded Spill -; RV64IZCMP-SR-NEXT: sd t6, 56(sp) # 8-byte Folded Spill -; RV64IZCMP-SR-NEXT: .cfi_offset t0, -120 -; RV64IZCMP-SR-NEXT: .cfi_offset t1, -128 -; RV64IZCMP-SR-NEXT: .cfi_offset t2, -136 -; RV64IZCMP-SR-NEXT: .cfi_offset a0, -144 -; RV64IZCMP-SR-NEXT: .cfi_offset a1, -152 -; RV64IZCMP-SR-NEXT: .cfi_offset a2, -160 -; RV64IZCMP-SR-NEXT: .cfi_offset a3, -168 -; RV64IZCMP-SR-NEXT: .cfi_offset a4, -176 -; RV64IZCMP-SR-NEXT: .cfi_offset a5, -184 -; RV64IZCMP-SR-NEXT: .cfi_offset a6, -192 -; RV64IZCMP-SR-NEXT: .cfi_offset a7, -200 -; RV64IZCMP-SR-NEXT: .cfi_offset t3, -208 -; RV64IZCMP-SR-NEXT: .cfi_offset t4, -216 -; RV64IZCMP-SR-NEXT: .cfi_offset t5, -224 -; RV64IZCMP-SR-NEXT: .cfi_offset t6, -232 +; RV64IZCMP-SR-NEXT: addi sp, sp, -112 +; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 272 +; RV64IZCMP-SR-NEXT: sd t0, 160(sp) # 8-byte Folded Spill +; RV64IZCMP-SR-NEXT: sd t1, 152(sp) # 8-byte Folded Spill +; RV64IZCMP-SR-NEXT: sd t2, 144(sp) # 8-byte Folded Spill +; RV64IZCMP-SR-NEXT: sd a0, 136(sp) # 8-byte Folded Spill +; RV64IZCMP-SR-NEXT: sd a1, 128(sp) # 8-byte Folded Spill +; RV64IZCMP-SR-NEXT: sd a2, 120(sp) # 8-byte Folded Spill +; RV64IZCMP-SR-NEXT: sd a3, 112(sp) # 8-byte Folded Spill +; RV64IZCMP-SR-NEXT: sd a4, 104(sp) # 8-byte Folded Spill +; RV64IZCMP-SR-NEXT: sd a5, 96(sp) # 8-byte Folded Spill +; RV64IZCMP-SR-NEXT: sd a6, 88(sp) # 8-byte Folded Spill +; RV64IZCMP-SR-NEXT: sd a7, 80(sp) # 8-byte Folded Spill +; RV64IZCMP-SR-NEXT: sd t3, 72(sp) # 8-byte Folded Spill +; RV64IZCMP-SR-NEXT: sd t4, 64(sp) # 8-byte Folded Spill +; RV64IZCMP-SR-NEXT: sd t5, 56(sp) # 8-byte 
Folded Spill +; RV64IZCMP-SR-NEXT: sd t6, 48(sp) # 8-byte Folded Spill +; RV64IZCMP-SR-NEXT: .cfi_offset t0, -112 +; RV64IZCMP-SR-NEXT: .cfi_offset t1, -120 +; RV64IZCMP-SR-NEXT: .cfi_offset t2, -128 +; RV64IZCMP-SR-NEXT: .cfi_offset a0, -136 +; RV64IZCMP-SR-NEXT: .cfi_offset a1, -144 +; RV64IZCMP-SR-NEXT: .cfi_offset a2, -152 +; RV64IZCMP-SR-NEXT: .cfi_offset a3, -160 +; RV64IZCMP-SR-NEXT: .cfi_offset a4, -168 +; RV64IZCMP-SR-NEXT: .cfi_offset a5, -176 +; RV64IZCMP-SR-NEXT: .cfi_offset a6, -184 +; RV64IZCMP-SR-NEXT: .cfi_offset a7, -192 +; RV64IZCMP-SR-NEXT: .cfi_offset t3, -200 +; RV64IZCMP-SR-NEXT: .cfi_offset t4, -208 +; RV64IZCMP-SR-NEXT: .cfi_offset t5, -216 +; RV64IZCMP-SR-NEXT: .cfi_offset t6, -224 ; RV64IZCMP-SR-NEXT: lui t0, %hi(var_test_irq) ; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq)(t0) -; RV64IZCMP-SR-NEXT: sd a0, 48(sp) # 8-byte Folded Spill -; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+4)(t0) ; RV64IZCMP-SR-NEXT: sd a0, 40(sp) # 8-byte Folded Spill -; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+8)(t0) +; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+4)(t0) ; RV64IZCMP-SR-NEXT: sd a0, 32(sp) # 8-byte Folded Spill -; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+12)(t0) +; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+8)(t0) ; RV64IZCMP-SR-NEXT: sd a0, 24(sp) # 8-byte Folded Spill +; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+12)(t0) +; RV64IZCMP-SR-NEXT: sd a0, 16(sp) # 8-byte Folded Spill ; RV64IZCMP-SR-NEXT: addi a5, t0, %lo(var_test_irq) ; RV64IZCMP-SR-NEXT: lw a0, 16(a5) -; RV64IZCMP-SR-NEXT: sd a0, 16(sp) # 8-byte Folded Spill -; RV64IZCMP-SR-NEXT: lw a0, 20(a5) ; RV64IZCMP-SR-NEXT: sd a0, 8(sp) # 8-byte Folded Spill +; RV64IZCMP-SR-NEXT: lw a0, 20(a5) +; RV64IZCMP-SR-NEXT: sd a0, 0(sp) # 8-byte Folded Spill ; RV64IZCMP-SR-NEXT: lw t4, 24(a5) ; RV64IZCMP-SR-NEXT: lw t5, 28(a5) ; RV64IZCMP-SR-NEXT: lw t6, 32(a5) @@ -2925,33 +2917,33 @@ define void @callee_with_irq() "interrupt"="user" { ; RV64IZCMP-SR-NEXT: sw t6, 32(a5) ; RV64IZCMP-SR-NEXT: sw t5, 28(a5) ; RV64IZCMP-SR-NEXT: sw t4, 24(a5) -; RV64IZCMP-SR-NEXT: ld a0, 8(sp) # 8-byte Folded Reload +; RV64IZCMP-SR-NEXT: ld a0, 0(sp) # 8-byte Folded Reload ; RV64IZCMP-SR-NEXT: sw a0, 20(a5) -; RV64IZCMP-SR-NEXT: ld a0, 16(sp) # 8-byte Folded Reload +; RV64IZCMP-SR-NEXT: ld a0, 8(sp) # 8-byte Folded Reload ; RV64IZCMP-SR-NEXT: sw a0, 16(a5) -; RV64IZCMP-SR-NEXT: ld a0, 24(sp) # 8-byte Folded Reload +; RV64IZCMP-SR-NEXT: ld a0, 16(sp) # 8-byte Folded Reload ; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+12)(t0) -; RV64IZCMP-SR-NEXT: ld a0, 32(sp) # 8-byte Folded Reload +; RV64IZCMP-SR-NEXT: ld a0, 24(sp) # 8-byte Folded Reload ; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+8)(t0) -; RV64IZCMP-SR-NEXT: ld a0, 40(sp) # 8-byte Folded Reload +; RV64IZCMP-SR-NEXT: ld a0, 32(sp) # 8-byte Folded Reload ; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+4)(t0) -; RV64IZCMP-SR-NEXT: ld a0, 48(sp) # 8-byte Folded Reload +; RV64IZCMP-SR-NEXT: ld a0, 40(sp) # 8-byte Folded Reload ; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq)(t0) -; RV64IZCMP-SR-NEXT: ld t0, 168(sp) # 8-byte Folded Reload -; RV64IZCMP-SR-NEXT: ld t1, 160(sp) # 8-byte Folded Reload -; RV64IZCMP-SR-NEXT: ld t2, 152(sp) # 8-byte Folded Reload -; RV64IZCMP-SR-NEXT: ld a0, 144(sp) # 8-byte Folded Reload -; RV64IZCMP-SR-NEXT: ld a1, 136(sp) # 8-byte Folded Reload -; RV64IZCMP-SR-NEXT: ld a2, 128(sp) # 8-byte Folded Reload -; RV64IZCMP-SR-NEXT: ld a3, 120(sp) # 8-byte Folded Reload -; RV64IZCMP-SR-NEXT: ld a4, 112(sp) # 8-byte Folded Reload -; RV64IZCMP-SR-NEXT: ld a5, 104(sp) # 8-byte Folded 
Reload -; RV64IZCMP-SR-NEXT: ld a6, 96(sp) # 8-byte Folded Reload -; RV64IZCMP-SR-NEXT: ld a7, 88(sp) # 8-byte Folded Reload -; RV64IZCMP-SR-NEXT: ld t3, 80(sp) # 8-byte Folded Reload -; RV64IZCMP-SR-NEXT: ld t4, 72(sp) # 8-byte Folded Reload -; RV64IZCMP-SR-NEXT: ld t5, 64(sp) # 8-byte Folded Reload -; RV64IZCMP-SR-NEXT: ld t6, 56(sp) # 8-byte Folded Reload +; RV64IZCMP-SR-NEXT: ld t0, 160(sp) # 8-byte Folded Reload +; RV64IZCMP-SR-NEXT: ld t1, 152(sp) # 8-byte Folded Reload +; RV64IZCMP-SR-NEXT: ld t2, 144(sp) # 8-byte Folded Reload +; RV64IZCMP-SR-NEXT: ld a0, 136(sp) # 8-byte Folded Reload +; RV64IZCMP-SR-NEXT: ld a1, 128(sp) # 8-byte Folded Reload +; RV64IZCMP-SR-NEXT: ld a2, 120(sp) # 8-byte Folded Reload +; RV64IZCMP-SR-NEXT: ld a3, 112(sp) # 8-byte Folded Reload +; RV64IZCMP-SR-NEXT: ld a4, 104(sp) # 8-byte Folded Reload +; RV64IZCMP-SR-NEXT: ld a5, 96(sp) # 8-byte Folded Reload +; RV64IZCMP-SR-NEXT: ld a6, 88(sp) # 8-byte Folded Reload +; RV64IZCMP-SR-NEXT: ld a7, 80(sp) # 8-byte Folded Reload +; RV64IZCMP-SR-NEXT: ld t3, 72(sp) # 8-byte Folded Reload +; RV64IZCMP-SR-NEXT: ld t4, 64(sp) # 8-byte Folded Reload +; RV64IZCMP-SR-NEXT: ld t5, 56(sp) # 8-byte Folded Reload +; RV64IZCMP-SR-NEXT: ld t6, 48(sp) # 8-byte Folded Reload ; RV64IZCMP-SR-NEXT: .cfi_restore t0 ; RV64IZCMP-SR-NEXT: .cfi_restore t1 ; RV64IZCMP-SR-NEXT: .cfi_restore t2 @@ -2967,7 +2959,7 @@ define void @callee_with_irq() "interrupt"="user" { ; RV64IZCMP-SR-NEXT: .cfi_restore t4 ; RV64IZCMP-SR-NEXT: .cfi_restore t5 ; RV64IZCMP-SR-NEXT: .cfi_restore t6 -; RV64IZCMP-SR-NEXT: addi sp, sp, 128 +; RV64IZCMP-SR-NEXT: addi sp, sp, 112 ; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 160 ; RV64IZCMP-SR-NEXT: cm.pop {ra, s0-s11}, 160 ; RV64IZCMP-SR-NEXT: .cfi_restore ra @@ -3389,8 +3381,8 @@ define void @callee_with_irq() "interrupt"="user" { define void @callee_no_irq() { ; RV32IZCMP-LABEL: callee_no_irq: ; RV32IZCMP: # %bb.0: -; RV32IZCMP-NEXT: cm.push {ra, s0-s11}, -96 -; RV32IZCMP-NEXT: .cfi_def_cfa_offset 96 +; RV32IZCMP-NEXT: cm.push {ra, s0-s11}, -80 +; RV32IZCMP-NEXT: .cfi_def_cfa_offset 80 ; RV32IZCMP-NEXT: .cfi_offset ra, -52 ; RV32IZCMP-NEXT: .cfi_offset s0, -48 ; RV32IZCMP-NEXT: .cfi_offset s1, -44 @@ -3406,18 +3398,18 @@ define void @callee_no_irq() { ; RV32IZCMP-NEXT: .cfi_offset s11, -4 ; RV32IZCMP-NEXT: lui t0, %hi(var_test_irq) ; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq)(t0) -; RV32IZCMP-NEXT: sw a0, 28(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq+4)(t0) ; RV32IZCMP-NEXT: sw a0, 24(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq+8)(t0) +; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq+4)(t0) ; RV32IZCMP-NEXT: sw a0, 20(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq+12)(t0) +; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq+8)(t0) ; RV32IZCMP-NEXT: sw a0, 16(sp) # 4-byte Folded Spill +; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq+12)(t0) +; RV32IZCMP-NEXT: sw a0, 12(sp) # 4-byte Folded Spill ; RV32IZCMP-NEXT: addi a5, t0, %lo(var_test_irq) ; RV32IZCMP-NEXT: lw a0, 16(a5) -; RV32IZCMP-NEXT: sw a0, 12(sp) # 4-byte Folded Spill -; RV32IZCMP-NEXT: lw a0, 20(a5) ; RV32IZCMP-NEXT: sw a0, 8(sp) # 4-byte Folded Spill +; RV32IZCMP-NEXT: lw a0, 20(a5) +; RV32IZCMP-NEXT: sw a0, 4(sp) # 4-byte Folded Spill ; RV32IZCMP-NEXT: lw t4, 24(a5) ; RV32IZCMP-NEXT: lw t5, 28(a5) ; RV32IZCMP-NEXT: lw t6, 32(a5) @@ -3470,19 +3462,19 @@ define void @callee_no_irq() { ; RV32IZCMP-NEXT: sw t6, 32(a5) ; RV32IZCMP-NEXT: sw t5, 28(a5) ; RV32IZCMP-NEXT: sw t4, 24(a5) -; RV32IZCMP-NEXT: lw a0, 8(sp) # 
4-byte Folded Reload +; RV32IZCMP-NEXT: lw a0, 4(sp) # 4-byte Folded Reload ; RV32IZCMP-NEXT: sw a0, 20(a5) -; RV32IZCMP-NEXT: lw a0, 12(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw a0, 8(sp) # 4-byte Folded Reload ; RV32IZCMP-NEXT: sw a0, 16(a5) -; RV32IZCMP-NEXT: lw a0, 16(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw a0, 12(sp) # 4-byte Folded Reload ; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq+12)(t0) -; RV32IZCMP-NEXT: lw a0, 20(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw a0, 16(sp) # 4-byte Folded Reload ; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq+8)(t0) -; RV32IZCMP-NEXT: lw a0, 24(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw a0, 20(sp) # 4-byte Folded Reload ; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq+4)(t0) -; RV32IZCMP-NEXT: lw a0, 28(sp) # 4-byte Folded Reload +; RV32IZCMP-NEXT: lw a0, 24(sp) # 4-byte Folded Reload ; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq)(t0) -; RV32IZCMP-NEXT: cm.popret {ra, s0-s11}, 96 +; RV32IZCMP-NEXT: cm.popret {ra, s0-s11}, 80 ; ; RV64IZCMP-LABEL: callee_no_irq: ; RV64IZCMP: # %bb.0: @@ -3503,18 +3495,18 @@ define void @callee_no_irq() { ; RV64IZCMP-NEXT: .cfi_offset s11, -8 ; RV64IZCMP-NEXT: lui t0, %hi(var_test_irq) ; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq)(t0) -; RV64IZCMP-NEXT: sd a0, 40(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd a0, 48(sp) # 8-byte Folded Spill ; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq+4)(t0) -; RV64IZCMP-NEXT: sd a0, 32(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd a0, 40(sp) # 8-byte Folded Spill ; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq+8)(t0) -; RV64IZCMP-NEXT: sd a0, 24(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd a0, 32(sp) # 8-byte Folded Spill ; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq+12)(t0) -; RV64IZCMP-NEXT: sd a0, 16(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd a0, 24(sp) # 8-byte Folded Spill ; RV64IZCMP-NEXT: addi a5, t0, %lo(var_test_irq) ; RV64IZCMP-NEXT: lw a0, 16(a5) -; RV64IZCMP-NEXT: sd a0, 8(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd a0, 16(sp) # 8-byte Folded Spill ; RV64IZCMP-NEXT: lw a0, 20(a5) -; RV64IZCMP-NEXT: sd a0, 0(sp) # 8-byte Folded Spill +; RV64IZCMP-NEXT: sd a0, 8(sp) # 8-byte Folded Spill ; RV64IZCMP-NEXT: lw t4, 24(a5) ; RV64IZCMP-NEXT: lw t5, 28(a5) ; RV64IZCMP-NEXT: lw t6, 32(a5) @@ -3567,24 +3559,24 @@ define void @callee_no_irq() { ; RV64IZCMP-NEXT: sw t6, 32(a5) ; RV64IZCMP-NEXT: sw t5, 28(a5) ; RV64IZCMP-NEXT: sw t4, 24(a5) -; RV64IZCMP-NEXT: ld a0, 0(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: sw a0, 20(a5) ; RV64IZCMP-NEXT: ld a0, 8(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: sw a0, 16(a5) +; RV64IZCMP-NEXT: sw a0, 20(a5) ; RV64IZCMP-NEXT: ld a0, 16(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq+12)(t0) +; RV64IZCMP-NEXT: sw a0, 16(a5) ; RV64IZCMP-NEXT: ld a0, 24(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq+8)(t0) +; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq+12)(t0) ; RV64IZCMP-NEXT: ld a0, 32(sp) # 8-byte Folded Reload -; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq+4)(t0) +; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq+8)(t0) ; RV64IZCMP-NEXT: ld a0, 40(sp) # 8-byte Folded Reload +; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq+4)(t0) +; RV64IZCMP-NEXT: ld a0, 48(sp) # 8-byte Folded Reload ; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq)(t0) ; RV64IZCMP-NEXT: cm.popret {ra, s0-s11}, 160 ; ; RV32IZCMP-SR-LABEL: callee_no_irq: ; RV32IZCMP-SR: # %bb.0: -; RV32IZCMP-SR-NEXT: cm.push {ra, s0-s11}, -96 -; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 96 +; RV32IZCMP-SR-NEXT: cm.push {ra, s0-s11}, -80 +; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 80 
; RV32IZCMP-SR-NEXT: .cfi_offset ra, -52 ; RV32IZCMP-SR-NEXT: .cfi_offset s0, -48 ; RV32IZCMP-SR-NEXT: .cfi_offset s1, -44 @@ -3600,18 +3592,18 @@ define void @callee_no_irq() { ; RV32IZCMP-SR-NEXT: .cfi_offset s11, -4 ; RV32IZCMP-SR-NEXT: lui t0, %hi(var_test_irq) ; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq)(t0) -; RV32IZCMP-SR-NEXT: sw a0, 28(sp) # 4-byte Folded Spill -; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+4)(t0) ; RV32IZCMP-SR-NEXT: sw a0, 24(sp) # 4-byte Folded Spill -; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+8)(t0) +; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+4)(t0) ; RV32IZCMP-SR-NEXT: sw a0, 20(sp) # 4-byte Folded Spill -; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+12)(t0) +; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+8)(t0) ; RV32IZCMP-SR-NEXT: sw a0, 16(sp) # 4-byte Folded Spill +; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+12)(t0) +; RV32IZCMP-SR-NEXT: sw a0, 12(sp) # 4-byte Folded Spill ; RV32IZCMP-SR-NEXT: addi a5, t0, %lo(var_test_irq) ; RV32IZCMP-SR-NEXT: lw a0, 16(a5) -; RV32IZCMP-SR-NEXT: sw a0, 12(sp) # 4-byte Folded Spill -; RV32IZCMP-SR-NEXT: lw a0, 20(a5) ; RV32IZCMP-SR-NEXT: sw a0, 8(sp) # 4-byte Folded Spill +; RV32IZCMP-SR-NEXT: lw a0, 20(a5) +; RV32IZCMP-SR-NEXT: sw a0, 4(sp) # 4-byte Folded Spill ; RV32IZCMP-SR-NEXT: lw t4, 24(a5) ; RV32IZCMP-SR-NEXT: lw t5, 28(a5) ; RV32IZCMP-SR-NEXT: lw t6, 32(a5) @@ -3664,19 +3656,19 @@ define void @callee_no_irq() { ; RV32IZCMP-SR-NEXT: sw t6, 32(a5) ; RV32IZCMP-SR-NEXT: sw t5, 28(a5) ; RV32IZCMP-SR-NEXT: sw t4, 24(a5) -; RV32IZCMP-SR-NEXT: lw a0, 8(sp) # 4-byte Folded Reload +; RV32IZCMP-SR-NEXT: lw a0, 4(sp) # 4-byte Folded Reload ; RV32IZCMP-SR-NEXT: sw a0, 20(a5) -; RV32IZCMP-SR-NEXT: lw a0, 12(sp) # 4-byte Folded Reload +; RV32IZCMP-SR-NEXT: lw a0, 8(sp) # 4-byte Folded Reload ; RV32IZCMP-SR-NEXT: sw a0, 16(a5) -; RV32IZCMP-SR-NEXT: lw a0, 16(sp) # 4-byte Folded Reload +; RV32IZCMP-SR-NEXT: lw a0, 12(sp) # 4-byte Folded Reload ; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+12)(t0) -; RV32IZCMP-SR-NEXT: lw a0, 20(sp) # 4-byte Folded Reload +; RV32IZCMP-SR-NEXT: lw a0, 16(sp) # 4-byte Folded Reload ; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+8)(t0) -; RV32IZCMP-SR-NEXT: lw a0, 24(sp) # 4-byte Folded Reload +; RV32IZCMP-SR-NEXT: lw a0, 20(sp) # 4-byte Folded Reload ; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+4)(t0) -; RV32IZCMP-SR-NEXT: lw a0, 28(sp) # 4-byte Folded Reload +; RV32IZCMP-SR-NEXT: lw a0, 24(sp) # 4-byte Folded Reload ; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq)(t0) -; RV32IZCMP-SR-NEXT: cm.popret {ra, s0-s11}, 96 +; RV32IZCMP-SR-NEXT: cm.popret {ra, s0-s11}, 80 ; ; RV64IZCMP-SR-LABEL: callee_no_irq: ; RV64IZCMP-SR: # %bb.0: @@ -3697,18 +3689,18 @@ define void @callee_no_irq() { ; RV64IZCMP-SR-NEXT: .cfi_offset s11, -8 ; RV64IZCMP-SR-NEXT: lui t0, %hi(var_test_irq) ; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq)(t0) -; RV64IZCMP-SR-NEXT: sd a0, 40(sp) # 8-byte Folded Spill +; RV64IZCMP-SR-NEXT: sd a0, 48(sp) # 8-byte Folded Spill ; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+4)(t0) -; RV64IZCMP-SR-NEXT: sd a0, 32(sp) # 8-byte Folded Spill +; RV64IZCMP-SR-NEXT: sd a0, 40(sp) # 8-byte Folded Spill ; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+8)(t0) -; RV64IZCMP-SR-NEXT: sd a0, 24(sp) # 8-byte Folded Spill +; RV64IZCMP-SR-NEXT: sd a0, 32(sp) # 8-byte Folded Spill ; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+12)(t0) -; RV64IZCMP-SR-NEXT: sd a0, 16(sp) # 8-byte Folded Spill +; RV64IZCMP-SR-NEXT: sd a0, 24(sp) # 8-byte Folded Spill ; RV64IZCMP-SR-NEXT: addi a5, t0, %lo(var_test_irq) ; RV64IZCMP-SR-NEXT: lw 
a0, 16(a5) -; RV64IZCMP-SR-NEXT: sd a0, 8(sp) # 8-byte Folded Spill +; RV64IZCMP-SR-NEXT: sd a0, 16(sp) # 8-byte Folded Spill ; RV64IZCMP-SR-NEXT: lw a0, 20(a5) -; RV64IZCMP-SR-NEXT: sd a0, 0(sp) # 8-byte Folded Spill +; RV64IZCMP-SR-NEXT: sd a0, 8(sp) # 8-byte Folded Spill ; RV64IZCMP-SR-NEXT: lw t4, 24(a5) ; RV64IZCMP-SR-NEXT: lw t5, 28(a5) ; RV64IZCMP-SR-NEXT: lw t6, 32(a5) @@ -3761,17 +3753,17 @@ define void @callee_no_irq() { ; RV64IZCMP-SR-NEXT: sw t6, 32(a5) ; RV64IZCMP-SR-NEXT: sw t5, 28(a5) ; RV64IZCMP-SR-NEXT: sw t4, 24(a5) -; RV64IZCMP-SR-NEXT: ld a0, 0(sp) # 8-byte Folded Reload -; RV64IZCMP-SR-NEXT: sw a0, 20(a5) ; RV64IZCMP-SR-NEXT: ld a0, 8(sp) # 8-byte Folded Reload -; RV64IZCMP-SR-NEXT: sw a0, 16(a5) +; RV64IZCMP-SR-NEXT: sw a0, 20(a5) ; RV64IZCMP-SR-NEXT: ld a0, 16(sp) # 8-byte Folded Reload -; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+12)(t0) +; RV64IZCMP-SR-NEXT: sw a0, 16(a5) ; RV64IZCMP-SR-NEXT: ld a0, 24(sp) # 8-byte Folded Reload -; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+8)(t0) +; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+12)(t0) ; RV64IZCMP-SR-NEXT: ld a0, 32(sp) # 8-byte Folded Reload -; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+4)(t0) +; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+8)(t0) ; RV64IZCMP-SR-NEXT: ld a0, 40(sp) # 8-byte Folded Reload +; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+4)(t0) +; RV64IZCMP-SR-NEXT: ld a0, 48(sp) # 8-byte Folded Reload ; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq)(t0) ; RV64IZCMP-SR-NEXT: cm.popret {ra, s0-s11}, 160 ; @@ -4061,71 +4053,71 @@ declare ptr @llvm.frameaddress.p0(i32 immarg) define i32 @use_fp(i32 %x) { ; RV32IZCMP-LABEL: use_fp: ; RV32IZCMP: # %bb.0: # %entry -; RV32IZCMP-NEXT: cm.push {ra, s0-s1}, -32 -; RV32IZCMP-NEXT: .cfi_def_cfa_offset 32 +; RV32IZCMP-NEXT: cm.push {ra, s0-s1}, -16 +; RV32IZCMP-NEXT: .cfi_def_cfa_offset 16 ; RV32IZCMP-NEXT: .cfi_offset ra, -12 ; RV32IZCMP-NEXT: .cfi_offset s0, -8 ; RV32IZCMP-NEXT: .cfi_offset s1, -4 -; RV32IZCMP-NEXT: addi s0, sp, 32 +; RV32IZCMP-NEXT: addi s0, sp, 16 ; RV32IZCMP-NEXT: .cfi_def_cfa s0, 0 ; RV32IZCMP-NEXT: mv s1, a0 -; RV32IZCMP-NEXT: addi a1, s0, -20 +; RV32IZCMP-NEXT: addi a1, s0, -16 ; RV32IZCMP-NEXT: mv a0, s0 ; RV32IZCMP-NEXT: call bar ; RV32IZCMP-NEXT: mv a0, s1 -; RV32IZCMP-NEXT: .cfi_def_cfa sp, 32 -; RV32IZCMP-NEXT: cm.popret {ra, s0-s1}, 32 +; RV32IZCMP-NEXT: .cfi_def_cfa sp, 16 +; RV32IZCMP-NEXT: cm.popret {ra, s0-s1}, 16 ; ; RV64IZCMP-LABEL: use_fp: ; RV64IZCMP: # %bb.0: # %entry -; RV64IZCMP-NEXT: cm.push {ra, s0-s1}, -48 -; RV64IZCMP-NEXT: .cfi_def_cfa_offset 48 +; RV64IZCMP-NEXT: cm.push {ra, s0-s1}, -32 +; RV64IZCMP-NEXT: .cfi_def_cfa_offset 32 ; RV64IZCMP-NEXT: .cfi_offset ra, -24 ; RV64IZCMP-NEXT: .cfi_offset s0, -16 ; RV64IZCMP-NEXT: .cfi_offset s1, -8 -; RV64IZCMP-NEXT: addi s0, sp, 48 +; RV64IZCMP-NEXT: addi s0, sp, 32 ; RV64IZCMP-NEXT: .cfi_def_cfa s0, 0 ; RV64IZCMP-NEXT: mv s1, a0 -; RV64IZCMP-NEXT: addi a1, s0, -36 +; RV64IZCMP-NEXT: addi a1, s0, -28 ; RV64IZCMP-NEXT: mv a0, s0 ; RV64IZCMP-NEXT: call bar ; RV64IZCMP-NEXT: mv a0, s1 -; RV64IZCMP-NEXT: .cfi_def_cfa sp, 48 -; RV64IZCMP-NEXT: cm.popret {ra, s0-s1}, 48 +; RV64IZCMP-NEXT: .cfi_def_cfa sp, 32 +; RV64IZCMP-NEXT: cm.popret {ra, s0-s1}, 32 ; ; RV32IZCMP-SR-LABEL: use_fp: ; RV32IZCMP-SR: # %bb.0: # %entry -; RV32IZCMP-SR-NEXT: cm.push {ra, s0-s1}, -32 -; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 32 +; RV32IZCMP-SR-NEXT: cm.push {ra, s0-s1}, -16 +; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 16 ; RV32IZCMP-SR-NEXT: .cfi_offset ra, -12 ; RV32IZCMP-SR-NEXT: .cfi_offset s0, -8 ; 
RV32IZCMP-SR-NEXT: .cfi_offset s1, -4 -; RV32IZCMP-SR-NEXT: addi s0, sp, 32 +; RV32IZCMP-SR-NEXT: addi s0, sp, 16 ; RV32IZCMP-SR-NEXT: .cfi_def_cfa s0, 0 ; RV32IZCMP-SR-NEXT: mv s1, a0 -; RV32IZCMP-SR-NEXT: addi a1, s0, -20 +; RV32IZCMP-SR-NEXT: addi a1, s0, -16 ; RV32IZCMP-SR-NEXT: mv a0, s0 ; RV32IZCMP-SR-NEXT: call bar ; RV32IZCMP-SR-NEXT: mv a0, s1 -; RV32IZCMP-SR-NEXT: .cfi_def_cfa sp, 32 -; RV32IZCMP-SR-NEXT: cm.popret {ra, s0-s1}, 32 +; RV32IZCMP-SR-NEXT: .cfi_def_cfa sp, 16 +; RV32IZCMP-SR-NEXT: cm.popret {ra, s0-s1}, 16 ; ; RV64IZCMP-SR-LABEL: use_fp: ; RV64IZCMP-SR: # %bb.0: # %entry -; RV64IZCMP-SR-NEXT: cm.push {ra, s0-s1}, -48 -; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 48 +; RV64IZCMP-SR-NEXT: cm.push {ra, s0-s1}, -32 +; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 32 ; RV64IZCMP-SR-NEXT: .cfi_offset ra, -24 ; RV64IZCMP-SR-NEXT: .cfi_offset s0, -16 ; RV64IZCMP-SR-NEXT: .cfi_offset s1, -8 -; RV64IZCMP-SR-NEXT: addi s0, sp, 48 +; RV64IZCMP-SR-NEXT: addi s0, sp, 32 ; RV64IZCMP-SR-NEXT: .cfi_def_cfa s0, 0 ; RV64IZCMP-SR-NEXT: mv s1, a0 -; RV64IZCMP-SR-NEXT: addi a1, s0, -36 +; RV64IZCMP-SR-NEXT: addi a1, s0, -28 ; RV64IZCMP-SR-NEXT: mv a0, s0 ; RV64IZCMP-SR-NEXT: call bar ; RV64IZCMP-SR-NEXT: mv a0, s1 -; RV64IZCMP-SR-NEXT: .cfi_def_cfa sp, 48 -; RV64IZCMP-SR-NEXT: cm.popret {ra, s0-s1}, 48 +; RV64IZCMP-SR-NEXT: .cfi_def_cfa sp, 32 +; RV64IZCMP-SR-NEXT: cm.popret {ra, s0-s1}, 32 ; ; RV32I-LABEL: use_fp: ; RV32I: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll index bf2fdafc380da..1205ff17d113e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll @@ -102,39 +102,35 @@ define @foo( %a, @llvm.riscv.vfadd.nxv1f64.nxv1f64( undef, %a, %b, i32 7, i32 %gvl) %call = call signext i32 @puts(ptr @.str) diff --git a/llvm/test/CodeGen/RISCV/zcmp-additional-stack.ll b/llvm/test/CodeGen/RISCV/zcmp-additional-stack.ll index c98b9b80378fd..61c1de588a6e1 100644 --- a/llvm/test/CodeGen/RISCV/zcmp-additional-stack.ll +++ b/llvm/test/CodeGen/RISCV/zcmp-additional-stack.ll @@ -8,8 +8,8 @@ define ptr @func(ptr %s, i32 %_c, ptr %incdec.ptr, i1 %0, i8 %conv14) #0 { ; RV32-NEXT: .cfi_offset ra, -12 ; RV32-NEXT: .cfi_offset s0, -8 ; RV32-NEXT: .cfi_offset s1, -4 -; RV32-NEXT: addi sp, sp, -8 -; RV32-NEXT: .cfi_def_cfa_offset 24 +; RV32-NEXT: addi sp, sp, -4 +; RV32-NEXT: .cfi_def_cfa_offset 20 ; RV32-NEXT: sw a4, 4(sp) # 4-byte Folded Spill ; RV32-NEXT: sw a2, 0(sp) # 4-byte Folded Spill ; RV32-NEXT: mv a2, a1 @@ -33,7 +33,7 @@ define ptr @func(ptr %s, i32 %_c, ptr %incdec.ptr, i1 %0, i8 %conv14) #0 { ; RV32-NEXT: lw a0, 4(sp) # 4-byte Folded Reload ; RV32-NEXT: sb a0, 0(s0) ; RV32-NEXT: mv a0, s1 -; RV32-NEXT: addi sp, sp, 8 +; RV32-NEXT: addi sp, sp, 4 ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: cm.popret {ra, s0-s1}, 16 entry: diff --git a/llvm/test/CodeGen/RISCV/zcmp-with-float.ll b/llvm/test/CodeGen/RISCV/zcmp-with-float.ll index d2ecba2fe8d18..638a3af00eec8 100644 --- a/llvm/test/CodeGen/RISCV/zcmp-with-float.ll +++ b/llvm/test/CodeGen/RISCV/zcmp-with-float.ll @@ -8,31 +8,31 @@ declare void @callee() define float @foo(float %arg) { ; RV32-LABEL: foo: ; RV32: # %bb.0: # %entry -; RV32-NEXT: cm.push {ra}, -32 -; RV32-NEXT: .cfi_def_cfa_offset 32 +; RV32-NEXT: cm.push {ra}, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: .cfi_offset ra, -4 -; RV32-NEXT: fsw fs0, 12(sp) # 4-byte Folded Spill -; RV32-NEXT: .cfi_offset fs0, -20 +; RV32-NEXT: 
fsw fs0, 8(sp) # 4-byte Folded Spill +; RV32-NEXT: .cfi_offset fs0, -8 ; RV32-NEXT: fmv.s fs0, fa0 ; RV32-NEXT: call callee ; RV32-NEXT: fmv.s fa0, fs0 -; RV32-NEXT: flw fs0, 12(sp) # 4-byte Folded Reload +; RV32-NEXT: flw fs0, 8(sp) # 4-byte Folded Reload ; RV32-NEXT: .cfi_restore fs0 -; RV32-NEXT: cm.popret {ra}, 32 +; RV32-NEXT: cm.popret {ra}, 16 ; ; RV64-LABEL: foo: ; RV64: # %bb.0: # %entry -; RV64-NEXT: cm.push {ra}, -32 -; RV64-NEXT: .cfi_def_cfa_offset 32 +; RV64-NEXT: cm.push {ra}, -16 +; RV64-NEXT: .cfi_def_cfa_offset 16 ; RV64-NEXT: .cfi_offset ra, -8 -; RV64-NEXT: fsw fs0, 12(sp) # 4-byte Folded Spill -; RV64-NEXT: .cfi_offset fs0, -20 +; RV64-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill +; RV64-NEXT: .cfi_offset fs0, -12 ; RV64-NEXT: fmv.s fs0, fa0 ; RV64-NEXT: call callee ; RV64-NEXT: fmv.s fa0, fs0 -; RV64-NEXT: flw fs0, 12(sp) # 4-byte Folded Reload +; RV64-NEXT: flw fs0, 4(sp) # 4-byte Folded Reload ; RV64-NEXT: .cfi_restore fs0 -; RV64-NEXT: cm.popret {ra}, 32 +; RV64-NEXT: cm.popret {ra}, 16 entry: call void @callee() ret float %arg @@ -41,20 +41,20 @@ entry: define void @foo2(i32 %x, float %y) { ; RV32-LABEL: foo2: ; RV32: # %bb.0: # %entry -; RV32-NEXT: cm.push {ra, s0}, -32 -; RV32-NEXT: .cfi_def_cfa_offset 32 +; RV32-NEXT: cm.push {ra, s0}, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: .cfi_offset ra, -8 ; RV32-NEXT: .cfi_offset s0, -4 -; RV32-NEXT: fsw fs0, 12(sp) # 4-byte Folded Spill -; RV32-NEXT: .cfi_offset fs0, -20 +; RV32-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill +; RV32-NEXT: .cfi_offset fs0, -12 ; RV32-NEXT: fmv.s fs0, fa0 ; RV32-NEXT: mv s0, a0 ; RV32-NEXT: call bar ; RV32-NEXT: mv a0, s0 ; RV32-NEXT: fmv.s fa0, fs0 -; RV32-NEXT: flw fs0, 12(sp) # 4-byte Folded Reload +; RV32-NEXT: flw fs0, 4(sp) # 4-byte Folded Reload ; RV32-NEXT: .cfi_restore fs0 -; RV32-NEXT: cm.pop {ra, s0}, 32 +; RV32-NEXT: cm.pop {ra, s0}, 16 ; RV32-NEXT: .cfi_restore ra ; RV32-NEXT: .cfi_restore s0 ; RV32-NEXT: .cfi_def_cfa_offset 0 diff --git a/llvm/test/CodeGen/SPIRV/hlsl-resources/BufferLoadStore.ll b/llvm/test/CodeGen/SPIRV/hlsl-resources/BufferLoadStore.ll index 25dcc90cb61cd..d810ef9ccecc4 100644 --- a/llvm/test/CodeGen/SPIRV/hlsl-resources/BufferLoadStore.ll +++ b/llvm/test/CodeGen/SPIRV/hlsl-resources/BufferLoadStore.ll @@ -2,6 +2,7 @@ ; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-vulkan-library %s -o - -filetype=obj | spirv-val %} ; CHECK-DAG: [[float:%[0-9]+]] = OpTypeFloat 32 +; CHECK-DAG: [[v2float:%[0-9]+]] = OpTypeVector [[float]] 2 ; CHECK-DAG: [[v4float:%[0-9]+]] = OpTypeVector [[float]] 4 ; CHECK-DAG: [[int:%[0-9]+]] = OpTypeInt 32 0 ; CHECK-DAG: [[zero:%[0-9]+]] = OpConstant [[int]] 0 @@ -10,10 +11,11 @@ ; CHECK-DAG: [[twenty_three:%[0-9]+]] = OpConstant [[int]] 23 ; CHECK-DAG: [[ImageType:%[0-9]+]] = OpTypeImage [[float]] Buffer 2 0 0 2 Rgba32f ; CHECK-DAG: [[ImagePtr:%[0-9]+]] = OpTypePointer UniformConstant [[ImageType]] -; CHECK: [[Var:%[0-9]+]] = OpVariable [[ImagePtr]] UniformConstant +; CHECK-DAG: [[Var:%[0-9]+]] = OpVariable [[ImagePtr]] UniformConstant ; Function Attrs: mustprogress nofree noinline norecurse nosync nounwind willreturn memory(readwrite, inaccessiblemem: none) -define void @main() local_unnamed_addr #0 { +; CHECK: OpFunction +define void @main_scalar() local_unnamed_addr #0 { entry: ; CHECK: [[H:%[0-9]+]] = OpLoad [[ImageType]] [[Var]] %s_h.i = tail call target("spirv.Image", float, 5, 2, 0, 0, 2, 1) @llvm.spv.resource.handlefrombinding.tspirv.Image_f32_5_2_0_0_2_0t(i32 3, i32 5, i32 1, i32 0, i1 false) @@ -50,6 +52,86 @@ bb_both: 
ret void } +; Function Attrs: mustprogress nofree noinline norecurse nosync nounwind willreturn memory(readwrite, inaccessiblemem: none) +; CHECK: OpFunction +define void @main_vector2() local_unnamed_addr #0 { +entry: +; CHECK: [[H:%[0-9]+]] = OpLoad [[ImageType]] [[Var]] + %s_h.i = tail call target("spirv.Image", float, 5, 2, 0, 0, 2, 1) @llvm.spv.resource.handlefrombinding.tspirv.Image_f32_5_2_0_0_2_0t(i32 3, i32 5, i32 1, i32 0, i1 false) + +; CHECK: [[R:%[0-9]+]] = OpImageRead [[v4float]] [[H]] [[one]] +; CHECK: [[E0:%[0-9]+]] = OpCompositeExtract [[float]] [[R]] 0 +; CHECK: [[E1:%[0-9]+]] = OpCompositeExtract [[float]] [[R]] 1 +; CHECK: [[V:%[0-9]+]] = OpCompositeConstruct [[v2float]] [[E0]] [[E1]] + %0 = tail call noundef nonnull align 4 dereferenceable(4) ptr @llvm.spv.resource.getpointer.p0.tspirv.Image_f32_5_2_0_0_2_0t(target("spirv.Image", float, 5, 2, 0, 0, 2, 1) %s_h.i, i32 1) + %1 = load <2 x float>, ptr %0, align 4 +; CHECK: OpBranch [[bb_store:%[0-9]+]] + br label %bb_store + +; CHECK: [[bb_store]] = OpLabel +bb_store: + +; CHECK: [[H:%[0-9]+]] = OpLoad [[ImageType]] [[Var]] +; CHECK: OpImageWrite [[H]] [[zero]] [[V]] + %2 = tail call noundef nonnull align 4 dereferenceable(4) ptr @llvm.spv.resource.getpointer.p0.tspirv.Image_f32_5_2_0_0_2_0t(target("spirv.Image", float, 5, 2, 0, 0, 2, 1) %s_h.i, i32 0) + store <2 x float> %1, ptr %2, align 4 +; CHECK: OpBranch [[bb_both:%[0-9]+]] + br label %bb_both + +; CHECK: [[bb_both]] = OpLabel +bb_both: +; CHECK: [[H:%[0-9]+]] = OpLoad [[ImageType]] [[Var]] +; CHECK: [[R:%[0-9]+]] = OpImageRead [[v4float]] [[H]] [[twenty_three]] +; CHECK: [[E0:%[0-9]+]] = OpCompositeExtract [[float]] [[R]] 0 +; CHECK: [[E1:%[0-9]+]] = OpCompositeExtract [[float]] [[R]] 1 +; CHECK: [[V:%[0-9]+]] = OpCompositeConstruct [[v2float]] [[E0]] [[E1]] + %3 = tail call noundef nonnull align 4 dereferenceable(4) ptr @llvm.spv.resource.getpointer.p0.tspirv.Image_f32_5_2_0_0_2_0t(target("spirv.Image", float, 5, 2, 0, 0, 2, 1) %s_h.i, i32 23) + %4 = load <2 x float>, ptr %3, align 4 + +; CHECK: [[H:%[0-9]+]] = OpLoad [[ImageType]] [[Var]] +; CHECK: OpImageWrite [[H]] [[twenty]] [[V]] + %5 = tail call noundef nonnull align 4 dereferenceable(4) ptr @llvm.spv.resource.getpointer.p0.tspirv.Image_f32_5_2_0_0_2_0t(target("spirv.Image", float, 5, 2, 0, 0, 2, 1) %s_h.i, i32 20) + store <2 x float> %4, ptr %5, align 4 + ret void +} + +; Function Attrs: mustprogress nofree noinline norecurse nosync nounwind willreturn memory(readwrite, inaccessiblemem: none) +; CHECK: OpFunction +define void @main_vector4() local_unnamed_addr #0 { +entry: +; CHECK: [[H:%[0-9]+]] = OpLoad [[ImageType]] [[Var]] + %s_h.i = tail call target("spirv.Image", float, 5, 2, 0, 0, 2, 1) @llvm.spv.resource.handlefrombinding.tspirv.Image_f32_5_2_0_0_2_0t(i32 3, i32 5, i32 1, i32 0, i1 false) + +; CHECK: [[R:%[0-9]+]] = OpImageRead [[v4float]] [[H]] [[one]] + %0 = tail call noundef nonnull align 4 dereferenceable(4) ptr @llvm.spv.resource.getpointer.p0.tspirv.Image_f32_5_2_0_0_2_0t(target("spirv.Image", float, 5, 2, 0, 0, 2, 1) %s_h.i, i32 1) + %1 = load <4 x float>, ptr %0, align 4 +; CHECK: OpBranch [[bb_store:%[0-9]+]] + br label %bb_store + +; CHECK: [[bb_store]] = OpLabel +bb_store: + +; CHECK: [[H:%[0-9]+]] = OpLoad [[ImageType]] [[Var]] +; CHECK: OpImageWrite [[H]] [[zero]] [[R]] + %2 = tail call noundef nonnull align 4 dereferenceable(4) ptr @llvm.spv.resource.getpointer.p0.tspirv.Image_f32_5_2_0_0_2_0t(target("spirv.Image", float, 5, 2, 0, 0, 2, 1) %s_h.i, i32 0) + store <4 x float> %1, ptr %2, 
align 4 +; CHECK: OpBranch [[bb_both:%[0-9]+]] + br label %bb_both + +; CHECK: [[bb_both]] = OpLabel +bb_both: +; CHECK: [[H:%[0-9]+]] = OpLoad [[ImageType]] [[Var]] +; CHECK: [[R:%[0-9]+]] = OpImageRead [[v4float]] [[H]] [[twenty_three]] + %3 = tail call noundef nonnull align 4 dereferenceable(4) ptr @llvm.spv.resource.getpointer.p0.tspirv.Image_f32_5_2_0_0_2_0t(target("spirv.Image", float, 5, 2, 0, 0, 2, 1) %s_h.i, i32 23) + %4 = load <4 x float>, ptr %3, align 4 + +; CHECK: [[H:%[0-9]+]] = OpLoad [[ImageType]] [[Var]] +; CHECK: OpImageWrite [[H]] [[twenty]] [[R]] + %5 = tail call noundef nonnull align 4 dereferenceable(4) ptr @llvm.spv.resource.getpointer.p0.tspirv.Image_f32_5_2_0_0_2_0t(target("spirv.Image", float, 5, 2, 0, 0, 2, 1) %s_h.i, i32 20) + store <4 x float> %4, ptr %5, align 4 + ret void +} + ; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(none) declare ptr @llvm.spv.resource.getpointer.p0.tspirv.Image_f32_5_2_0_0_2_0t(target("spirv.Image", float, 5, 2, 0, 0, 2, 1), i32) #1 diff --git a/llvm/test/CodeGen/X86/GlobalISel/isel-fcmp-i686.mir b/llvm/test/CodeGen/X86/GlobalISel/isel-fcmp-i686.mir new file mode 100644 index 0000000000000..fb4e25f2a5ced --- /dev/null +++ b/llvm/test/CodeGen/X86/GlobalISel/isel-fcmp-i686.mir @@ -0,0 +1,399 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5 +# NOTE: This MIR test is required because support for 64-bit memory ops is missing in i686 mode. Due to the distinction between float/int types, support is expected in the near future; an RFC is in place at https://discourse.llvm.org/t/rfc-globalisel-adding-fp-type-information-to-llt/83349. Once this support is introduced, this test must be dropped and integrated into the LLVM IR tests.
+# RUN: llc -mtriple=i686-linux-gnu -mattr=+x87,-sse,-sse2 -run-pass=regbankselect,instruction-select -disable-gisel-legality-check -global-isel %s -o - | FileCheck %s --check-prefixes GISEL-X86 + +--- +name: fcmp_double_oeq +alignment: 16 +exposesReturnsTwice: false +legalized: true +tracksRegLiveness: true +fixedStack: + - { id: 0, type: default, offset: 8, size: 8, alignment: 8, stack-id: default, + isImmutable: true, isAliased: false, callee-saved-register: '', callee-saved-restored: true, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } + +stack: [] +body: | + bb.1.entry: + ; GISEL-X86-LABEL: name: fcmp_double_oeq + ; GISEL-X86: [[LD_Fp64m:%[0-9]+]]:rfp64 = LD_Fp64m %fixed-stack.0, 1, $noreg, 0, $noreg, implicit-def $fpsw, implicit $fpcw :: (invariant load (s64) from %fixed-stack.0) + ; GISEL-X86-NEXT: [[LD_Fp64m1:%[0-9]+]]:rfp64 = LD_Fp64m %fixed-stack.0, 1, $noreg, 0, $noreg, implicit-def $fpsw, implicit $fpcw :: (invariant load (s64) from %fixed-stack.0 + 8) + ; GISEL-X86-NEXT: UCOM_FpIr64 [[LD_Fp64m]], [[LD_Fp64m1]], implicit-def $eflags, implicit-def $fpsw, implicit $fpcw + ; GISEL-X86-NEXT: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 4, implicit $eflags + ; GISEL-X86-NEXT: [[SETCCr1:%[0-9]+]]:gr8 = SETCCr 11, implicit $eflags + ; GISEL-X86-NEXT: [[AND8rr:%[0-9]+]]:gr8 = AND8rr [[SETCCr]], [[SETCCr1]], implicit-def $eflags + ; GISEL-X86-NEXT: $al = COPY [[AND8rr]] + ; GISEL-X86-NEXT: RET 0, implicit $al + %3:_(p0) = G_FRAME_INDEX %fixed-stack.0 + %2:_(s64) = G_LOAD %3(p0) :: (invariant load (s64) from %fixed-stack.0, align 8) + %1:_(s64) = G_LOAD %3(p0) :: (invariant load (s64) from %fixed-stack.0 + 8, basealign 8) + %4:_(s8) = G_FCMP floatpred(oeq), %2(s64), %1 + $al = COPY %4(s8) + RET 0, implicit $al + +... +--- +name: fcmp_double_ogt +alignment: 16 +legalized: true +tracksRegLiveness: true +fixedStack: + - { id: 0, type: default, offset: 8, size: 8, alignment: 8, stack-id: default, + isImmutable: true, isAliased: false, callee-saved-register: '', callee-saved-restored: true, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } + +stack: [] +body: | + bb.1.entry: + ; GISEL-X86-LABEL: name: fcmp_double_ogt + ; GISEL-X86: [[LD_Fp64m:%[0-9]+]]:rfp64 = LD_Fp64m %fixed-stack.0, 1, $noreg, 0, $noreg, implicit-def $fpsw, implicit $fpcw :: (invariant load (s64) from %fixed-stack.0) + ; GISEL-X86-NEXT: [[LD_Fp64m1:%[0-9]+]]:rfp64 = LD_Fp64m %fixed-stack.0, 1, $noreg, 0, $noreg, implicit-def $fpsw, implicit $fpcw :: (invariant load (s64) from %fixed-stack.0 + 8) + ; GISEL-X86-NEXT: UCOM_FpIr64 [[LD_Fp64m]], [[LD_Fp64m1]], implicit-def $eflags, implicit-def $fpsw, implicit $fpcw + ; GISEL-X86-NEXT: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 7, implicit $eflags + ; GISEL-X86-NEXT: $al = COPY [[SETCCr]] + ; GISEL-X86-NEXT: RET 0, implicit $al + %3:_(p0) = G_FRAME_INDEX %fixed-stack.0 + %2:_(s64) = G_LOAD %3(p0) :: (invariant load (s64) from %fixed-stack.0, align 8) + %1:_(s64) = G_LOAD %3(p0) :: (invariant load (s64) from %fixed-stack.0 + 8, basealign 8) + %4:_(s8) = G_FCMP floatpred(ogt), %2(s64), %1 + $al = COPY %4(s8) + RET 0, implicit $al + +... 
+--- +name: fcmp_double_oge +alignment: 16 +exposesReturnsTwice: false +legalized: true +tracksRegLiveness: true +fixedStack: + - { id: 0, type: default, offset: 8, size: 8, alignment: 8, stack-id: default, + isImmutable: true, isAliased: false, callee-saved-register: '', callee-saved-restored: true, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } +stack: [] +body: | + bb.1.entry: + ; GISEL-X86-LABEL: name: fcmp_double_oge + ; GISEL-X86: [[LD_Fp64m:%[0-9]+]]:rfp64 = LD_Fp64m %fixed-stack.0, 1, $noreg, 0, $noreg, implicit-def $fpsw, implicit $fpcw :: (invariant load (s64) from %fixed-stack.0) + ; GISEL-X86-NEXT: [[LD_Fp64m1:%[0-9]+]]:rfp64 = LD_Fp64m %fixed-stack.0, 1, $noreg, 0, $noreg, implicit-def $fpsw, implicit $fpcw :: (invariant load (s64) from %fixed-stack.0 + 8) + ; GISEL-X86-NEXT: UCOM_FpIr64 [[LD_Fp64m]], [[LD_Fp64m1]], implicit-def $eflags, implicit-def $fpsw, implicit $fpcw + ; GISEL-X86-NEXT: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 3, implicit $eflags + ; GISEL-X86-NEXT: $al = COPY [[SETCCr]] + ; GISEL-X86-NEXT: RET 0, implicit $al + %3:_(p0) = G_FRAME_INDEX %fixed-stack.0 + %2:_(s64) = G_LOAD %3(p0) :: (invariant load (s64) from %fixed-stack.0, align 8) + %1:_(s64) = G_LOAD %3(p0) :: (invariant load (s64) from %fixed-stack.0 + 8, basealign 8) + %4:_(s8) = G_FCMP floatpred(oge), %2(s64), %1 + $al = COPY %4(s8) + RET 0, implicit $al + +... +--- +name: fcmp_double_olt +alignment: 16 +exposesReturnsTwice: false +legalized: true +tracksRegLiveness: true +fixedStack: + - { id: 0, type: default, offset: 8, size: 8, alignment: 8, stack-id: default, + isImmutable: true, isAliased: false, callee-saved-register: '', callee-saved-restored: true, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } + +body: | + bb.1.entry: + ; GISEL-X86-LABEL: name: fcmp_double_olt + ; GISEL-X86: [[LD_Fp64m:%[0-9]+]]:rfp64 = LD_Fp64m %fixed-stack.0, 1, $noreg, 0, $noreg, implicit-def $fpsw, implicit $fpcw :: (invariant load (s64) from %fixed-stack.0) + ; GISEL-X86-NEXT: [[LD_Fp64m1:%[0-9]+]]:rfp64 = LD_Fp64m %fixed-stack.0, 1, $noreg, 0, $noreg, implicit-def $fpsw, implicit $fpcw :: (invariant load (s64) from %fixed-stack.0 + 8) + ; GISEL-X86-NEXT: UCOM_FpIr64 [[LD_Fp64m1]], [[LD_Fp64m]], implicit-def $eflags, implicit-def $fpsw, implicit $fpcw + ; GISEL-X86-NEXT: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 7, implicit $eflags + ; GISEL-X86-NEXT: $al = COPY [[SETCCr]] + ; GISEL-X86-NEXT: RET 0, implicit $al + %3:_(p0) = G_FRAME_INDEX %fixed-stack.0 + %2:_(s64) = G_LOAD %3(p0) :: (invariant load (s64) from %fixed-stack.0, align 8) + %1:_(s64) = G_LOAD %3(p0) :: (invariant load (s64) from %fixed-stack.0 + 8, basealign 8) + %4:_(s8) = G_FCMP floatpred(olt), %2(s64), %1 + $al = COPY %4(s8) + RET 0, implicit $al + +... 
+--- +name: fcmp_double_ole +alignment: 16 +exposesReturnsTwice: false +legalized: true +tracksRegLiveness: true +fixedStack: + - { id: 0, type: default, offset: 8, size: 8, alignment: 8, stack-id: default, + isImmutable: true, isAliased: false, callee-saved-register: '', callee-saved-restored: true, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } +stack: [] +body: | + bb.1.entry: + ; GISEL-X86-LABEL: name: fcmp_double_ole + ; GISEL-X86: [[LD_Fp64m:%[0-9]+]]:rfp64 = LD_Fp64m %fixed-stack.0, 1, $noreg, 0, $noreg, implicit-def $fpsw, implicit $fpcw :: (invariant load (s64) from %fixed-stack.0) + ; GISEL-X86-NEXT: [[LD_Fp64m1:%[0-9]+]]:rfp64 = LD_Fp64m %fixed-stack.0, 1, $noreg, 0, $noreg, implicit-def $fpsw, implicit $fpcw :: (invariant load (s64) from %fixed-stack.0 + 8) + ; GISEL-X86-NEXT: UCOM_FpIr64 [[LD_Fp64m1]], [[LD_Fp64m]], implicit-def $eflags, implicit-def $fpsw, implicit $fpcw + ; GISEL-X86-NEXT: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 3, implicit $eflags + ; GISEL-X86-NEXT: $al = COPY [[SETCCr]] + ; GISEL-X86-NEXT: RET 0, implicit $al + %3:_(p0) = G_FRAME_INDEX %fixed-stack.0 + %2:_(s64) = G_LOAD %3(p0) :: (invariant load (s64) from %fixed-stack.0, align 8) + %1:_(s64) = G_LOAD %3(p0) :: (invariant load (s64) from %fixed-stack.0 + 8, basealign 8) + %4:_(s8) = G_FCMP floatpred(ole), %2(s64), %1 + $al = COPY %4(s8) + RET 0, implicit $al + +... +--- +name: fcmp_double_one +alignment: 16 +exposesReturnsTwice: false +legalized: true +tracksRegLiveness: true +fixedStack: + - { id: 0, type: default, offset: 8, size: 8, alignment: 8, stack-id: default, + isImmutable: true, isAliased: false, callee-saved-register: '', callee-saved-restored: true, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } +body: | + bb.1.entry: + ; GISEL-X86-LABEL: name: fcmp_double_one + ; GISEL-X86: [[LD_Fp64m:%[0-9]+]]:rfp64 = LD_Fp64m %fixed-stack.0, 1, $noreg, 0, $noreg, implicit-def $fpsw, implicit $fpcw :: (invariant load (s64) from %fixed-stack.0) + ; GISEL-X86-NEXT: [[LD_Fp64m1:%[0-9]+]]:rfp64 = LD_Fp64m %fixed-stack.0, 1, $noreg, 0, $noreg, implicit-def $fpsw, implicit $fpcw :: (invariant load (s64) from %fixed-stack.0 + 8) + ; GISEL-X86-NEXT: UCOM_FpIr64 [[LD_Fp64m]], [[LD_Fp64m1]], implicit-def $eflags, implicit-def $fpsw, implicit $fpcw + ; GISEL-X86-NEXT: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 5, implicit $eflags + ; GISEL-X86-NEXT: $al = COPY [[SETCCr]] + ; GISEL-X86-NEXT: RET 0, implicit $al + %3:_(p0) = G_FRAME_INDEX %fixed-stack.0 + %2:_(s64) = G_LOAD %3(p0) :: (invariant load (s64) from %fixed-stack.0, align 8) + %1:_(s64) = G_LOAD %3(p0) :: (invariant load (s64) from %fixed-stack.0 + 8, basealign 8) + %4:_(s8) = G_FCMP floatpred(one), %2(s64), %1 + $al = COPY %4(s8) + RET 0, implicit $al + +... 
+--- +name: fcmp_double_ord +alignment: 16 +exposesReturnsTwice: false +legalized: true +tracksRegLiveness: true +fixedStack: + - { id: 0, type: default, offset: 8, size: 8, alignment: 8, stack-id: default, + isImmutable: true, isAliased: false, callee-saved-register: '', callee-saved-restored: true, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } +stack: [] +body: | + bb.1.entry: + ; GISEL-X86-LABEL: name: fcmp_double_ord + ; GISEL-X86: [[LD_Fp64m:%[0-9]+]]:rfp64 = LD_Fp64m %fixed-stack.0, 1, $noreg, 0, $noreg, implicit-def $fpsw, implicit $fpcw :: (invariant load (s64) from %fixed-stack.0) + ; GISEL-X86-NEXT: [[LD_Fp64m1:%[0-9]+]]:rfp64 = LD_Fp64m %fixed-stack.0, 1, $noreg, 0, $noreg, implicit-def $fpsw, implicit $fpcw :: (invariant load (s64) from %fixed-stack.0 + 8) + ; GISEL-X86-NEXT: UCOM_FpIr64 [[LD_Fp64m]], [[LD_Fp64m1]], implicit-def $eflags, implicit-def $fpsw, implicit $fpcw + ; GISEL-X86-NEXT: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 11, implicit $eflags + ; GISEL-X86-NEXT: $al = COPY [[SETCCr]] + ; GISEL-X86-NEXT: RET 0, implicit $al + %3:_(p0) = G_FRAME_INDEX %fixed-stack.0 + %2:_(s64) = G_LOAD %3(p0) :: (invariant load (s64) from %fixed-stack.0, align 8) + %1:_(s64) = G_LOAD %3(p0) :: (invariant load (s64) from %fixed-stack.0 + 8, basealign 8) + %4:_(s8) = G_FCMP floatpred(ord), %2(s64), %1 + $al = COPY %4(s8) + RET 0, implicit $al + +... +--- +name: fcmp_double_uno +alignment: 16 +exposesReturnsTwice: false +legalized: true +fixedStack: + - { id: 0, type: default, offset: 8, size: 8, alignment: 8, stack-id: default, + isImmutable: true, isAliased: false, callee-saved-register: '', callee-saved-restored: true, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } +stack: [] +body: | + bb.1.entry: + ; GISEL-X86-LABEL: name: fcmp_double_uno + ; GISEL-X86: [[LD_Fp64m:%[0-9]+]]:rfp64 = LD_Fp64m %fixed-stack.0, 1, $noreg, 0, $noreg, implicit-def $fpsw, implicit $fpcw :: (invariant load (s64) from %fixed-stack.0) + ; GISEL-X86-NEXT: [[LD_Fp64m1:%[0-9]+]]:rfp64 = LD_Fp64m %fixed-stack.0, 1, $noreg, 0, $noreg, implicit-def $fpsw, implicit $fpcw :: (invariant load (s64) from %fixed-stack.0 + 8) + ; GISEL-X86-NEXT: UCOM_FpIr64 [[LD_Fp64m]], [[LD_Fp64m1]], implicit-def $eflags, implicit-def $fpsw, implicit $fpcw + ; GISEL-X86-NEXT: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 10, implicit $eflags + ; GISEL-X86-NEXT: $al = COPY [[SETCCr]] + ; GISEL-X86-NEXT: RET 0, implicit $al + %3:_(p0) = G_FRAME_INDEX %fixed-stack.0 + %2:_(s64) = G_LOAD %3(p0) :: (invariant load (s64) from %fixed-stack.0, align 8) + %1:_(s64) = G_LOAD %3(p0) :: (invariant load (s64) from %fixed-stack.0 + 8, basealign 8) + %4:_(s8) = G_FCMP floatpred(uno), %2(s64), %1 + $al = COPY %4(s8) + RET 0, implicit $al + +... 
+--- +name: fcmp_double_ueq +alignment: 16 +exposesReturnsTwice: false +legalized: true +tracksRegLiveness: true +fixedStack: + - { id: 0, type: default, offset: 8, size: 8, alignment: 8, stack-id: default, + isImmutable: true, isAliased: false, callee-saved-register: '', callee-saved-restored: true, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } +stack: [] +body: | + bb.1.entry: + ; GISEL-X86-LABEL: name: fcmp_double_ueq + ; GISEL-X86: [[LD_Fp64m:%[0-9]+]]:rfp64 = LD_Fp64m %fixed-stack.0, 1, $noreg, 0, $noreg, implicit-def $fpsw, implicit $fpcw :: (invariant load (s64) from %fixed-stack.0) + ; GISEL-X86-NEXT: [[LD_Fp64m1:%[0-9]+]]:rfp64 = LD_Fp64m %fixed-stack.0, 1, $noreg, 0, $noreg, implicit-def $fpsw, implicit $fpcw :: (invariant load (s64) from %fixed-stack.0 + 8) + ; GISEL-X86-NEXT: UCOM_FpIr64 [[LD_Fp64m]], [[LD_Fp64m1]], implicit-def $eflags, implicit-def $fpsw, implicit $fpcw + ; GISEL-X86-NEXT: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 4, implicit $eflags + ; GISEL-X86-NEXT: $al = COPY [[SETCCr]] + ; GISEL-X86-NEXT: RET 0, implicit $al + %3:_(p0) = G_FRAME_INDEX %fixed-stack.0 + %2:_(s64) = G_LOAD %3(p0) :: (invariant load (s64) from %fixed-stack.0, align 8) + %1:_(s64) = G_LOAD %3(p0) :: (invariant load (s64) from %fixed-stack.0 + 8, basealign 8) + %4:_(s8) = G_FCMP floatpred(ueq), %2(s64), %1 + $al = COPY %4(s8) + RET 0, implicit $al + +... +--- +name: fcmp_double_ugt +alignment: 16 +exposesReturnsTwice: false +legalized: true +tracksRegLiveness: true +fixedStack: + - { id: 0, type: default, offset: 8, size: 8, alignment: 8, stack-id: default, + isImmutable: true, isAliased: false, callee-saved-register: '', callee-saved-restored: true, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } + +stack: [] +body: | + bb.1.entry: + ; GISEL-X86-LABEL: name: fcmp_double_ugt + ; GISEL-X86: [[LD_Fp64m:%[0-9]+]]:rfp64 = LD_Fp64m %fixed-stack.0, 1, $noreg, 0, $noreg, implicit-def $fpsw, implicit $fpcw :: (invariant load (s64) from %fixed-stack.0) + ; GISEL-X86-NEXT: [[LD_Fp64m1:%[0-9]+]]:rfp64 = LD_Fp64m %fixed-stack.0, 1, $noreg, 0, $noreg, implicit-def $fpsw, implicit $fpcw :: (invariant load (s64) from %fixed-stack.0 + 8) + ; GISEL-X86-NEXT: UCOM_FpIr64 [[LD_Fp64m1]], [[LD_Fp64m]], implicit-def $eflags, implicit-def $fpsw, implicit $fpcw + ; GISEL-X86-NEXT: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 2, implicit $eflags + ; GISEL-X86-NEXT: $al = COPY [[SETCCr]] + ; GISEL-X86-NEXT: RET 0, implicit $al + %3:_(p0) = G_FRAME_INDEX %fixed-stack.0 + %2:_(s64) = G_LOAD %3(p0) :: (invariant load (s64) from %fixed-stack.0, align 8) + %1:_(s64) = G_LOAD %3(p0) :: (invariant load (s64) from %fixed-stack.0 + 8, basealign 8) + %4:_(s8) = G_FCMP floatpred(ugt), %2(s64), %1 + $al = COPY %4(s8) + RET 0, implicit $al + +... 
+--- +name: fcmp_double_uge +alignment: 16 +exposesReturnsTwice: false +legalized: true +tracksRegLiveness: true +fixedStack: + - { id: 0, type: default, offset: 8, size: 8, alignment: 8, stack-id: default, + isImmutable: true, isAliased: false, callee-saved-register: '', callee-saved-restored: true, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } +stack: [] +body: | + bb.1.entry: + ; GISEL-X86-LABEL: name: fcmp_double_uge + ; GISEL-X86: [[LD_Fp64m:%[0-9]+]]:rfp64 = LD_Fp64m %fixed-stack.0, 1, $noreg, 0, $noreg, implicit-def $fpsw, implicit $fpcw :: (invariant load (s64) from %fixed-stack.0) + ; GISEL-X86-NEXT: [[LD_Fp64m1:%[0-9]+]]:rfp64 = LD_Fp64m %fixed-stack.0, 1, $noreg, 0, $noreg, implicit-def $fpsw, implicit $fpcw :: (invariant load (s64) from %fixed-stack.0 + 8) + ; GISEL-X86-NEXT: UCOM_FpIr64 [[LD_Fp64m1]], [[LD_Fp64m]], implicit-def $eflags, implicit-def $fpsw, implicit $fpcw + ; GISEL-X86-NEXT: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 6, implicit $eflags + ; GISEL-X86-NEXT: $al = COPY [[SETCCr]] + ; GISEL-X86-NEXT: RET 0, implicit $al + %3:_(p0) = G_FRAME_INDEX %fixed-stack.0 + %2:_(s64) = G_LOAD %3(p0) :: (invariant load (s64) from %fixed-stack.0, align 8) + %1:_(s64) = G_LOAD %3(p0) :: (invariant load (s64) from %fixed-stack.0 + 8, basealign 8) + %4:_(s8) = G_FCMP floatpred(uge), %2(s64), %1 + $al = COPY %4(s8) + RET 0, implicit $al + +... +--- +name: fcmp_double_ult +alignment: 16 +exposesReturnsTwice: false +legalized: true +tracksRegLiveness: true + +fixedStack: + - { id: 0, type: default, offset: 8, size: 8, alignment: 8, stack-id: default, + isImmutable: true, isAliased: false, callee-saved-register: '', callee-saved-restored: true, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } +stack: [] +body: | + bb.1.entry: + ; GISEL-X86-LABEL: name: fcmp_double_ult + ; GISEL-X86: [[LD_Fp64m:%[0-9]+]]:rfp64 = LD_Fp64m %fixed-stack.0, 1, $noreg, 0, $noreg, implicit-def $fpsw, implicit $fpcw :: (invariant load (s64) from %fixed-stack.0) + ; GISEL-X86-NEXT: [[LD_Fp64m1:%[0-9]+]]:rfp64 = LD_Fp64m %fixed-stack.0, 1, $noreg, 0, $noreg, implicit-def $fpsw, implicit $fpcw :: (invariant load (s64) from %fixed-stack.0 + 8) + ; GISEL-X86-NEXT: UCOM_FpIr64 [[LD_Fp64m]], [[LD_Fp64m1]], implicit-def $eflags, implicit-def $fpsw, implicit $fpcw + ; GISEL-X86-NEXT: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 2, implicit $eflags + ; GISEL-X86-NEXT: $al = COPY [[SETCCr]] + ; GISEL-X86-NEXT: RET 0, implicit $al + %3:_(p0) = G_FRAME_INDEX %fixed-stack.0 + %2:_(s64) = G_LOAD %3(p0) :: (invariant load (s64) from %fixed-stack.0, align 8) + %1:_(s64) = G_LOAD %3(p0) :: (invariant load (s64) from %fixed-stack.0 + 8, basealign 8) + %4:_(s8) = G_FCMP floatpred(ult), %2(s64), %1 + $al = COPY %4(s8) + RET 0, implicit $al + +... 
+--- +name: fcmp_double_ule +alignment: 16 +exposesReturnsTwice: false +legalized: true +tracksRegLiveness: true +fixedStack: + - { id: 0, type: default, offset: 8, size: 8, alignment: 8, stack-id: default, + isImmutable: true, isAliased: false, callee-saved-register: '', callee-saved-restored: true, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } +stack: [] +body: | + bb.1.entry: + ; GISEL-X86-LABEL: name: fcmp_double_ule + ; GISEL-X86: [[LD_Fp64m:%[0-9]+]]:rfp64 = LD_Fp64m %fixed-stack.0, 1, $noreg, 0, $noreg, implicit-def $fpsw, implicit $fpcw :: (invariant load (s64) from %fixed-stack.0) + ; GISEL-X86-NEXT: [[LD_Fp64m1:%[0-9]+]]:rfp64 = LD_Fp64m %fixed-stack.0, 1, $noreg, 0, $noreg, implicit-def $fpsw, implicit $fpcw :: (invariant load (s64) from %fixed-stack.0 + 8) + ; GISEL-X86-NEXT: UCOM_FpIr64 [[LD_Fp64m]], [[LD_Fp64m1]], implicit-def $eflags, implicit-def $fpsw, implicit $fpcw + ; GISEL-X86-NEXT: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 6, implicit $eflags + ; GISEL-X86-NEXT: $al = COPY [[SETCCr]] + ; GISEL-X86-NEXT: RET 0, implicit $al + %3:_(p0) = G_FRAME_INDEX %fixed-stack.0 + %2:_(s64) = G_LOAD %3(p0) :: (invariant load (s64) from %fixed-stack.0, align 8) + %1:_(s64) = G_LOAD %3(p0) :: (invariant load (s64) from %fixed-stack.0 + 8, basealign 8) + %4:_(s8) = G_FCMP floatpred(ule), %2(s64), %1 + $al = COPY %4(s8) + RET 0, implicit $al + +... +--- +name: fcmp_double_une +alignment: 16 +exposesReturnsTwice: false +legalized: true +tracksRegLiveness: true +fixedStack: + - { id: 0, type: default, offset: 8, size: 8, alignment: 8, stack-id: default, + isImmutable: true, isAliased: false, callee-saved-register: '', callee-saved-restored: true, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } +body: | + bb.1.entry: + ; GISEL-X86-LABEL: name: fcmp_double_une + ; GISEL-X86: [[LD_Fp64m:%[0-9]+]]:rfp64 = LD_Fp64m %fixed-stack.0, 1, $noreg, 0, $noreg, implicit-def $fpsw, implicit $fpcw :: (invariant load (s64) from %fixed-stack.0) + ; GISEL-X86-NEXT: [[LD_Fp64m1:%[0-9]+]]:rfp64 = LD_Fp64m %fixed-stack.0, 1, $noreg, 0, $noreg, implicit-def $fpsw, implicit $fpcw :: (invariant load (s64) from %fixed-stack.0 + 8) + ; GISEL-X86-NEXT: UCOM_FpIr64 [[LD_Fp64m]], [[LD_Fp64m1]], implicit-def $eflags, implicit-def $fpsw, implicit $fpcw + ; GISEL-X86-NEXT: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 5, implicit $eflags + ; GISEL-X86-NEXT: [[SETCCr1:%[0-9]+]]:gr8 = SETCCr 10, implicit $eflags + ; GISEL-X86-NEXT: [[OR8rr:%[0-9]+]]:gr8 = OR8rr [[SETCCr]], [[SETCCr1]], implicit-def $eflags + ; GISEL-X86-NEXT: $al = COPY [[OR8rr]] + ; GISEL-X86-NEXT: RET 0, implicit $al + %3:_(p0) = G_FRAME_INDEX %fixed-stack.0 + %2:_(s64) = G_LOAD %3(p0) :: (invariant load (s64) from %fixed-stack.0, align 8) + %1:_(s64) = G_LOAD %3(p0) :: (invariant load (s64) from %fixed-stack.0 + 8, basealign 8) + %4:_(s8) = G_FCMP floatpred(une), %2(s64), %1 + $al = COPY %4(s8) + RET 0, implicit $al +... 
diff --git a/llvm/test/CodeGen/X86/dwo-stats.ll b/llvm/test/CodeGen/X86/dwo-stats.ll new file mode 100644 index 0000000000000..fccfd55029c8b --- /dev/null +++ b/llvm/test/CodeGen/X86/dwo-stats.ll @@ -0,0 +1,30 @@ +; REQUIRES: asserts +; RUN: llc %s -mtriple=x86_64-linux --split-dwarf-file=%t.dwo --split-dwarf-output=%t.dwo --filetype=obj -o /dev/null -stats 2>&1 | FileCheck %s --check-prefixes=SPLIT,CHECK +; RUN: llc %s -mtriple=x86_64-linux --filetype=obj -o /dev/null -stats 2>&1 | FileCheck %s --check-prefixes=NOTSPLIT,CHECK + +; NOTSPLIT-NOT: {{[0-9]+}} elf-object-writer - Total size of sections written to .dwo file +; CHECK-DAG: {{[0-9]+}} elf-object-writer - Total size of debug info sections +; SPLIT-DAG: {{[0-9]+}} elf-object-writer - Total size of sections written to .dwo file +; NOTSPLIT-NOT: {{[0-9]+}} elf-object-writer - Total size of sections written to .dwo file + +define void @banana() !dbg !8 { + ret void, !dbg !12 +} + +!llvm.dbg.cu = !{!0} +!llvm.module.flags = !{!3, !4, !5, !6} +!llvm.ident = !{!7} + +!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 11.0.1", isOptimized: true, runtimeVersion: 0, splitDebugFilename: "test.dwo", emissionKind: FullDebug, enums: !2, splitDebugInlining: false, nameTableKind: GNU) +!1 = !DIFile(filename: "/tmp/test.c", directory: "/tmp") +!2 = !{} +!3 = !{i32 7, !"Dwarf Version", i32 4} +!4 = !{i32 2, !"Debug Info Version", i32 3} +!5 = !{i32 1, !"wchar_size", i32 4} +!6 = !{i32 7, !"PIC Level", i32 2} +!7 = !{!"clang version 11.0.1"} +!8 = distinct !DISubprogram(name: "banana", scope: !9, file: !9, line: 1, type: !10, scopeLine: 1, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !2) +!9 = !DIFile(filename: "test.c", directory: "/tmp") +!10 = !DISubroutineType(types: !11) +!11 = !{null} +!12 = !DILocation(line: 1, column: 20, scope: !8) diff --git a/llvm/test/CodeGen/X86/isel-fcmp-x87.ll b/llvm/test/CodeGen/X86/isel-fcmp-x87.ll new file mode 100644 index 0000000000000..8c2a53082649a --- /dev/null +++ b/llvm/test/CodeGen/X86/isel-fcmp-x87.ll @@ -0,0 +1,1496 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -mattr=+x87,-sse,-sse2 -verify-machineinstrs | FileCheck %s --check-prefixes=X64,SDAG-X64 +; RUN: llc < %s -fast-isel -fast-isel-abort=1 -mtriple=x86_64-apple-darwin10 -mattr=+x87,-sse,-sse2 -verify-machineinstrs | FileCheck %s --check-prefixes=X64,FAST-X64 +; RUN: llc < %s -global-isel -global-isel-abort=1 -mtriple=x86_64-apple-darwin10 -mattr=+x87,-sse,-sse2 -verify-machineinstrs | FileCheck %s --check-prefixes=GISEL-X64 +; RUN: llc < %s -mtriple=i686-apple-darwin10 -mattr=+x87,-sse,-sse2 -verify-machineinstrs | FileCheck %s --check-prefixes=X86,SDAG-X86 +; Allow fast-isel to fallback to selection dag on x86 +; RUN: llc < %s -fast-isel -mtriple=i686-apple-darwin10 -mattr=+x87,-sse,-sse2 -verify-machineinstrs | FileCheck %s --check-prefixes=X86,FAST-X86 +; RUN: llc < %s -global-isel -global-isel-abort=1 -mtriple=i686-apple-darwin10 -mattr=+x87,-sse,-sse2 -verify-machineinstrs | FileCheck %s --check-prefixes=GISEL-X86 + + define i1 @fcmp_x86_fp80_oeq(x86_fp80 %x, x86_fp80 %y) nounwind { +; X64-LABEL: fcmp_x86_fp80_oeq: +; X64: ## %bb.0: +; X64-NEXT: fldt {{[0-9]+}}(%rsp) +; X64-NEXT: fldt {{[0-9]+}}(%rsp) +; X64-NEXT: fucompi %st(1), %st +; X64-NEXT: fstp %st(0) +; X64-NEXT: setnp %cl +; X64-NEXT: sete %al +; X64-NEXT: andb %cl, %al +; X64-NEXT: retq +; 
+; GISEL-X64-LABEL: fcmp_x86_fp80_oeq: +; GISEL-X64: ## %bb.0: +; GISEL-X64-NEXT: fldt {{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: fldt {{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: fxch %st(1) +; GISEL-X64-NEXT: fucompi %st(1), %st +; GISEL-X64-NEXT: fstp %st(0) +; GISEL-X64-NEXT: sete %cl +; GISEL-X64-NEXT: setnp %al +; GISEL-X64-NEXT: andb %cl, %al +; GISEL-X64-NEXT: retq +; +; X86-LABEL: fcmp_x86_fp80_oeq: +; X86: ## %bb.0: +; X86-NEXT: subl $12, %esp +; X86-NEXT: fldt {{[0-9]+}}(%esp) +; X86-NEXT: fldt {{[0-9]+}}(%esp) +; X86-NEXT: fucompp +; X86-NEXT: fnstsw %ax +; X86-NEXT: ## kill: def $ah killed $ah killed $ax +; X86-NEXT: sahf +; X86-NEXT: setnp %cl +; X86-NEXT: sete %al +; X86-NEXT: andb %cl, %al +; X86-NEXT: addl $12, %esp +; X86-NEXT: retl +; +; GISEL-X86-LABEL: fcmp_x86_fp80_oeq: +; GISEL-X86: ## %bb.0: +; GISEL-X86-NEXT: subl $12, %esp +; GISEL-X86-NEXT: fldt {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: fldt {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: fxch %st(1) +; GISEL-X86-NEXT: fucompi %st(1), %st +; GISEL-X86-NEXT: fstp %st(0) +; GISEL-X86-NEXT: sete %cl +; GISEL-X86-NEXT: setnp %al +; GISEL-X86-NEXT: andb %cl, %al +; GISEL-X86-NEXT: addl $12, %esp +; GISEL-X86-NEXT: retl + %1 = fcmp oeq x86_fp80 %x, %y + ret i1 %1 + } + + define i1 @fcmp_x86_fp80_ogt(x86_fp80 %x, x86_fp80 %y) nounwind { +; X64-LABEL: fcmp_x86_fp80_ogt: +; X64: ## %bb.0: +; X64-NEXT: fldt {{[0-9]+}}(%rsp) +; X64-NEXT: fldt {{[0-9]+}}(%rsp) +; X64-NEXT: fucompi %st(1), %st +; X64-NEXT: fstp %st(0) +; X64-NEXT: seta %al +; X64-NEXT: retq +; +; GISEL-X64-LABEL: fcmp_x86_fp80_ogt: +; GISEL-X64: ## %bb.0: +; GISEL-X64-NEXT: fldt {{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: fldt {{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: fxch %st(1) +; GISEL-X64-NEXT: fucompi %st(1), %st +; GISEL-X64-NEXT: fstp %st(0) +; GISEL-X64-NEXT: seta %al +; GISEL-X64-NEXT: retq +; +; X86-LABEL: fcmp_x86_fp80_ogt: +; X86: ## %bb.0: +; X86-NEXT: subl $12, %esp +; X86-NEXT: fldt {{[0-9]+}}(%esp) +; X86-NEXT: fldt {{[0-9]+}}(%esp) +; X86-NEXT: fucompp +; X86-NEXT: fnstsw %ax +; X86-NEXT: ## kill: def $ah killed $ah killed $ax +; X86-NEXT: sahf +; X86-NEXT: seta %al +; X86-NEXT: addl $12, %esp +; X86-NEXT: retl +; +; GISEL-X86-LABEL: fcmp_x86_fp80_ogt: +; GISEL-X86: ## %bb.0: +; GISEL-X86-NEXT: subl $12, %esp +; GISEL-X86-NEXT: fldt {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: fldt {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: fxch %st(1) +; GISEL-X86-NEXT: fucompi %st(1), %st +; GISEL-X86-NEXT: fstp %st(0) +; GISEL-X86-NEXT: seta %al +; GISEL-X86-NEXT: addl $12, %esp +; GISEL-X86-NEXT: retl + %1 = fcmp ogt x86_fp80 %x, %y + ret i1 %1 + } + + define i1 @fcmp_x86_fp80_oge(x86_fp80 %x, x86_fp80 %y) nounwind { +; X64-LABEL: fcmp_x86_fp80_oge: +; X64: ## %bb.0: +; X64-NEXT: fldt {{[0-9]+}}(%rsp) +; X64-NEXT: fldt {{[0-9]+}}(%rsp) +; X64-NEXT: fucompi %st(1), %st +; X64-NEXT: fstp %st(0) +; X64-NEXT: setae %al +; X64-NEXT: retq +; +; GISEL-X64-LABEL: fcmp_x86_fp80_oge: +; GISEL-X64: ## %bb.0: +; GISEL-X64-NEXT: fldt {{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: fldt {{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: fxch %st(1) +; GISEL-X64-NEXT: fucompi %st(1), %st +; GISEL-X64-NEXT: fstp %st(0) +; GISEL-X64-NEXT: setae %al +; GISEL-X64-NEXT: retq +; +; X86-LABEL: fcmp_x86_fp80_oge: +; X86: ## %bb.0: +; X86-NEXT: subl $12, %esp +; X86-NEXT: fldt {{[0-9]+}}(%esp) +; X86-NEXT: fldt {{[0-9]+}}(%esp) +; X86-NEXT: fucompp +; X86-NEXT: fnstsw %ax +; X86-NEXT: ## kill: def $ah killed $ah killed $ax +; X86-NEXT: sahf +; X86-NEXT: setae %al +; X86-NEXT: addl $12, %esp +; X86-NEXT: retl +; +; GISEL-X86-LABEL: fcmp_x86_fp80_oge: +; GISEL-X86: ## 
%bb.0: +; GISEL-X86-NEXT: subl $12, %esp +; GISEL-X86-NEXT: fldt {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: fldt {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: fxch %st(1) +; GISEL-X86-NEXT: fucompi %st(1), %st +; GISEL-X86-NEXT: fstp %st(0) +; GISEL-X86-NEXT: setae %al +; GISEL-X86-NEXT: addl $12, %esp +; GISEL-X86-NEXT: retl + %1 = fcmp oge x86_fp80 %x, %y + ret i1 %1 + } + + define i1 @fcmp_x86_fp80_olt(x86_fp80 %x, x86_fp80 %y) nounwind { +; SDAG-X64-LABEL: fcmp_x86_fp80_olt: +; SDAG-X64: ## %bb.0: +; SDAG-X64-NEXT: fldt {{[0-9]+}}(%rsp) +; SDAG-X64-NEXT: fldt {{[0-9]+}}(%rsp) +; SDAG-X64-NEXT: fucompi %st(1), %st +; SDAG-X64-NEXT: fstp %st(0) +; SDAG-X64-NEXT: seta %al +; SDAG-X64-NEXT: retq +; +; FAST-X64-LABEL: fcmp_x86_fp80_olt: +; FAST-X64: ## %bb.0: +; FAST-X64-NEXT: fldt {{[0-9]+}}(%rsp) +; FAST-X64-NEXT: fldt {{[0-9]+}}(%rsp) +; FAST-X64-NEXT: fxch %st(1) +; FAST-X64-NEXT: fucompi %st(1), %st +; FAST-X64-NEXT: fstp %st(0) +; FAST-X64-NEXT: seta %al +; FAST-X64-NEXT: retq +; +; GISEL-X64-LABEL: fcmp_x86_fp80_olt: +; GISEL-X64: ## %bb.0: +; GISEL-X64-NEXT: fldt {{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: fldt {{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: fucompi %st(1), %st +; GISEL-X64-NEXT: fstp %st(0) +; GISEL-X64-NEXT: seta %al +; GISEL-X64-NEXT: retq +; +; SDAG-X86-LABEL: fcmp_x86_fp80_olt: +; SDAG-X86: ## %bb.0: +; SDAG-X86-NEXT: subl $12, %esp +; SDAG-X86-NEXT: fldt {{[0-9]+}}(%esp) +; SDAG-X86-NEXT: fldt {{[0-9]+}}(%esp) +; SDAG-X86-NEXT: fucompp +; SDAG-X86-NEXT: fnstsw %ax +; SDAG-X86-NEXT: ## kill: def $ah killed $ah killed $ax +; SDAG-X86-NEXT: sahf +; SDAG-X86-NEXT: seta %al +; SDAG-X86-NEXT: addl $12, %esp +; SDAG-X86-NEXT: retl +; +; FAST-X86-LABEL: fcmp_x86_fp80_olt: +; FAST-X86: ## %bb.0: +; FAST-X86-NEXT: subl $12, %esp +; FAST-X86-NEXT: fldt {{[0-9]+}}(%esp) +; FAST-X86-NEXT: fldt {{[0-9]+}}(%esp) +; FAST-X86-NEXT: fxch %st(1) +; FAST-X86-NEXT: fucompp +; FAST-X86-NEXT: fnstsw %ax +; FAST-X86-NEXT: ## kill: def $ah killed $ah killed $ax +; FAST-X86-NEXT: sahf +; FAST-X86-NEXT: seta %al +; FAST-X86-NEXT: addl $12, %esp +; FAST-X86-NEXT: retl +; +; GISEL-X86-LABEL: fcmp_x86_fp80_olt: +; GISEL-X86: ## %bb.0: +; GISEL-X86-NEXT: subl $12, %esp +; GISEL-X86-NEXT: fldt {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: fldt {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: fucompi %st(1), %st +; GISEL-X86-NEXT: fstp %st(0) +; GISEL-X86-NEXT: seta %al +; GISEL-X86-NEXT: addl $12, %esp +; GISEL-X86-NEXT: retl + %1 = fcmp olt x86_fp80 %x, %y + ret i1 %1 + } + + define i1 @fcmp_x86_fp80_ole(x86_fp80 %x, x86_fp80 %y) nounwind { +; SDAG-X64-LABEL: fcmp_x86_fp80_ole: +; SDAG-X64: ## %bb.0: +; SDAG-X64-NEXT: fldt {{[0-9]+}}(%rsp) +; SDAG-X64-NEXT: fldt {{[0-9]+}}(%rsp) +; SDAG-X64-NEXT: fucompi %st(1), %st +; SDAG-X64-NEXT: fstp %st(0) +; SDAG-X64-NEXT: setae %al +; SDAG-X64-NEXT: retq +; +; FAST-X64-LABEL: fcmp_x86_fp80_ole: +; FAST-X64: ## %bb.0: +; FAST-X64-NEXT: fldt {{[0-9]+}}(%rsp) +; FAST-X64-NEXT: fldt {{[0-9]+}}(%rsp) +; FAST-X64-NEXT: fxch %st(1) +; FAST-X64-NEXT: fucompi %st(1), %st +; FAST-X64-NEXT: fstp %st(0) +; FAST-X64-NEXT: setae %al +; FAST-X64-NEXT: retq +; +; GISEL-X64-LABEL: fcmp_x86_fp80_ole: +; GISEL-X64: ## %bb.0: +; GISEL-X64-NEXT: fldt {{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: fldt {{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: fucompi %st(1), %st +; GISEL-X64-NEXT: fstp %st(0) +; GISEL-X64-NEXT: setae %al +; GISEL-X64-NEXT: retq +; +; SDAG-X86-LABEL: fcmp_x86_fp80_ole: +; SDAG-X86: ## %bb.0: +; SDAG-X86-NEXT: subl $12, %esp +; SDAG-X86-NEXT: fldt {{[0-9]+}}(%esp) +; SDAG-X86-NEXT: fldt {{[0-9]+}}(%esp) +; SDAG-X86-NEXT: fucompp 
+; SDAG-X86-NEXT: fnstsw %ax +; SDAG-X86-NEXT: ## kill: def $ah killed $ah killed $ax +; SDAG-X86-NEXT: sahf +; SDAG-X86-NEXT: setae %al +; SDAG-X86-NEXT: addl $12, %esp +; SDAG-X86-NEXT: retl +; +; FAST-X86-LABEL: fcmp_x86_fp80_ole: +; FAST-X86: ## %bb.0: +; FAST-X86-NEXT: subl $12, %esp +; FAST-X86-NEXT: fldt {{[0-9]+}}(%esp) +; FAST-X86-NEXT: fldt {{[0-9]+}}(%esp) +; FAST-X86-NEXT: fxch %st(1) +; FAST-X86-NEXT: fucompp +; FAST-X86-NEXT: fnstsw %ax +; FAST-X86-NEXT: ## kill: def $ah killed $ah killed $ax +; FAST-X86-NEXT: sahf +; FAST-X86-NEXT: setae %al +; FAST-X86-NEXT: addl $12, %esp +; FAST-X86-NEXT: retl +; +; GISEL-X86-LABEL: fcmp_x86_fp80_ole: +; GISEL-X86: ## %bb.0: +; GISEL-X86-NEXT: subl $12, %esp +; GISEL-X86-NEXT: fldt {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: fldt {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: fucompi %st(1), %st +; GISEL-X86-NEXT: fstp %st(0) +; GISEL-X86-NEXT: setae %al +; GISEL-X86-NEXT: addl $12, %esp +; GISEL-X86-NEXT: retl + %1 = fcmp ole x86_fp80 %x, %y + ret i1 %1 + } + + define i1 @fcmp_x86_fp80_one(x86_fp80 %x, x86_fp80 %y) nounwind { +; X64-LABEL: fcmp_x86_fp80_one: +; X64: ## %bb.0: +; X64-NEXT: fldt {{[0-9]+}}(%rsp) +; X64-NEXT: fldt {{[0-9]+}}(%rsp) +; X64-NEXT: fucompi %st(1), %st +; X64-NEXT: fstp %st(0) +; X64-NEXT: setne %al +; X64-NEXT: retq +; +; GISEL-X64-LABEL: fcmp_x86_fp80_one: +; GISEL-X64: ## %bb.0: +; GISEL-X64-NEXT: fldt {{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: fldt {{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: fxch %st(1) +; GISEL-X64-NEXT: fucompi %st(1), %st +; GISEL-X64-NEXT: fstp %st(0) +; GISEL-X64-NEXT: setne %al +; GISEL-X64-NEXT: retq +; +; X86-LABEL: fcmp_x86_fp80_one: +; X86: ## %bb.0: +; X86-NEXT: subl $12, %esp +; X86-NEXT: fldt {{[0-9]+}}(%esp) +; X86-NEXT: fldt {{[0-9]+}}(%esp) +; X86-NEXT: fucompp +; X86-NEXT: fnstsw %ax +; X86-NEXT: ## kill: def $ah killed $ah killed $ax +; X86-NEXT: sahf +; X86-NEXT: setne %al +; X86-NEXT: addl $12, %esp +; X86-NEXT: retl +; +; GISEL-X86-LABEL: fcmp_x86_fp80_one: +; GISEL-X86: ## %bb.0: +; GISEL-X86-NEXT: subl $12, %esp +; GISEL-X86-NEXT: fldt {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: fldt {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: fxch %st(1) +; GISEL-X86-NEXT: fucompi %st(1), %st +; GISEL-X86-NEXT: fstp %st(0) +; GISEL-X86-NEXT: setne %al +; GISEL-X86-NEXT: addl $12, %esp +; GISEL-X86-NEXT: retl + %1 = fcmp one x86_fp80 %x, %y + ret i1 %1 + } + + define i1 @fcmp_x86_fp80_ord(x86_fp80 %x, x86_fp80 %y) nounwind { +; X64-LABEL: fcmp_x86_fp80_ord: +; X64: ## %bb.0: +; X64-NEXT: fldt {{[0-9]+}}(%rsp) +; X64-NEXT: fldt {{[0-9]+}}(%rsp) +; X64-NEXT: fucompi %st(1), %st +; X64-NEXT: fstp %st(0) +; X64-NEXT: setnp %al +; X64-NEXT: retq +; +; GISEL-X64-LABEL: fcmp_x86_fp80_ord: +; GISEL-X64: ## %bb.0: +; GISEL-X64-NEXT: fldt {{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: fldt {{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: fxch %st(1) +; GISEL-X64-NEXT: fucompi %st(1), %st +; GISEL-X64-NEXT: fstp %st(0) +; GISEL-X64-NEXT: setnp %al +; GISEL-X64-NEXT: retq +; +; X86-LABEL: fcmp_x86_fp80_ord: +; X86: ## %bb.0: +; X86-NEXT: subl $12, %esp +; X86-NEXT: fldt {{[0-9]+}}(%esp) +; X86-NEXT: fldt {{[0-9]+}}(%esp) +; X86-NEXT: fucompp +; X86-NEXT: fnstsw %ax +; X86-NEXT: ## kill: def $ah killed $ah killed $ax +; X86-NEXT: sahf +; X86-NEXT: setnp %al +; X86-NEXT: addl $12, %esp +; X86-NEXT: retl +; +; GISEL-X86-LABEL: fcmp_x86_fp80_ord: +; GISEL-X86: ## %bb.0: +; GISEL-X86-NEXT: subl $12, %esp +; GISEL-X86-NEXT: fldt {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: fldt {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: fxch %st(1) +; GISEL-X86-NEXT: fucompi %st(1), %st +; GISEL-X86-NEXT: 
fstp %st(0) +; GISEL-X86-NEXT: setnp %al +; GISEL-X86-NEXT: addl $12, %esp +; GISEL-X86-NEXT: retl + %1 = fcmp ord x86_fp80 %x, %y + ret i1 %1 + } + + define i1 @fcmp_x86_fp80_uno(x86_fp80 %x, x86_fp80 %y) nounwind { +; X64-LABEL: fcmp_x86_fp80_uno: +; X64: ## %bb.0: +; X64-NEXT: fldt {{[0-9]+}}(%rsp) +; X64-NEXT: fldt {{[0-9]+}}(%rsp) +; X64-NEXT: fucompi %st(1), %st +; X64-NEXT: fstp %st(0) +; X64-NEXT: setp %al +; X64-NEXT: retq +; +; GISEL-X64-LABEL: fcmp_x86_fp80_uno: +; GISEL-X64: ## %bb.0: +; GISEL-X64-NEXT: fldt {{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: fldt {{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: fxch %st(1) +; GISEL-X64-NEXT: fucompi %st(1), %st +; GISEL-X64-NEXT: fstp %st(0) +; GISEL-X64-NEXT: setp %al +; GISEL-X64-NEXT: retq +; +; X86-LABEL: fcmp_x86_fp80_uno: +; X86: ## %bb.0: +; X86-NEXT: subl $12, %esp +; X86-NEXT: fldt {{[0-9]+}}(%esp) +; X86-NEXT: fldt {{[0-9]+}}(%esp) +; X86-NEXT: fucompp +; X86-NEXT: fnstsw %ax +; X86-NEXT: ## kill: def $ah killed $ah killed $ax +; X86-NEXT: sahf +; X86-NEXT: setp %al +; X86-NEXT: addl $12, %esp +; X86-NEXT: retl +; +; GISEL-X86-LABEL: fcmp_x86_fp80_uno: +; GISEL-X86: ## %bb.0: +; GISEL-X86-NEXT: subl $12, %esp +; GISEL-X86-NEXT: fldt {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: fldt {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: fxch %st(1) +; GISEL-X86-NEXT: fucompi %st(1), %st +; GISEL-X86-NEXT: fstp %st(0) +; GISEL-X86-NEXT: setp %al +; GISEL-X86-NEXT: addl $12, %esp +; GISEL-X86-NEXT: retl + %1 = fcmp uno x86_fp80 %x, %y + ret i1 %1 + } + + define i1 @fcmp_x86_fp80_ueq(x86_fp80 %x, x86_fp80 %y) nounwind { +; X64-LABEL: fcmp_x86_fp80_ueq: +; X64: ## %bb.0: +; X64-NEXT: fldt {{[0-9]+}}(%rsp) +; X64-NEXT: fldt {{[0-9]+}}(%rsp) +; X64-NEXT: fucompi %st(1), %st +; X64-NEXT: fstp %st(0) +; X64-NEXT: sete %al +; X64-NEXT: retq +; +; GISEL-X64-LABEL: fcmp_x86_fp80_ueq: +; GISEL-X64: ## %bb.0: +; GISEL-X64-NEXT: fldt {{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: fldt {{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: fxch %st(1) +; GISEL-X64-NEXT: fucompi %st(1), %st +; GISEL-X64-NEXT: fstp %st(0) +; GISEL-X64-NEXT: sete %al +; GISEL-X64-NEXT: retq +; +; X86-LABEL: fcmp_x86_fp80_ueq: +; X86: ## %bb.0: +; X86-NEXT: subl $12, %esp +; X86-NEXT: fldt {{[0-9]+}}(%esp) +; X86-NEXT: fldt {{[0-9]+}}(%esp) +; X86-NEXT: fucompp +; X86-NEXT: fnstsw %ax +; X86-NEXT: ## kill: def $ah killed $ah killed $ax +; X86-NEXT: sahf +; X86-NEXT: sete %al +; X86-NEXT: addl $12, %esp +; X86-NEXT: retl +; +; GISEL-X86-LABEL: fcmp_x86_fp80_ueq: +; GISEL-X86: ## %bb.0: +; GISEL-X86-NEXT: subl $12, %esp +; GISEL-X86-NEXT: fldt {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: fldt {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: fxch %st(1) +; GISEL-X86-NEXT: fucompi %st(1), %st +; GISEL-X86-NEXT: fstp %st(0) +; GISEL-X86-NEXT: sete %al +; GISEL-X86-NEXT: addl $12, %esp +; GISEL-X86-NEXT: retl + %1 = fcmp ueq x86_fp80 %x, %y + ret i1 %1 + } + + define i1 @fcmp_x86_fp80_ugt(x86_fp80 %x, x86_fp80 %y) nounwind { +; SDAG-X64-LABEL: fcmp_x86_fp80_ugt: +; SDAG-X64: ## %bb.0: +; SDAG-X64-NEXT: fldt {{[0-9]+}}(%rsp) +; SDAG-X64-NEXT: fldt {{[0-9]+}}(%rsp) +; SDAG-X64-NEXT: fucompi %st(1), %st +; SDAG-X64-NEXT: fstp %st(0) +; SDAG-X64-NEXT: setb %al +; SDAG-X64-NEXT: retq +; +; FAST-X64-LABEL: fcmp_x86_fp80_ugt: +; FAST-X64: ## %bb.0: +; FAST-X64-NEXT: fldt {{[0-9]+}}(%rsp) +; FAST-X64-NEXT: fldt {{[0-9]+}}(%rsp) +; FAST-X64-NEXT: fxch %st(1) +; FAST-X64-NEXT: fucompi %st(1), %st +; FAST-X64-NEXT: fstp %st(0) +; FAST-X64-NEXT: setb %al +; FAST-X64-NEXT: retq +; +; GISEL-X64-LABEL: fcmp_x86_fp80_ugt: +; GISEL-X64: ## %bb.0: +; GISEL-X64-NEXT: fldt 
{{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: fldt {{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: fucompi %st(1), %st +; GISEL-X64-NEXT: fstp %st(0) +; GISEL-X64-NEXT: setb %al +; GISEL-X64-NEXT: retq +; +; SDAG-X86-LABEL: fcmp_x86_fp80_ugt: +; SDAG-X86: ## %bb.0: +; SDAG-X86-NEXT: subl $12, %esp +; SDAG-X86-NEXT: fldt {{[0-9]+}}(%esp) +; SDAG-X86-NEXT: fldt {{[0-9]+}}(%esp) +; SDAG-X86-NEXT: fucompp +; SDAG-X86-NEXT: fnstsw %ax +; SDAG-X86-NEXT: ## kill: def $ah killed $ah killed $ax +; SDAG-X86-NEXT: sahf +; SDAG-X86-NEXT: setb %al +; SDAG-X86-NEXT: addl $12, %esp +; SDAG-X86-NEXT: retl +; +; FAST-X86-LABEL: fcmp_x86_fp80_ugt: +; FAST-X86: ## %bb.0: +; FAST-X86-NEXT: subl $12, %esp +; FAST-X86-NEXT: fldt {{[0-9]+}}(%esp) +; FAST-X86-NEXT: fldt {{[0-9]+}}(%esp) +; FAST-X86-NEXT: fxch %st(1) +; FAST-X86-NEXT: fucompp +; FAST-X86-NEXT: fnstsw %ax +; FAST-X86-NEXT: ## kill: def $ah killed $ah killed $ax +; FAST-X86-NEXT: sahf +; FAST-X86-NEXT: setb %al +; FAST-X86-NEXT: addl $12, %esp +; FAST-X86-NEXT: retl +; +; GISEL-X86-LABEL: fcmp_x86_fp80_ugt: +; GISEL-X86: ## %bb.0: +; GISEL-X86-NEXT: subl $12, %esp +; GISEL-X86-NEXT: fldt {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: fldt {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: fucompi %st(1), %st +; GISEL-X86-NEXT: fstp %st(0) +; GISEL-X86-NEXT: setb %al +; GISEL-X86-NEXT: addl $12, %esp +; GISEL-X86-NEXT: retl + %1 = fcmp ugt x86_fp80 %x, %y + ret i1 %1 + } + + define i1 @fcmp_x86_fp80_uge(x86_fp80 %x, x86_fp80 %y) nounwind { +; SDAG-X64-LABEL: fcmp_x86_fp80_uge: +; SDAG-X64: ## %bb.0: +; SDAG-X64-NEXT: fldt {{[0-9]+}}(%rsp) +; SDAG-X64-NEXT: fldt {{[0-9]+}}(%rsp) +; SDAG-X64-NEXT: fucompi %st(1), %st +; SDAG-X64-NEXT: fstp %st(0) +; SDAG-X64-NEXT: setbe %al +; SDAG-X64-NEXT: retq +; +; FAST-X64-LABEL: fcmp_x86_fp80_uge: +; FAST-X64: ## %bb.0: +; FAST-X64-NEXT: fldt {{[0-9]+}}(%rsp) +; FAST-X64-NEXT: fldt {{[0-9]+}}(%rsp) +; FAST-X64-NEXT: fxch %st(1) +; FAST-X64-NEXT: fucompi %st(1), %st +; FAST-X64-NEXT: fstp %st(0) +; FAST-X64-NEXT: setbe %al +; FAST-X64-NEXT: retq +; +; GISEL-X64-LABEL: fcmp_x86_fp80_uge: +; GISEL-X64: ## %bb.0: +; GISEL-X64-NEXT: fldt {{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: fldt {{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: fucompi %st(1), %st +; GISEL-X64-NEXT: fstp %st(0) +; GISEL-X64-NEXT: setbe %al +; GISEL-X64-NEXT: retq +; +; SDAG-X86-LABEL: fcmp_x86_fp80_uge: +; SDAG-X86: ## %bb.0: +; SDAG-X86-NEXT: subl $12, %esp +; SDAG-X86-NEXT: fldt {{[0-9]+}}(%esp) +; SDAG-X86-NEXT: fldt {{[0-9]+}}(%esp) +; SDAG-X86-NEXT: fucompp +; SDAG-X86-NEXT: fnstsw %ax +; SDAG-X86-NEXT: ## kill: def $ah killed $ah killed $ax +; SDAG-X86-NEXT: sahf +; SDAG-X86-NEXT: setbe %al +; SDAG-X86-NEXT: addl $12, %esp +; SDAG-X86-NEXT: retl +; +; FAST-X86-LABEL: fcmp_x86_fp80_uge: +; FAST-X86: ## %bb.0: +; FAST-X86-NEXT: subl $12, %esp +; FAST-X86-NEXT: fldt {{[0-9]+}}(%esp) +; FAST-X86-NEXT: fldt {{[0-9]+}}(%esp) +; FAST-X86-NEXT: fxch %st(1) +; FAST-X86-NEXT: fucompp +; FAST-X86-NEXT: fnstsw %ax +; FAST-X86-NEXT: ## kill: def $ah killed $ah killed $ax +; FAST-X86-NEXT: sahf +; FAST-X86-NEXT: setbe %al +; FAST-X86-NEXT: addl $12, %esp +; FAST-X86-NEXT: retl +; +; GISEL-X86-LABEL: fcmp_x86_fp80_uge: +; GISEL-X86: ## %bb.0: +; GISEL-X86-NEXT: subl $12, %esp +; GISEL-X86-NEXT: fldt {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: fldt {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: fucompi %st(1), %st +; GISEL-X86-NEXT: fstp %st(0) +; GISEL-X86-NEXT: setbe %al +; GISEL-X86-NEXT: addl $12, %esp +; GISEL-X86-NEXT: retl + %1 = fcmp uge x86_fp80 %x, %y + ret i1 %1 + } + + define i1 @fcmp_x86_fp80_ult(x86_fp80 %x, x86_fp80 %y) 
nounwind { +; X64-LABEL: fcmp_x86_fp80_ult: +; X64: ## %bb.0: +; X64-NEXT: fldt {{[0-9]+}}(%rsp) +; X64-NEXT: fldt {{[0-9]+}}(%rsp) +; X64-NEXT: fucompi %st(1), %st +; X64-NEXT: fstp %st(0) +; X64-NEXT: setb %al +; X64-NEXT: retq +; +; GISEL-X64-LABEL: fcmp_x86_fp80_ult: +; GISEL-X64: ## %bb.0: +; GISEL-X64-NEXT: fldt {{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: fldt {{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: fxch %st(1) +; GISEL-X64-NEXT: fucompi %st(1), %st +; GISEL-X64-NEXT: fstp %st(0) +; GISEL-X64-NEXT: setb %al +; GISEL-X64-NEXT: retq +; +; X86-LABEL: fcmp_x86_fp80_ult: +; X86: ## %bb.0: +; X86-NEXT: subl $12, %esp +; X86-NEXT: fldt {{[0-9]+}}(%esp) +; X86-NEXT: fldt {{[0-9]+}}(%esp) +; X86-NEXT: fucompp +; X86-NEXT: fnstsw %ax +; X86-NEXT: ## kill: def $ah killed $ah killed $ax +; X86-NEXT: sahf +; X86-NEXT: setb %al +; X86-NEXT: addl $12, %esp +; X86-NEXT: retl +; +; GISEL-X86-LABEL: fcmp_x86_fp80_ult: +; GISEL-X86: ## %bb.0: +; GISEL-X86-NEXT: subl $12, %esp +; GISEL-X86-NEXT: fldt {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: fldt {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: fxch %st(1) +; GISEL-X86-NEXT: fucompi %st(1), %st +; GISEL-X86-NEXT: fstp %st(0) +; GISEL-X86-NEXT: setb %al +; GISEL-X86-NEXT: addl $12, %esp +; GISEL-X86-NEXT: retl + %1 = fcmp ult x86_fp80 %x, %y + ret i1 %1 + } + + define i1 @fcmp_x86_fp80_ule(x86_fp80 %x, x86_fp80 %y) nounwind { +; X64-LABEL: fcmp_x86_fp80_ule: +; X64: ## %bb.0: +; X64-NEXT: fldt {{[0-9]+}}(%rsp) +; X64-NEXT: fldt {{[0-9]+}}(%rsp) +; X64-NEXT: fucompi %st(1), %st +; X64-NEXT: fstp %st(0) +; X64-NEXT: setbe %al +; X64-NEXT: retq +; +; GISEL-X64-LABEL: fcmp_x86_fp80_ule: +; GISEL-X64: ## %bb.0: +; GISEL-X64-NEXT: fldt {{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: fldt {{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: fxch %st(1) +; GISEL-X64-NEXT: fucompi %st(1), %st +; GISEL-X64-NEXT: fstp %st(0) +; GISEL-X64-NEXT: setbe %al +; GISEL-X64-NEXT: retq +; +; X86-LABEL: fcmp_x86_fp80_ule: +; X86: ## %bb.0: +; X86-NEXT: subl $12, %esp +; X86-NEXT: fldt {{[0-9]+}}(%esp) +; X86-NEXT: fldt {{[0-9]+}}(%esp) +; X86-NEXT: fucompp +; X86-NEXT: fnstsw %ax +; X86-NEXT: ## kill: def $ah killed $ah killed $ax +; X86-NEXT: sahf +; X86-NEXT: setbe %al +; X86-NEXT: addl $12, %esp +; X86-NEXT: retl +; +; GISEL-X86-LABEL: fcmp_x86_fp80_ule: +; GISEL-X86: ## %bb.0: +; GISEL-X86-NEXT: subl $12, %esp +; GISEL-X86-NEXT: fldt {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: fldt {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: fxch %st(1) +; GISEL-X86-NEXT: fucompi %st(1), %st +; GISEL-X86-NEXT: fstp %st(0) +; GISEL-X86-NEXT: setbe %al +; GISEL-X86-NEXT: addl $12, %esp +; GISEL-X86-NEXT: retl + %1 = fcmp ule x86_fp80 %x, %y + ret i1 %1 + } + + define i1 @fcmp_x86_fp80_une(x86_fp80 %x, x86_fp80 %y) nounwind { +; X64-LABEL: fcmp_x86_fp80_une: +; X64: ## %bb.0: +; X64-NEXT: fldt {{[0-9]+}}(%rsp) +; X64-NEXT: fldt {{[0-9]+}}(%rsp) +; X64-NEXT: fucompi %st(1), %st +; X64-NEXT: fstp %st(0) +; X64-NEXT: setp %cl +; X64-NEXT: setne %al +; X64-NEXT: orb %cl, %al +; X64-NEXT: retq +; +; GISEL-X64-LABEL: fcmp_x86_fp80_une: +; GISEL-X64: ## %bb.0: +; GISEL-X64-NEXT: fldt {{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: fldt {{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: fxch %st(1) +; GISEL-X64-NEXT: fucompi %st(1), %st +; GISEL-X64-NEXT: fstp %st(0) +; GISEL-X64-NEXT: setne %cl +; GISEL-X64-NEXT: setp %al +; GISEL-X64-NEXT: orb %cl, %al +; GISEL-X64-NEXT: retq +; +; X86-LABEL: fcmp_x86_fp80_une: +; X86: ## %bb.0: +; X86-NEXT: subl $12, %esp +; X86-NEXT: fldt {{[0-9]+}}(%esp) +; X86-NEXT: fldt {{[0-9]+}}(%esp) +; X86-NEXT: fucompp +; X86-NEXT: fnstsw %ax +; X86-NEXT: ## kill: def 
$ah killed $ah killed $ax +; X86-NEXT: sahf +; X86-NEXT: setp %cl +; X86-NEXT: setne %al +; X86-NEXT: orb %cl, %al +; X86-NEXT: addl $12, %esp +; X86-NEXT: retl +; +; GISEL-X86-LABEL: fcmp_x86_fp80_une: +; GISEL-X86: ## %bb.0: +; GISEL-X86-NEXT: subl $12, %esp +; GISEL-X86-NEXT: fldt {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: fldt {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: fxch %st(1) +; GISEL-X86-NEXT: fucompi %st(1), %st +; GISEL-X86-NEXT: fstp %st(0) +; GISEL-X86-NEXT: setne %cl +; GISEL-X86-NEXT: setp %al +; GISEL-X86-NEXT: orb %cl, %al +; GISEL-X86-NEXT: addl $12, %esp +; GISEL-X86-NEXT: retl + %1 = fcmp une x86_fp80 %x, %y + ret i1 %1 + } + + define i1 @fcmp_float_oeq(float %x, float %y) { +; X64-LABEL: fcmp_float_oeq: +; X64: ## %bb.0: +; X64-NEXT: flds {{[0-9]+}}(%rsp) +; X64-NEXT: flds {{[0-9]+}}(%rsp) +; X64-NEXT: fucompi %st(1), %st +; X64-NEXT: fstp %st(0) +; X64-NEXT: setnp %cl +; X64-NEXT: sete %al +; X64-NEXT: andb %cl, %al +; X64-NEXT: retq +; +; GISEL-X64-LABEL: fcmp_float_oeq: +; GISEL-X64: ## %bb.0: +; GISEL-X64-NEXT: flds {{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: flds {{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: fxch %st(1) +; GISEL-X64-NEXT: fucompi %st(1), %st +; GISEL-X64-NEXT: fstp %st(0) +; GISEL-X64-NEXT: sete %cl +; GISEL-X64-NEXT: setnp %al +; GISEL-X64-NEXT: andb %cl, %al +; GISEL-X64-NEXT: retq +; +; X86-LABEL: fcmp_float_oeq: +; X86: ## %bb.0: +; X86-NEXT: flds {{[0-9]+}}(%esp) +; X86-NEXT: flds {{[0-9]+}}(%esp) +; X86-NEXT: fucompp +; X86-NEXT: fnstsw %ax +; X86-NEXT: ## kill: def $ah killed $ah killed $ax +; X86-NEXT: sahf +; X86-NEXT: setnp %cl +; X86-NEXT: sete %al +; X86-NEXT: andb %cl, %al +; X86-NEXT: retl +; +; GISEL-X86-LABEL: fcmp_float_oeq: +; GISEL-X86: ## %bb.0: +; GISEL-X86-NEXT: flds {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: flds {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: fxch %st(1) +; GISEL-X86-NEXT: fucompi %st(1), %st +; GISEL-X86-NEXT: fstp %st(0) +; GISEL-X86-NEXT: sete %cl +; GISEL-X86-NEXT: setnp %al +; GISEL-X86-NEXT: andb %cl, %al +; GISEL-X86-NEXT: retl + %1 = fcmp oeq float %x, %y + ret i1 %1 + } + + define i1 @fcmp_float_ogt(float %x, float %y) { +; X64-LABEL: fcmp_float_ogt: +; X64: ## %bb.0: +; X64-NEXT: flds {{[0-9]+}}(%rsp) +; X64-NEXT: flds {{[0-9]+}}(%rsp) +; X64-NEXT: fucompi %st(1), %st +; X64-NEXT: fstp %st(0) +; X64-NEXT: seta %al +; X64-NEXT: retq +; +; GISEL-X64-LABEL: fcmp_float_ogt: +; GISEL-X64: ## %bb.0: +; GISEL-X64-NEXT: flds {{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: flds {{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: fxch %st(1) +; GISEL-X64-NEXT: fucompi %st(1), %st +; GISEL-X64-NEXT: fstp %st(0) +; GISEL-X64-NEXT: seta %al +; GISEL-X64-NEXT: retq +; +; X86-LABEL: fcmp_float_ogt: +; X86: ## %bb.0: +; X86-NEXT: flds {{[0-9]+}}(%esp) +; X86-NEXT: flds {{[0-9]+}}(%esp) +; X86-NEXT: fucompp +; X86-NEXT: fnstsw %ax +; X86-NEXT: ## kill: def $ah killed $ah killed $ax +; X86-NEXT: sahf +; X86-NEXT: seta %al +; X86-NEXT: retl +; +; GISEL-X86-LABEL: fcmp_float_ogt: +; GISEL-X86: ## %bb.0: +; GISEL-X86-NEXT: flds {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: flds {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: fxch %st(1) +; GISEL-X86-NEXT: fucompi %st(1), %st +; GISEL-X86-NEXT: fstp %st(0) +; GISEL-X86-NEXT: seta %al +; GISEL-X86-NEXT: retl + %1 = fcmp ogt float %x, %y + ret i1 %1 + } + + define i1 @fcmp_float_oge(float %x, float %y) { +; X64-LABEL: fcmp_float_oge: +; X64: ## %bb.0: +; X64-NEXT: flds {{[0-9]+}}(%rsp) +; X64-NEXT: flds {{[0-9]+}}(%rsp) +; X64-NEXT: fucompi %st(1), %st +; X64-NEXT: fstp %st(0) +; X64-NEXT: setae %al +; X64-NEXT: retq +; +; GISEL-X64-LABEL: fcmp_float_oge: +; GISEL-X64: 
## %bb.0: +; GISEL-X64-NEXT: flds {{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: flds {{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: fxch %st(1) +; GISEL-X64-NEXT: fucompi %st(1), %st +; GISEL-X64-NEXT: fstp %st(0) +; GISEL-X64-NEXT: setae %al +; GISEL-X64-NEXT: retq +; +; X86-LABEL: fcmp_float_oge: +; X86: ## %bb.0: +; X86-NEXT: flds {{[0-9]+}}(%esp) +; X86-NEXT: flds {{[0-9]+}}(%esp) +; X86-NEXT: fucompp +; X86-NEXT: fnstsw %ax +; X86-NEXT: ## kill: def $ah killed $ah killed $ax +; X86-NEXT: sahf +; X86-NEXT: setae %al +; X86-NEXT: retl +; +; GISEL-X86-LABEL: fcmp_float_oge: +; GISEL-X86: ## %bb.0: +; GISEL-X86-NEXT: flds {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: flds {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: fxch %st(1) +; GISEL-X86-NEXT: fucompi %st(1), %st +; GISEL-X86-NEXT: fstp %st(0) +; GISEL-X86-NEXT: setae %al +; GISEL-X86-NEXT: retl + %1 = fcmp oge float %x, %y + ret i1 %1 + } + + define i1 @fcmp_float_olt(float %x, float %y) { +; SDAG-X64-LABEL: fcmp_float_olt: +; SDAG-X64: ## %bb.0: +; SDAG-X64-NEXT: flds {{[0-9]+}}(%rsp) +; SDAG-X64-NEXT: flds {{[0-9]+}}(%rsp) +; SDAG-X64-NEXT: fucompi %st(1), %st +; SDAG-X64-NEXT: fstp %st(0) +; SDAG-X64-NEXT: seta %al +; SDAG-X64-NEXT: retq +; +; FAST-X64-LABEL: fcmp_float_olt: +; FAST-X64: ## %bb.0: +; FAST-X64-NEXT: flds {{[0-9]+}}(%rsp) +; FAST-X64-NEXT: flds {{[0-9]+}}(%rsp) +; FAST-X64-NEXT: fxch %st(1) +; FAST-X64-NEXT: fucompi %st(1), %st +; FAST-X64-NEXT: fstp %st(0) +; FAST-X64-NEXT: seta %al +; FAST-X64-NEXT: retq +; +; GISEL-X64-LABEL: fcmp_float_olt: +; GISEL-X64: ## %bb.0: +; GISEL-X64-NEXT: flds {{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: flds {{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: fucompi %st(1), %st +; GISEL-X64-NEXT: fstp %st(0) +; GISEL-X64-NEXT: seta %al +; GISEL-X64-NEXT: retq +; +; SDAG-X86-LABEL: fcmp_float_olt: +; SDAG-X86: ## %bb.0: +; SDAG-X86-NEXT: flds {{[0-9]+}}(%esp) +; SDAG-X86-NEXT: flds {{[0-9]+}}(%esp) +; SDAG-X86-NEXT: fucompp +; SDAG-X86-NEXT: fnstsw %ax +; SDAG-X86-NEXT: ## kill: def $ah killed $ah killed $ax +; SDAG-X86-NEXT: sahf +; SDAG-X86-NEXT: seta %al +; SDAG-X86-NEXT: retl +; +; FAST-X86-LABEL: fcmp_float_olt: +; FAST-X86: ## %bb.0: +; FAST-X86-NEXT: flds {{[0-9]+}}(%esp) +; FAST-X86-NEXT: flds {{[0-9]+}}(%esp) +; FAST-X86-NEXT: fxch %st(1) +; FAST-X86-NEXT: fucompp +; FAST-X86-NEXT: fnstsw %ax +; FAST-X86-NEXT: ## kill: def $ah killed $ah killed $ax +; FAST-X86-NEXT: sahf +; FAST-X86-NEXT: seta %al +; FAST-X86-NEXT: retl +; +; GISEL-X86-LABEL: fcmp_float_olt: +; GISEL-X86: ## %bb.0: +; GISEL-X86-NEXT: flds {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: flds {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: fucompi %st(1), %st +; GISEL-X86-NEXT: fstp %st(0) +; GISEL-X86-NEXT: seta %al +; GISEL-X86-NEXT: retl + %1 = fcmp olt float %x, %y + ret i1 %1 + } + + define i1 @fcmp_float_ole(float %x, float %y) { +; SDAG-X64-LABEL: fcmp_float_ole: +; SDAG-X64: ## %bb.0: +; SDAG-X64-NEXT: flds {{[0-9]+}}(%rsp) +; SDAG-X64-NEXT: flds {{[0-9]+}}(%rsp) +; SDAG-X64-NEXT: fucompi %st(1), %st +; SDAG-X64-NEXT: fstp %st(0) +; SDAG-X64-NEXT: setae %al +; SDAG-X64-NEXT: retq +; +; FAST-X64-LABEL: fcmp_float_ole: +; FAST-X64: ## %bb.0: +; FAST-X64-NEXT: flds {{[0-9]+}}(%rsp) +; FAST-X64-NEXT: flds {{[0-9]+}}(%rsp) +; FAST-X64-NEXT: fxch %st(1) +; FAST-X64-NEXT: fucompi %st(1), %st +; FAST-X64-NEXT: fstp %st(0) +; FAST-X64-NEXT: setae %al +; FAST-X64-NEXT: retq +; +; GISEL-X64-LABEL: fcmp_float_ole: +; GISEL-X64: ## %bb.0: +; GISEL-X64-NEXT: flds {{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: flds {{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: fucompi %st(1), %st +; GISEL-X64-NEXT: fstp %st(0) +; 
GISEL-X64-NEXT: setae %al +; GISEL-X64-NEXT: retq +; +; SDAG-X86-LABEL: fcmp_float_ole: +; SDAG-X86: ## %bb.0: +; SDAG-X86-NEXT: flds {{[0-9]+}}(%esp) +; SDAG-X86-NEXT: flds {{[0-9]+}}(%esp) +; SDAG-X86-NEXT: fucompp +; SDAG-X86-NEXT: fnstsw %ax +; SDAG-X86-NEXT: ## kill: def $ah killed $ah killed $ax +; SDAG-X86-NEXT: sahf +; SDAG-X86-NEXT: setae %al +; SDAG-X86-NEXT: retl +; +; FAST-X86-LABEL: fcmp_float_ole: +; FAST-X86: ## %bb.0: +; FAST-X86-NEXT: flds {{[0-9]+}}(%esp) +; FAST-X86-NEXT: flds {{[0-9]+}}(%esp) +; FAST-X86-NEXT: fxch %st(1) +; FAST-X86-NEXT: fucompp +; FAST-X86-NEXT: fnstsw %ax +; FAST-X86-NEXT: ## kill: def $ah killed $ah killed $ax +; FAST-X86-NEXT: sahf +; FAST-X86-NEXT: setae %al +; FAST-X86-NEXT: retl +; +; GISEL-X86-LABEL: fcmp_float_ole: +; GISEL-X86: ## %bb.0: +; GISEL-X86-NEXT: flds {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: flds {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: fucompi %st(1), %st +; GISEL-X86-NEXT: fstp %st(0) +; GISEL-X86-NEXT: setae %al +; GISEL-X86-NEXT: retl + %1 = fcmp ole float %x, %y + ret i1 %1 + } + + define i1 @fcmp_float_one(float %x, float %y) { +; X64-LABEL: fcmp_float_one: +; X64: ## %bb.0: +; X64-NEXT: flds {{[0-9]+}}(%rsp) +; X64-NEXT: flds {{[0-9]+}}(%rsp) +; X64-NEXT: fucompi %st(1), %st +; X64-NEXT: fstp %st(0) +; X64-NEXT: setne %al +; X64-NEXT: retq +; +; GISEL-X64-LABEL: fcmp_float_one: +; GISEL-X64: ## %bb.0: +; GISEL-X64-NEXT: flds {{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: flds {{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: fxch %st(1) +; GISEL-X64-NEXT: fucompi %st(1), %st +; GISEL-X64-NEXT: fstp %st(0) +; GISEL-X64-NEXT: setne %al +; GISEL-X64-NEXT: retq +; +; X86-LABEL: fcmp_float_one: +; X86: ## %bb.0: +; X86-NEXT: flds {{[0-9]+}}(%esp) +; X86-NEXT: flds {{[0-9]+}}(%esp) +; X86-NEXT: fucompp +; X86-NEXT: fnstsw %ax +; X86-NEXT: ## kill: def $ah killed $ah killed $ax +; X86-NEXT: sahf +; X86-NEXT: setne %al +; X86-NEXT: retl +; +; GISEL-X86-LABEL: fcmp_float_one: +; GISEL-X86: ## %bb.0: +; GISEL-X86-NEXT: flds {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: flds {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: fxch %st(1) +; GISEL-X86-NEXT: fucompi %st(1), %st +; GISEL-X86-NEXT: fstp %st(0) +; GISEL-X86-NEXT: setne %al +; GISEL-X86-NEXT: retl + %1 = fcmp one float %x, %y + ret i1 %1 + } + + define i1 @fcmp_float_ord(float %x, float %y) { +; X64-LABEL: fcmp_float_ord: +; X64: ## %bb.0: +; X64-NEXT: flds {{[0-9]+}}(%rsp) +; X64-NEXT: flds {{[0-9]+}}(%rsp) +; X64-NEXT: fucompi %st(1), %st +; X64-NEXT: fstp %st(0) +; X64-NEXT: setnp %al +; X64-NEXT: retq +; +; GISEL-X64-LABEL: fcmp_float_ord: +; GISEL-X64: ## %bb.0: +; GISEL-X64-NEXT: flds {{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: flds {{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: fxch %st(1) +; GISEL-X64-NEXT: fucompi %st(1), %st +; GISEL-X64-NEXT: fstp %st(0) +; GISEL-X64-NEXT: setnp %al +; GISEL-X64-NEXT: retq +; +; X86-LABEL: fcmp_float_ord: +; X86: ## %bb.0: +; X86-NEXT: flds {{[0-9]+}}(%esp) +; X86-NEXT: flds {{[0-9]+}}(%esp) +; X86-NEXT: fucompp +; X86-NEXT: fnstsw %ax +; X86-NEXT: ## kill: def $ah killed $ah killed $ax +; X86-NEXT: sahf +; X86-NEXT: setnp %al +; X86-NEXT: retl +; +; GISEL-X86-LABEL: fcmp_float_ord: +; GISEL-X86: ## %bb.0: +; GISEL-X86-NEXT: flds {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: flds {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: fxch %st(1) +; GISEL-X86-NEXT: fucompi %st(1), %st +; GISEL-X86-NEXT: fstp %st(0) +; GISEL-X86-NEXT: setnp %al +; GISEL-X86-NEXT: retl + %1 = fcmp ord float %x, %y + ret i1 %1 + } + + define i1 @fcmp_float_uno(float %x, float %y) { +; X64-LABEL: fcmp_float_uno: +; X64: ## %bb.0: +; X64-NEXT: flds 
{{[0-9]+}}(%rsp) +; X64-NEXT: flds {{[0-9]+}}(%rsp) +; X64-NEXT: fucompi %st(1), %st +; X64-NEXT: fstp %st(0) +; X64-NEXT: setp %al +; X64-NEXT: retq +; +; GISEL-X64-LABEL: fcmp_float_uno: +; GISEL-X64: ## %bb.0: +; GISEL-X64-NEXT: flds {{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: flds {{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: fxch %st(1) +; GISEL-X64-NEXT: fucompi %st(1), %st +; GISEL-X64-NEXT: fstp %st(0) +; GISEL-X64-NEXT: setp %al +; GISEL-X64-NEXT: retq +; +; X86-LABEL: fcmp_float_uno: +; X86: ## %bb.0: +; X86-NEXT: flds {{[0-9]+}}(%esp) +; X86-NEXT: flds {{[0-9]+}}(%esp) +; X86-NEXT: fucompp +; X86-NEXT: fnstsw %ax +; X86-NEXT: ## kill: def $ah killed $ah killed $ax +; X86-NEXT: sahf +; X86-NEXT: setp %al +; X86-NEXT: retl +; +; GISEL-X86-LABEL: fcmp_float_uno: +; GISEL-X86: ## %bb.0: +; GISEL-X86-NEXT: flds {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: flds {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: fxch %st(1) +; GISEL-X86-NEXT: fucompi %st(1), %st +; GISEL-X86-NEXT: fstp %st(0) +; GISEL-X86-NEXT: setp %al +; GISEL-X86-NEXT: retl + %1 = fcmp uno float %x, %y + ret i1 %1 + } + + define i1 @fcmp_float_ueq(float %x, float %y) { +; X64-LABEL: fcmp_float_ueq: +; X64: ## %bb.0: +; X64-NEXT: flds {{[0-9]+}}(%rsp) +; X64-NEXT: flds {{[0-9]+}}(%rsp) +; X64-NEXT: fucompi %st(1), %st +; X64-NEXT: fstp %st(0) +; X64-NEXT: sete %al +; X64-NEXT: retq +; +; GISEL-X64-LABEL: fcmp_float_ueq: +; GISEL-X64: ## %bb.0: +; GISEL-X64-NEXT: flds {{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: flds {{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: fxch %st(1) +; GISEL-X64-NEXT: fucompi %st(1), %st +; GISEL-X64-NEXT: fstp %st(0) +; GISEL-X64-NEXT: sete %al +; GISEL-X64-NEXT: retq +; +; X86-LABEL: fcmp_float_ueq: +; X86: ## %bb.0: +; X86-NEXT: flds {{[0-9]+}}(%esp) +; X86-NEXT: flds {{[0-9]+}}(%esp) +; X86-NEXT: fucompp +; X86-NEXT: fnstsw %ax +; X86-NEXT: ## kill: def $ah killed $ah killed $ax +; X86-NEXT: sahf +; X86-NEXT: sete %al +; X86-NEXT: retl +; +; GISEL-X86-LABEL: fcmp_float_ueq: +; GISEL-X86: ## %bb.0: +; GISEL-X86-NEXT: flds {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: flds {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: fxch %st(1) +; GISEL-X86-NEXT: fucompi %st(1), %st +; GISEL-X86-NEXT: fstp %st(0) +; GISEL-X86-NEXT: sete %al +; GISEL-X86-NEXT: retl + %1 = fcmp ueq float %x, %y + ret i1 %1 + } + + define i1 @fcmp_float_ugt(float %x, float %y) { +; SDAG-X64-LABEL: fcmp_float_ugt: +; SDAG-X64: ## %bb.0: +; SDAG-X64-NEXT: flds {{[0-9]+}}(%rsp) +; SDAG-X64-NEXT: flds {{[0-9]+}}(%rsp) +; SDAG-X64-NEXT: fucompi %st(1), %st +; SDAG-X64-NEXT: fstp %st(0) +; SDAG-X64-NEXT: setb %al +; SDAG-X64-NEXT: retq +; +; FAST-X64-LABEL: fcmp_float_ugt: +; FAST-X64: ## %bb.0: +; FAST-X64-NEXT: flds {{[0-9]+}}(%rsp) +; FAST-X64-NEXT: flds {{[0-9]+}}(%rsp) +; FAST-X64-NEXT: fxch %st(1) +; FAST-X64-NEXT: fucompi %st(1), %st +; FAST-X64-NEXT: fstp %st(0) +; FAST-X64-NEXT: setb %al +; FAST-X64-NEXT: retq +; +; GISEL-X64-LABEL: fcmp_float_ugt: +; GISEL-X64: ## %bb.0: +; GISEL-X64-NEXT: flds {{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: flds {{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: fucompi %st(1), %st +; GISEL-X64-NEXT: fstp %st(0) +; GISEL-X64-NEXT: setb %al +; GISEL-X64-NEXT: retq +; +; SDAG-X86-LABEL: fcmp_float_ugt: +; SDAG-X86: ## %bb.0: +; SDAG-X86-NEXT: flds {{[0-9]+}}(%esp) +; SDAG-X86-NEXT: flds {{[0-9]+}}(%esp) +; SDAG-X86-NEXT: fucompp +; SDAG-X86-NEXT: fnstsw %ax +; SDAG-X86-NEXT: ## kill: def $ah killed $ah killed $ax +; SDAG-X86-NEXT: sahf +; SDAG-X86-NEXT: setb %al +; SDAG-X86-NEXT: retl +; +; FAST-X86-LABEL: fcmp_float_ugt: +; FAST-X86: ## %bb.0: +; FAST-X86-NEXT: flds {{[0-9]+}}(%esp) +; 
FAST-X86-NEXT: flds {{[0-9]+}}(%esp) +; FAST-X86-NEXT: fxch %st(1) +; FAST-X86-NEXT: fucompp +; FAST-X86-NEXT: fnstsw %ax +; FAST-X86-NEXT: ## kill: def $ah killed $ah killed $ax +; FAST-X86-NEXT: sahf +; FAST-X86-NEXT: setb %al +; FAST-X86-NEXT: retl +; +; GISEL-X86-LABEL: fcmp_float_ugt: +; GISEL-X86: ## %bb.0: +; GISEL-X86-NEXT: flds {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: flds {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: fucompi %st(1), %st +; GISEL-X86-NEXT: fstp %st(0) +; GISEL-X86-NEXT: setb %al +; GISEL-X86-NEXT: retl + %1 = fcmp ugt float %x, %y + ret i1 %1 + } + + define i1 @fcmp_float_uge(float %x, float %y) { +; SDAG-X64-LABEL: fcmp_float_uge: +; SDAG-X64: ## %bb.0: +; SDAG-X64-NEXT: flds {{[0-9]+}}(%rsp) +; SDAG-X64-NEXT: flds {{[0-9]+}}(%rsp) +; SDAG-X64-NEXT: fucompi %st(1), %st +; SDAG-X64-NEXT: fstp %st(0) +; SDAG-X64-NEXT: setbe %al +; SDAG-X64-NEXT: retq +; +; FAST-X64-LABEL: fcmp_float_uge: +; FAST-X64: ## %bb.0: +; FAST-X64-NEXT: flds {{[0-9]+}}(%rsp) +; FAST-X64-NEXT: flds {{[0-9]+}}(%rsp) +; FAST-X64-NEXT: fxch %st(1) +; FAST-X64-NEXT: fucompi %st(1), %st +; FAST-X64-NEXT: fstp %st(0) +; FAST-X64-NEXT: setbe %al +; FAST-X64-NEXT: retq +; +; GISEL-X64-LABEL: fcmp_float_uge: +; GISEL-X64: ## %bb.0: +; GISEL-X64-NEXT: flds {{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: flds {{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: fucompi %st(1), %st +; GISEL-X64-NEXT: fstp %st(0) +; GISEL-X64-NEXT: setbe %al +; GISEL-X64-NEXT: retq +; +; SDAG-X86-LABEL: fcmp_float_uge: +; SDAG-X86: ## %bb.0: +; SDAG-X86-NEXT: flds {{[0-9]+}}(%esp) +; SDAG-X86-NEXT: flds {{[0-9]+}}(%esp) +; SDAG-X86-NEXT: fucompp +; SDAG-X86-NEXT: fnstsw %ax +; SDAG-X86-NEXT: ## kill: def $ah killed $ah killed $ax +; SDAG-X86-NEXT: sahf +; SDAG-X86-NEXT: setbe %al +; SDAG-X86-NEXT: retl +; +; FAST-X86-LABEL: fcmp_float_uge: +; FAST-X86: ## %bb.0: +; FAST-X86-NEXT: flds {{[0-9]+}}(%esp) +; FAST-X86-NEXT: flds {{[0-9]+}}(%esp) +; FAST-X86-NEXT: fxch %st(1) +; FAST-X86-NEXT: fucompp +; FAST-X86-NEXT: fnstsw %ax +; FAST-X86-NEXT: ## kill: def $ah killed $ah killed $ax +; FAST-X86-NEXT: sahf +; FAST-X86-NEXT: setbe %al +; FAST-X86-NEXT: retl +; +; GISEL-X86-LABEL: fcmp_float_uge: +; GISEL-X86: ## %bb.0: +; GISEL-X86-NEXT: flds {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: flds {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: fucompi %st(1), %st +; GISEL-X86-NEXT: fstp %st(0) +; GISEL-X86-NEXT: setbe %al +; GISEL-X86-NEXT: retl + %1 = fcmp uge float %x, %y + ret i1 %1 + } + + define i1 @fcmp_float_ult(float %x, float %y) { +; X64-LABEL: fcmp_float_ult: +; X64: ## %bb.0: +; X64-NEXT: flds {{[0-9]+}}(%rsp) +; X64-NEXT: flds {{[0-9]+}}(%rsp) +; X64-NEXT: fucompi %st(1), %st +; X64-NEXT: fstp %st(0) +; X64-NEXT: setb %al +; X64-NEXT: retq +; +; GISEL-X64-LABEL: fcmp_float_ult: +; GISEL-X64: ## %bb.0: +; GISEL-X64-NEXT: flds {{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: flds {{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: fxch %st(1) +; GISEL-X64-NEXT: fucompi %st(1), %st +; GISEL-X64-NEXT: fstp %st(0) +; GISEL-X64-NEXT: setb %al +; GISEL-X64-NEXT: retq +; +; X86-LABEL: fcmp_float_ult: +; X86: ## %bb.0: +; X86-NEXT: flds {{[0-9]+}}(%esp) +; X86-NEXT: flds {{[0-9]+}}(%esp) +; X86-NEXT: fucompp +; X86-NEXT: fnstsw %ax +; X86-NEXT: ## kill: def $ah killed $ah killed $ax +; X86-NEXT: sahf +; X86-NEXT: setb %al +; X86-NEXT: retl +; +; GISEL-X86-LABEL: fcmp_float_ult: +; GISEL-X86: ## %bb.0: +; GISEL-X86-NEXT: flds {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: flds {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: fxch %st(1) +; GISEL-X86-NEXT: fucompi %st(1), %st +; GISEL-X86-NEXT: fstp %st(0) +; GISEL-X86-NEXT: setb %al +; 
GISEL-X86-NEXT: retl + %1 = fcmp ult float %x, %y + ret i1 %1 + } + + define i1 @fcmp_float_ule(float %x, float %y) { +; X64-LABEL: fcmp_float_ule: +; X64: ## %bb.0: +; X64-NEXT: flds {{[0-9]+}}(%rsp) +; X64-NEXT: flds {{[0-9]+}}(%rsp) +; X64-NEXT: fucompi %st(1), %st +; X64-NEXT: fstp %st(0) +; X64-NEXT: setbe %al +; X64-NEXT: retq +; +; GISEL-X64-LABEL: fcmp_float_ule: +; GISEL-X64: ## %bb.0: +; GISEL-X64-NEXT: flds {{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: flds {{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: fxch %st(1) +; GISEL-X64-NEXT: fucompi %st(1), %st +; GISEL-X64-NEXT: fstp %st(0) +; GISEL-X64-NEXT: setbe %al +; GISEL-X64-NEXT: retq +; +; X86-LABEL: fcmp_float_ule: +; X86: ## %bb.0: +; X86-NEXT: flds {{[0-9]+}}(%esp) +; X86-NEXT: flds {{[0-9]+}}(%esp) +; X86-NEXT: fucompp +; X86-NEXT: fnstsw %ax +; X86-NEXT: ## kill: def $ah killed $ah killed $ax +; X86-NEXT: sahf +; X86-NEXT: setbe %al +; X86-NEXT: retl +; +; GISEL-X86-LABEL: fcmp_float_ule: +; GISEL-X86: ## %bb.0: +; GISEL-X86-NEXT: flds {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: flds {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: fxch %st(1) +; GISEL-X86-NEXT: fucompi %st(1), %st +; GISEL-X86-NEXT: fstp %st(0) +; GISEL-X86-NEXT: setbe %al +; GISEL-X86-NEXT: retl + %1 = fcmp ule float %x, %y + ret i1 %1 + } + + define i1 @fcmp_float_une(float %x, float %y) { +; X64-LABEL: fcmp_float_une: +; X64: ## %bb.0: +; X64-NEXT: flds {{[0-9]+}}(%rsp) +; X64-NEXT: flds {{[0-9]+}}(%rsp) +; X64-NEXT: fucompi %st(1), %st +; X64-NEXT: fstp %st(0) +; X64-NEXT: setp %cl +; X64-NEXT: setne %al +; X64-NEXT: orb %cl, %al +; X64-NEXT: retq +; +; GISEL-X64-LABEL: fcmp_float_une: +; GISEL-X64: ## %bb.0: +; GISEL-X64-NEXT: flds {{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: flds {{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: fxch %st(1) +; GISEL-X64-NEXT: fucompi %st(1), %st +; GISEL-X64-NEXT: fstp %st(0) +; GISEL-X64-NEXT: setne %cl +; GISEL-X64-NEXT: setp %al +; GISEL-X64-NEXT: orb %cl, %al +; GISEL-X64-NEXT: retq +; +; X86-LABEL: fcmp_float_une: +; X86: ## %bb.0: +; X86-NEXT: flds {{[0-9]+}}(%esp) +; X86-NEXT: flds {{[0-9]+}}(%esp) +; X86-NEXT: fucompp +; X86-NEXT: fnstsw %ax +; X86-NEXT: ## kill: def $ah killed $ah killed $ax +; X86-NEXT: sahf +; X86-NEXT: setp %cl +; X86-NEXT: setne %al +; X86-NEXT: orb %cl, %al +; X86-NEXT: retl +; +; GISEL-X86-LABEL: fcmp_float_une: +; GISEL-X86: ## %bb.0: +; GISEL-X86-NEXT: flds {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: flds {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: fxch %st(1) +; GISEL-X86-NEXT: fucompi %st(1), %st +; GISEL-X86-NEXT: fstp %st(0) +; GISEL-X86-NEXT: setne %cl +; GISEL-X86-NEXT: setp %al +; GISEL-X86-NEXT: orb %cl, %al +; GISEL-X86-NEXT: retl + %1 = fcmp une float %x, %y + ret i1 %1 + } diff --git a/llvm/test/CodeGen/X86/isel-fcmp.ll b/llvm/test/CodeGen/X86/isel-fcmp.ll new file mode 100644 index 0000000000000..4a223aaa4149b --- /dev/null +++ b/llvm/test/CodeGen/X86/isel-fcmp.ll @@ -0,0 +1,888 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -mattr=+sse,+sse2,-x87 -verify-machineinstrs | FileCheck %s --check-prefixes=X64,SDAG-X64 +; RUN: llc < %s -fast-isel -fast-isel-abort=1 -mtriple=x86_64-apple-darwin10 -mattr=+sse,+sse2,-x87 -verify-machineinstrs | FileCheck %s --check-prefixes=X64,FAST-X64 +; RUN: llc < %s -global-isel -global-isel-abort=1 -mtriple=x86_64-apple-darwin10 -mattr=+sse,+sse2,-x87 -verify-machineinstrs | FileCheck %s --check-prefixes=GISEL-X64 +; i686 with 64 bit store is issue. 
+; RUN: llc < %s -mtriple=i686-apple-darwin10 -mattr=-sse,-sse2,+x87 -verify-machineinstrs | FileCheck %s --check-prefixes=X86,SDAG-X86 +; Allow fast-isel to fallback to selection dag on x86 +; RUN: llc < %s -fast-isel -mtriple=i686-apple-darwin10 -mattr=-sse,-sse2,+x87 -verify-machineinstrs | FileCheck %s --check-prefixes=X86,FAST-X86 +; llc < %s -global-isel -global-isel-abort=1 -mtriple=i686-apple-darwin10 -mattr=-sse,-sse2,+x87 -verify-machineinstrs | FileCheck %s --check-prefixes=GISEL-X86 + + define i1 @fcmp_float_oeq(float %x, float %y) nounwind { +; X64-LABEL: fcmp_float_oeq: +; X64: ## %bb.0: +; X64-NEXT: cmpeqss %xmm1, %xmm0 +; X64-NEXT: movd %xmm0, %eax +; X64-NEXT: andl $1, %eax +; X64-NEXT: ## kill: def $al killed $al killed $eax +; X64-NEXT: retq +; +; GISEL-X64-LABEL: fcmp_float_oeq: +; GISEL-X64: ## %bb.0: +; GISEL-X64-NEXT: ucomiss %xmm1, %xmm0 +; GISEL-X64-NEXT: sete %cl +; GISEL-X64-NEXT: setnp %al +; GISEL-X64-NEXT: andb %cl, %al +; GISEL-X64-NEXT: retq +; +; X86-LABEL: fcmp_float_oeq: +; X86: ## %bb.0: +; X86-NEXT: flds {{[0-9]+}}(%esp) +; X86-NEXT: flds {{[0-9]+}}(%esp) +; X86-NEXT: fucompp +; X86-NEXT: fnstsw %ax +; X86-NEXT: ## kill: def $ah killed $ah killed $ax +; X86-NEXT: sahf +; X86-NEXT: setnp %cl +; X86-NEXT: sete %al +; X86-NEXT: andb %cl, %al +; X86-NEXT: retl + %1 = fcmp oeq float %x, %y + ret i1 %1 + } + + define i1 @fcmp_float_ogt(float %x, float %y) nounwind { +; X64-LABEL: fcmp_float_ogt: +; X64: ## %bb.0: +; X64-NEXT: ucomiss %xmm1, %xmm0 +; X64-NEXT: seta %al +; X64-NEXT: retq +; +; GISEL-X64-LABEL: fcmp_float_ogt: +; GISEL-X64: ## %bb.0: +; GISEL-X64-NEXT: ucomiss %xmm1, %xmm0 +; GISEL-X64-NEXT: seta %al +; GISEL-X64-NEXT: retq +; +; X86-LABEL: fcmp_float_ogt: +; X86: ## %bb.0: +; X86-NEXT: flds {{[0-9]+}}(%esp) +; X86-NEXT: flds {{[0-9]+}}(%esp) +; X86-NEXT: fucompp +; X86-NEXT: fnstsw %ax +; X86-NEXT: ## kill: def $ah killed $ah killed $ax +; X86-NEXT: sahf +; X86-NEXT: seta %al +; X86-NEXT: retl + %1 = fcmp ogt float %x, %y + ret i1 %1 + } + + define i1 @fcmp_float_oge(float %x, float %y) nounwind { +; X64-LABEL: fcmp_float_oge: +; X64: ## %bb.0: +; X64-NEXT: ucomiss %xmm1, %xmm0 +; X64-NEXT: setae %al +; X64-NEXT: retq +; +; GISEL-X64-LABEL: fcmp_float_oge: +; GISEL-X64: ## %bb.0: +; GISEL-X64-NEXT: ucomiss %xmm1, %xmm0 +; GISEL-X64-NEXT: setae %al +; GISEL-X64-NEXT: retq +; +; X86-LABEL: fcmp_float_oge: +; X86: ## %bb.0: +; X86-NEXT: flds {{[0-9]+}}(%esp) +; X86-NEXT: flds {{[0-9]+}}(%esp) +; X86-NEXT: fucompp +; X86-NEXT: fnstsw %ax +; X86-NEXT: ## kill: def $ah killed $ah killed $ax +; X86-NEXT: sahf +; X86-NEXT: setae %al +; X86-NEXT: retl + %1 = fcmp oge float %x, %y + ret i1 %1 + } + + define i1 @fcmp_float_olt(float %x, float %y) nounwind { +; X64-LABEL: fcmp_float_olt: +; X64: ## %bb.0: +; X64-NEXT: ucomiss %xmm0, %xmm1 +; X64-NEXT: seta %al +; X64-NEXT: retq +; +; GISEL-X64-LABEL: fcmp_float_olt: +; GISEL-X64: ## %bb.0: +; GISEL-X64-NEXT: ucomiss %xmm0, %xmm1 +; GISEL-X64-NEXT: seta %al +; GISEL-X64-NEXT: retq +; +; SDAG-X86-LABEL: fcmp_float_olt: +; SDAG-X86: ## %bb.0: +; SDAG-X86-NEXT: flds {{[0-9]+}}(%esp) +; SDAG-X86-NEXT: flds {{[0-9]+}}(%esp) +; SDAG-X86-NEXT: fucompp +; SDAG-X86-NEXT: fnstsw %ax +; SDAG-X86-NEXT: ## kill: def $ah killed $ah killed $ax +; SDAG-X86-NEXT: sahf +; SDAG-X86-NEXT: seta %al +; SDAG-X86-NEXT: retl +; +; FAST-X86-LABEL: fcmp_float_olt: +; FAST-X86: ## %bb.0: +; FAST-X86-NEXT: flds {{[0-9]+}}(%esp) +; FAST-X86-NEXT: flds {{[0-9]+}}(%esp) +; FAST-X86-NEXT: fxch %st(1) +; FAST-X86-NEXT: fucompp +; 
FAST-X86-NEXT: fnstsw %ax +; FAST-X86-NEXT: ## kill: def $ah killed $ah killed $ax +; FAST-X86-NEXT: sahf +; FAST-X86-NEXT: seta %al +; FAST-X86-NEXT: retl + %1 = fcmp olt float %x, %y + ret i1 %1 + } + + define i1 @fcmp_float_ole(float %x, float %y) nounwind { +; X64-LABEL: fcmp_float_ole: +; X64: ## %bb.0: +; X64-NEXT: ucomiss %xmm0, %xmm1 +; X64-NEXT: setae %al +; X64-NEXT: retq +; +; GISEL-X64-LABEL: fcmp_float_ole: +; GISEL-X64: ## %bb.0: +; GISEL-X64-NEXT: ucomiss %xmm0, %xmm1 +; GISEL-X64-NEXT: setae %al +; GISEL-X64-NEXT: retq +; +; SDAG-X86-LABEL: fcmp_float_ole: +; SDAG-X86: ## %bb.0: +; SDAG-X86-NEXT: flds {{[0-9]+}}(%esp) +; SDAG-X86-NEXT: flds {{[0-9]+}}(%esp) +; SDAG-X86-NEXT: fucompp +; SDAG-X86-NEXT: fnstsw %ax +; SDAG-X86-NEXT: ## kill: def $ah killed $ah killed $ax +; SDAG-X86-NEXT: sahf +; SDAG-X86-NEXT: setae %al +; SDAG-X86-NEXT: retl +; +; FAST-X86-LABEL: fcmp_float_ole: +; FAST-X86: ## %bb.0: +; FAST-X86-NEXT: flds {{[0-9]+}}(%esp) +; FAST-X86-NEXT: flds {{[0-9]+}}(%esp) +; FAST-X86-NEXT: fxch %st(1) +; FAST-X86-NEXT: fucompp +; FAST-X86-NEXT: fnstsw %ax +; FAST-X86-NEXT: ## kill: def $ah killed $ah killed $ax +; FAST-X86-NEXT: sahf +; FAST-X86-NEXT: setae %al +; FAST-X86-NEXT: retl + %1 = fcmp ole float %x, %y + ret i1 %1 + } + + define i1 @fcmp_float_one(float %x, float %y) nounwind { +; X64-LABEL: fcmp_float_one: +; X64: ## %bb.0: +; X64-NEXT: ucomiss %xmm1, %xmm0 +; X64-NEXT: setne %al +; X64-NEXT: retq +; +; GISEL-X64-LABEL: fcmp_float_one: +; GISEL-X64: ## %bb.0: +; GISEL-X64-NEXT: ucomiss %xmm1, %xmm0 +; GISEL-X64-NEXT: setne %al +; GISEL-X64-NEXT: retq +; +; X86-LABEL: fcmp_float_one: +; X86: ## %bb.0: +; X86-NEXT: flds {{[0-9]+}}(%esp) +; X86-NEXT: flds {{[0-9]+}}(%esp) +; X86-NEXT: fucompp +; X86-NEXT: fnstsw %ax +; X86-NEXT: ## kill: def $ah killed $ah killed $ax +; X86-NEXT: sahf +; X86-NEXT: setne %al +; X86-NEXT: retl + %1 = fcmp one float %x, %y + ret i1 %1 + } + + define i1 @fcmp_float_ord(float %x, float %y) nounwind { +; X64-LABEL: fcmp_float_ord: +; X64: ## %bb.0: +; X64-NEXT: ucomiss %xmm1, %xmm0 +; X64-NEXT: setnp %al +; X64-NEXT: retq +; +; GISEL-X64-LABEL: fcmp_float_ord: +; GISEL-X64: ## %bb.0: +; GISEL-X64-NEXT: ucomiss %xmm1, %xmm0 +; GISEL-X64-NEXT: setnp %al +; GISEL-X64-NEXT: retq +; +; X86-LABEL: fcmp_float_ord: +; X86: ## %bb.0: +; X86-NEXT: flds {{[0-9]+}}(%esp) +; X86-NEXT: flds {{[0-9]+}}(%esp) +; X86-NEXT: fucompp +; X86-NEXT: fnstsw %ax +; X86-NEXT: ## kill: def $ah killed $ah killed $ax +; X86-NEXT: sahf +; X86-NEXT: setnp %al +; X86-NEXT: retl + %1 = fcmp ord float %x, %y + ret i1 %1 + } + + define i1 @fcmp_float_uno(float %x, float %y) nounwind { +; X64-LABEL: fcmp_float_uno: +; X64: ## %bb.0: +; X64-NEXT: ucomiss %xmm1, %xmm0 +; X64-NEXT: setp %al +; X64-NEXT: retq +; +; GISEL-X64-LABEL: fcmp_float_uno: +; GISEL-X64: ## %bb.0: +; GISEL-X64-NEXT: ucomiss %xmm1, %xmm0 +; GISEL-X64-NEXT: setp %al +; GISEL-X64-NEXT: retq +; +; X86-LABEL: fcmp_float_uno: +; X86: ## %bb.0: +; X86-NEXT: flds {{[0-9]+}}(%esp) +; X86-NEXT: flds {{[0-9]+}}(%esp) +; X86-NEXT: fucompp +; X86-NEXT: fnstsw %ax +; X86-NEXT: ## kill: def $ah killed $ah killed $ax +; X86-NEXT: sahf +; X86-NEXT: setp %al +; X86-NEXT: retl + %1 = fcmp uno float %x, %y + ret i1 %1 + } + + define i1 @fcmp_float_ueq(float %x, float %y) nounwind { +; X64-LABEL: fcmp_float_ueq: +; X64: ## %bb.0: +; X64-NEXT: ucomiss %xmm1, %xmm0 +; X64-NEXT: sete %al +; X64-NEXT: retq +; +; GISEL-X64-LABEL: fcmp_float_ueq: +; GISEL-X64: ## %bb.0: +; GISEL-X64-NEXT: ucomiss %xmm1, %xmm0 +; 
GISEL-X64-NEXT: sete %al +; GISEL-X64-NEXT: retq +; +; X86-LABEL: fcmp_float_ueq: +; X86: ## %bb.0: +; X86-NEXT: flds {{[0-9]+}}(%esp) +; X86-NEXT: flds {{[0-9]+}}(%esp) +; X86-NEXT: fucompp +; X86-NEXT: fnstsw %ax +; X86-NEXT: ## kill: def $ah killed $ah killed $ax +; X86-NEXT: sahf +; X86-NEXT: sete %al +; X86-NEXT: retl + %1 = fcmp ueq float %x, %y + ret i1 %1 + } + + define i1 @fcmp_float_ugt(float %x, float %y) nounwind { +; X64-LABEL: fcmp_float_ugt: +; X64: ## %bb.0: +; X64-NEXT: ucomiss %xmm0, %xmm1 +; X64-NEXT: setb %al +; X64-NEXT: retq +; +; GISEL-X64-LABEL: fcmp_float_ugt: +; GISEL-X64: ## %bb.0: +; GISEL-X64-NEXT: ucomiss %xmm0, %xmm1 +; GISEL-X64-NEXT: setb %al +; GISEL-X64-NEXT: retq +; +; SDAG-X86-LABEL: fcmp_float_ugt: +; SDAG-X86: ## %bb.0: +; SDAG-X86-NEXT: flds {{[0-9]+}}(%esp) +; SDAG-X86-NEXT: flds {{[0-9]+}}(%esp) +; SDAG-X86-NEXT: fucompp +; SDAG-X86-NEXT: fnstsw %ax +; SDAG-X86-NEXT: ## kill: def $ah killed $ah killed $ax +; SDAG-X86-NEXT: sahf +; SDAG-X86-NEXT: setb %al +; SDAG-X86-NEXT: retl +; +; FAST-X86-LABEL: fcmp_float_ugt: +; FAST-X86: ## %bb.0: +; FAST-X86-NEXT: flds {{[0-9]+}}(%esp) +; FAST-X86-NEXT: flds {{[0-9]+}}(%esp) +; FAST-X86-NEXT: fxch %st(1) +; FAST-X86-NEXT: fucompp +; FAST-X86-NEXT: fnstsw %ax +; FAST-X86-NEXT: ## kill: def $ah killed $ah killed $ax +; FAST-X86-NEXT: sahf +; FAST-X86-NEXT: setb %al +; FAST-X86-NEXT: retl + %1 = fcmp ugt float %x, %y + ret i1 %1 + } + + define i1 @fcmp_float_uge(float %x, float %y) nounwind { +; X64-LABEL: fcmp_float_uge: +; X64: ## %bb.0: +; X64-NEXT: ucomiss %xmm0, %xmm1 +; X64-NEXT: setbe %al +; X64-NEXT: retq +; +; GISEL-X64-LABEL: fcmp_float_uge: +; GISEL-X64: ## %bb.0: +; GISEL-X64-NEXT: ucomiss %xmm0, %xmm1 +; GISEL-X64-NEXT: setbe %al +; GISEL-X64-NEXT: retq +; +; SDAG-X86-LABEL: fcmp_float_uge: +; SDAG-X86: ## %bb.0: +; SDAG-X86-NEXT: flds {{[0-9]+}}(%esp) +; SDAG-X86-NEXT: flds {{[0-9]+}}(%esp) +; SDAG-X86-NEXT: fucompp +; SDAG-X86-NEXT: fnstsw %ax +; SDAG-X86-NEXT: ## kill: def $ah killed $ah killed $ax +; SDAG-X86-NEXT: sahf +; SDAG-X86-NEXT: setbe %al +; SDAG-X86-NEXT: retl +; +; FAST-X86-LABEL: fcmp_float_uge: +; FAST-X86: ## %bb.0: +; FAST-X86-NEXT: flds {{[0-9]+}}(%esp) +; FAST-X86-NEXT: flds {{[0-9]+}}(%esp) +; FAST-X86-NEXT: fxch %st(1) +; FAST-X86-NEXT: fucompp +; FAST-X86-NEXT: fnstsw %ax +; FAST-X86-NEXT: ## kill: def $ah killed $ah killed $ax +; FAST-X86-NEXT: sahf +; FAST-X86-NEXT: setbe %al +; FAST-X86-NEXT: retl + %1 = fcmp uge float %x, %y + ret i1 %1 + } + + define i1 @fcmp_float_ult(float %x, float %y) nounwind { +; X64-LABEL: fcmp_float_ult: +; X64: ## %bb.0: +; X64-NEXT: ucomiss %xmm1, %xmm0 +; X64-NEXT: setb %al +; X64-NEXT: retq +; +; GISEL-X64-LABEL: fcmp_float_ult: +; GISEL-X64: ## %bb.0: +; GISEL-X64-NEXT: ucomiss %xmm1, %xmm0 +; GISEL-X64-NEXT: setb %al +; GISEL-X64-NEXT: retq +; +; X86-LABEL: fcmp_float_ult: +; X86: ## %bb.0: +; X86-NEXT: flds {{[0-9]+}}(%esp) +; X86-NEXT: flds {{[0-9]+}}(%esp) +; X86-NEXT: fucompp +; X86-NEXT: fnstsw %ax +; X86-NEXT: ## kill: def $ah killed $ah killed $ax +; X86-NEXT: sahf +; X86-NEXT: setb %al +; X86-NEXT: retl + %1 = fcmp ult float %x, %y + ret i1 %1 + } + + define i1 @fcmp_float_ule(float %x, float %y) nounwind { +; X64-LABEL: fcmp_float_ule: +; X64: ## %bb.0: +; X64-NEXT: ucomiss %xmm1, %xmm0 +; X64-NEXT: setbe %al +; X64-NEXT: retq +; +; GISEL-X64-LABEL: fcmp_float_ule: +; GISEL-X64: ## %bb.0: +; GISEL-X64-NEXT: ucomiss %xmm1, %xmm0 +; GISEL-X64-NEXT: setbe %al +; GISEL-X64-NEXT: retq +; +; X86-LABEL: fcmp_float_ule: +; X86: ## %bb.0: 
+; X86-NEXT: flds {{[0-9]+}}(%esp) +; X86-NEXT: flds {{[0-9]+}}(%esp) +; X86-NEXT: fucompp +; X86-NEXT: fnstsw %ax +; X86-NEXT: ## kill: def $ah killed $ah killed $ax +; X86-NEXT: sahf +; X86-NEXT: setbe %al +; X86-NEXT: retl + %1 = fcmp ule float %x, %y + ret i1 %1 + } + + define i1 @fcmp_float_une(float %x, float %y) nounwind { +; X64-LABEL: fcmp_float_une: +; X64: ## %bb.0: +; X64-NEXT: cmpneqss %xmm1, %xmm0 +; X64-NEXT: movd %xmm0, %eax +; X64-NEXT: andl $1, %eax +; X64-NEXT: ## kill: def $al killed $al killed $eax +; X64-NEXT: retq +; +; GISEL-X64-LABEL: fcmp_float_une: +; GISEL-X64: ## %bb.0: +; GISEL-X64-NEXT: ucomiss %xmm1, %xmm0 +; GISEL-X64-NEXT: setne %cl +; GISEL-X64-NEXT: setp %al +; GISEL-X64-NEXT: orb %cl, %al +; GISEL-X64-NEXT: retq +; +; X86-LABEL: fcmp_float_une: +; X86: ## %bb.0: +; X86-NEXT: flds {{[0-9]+}}(%esp) +; X86-NEXT: flds {{[0-9]+}}(%esp) +; X86-NEXT: fucompp +; X86-NEXT: fnstsw %ax +; X86-NEXT: ## kill: def $ah killed $ah killed $ax +; X86-NEXT: sahf +; X86-NEXT: setp %cl +; X86-NEXT: setne %al +; X86-NEXT: orb %cl, %al +; X86-NEXT: retl + %1 = fcmp une float %x, %y + ret i1 %1 + } + + define i1 @fcmp_double_oeq(double %x, double %y) nounwind { +; X64-LABEL: fcmp_double_oeq: +; X64: ## %bb.0: +; X64-NEXT: cmpeqsd %xmm1, %xmm0 +; X64-NEXT: movq %xmm0, %rax +; X64-NEXT: andl $1, %eax +; X64-NEXT: ## kill: def $al killed $al killed $rax +; X64-NEXT: retq +; +; GISEL-X64-LABEL: fcmp_double_oeq: +; GISEL-X64: ## %bb.0: +; GISEL-X64-NEXT: ucomisd %xmm1, %xmm0 +; GISEL-X64-NEXT: sete %cl +; GISEL-X64-NEXT: setnp %al +; GISEL-X64-NEXT: andb %cl, %al +; GISEL-X64-NEXT: retq +; +; X86-LABEL: fcmp_double_oeq: +; X86: ## %bb.0: +; X86-NEXT: fldl {{[0-9]+}}(%esp) +; X86-NEXT: fldl {{[0-9]+}}(%esp) +; X86-NEXT: fucompp +; X86-NEXT: fnstsw %ax +; X86-NEXT: ## kill: def $ah killed $ah killed $ax +; X86-NEXT: sahf +; X86-NEXT: setnp %cl +; X86-NEXT: sete %al +; X86-NEXT: andb %cl, %al +; X86-NEXT: retl + %1 = fcmp oeq double %x, %y + ret i1 %1 + } + + define i1 @fcmp_double_ogt(double %x, double %y) nounwind { +; X64-LABEL: fcmp_double_ogt: +; X64: ## %bb.0: +; X64-NEXT: ucomisd %xmm1, %xmm0 +; X64-NEXT: seta %al +; X64-NEXT: retq +; +; GISEL-X64-LABEL: fcmp_double_ogt: +; GISEL-X64: ## %bb.0: +; GISEL-X64-NEXT: ucomisd %xmm1, %xmm0 +; GISEL-X64-NEXT: seta %al +; GISEL-X64-NEXT: retq +; +; X86-LABEL: fcmp_double_ogt: +; X86: ## %bb.0: +; X86-NEXT: fldl {{[0-9]+}}(%esp) +; X86-NEXT: fldl {{[0-9]+}}(%esp) +; X86-NEXT: fucompp +; X86-NEXT: fnstsw %ax +; X86-NEXT: ## kill: def $ah killed $ah killed $ax +; X86-NEXT: sahf +; X86-NEXT: seta %al +; X86-NEXT: retl + %1 = fcmp ogt double %x, %y + ret i1 %1 + } + + define i1 @fcmp_double_oge(double %x, double %y) nounwind { +; X64-LABEL: fcmp_double_oge: +; X64: ## %bb.0: +; X64-NEXT: ucomisd %xmm1, %xmm0 +; X64-NEXT: setae %al +; X64-NEXT: retq +; +; GISEL-X64-LABEL: fcmp_double_oge: +; GISEL-X64: ## %bb.0: +; GISEL-X64-NEXT: ucomisd %xmm1, %xmm0 +; GISEL-X64-NEXT: setae %al +; GISEL-X64-NEXT: retq +; +; X86-LABEL: fcmp_double_oge: +; X86: ## %bb.0: +; X86-NEXT: fldl {{[0-9]+}}(%esp) +; X86-NEXT: fldl {{[0-9]+}}(%esp) +; X86-NEXT: fucompp +; X86-NEXT: fnstsw %ax +; X86-NEXT: ## kill: def $ah killed $ah killed $ax +; X86-NEXT: sahf +; X86-NEXT: setae %al +; X86-NEXT: retl + %1 = fcmp oge double %x, %y + ret i1 %1 + } + + define i1 @fcmp_double_olt(double %x, double %y) nounwind { +; X64-LABEL: fcmp_double_olt: +; X64: ## %bb.0: +; X64-NEXT: ucomisd %xmm0, %xmm1 +; X64-NEXT: seta %al +; X64-NEXT: retq +; +; GISEL-X64-LABEL: 
fcmp_double_olt: +; GISEL-X64: ## %bb.0: +; GISEL-X64-NEXT: ucomisd %xmm0, %xmm1 +; GISEL-X64-NEXT: seta %al +; GISEL-X64-NEXT: retq +; +; SDAG-X86-LABEL: fcmp_double_olt: +; SDAG-X86: ## %bb.0: +; SDAG-X86-NEXT: fldl {{[0-9]+}}(%esp) +; SDAG-X86-NEXT: fldl {{[0-9]+}}(%esp) +; SDAG-X86-NEXT: fucompp +; SDAG-X86-NEXT: fnstsw %ax +; SDAG-X86-NEXT: ## kill: def $ah killed $ah killed $ax +; SDAG-X86-NEXT: sahf +; SDAG-X86-NEXT: seta %al +; SDAG-X86-NEXT: retl +; +; FAST-X86-LABEL: fcmp_double_olt: +; FAST-X86: ## %bb.0: +; FAST-X86-NEXT: fldl {{[0-9]+}}(%esp) +; FAST-X86-NEXT: fldl {{[0-9]+}}(%esp) +; FAST-X86-NEXT: fxch %st(1) +; FAST-X86-NEXT: fucompp +; FAST-X86-NEXT: fnstsw %ax +; FAST-X86-NEXT: ## kill: def $ah killed $ah killed $ax +; FAST-X86-NEXT: sahf +; FAST-X86-NEXT: seta %al +; FAST-X86-NEXT: retl + %1 = fcmp olt double %x, %y + ret i1 %1 + } + + define i1 @fcmp_double_ole(double %x, double %y) nounwind { +; X64-LABEL: fcmp_double_ole: +; X64: ## %bb.0: +; X64-NEXT: ucomisd %xmm0, %xmm1 +; X64-NEXT: setae %al +; X64-NEXT: retq +; +; GISEL-X64-LABEL: fcmp_double_ole: +; GISEL-X64: ## %bb.0: +; GISEL-X64-NEXT: ucomisd %xmm0, %xmm1 +; GISEL-X64-NEXT: setae %al +; GISEL-X64-NEXT: retq +; +; SDAG-X86-LABEL: fcmp_double_ole: +; SDAG-X86: ## %bb.0: +; SDAG-X86-NEXT: fldl {{[0-9]+}}(%esp) +; SDAG-X86-NEXT: fldl {{[0-9]+}}(%esp) +; SDAG-X86-NEXT: fucompp +; SDAG-X86-NEXT: fnstsw %ax +; SDAG-X86-NEXT: ## kill: def $ah killed $ah killed $ax +; SDAG-X86-NEXT: sahf +; SDAG-X86-NEXT: setae %al +; SDAG-X86-NEXT: retl +; +; FAST-X86-LABEL: fcmp_double_ole: +; FAST-X86: ## %bb.0: +; FAST-X86-NEXT: fldl {{[0-9]+}}(%esp) +; FAST-X86-NEXT: fldl {{[0-9]+}}(%esp) +; FAST-X86-NEXT: fxch %st(1) +; FAST-X86-NEXT: fucompp +; FAST-X86-NEXT: fnstsw %ax +; FAST-X86-NEXT: ## kill: def $ah killed $ah killed $ax +; FAST-X86-NEXT: sahf +; FAST-X86-NEXT: setae %al +; FAST-X86-NEXT: retl + %1 = fcmp ole double %x, %y + ret i1 %1 + } + + define i1 @fcmp_double_one(double %x, double %y) nounwind { +; X64-LABEL: fcmp_double_one: +; X64: ## %bb.0: +; X64-NEXT: ucomisd %xmm1, %xmm0 +; X64-NEXT: setne %al +; X64-NEXT: retq +; +; GISEL-X64-LABEL: fcmp_double_one: +; GISEL-X64: ## %bb.0: +; GISEL-X64-NEXT: ucomisd %xmm1, %xmm0 +; GISEL-X64-NEXT: setne %al +; GISEL-X64-NEXT: retq +; +; X86-LABEL: fcmp_double_one: +; X86: ## %bb.0: +; X86-NEXT: fldl {{[0-9]+}}(%esp) +; X86-NEXT: fldl {{[0-9]+}}(%esp) +; X86-NEXT: fucompp +; X86-NEXT: fnstsw %ax +; X86-NEXT: ## kill: def $ah killed $ah killed $ax +; X86-NEXT: sahf +; X86-NEXT: setne %al +; X86-NEXT: retl + %1 = fcmp one double %x, %y + ret i1 %1 + } + + define i1 @fcmp_double_ord(double %x, double %y) nounwind { +; X64-LABEL: fcmp_double_ord: +; X64: ## %bb.0: +; X64-NEXT: ucomisd %xmm1, %xmm0 +; X64-NEXT: setnp %al +; X64-NEXT: retq +; +; GISEL-X64-LABEL: fcmp_double_ord: +; GISEL-X64: ## %bb.0: +; GISEL-X64-NEXT: ucomisd %xmm1, %xmm0 +; GISEL-X64-NEXT: setnp %al +; GISEL-X64-NEXT: retq +; +; X86-LABEL: fcmp_double_ord: +; X86: ## %bb.0: +; X86-NEXT: fldl {{[0-9]+}}(%esp) +; X86-NEXT: fldl {{[0-9]+}}(%esp) +; X86-NEXT: fucompp +; X86-NEXT: fnstsw %ax +; X86-NEXT: ## kill: def $ah killed $ah killed $ax +; X86-NEXT: sahf +; X86-NEXT: setnp %al +; X86-NEXT: retl + %1 = fcmp ord double %x, %y + ret i1 %1 + } + + define i1 @fcmp_double_uno(double %x, double %y) nounwind { +; X64-LABEL: fcmp_double_uno: +; X64: ## %bb.0: +; X64-NEXT: ucomisd %xmm1, %xmm0 +; X64-NEXT: setp %al +; X64-NEXT: retq +; +; GISEL-X64-LABEL: fcmp_double_uno: +; GISEL-X64: ## %bb.0: +; GISEL-X64-NEXT: 
ucomisd %xmm1, %xmm0 +; GISEL-X64-NEXT: setp %al +; GISEL-X64-NEXT: retq +; +; X86-LABEL: fcmp_double_uno: +; X86: ## %bb.0: +; X86-NEXT: fldl {{[0-9]+}}(%esp) +; X86-NEXT: fldl {{[0-9]+}}(%esp) +; X86-NEXT: fucompp +; X86-NEXT: fnstsw %ax +; X86-NEXT: ## kill: def $ah killed $ah killed $ax +; X86-NEXT: sahf +; X86-NEXT: setp %al +; X86-NEXT: retl + %1 = fcmp uno double %x, %y + ret i1 %1 + } + + define i1 @fcmp_double_ueq(double %x, double %y) nounwind { +; X64-LABEL: fcmp_double_ueq: +; X64: ## %bb.0: +; X64-NEXT: ucomisd %xmm1, %xmm0 +; X64-NEXT: sete %al +; X64-NEXT: retq +; +; GISEL-X64-LABEL: fcmp_double_ueq: +; GISEL-X64: ## %bb.0: +; GISEL-X64-NEXT: ucomisd %xmm1, %xmm0 +; GISEL-X64-NEXT: sete %al +; GISEL-X64-NEXT: retq +; +; X86-LABEL: fcmp_double_ueq: +; X86: ## %bb.0: +; X86-NEXT: fldl {{[0-9]+}}(%esp) +; X86-NEXT: fldl {{[0-9]+}}(%esp) +; X86-NEXT: fucompp +; X86-NEXT: fnstsw %ax +; X86-NEXT: ## kill: def $ah killed $ah killed $ax +; X86-NEXT: sahf +; X86-NEXT: sete %al +; X86-NEXT: retl + %1 = fcmp ueq double %x, %y + ret i1 %1 + } + + define i1 @fcmp_double_ugt(double %x, double %y) nounwind { +; X64-LABEL: fcmp_double_ugt: +; X64: ## %bb.0: +; X64-NEXT: ucomisd %xmm0, %xmm1 +; X64-NEXT: setb %al +; X64-NEXT: retq +; +; GISEL-X64-LABEL: fcmp_double_ugt: +; GISEL-X64: ## %bb.0: +; GISEL-X64-NEXT: ucomisd %xmm0, %xmm1 +; GISEL-X64-NEXT: setb %al +; GISEL-X64-NEXT: retq +; +; SDAG-X86-LABEL: fcmp_double_ugt: +; SDAG-X86: ## %bb.0: +; SDAG-X86-NEXT: fldl {{[0-9]+}}(%esp) +; SDAG-X86-NEXT: fldl {{[0-9]+}}(%esp) +; SDAG-X86-NEXT: fucompp +; SDAG-X86-NEXT: fnstsw %ax +; SDAG-X86-NEXT: ## kill: def $ah killed $ah killed $ax +; SDAG-X86-NEXT: sahf +; SDAG-X86-NEXT: setb %al +; SDAG-X86-NEXT: retl +; +; FAST-X86-LABEL: fcmp_double_ugt: +; FAST-X86: ## %bb.0: +; FAST-X86-NEXT: fldl {{[0-9]+}}(%esp) +; FAST-X86-NEXT: fldl {{[0-9]+}}(%esp) +; FAST-X86-NEXT: fxch %st(1) +; FAST-X86-NEXT: fucompp +; FAST-X86-NEXT: fnstsw %ax +; FAST-X86-NEXT: ## kill: def $ah killed $ah killed $ax +; FAST-X86-NEXT: sahf +; FAST-X86-NEXT: setb %al +; FAST-X86-NEXT: retl + %1 = fcmp ugt double %x, %y + ret i1 %1 + } + + define i1 @fcmp_double_uge(double %x, double %y) nounwind { +; X64-LABEL: fcmp_double_uge: +; X64: ## %bb.0: +; X64-NEXT: ucomisd %xmm0, %xmm1 +; X64-NEXT: setbe %al +; X64-NEXT: retq +; +; GISEL-X64-LABEL: fcmp_double_uge: +; GISEL-X64: ## %bb.0: +; GISEL-X64-NEXT: ucomisd %xmm0, %xmm1 +; GISEL-X64-NEXT: setbe %al +; GISEL-X64-NEXT: retq +; +; SDAG-X86-LABEL: fcmp_double_uge: +; SDAG-X86: ## %bb.0: +; SDAG-X86-NEXT: fldl {{[0-9]+}}(%esp) +; SDAG-X86-NEXT: fldl {{[0-9]+}}(%esp) +; SDAG-X86-NEXT: fucompp +; SDAG-X86-NEXT: fnstsw %ax +; SDAG-X86-NEXT: ## kill: def $ah killed $ah killed $ax +; SDAG-X86-NEXT: sahf +; SDAG-X86-NEXT: setbe %al +; SDAG-X86-NEXT: retl +; +; FAST-X86-LABEL: fcmp_double_uge: +; FAST-X86: ## %bb.0: +; FAST-X86-NEXT: fldl {{[0-9]+}}(%esp) +; FAST-X86-NEXT: fldl {{[0-9]+}}(%esp) +; FAST-X86-NEXT: fxch %st(1) +; FAST-X86-NEXT: fucompp +; FAST-X86-NEXT: fnstsw %ax +; FAST-X86-NEXT: ## kill: def $ah killed $ah killed $ax +; FAST-X86-NEXT: sahf +; FAST-X86-NEXT: setbe %al +; FAST-X86-NEXT: retl + %1 = fcmp uge double %x, %y + ret i1 %1 + } + + define i1 @fcmp_double_ult(double %x, double %y) nounwind { +; X64-LABEL: fcmp_double_ult: +; X64: ## %bb.0: +; X64-NEXT: ucomisd %xmm1, %xmm0 +; X64-NEXT: setb %al +; X64-NEXT: retq +; +; GISEL-X64-LABEL: fcmp_double_ult: +; GISEL-X64: ## %bb.0: +; GISEL-X64-NEXT: ucomisd %xmm1, %xmm0 +; GISEL-X64-NEXT: setb %al +; GISEL-X64-NEXT: retq 
+; +; X86-LABEL: fcmp_double_ult: +; X86: ## %bb.0: +; X86-NEXT: fldl {{[0-9]+}}(%esp) +; X86-NEXT: fldl {{[0-9]+}}(%esp) +; X86-NEXT: fucompp +; X86-NEXT: fnstsw %ax +; X86-NEXT: ## kill: def $ah killed $ah killed $ax +; X86-NEXT: sahf +; X86-NEXT: setb %al +; X86-NEXT: retl + %1 = fcmp ult double %x, %y + ret i1 %1 + } + + define i1 @fcmp_double_ule(double %x, double %y) nounwind { +; X64-LABEL: fcmp_double_ule: +; X64: ## %bb.0: +; X64-NEXT: ucomisd %xmm1, %xmm0 +; X64-NEXT: setbe %al +; X64-NEXT: retq +; +; GISEL-X64-LABEL: fcmp_double_ule: +; GISEL-X64: ## %bb.0: +; GISEL-X64-NEXT: ucomisd %xmm1, %xmm0 +; GISEL-X64-NEXT: setbe %al +; GISEL-X64-NEXT: retq +; +; X86-LABEL: fcmp_double_ule: +; X86: ## %bb.0: +; X86-NEXT: fldl {{[0-9]+}}(%esp) +; X86-NEXT: fldl {{[0-9]+}}(%esp) +; X86-NEXT: fucompp +; X86-NEXT: fnstsw %ax +; X86-NEXT: ## kill: def $ah killed $ah killed $ax +; X86-NEXT: sahf +; X86-NEXT: setbe %al +; X86-NEXT: retl + %1 = fcmp ule double %x, %y + ret i1 %1 + } + + define i1 @fcmp_double_une(double %x, double %y) nounwind { +; X64-LABEL: fcmp_double_une: +; X64: ## %bb.0: +; X64-NEXT: cmpneqsd %xmm1, %xmm0 +; X64-NEXT: movq %xmm0, %rax +; X64-NEXT: andl $1, %eax +; X64-NEXT: ## kill: def $al killed $al killed $rax +; X64-NEXT: retq +; +; GISEL-X64-LABEL: fcmp_double_une: +; GISEL-X64: ## %bb.0: +; GISEL-X64-NEXT: ucomisd %xmm1, %xmm0 +; GISEL-X64-NEXT: setne %cl +; GISEL-X64-NEXT: setp %al +; GISEL-X64-NEXT: orb %cl, %al +; GISEL-X64-NEXT: retq +; +; X86-LABEL: fcmp_double_une: +; X86: ## %bb.0: +; X86-NEXT: fldl {{[0-9]+}}(%esp) +; X86-NEXT: fldl {{[0-9]+}}(%esp) +; X86-NEXT: fucompp +; X86-NEXT: fnstsw %ax +; X86-NEXT: ## kill: def $ah killed $ah killed $ax +; X86-NEXT: sahf +; X86-NEXT: setp %cl +; X86-NEXT: setne %al +; X86-NEXT: orb %cl, %al +; X86-NEXT: retl + %1 = fcmp une double %x, %y + ret i1 %1 + } +;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: +; FAST-X64: {{.*}} +; SDAG-X64: {{.*}} diff --git a/llvm/test/CodeGen/X86/swifttailcc-store-ret-address-aliasing-stack-slot.ll b/llvm/test/CodeGen/X86/swifttailcc-store-ret-address-aliasing-stack-slot.ll new file mode 100644 index 0000000000000..78e810bb67f45 --- /dev/null +++ b/llvm/test/CodeGen/X86/swifttailcc-store-ret-address-aliasing-stack-slot.ll @@ -0,0 +1,44 @@ +; RUN: llc %s -o - | FileCheck %s + +target triple = "x86_64-apple-macosx" + +declare swifttailcc void @tc_fn(ptr swiftasync, i64, ptr, i8, ptr swiftself) + +declare void @foo() + +; FIXME: Currently the return address is written to the stack before loading the +; argument from an aliasing stack slot. 
+define swifttailcc void @test(ptr %0, ptr swiftasync %1, i64 %2, i64 %3, ptr %4, ptr %5, i64 %6, ptr %7, i8 %8) { +; CHECK-LABEL: test: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: pushq %r15 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: pushq %rbx +; CHECK-NEXT: .cfi_def_cfa_offset 24 +; CHECK-NEXT: pushq %rax +; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: .cfi_offset %rbx, -24 +; CHECK-NEXT: .cfi_offset %r15, -16 +; CHECK-NEXT: movq %r9, %r13 +; CHECK-NEXT: movq %r8, %rbx +; CHECK-NEXT: movq {{[0-9]+}}(%rsp), %r15 +; CHECK-NEXT: callq _foo +; CHECK-NEXT: movq %r14, (%rax) +; CHECK-NEXT: movq {{[0-9]+}}(%rsp), %rcx +; CHECK-NEXT: movq %rcx, [[OFF:[0-9]+]](%rsp) +; CHECK-NEXT: movl [[OFF]](%rsp), %edx +; CHECK-NEXT: movq %rax, %r14 +; CHECK-NEXT: movq %r13, %rdi +; CHECK-NEXT: movq %r15, %rsi +; CHECK-NEXT: movq %rbx, %r13 +; CHECK-NEXT: addq $8, %rsp +; CHECK-NEXT: popq %rbx +; CHECK-NEXT: popq %r15 +; CHECK-NEXT: addq $16, %rsp +; CHECK-NEXT: jmp _tc_fn ## TAILCALL +entry: + %res = tail call ptr @foo() + store ptr %1, ptr %res, align 8 + musttail call swifttailcc void @tc_fn(ptr swiftasync %res, i64 %6, ptr %7, i8 %8, ptr swiftself %5) + ret void +} diff --git a/llvm/test/MC/AMDGPU/gfx11_asm_vop3.s b/llvm/test/MC/AMDGPU/gfx11_asm_vop3.s index e55fbfc6e18c8..857a1359b00d9 100644 --- a/llvm/test/MC/AMDGPU/gfx11_asm_vop3.s +++ b/llvm/test/MC/AMDGPU/gfx11_asm_vop3.s @@ -461,11 +461,11 @@ v_alignbyte_b32 v5, s1, v255, s3 v_alignbyte_b32 v5, s105, s105, s105 // GFX11: v_alignbyte_b32 v5, s105, s105, s105 ; encoding: [0x05,0x00,0x17,0xd6,0x69,0xd2,0xa4,0x01] -v_alignbyte_b32 v5, vcc_lo, ttmp15, v3 -// GFX11: v_alignbyte_b32 v5, vcc_lo, ttmp15, v3 ; encoding: [0x05,0x00,0x17,0xd6,0x6a,0xf6,0x0c,0x04] +v_alignbyte_b32 v5, vcc_lo, ttmp15, v3.l +// GFX11: v_alignbyte_b32 v5, vcc_lo, ttmp15, v3.l ; encoding: [0x05,0x00,0x17,0xd6,0x6a,0xf6,0x0c,0x04] -v_alignbyte_b32 v5, vcc_hi, 0xaf123456, v255 -// GFX11: v_alignbyte_b32 v5, vcc_hi, 0xaf123456, v255 ; encoding: [0x05,0x00,0x17,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf] +v_alignbyte_b32 v5, vcc_hi, 0xaf123456, v255.l +// GFX11: v_alignbyte_b32 v5, vcc_hi, 0xaf123456, v255.l ; encoding: [0x05,0x00,0x17,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf] v_alignbyte_b32 v5, ttmp15, src_scc, ttmp15 // GFX11: v_alignbyte_b32 v5, ttmp15, src_scc, ttmp15 ; encoding: [0x05,0x00,0x17,0xd6,0x7b,0xfa,0xed,0x01] @@ -494,6 +494,9 @@ v_alignbyte_b32 v5, src_scc, vcc_lo, -1 v_alignbyte_b32 v255, 0xaf123456, vcc_hi, null // GFX11: v_alignbyte_b32 v255, 0xaf123456, vcc_hi, null ; encoding: [0xff,0x00,0x17,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf] +v_alignbyte_b32 v5, vcc_hi, 0xaf123456, v255.h +// GFX11: v_alignbyte_b32 v5, vcc_hi, 0xaf123456, v255.h op_sel:[0,0,1,0] ; encoding: [0x05,0x20,0x17,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf] + v_and_b16 v5.l, v1.l, v2.l // GFX11: v_and_b16 v5.l, v1.l, v2.l ; encoding: [0x05,0x00,0x62,0xd7,0x01,0x05,0x02,0x00] diff --git a/llvm/test/MC/AMDGPU/gfx11_asm_vop3_dpp16.s b/llvm/test/MC/AMDGPU/gfx11_asm_vop3_dpp16.s index 3a3f6c178cbde..1864996b26028 100644 --- a/llvm/test/MC/AMDGPU/gfx11_asm_vop3_dpp16.s +++ b/llvm/test/MC/AMDGPU/gfx11_asm_vop3_dpp16.s @@ -363,22 +363,22 @@ v_alignbit_b32_e64_dpp v5, v1, v2, -1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bou v_alignbit_b32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 // GFX11: v_alignbit_b32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: 
[0xff,0x00,0x16,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30] -v_alignbyte_b32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] -// GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff] +v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l quad_perm:[3,2,1,0] +// GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff] -v_alignbyte_b32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3] -// GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff] +v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l quad_perm:[0,1,2,3] +// GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff] -v_alignbyte_b32_e64_dpp v5, v1, v2, v3 row_mirror -// GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff] +v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l row_mirror row_mask:0xf bank_mask:0xf +// GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff] -v_alignbyte_b32_e64_dpp v5, v1, v2, v3 row_half_mirror -// GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x41,0x01,0xff] +v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l row_half_mirror row_mask:0xf bank_mask:0xf +// GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x41,0x01,0xff] -v_alignbyte_b32_e64_dpp v5, v1, v2, v255 row_shl:1 -// GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, v255 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff] +v_alignbyte_b32_e64_dpp v5, v1, v2, v255.l row_shl:1 row_mask:0xf bank_mask:0xf +// GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, v255.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff] -v_alignbyte_b32_e64_dpp v5, v1, v2, s105 row_shl:15 +v_alignbyte_b32_e64_dpp v5, v1, v2, s105 row_shl:15 row_mask:0xf bank_mask:0xf // GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, s105 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x0f,0x01,0xff] v_alignbyte_b32_e64_dpp v5, v1, v2, vcc_hi row_shr:1 @@ -387,7 +387,7 @@ v_alignbyte_b32_e64_dpp v5, v1, v2, vcc_hi row_shr:1 v_alignbyte_b32_e64_dpp v5, v1, v2, vcc_lo row_shr:15 // GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, vcc_lo row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x1f,0x01,0xff] -v_alignbyte_b32_e64_dpp v5, v1, v2, ttmp15 row_ror:1 +v_alignbyte_b32_e64_dpp v5, v1, v2, ttmp15 row_ror:1 row_mask:0xf bank_mask:0xf // GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, ttmp15 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xee,0x01,0x01,0x21,0x01,0xff] v_alignbyte_b32_e64_dpp v5, v1, v2, exec_hi row_ror:15 @@ -405,6 +405,24 @@ v_alignbyte_b32_e64_dpp v5, v1, v2, -1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bo v_alignbyte_b32_e64_dpp v255, v255, v255, src_scc row_xmask:15 
row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 // GFX11: v_alignbyte_b32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x00,0x17,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30] +v_alignbyte_b32_e64_dpp v5, v1, v2, v255.l row_mirror +// GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, v255.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x40,0x01,0xff] + +v_alignbyte_b32_e64_dpp v5, v1, v2, s3 row_half_mirror +// GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, s3 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x00,0x01,0x41,0x01,0xff] + +v_alignbyte_b32_e64_dpp v5, v1, v2, s105 row_shl:1 +// GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, s105 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff] + +v_alignbyte_b32_e64_dpp v5, v1, v2, ttmp15 row_shl:15 +// GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, ttmp15 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xee,0x01,0x01,0x0f,0x01,0xff] + +v_alignbyte_b32_e64_dpp v5, v1, v2, m0 row_ror:1 +// GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, m0 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xf6,0x01,0x01,0x21,0x01,0xff] + +v_alignbyte_b32_e64_dpp v5, v1, v2, v255.h row_mirror +// GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, v255.h op_sel:[0,0,1,0] row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x20,0x17,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x40,0x01,0xff] + v_and_b16_e64_dpp v5.l, v1.l, v2.l quad_perm:[3,2,1,0] // GFX11: v_and_b16_e64_dpp v5.l, v1.l, v2.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff] diff --git a/llvm/test/MC/AMDGPU/gfx11_asm_vop3_dpp8.s b/llvm/test/MC/AMDGPU/gfx11_asm_vop3_dpp8.s index 1f9173518d412..300e5ef22f5ae 100644 --- a/llvm/test/MC/AMDGPU/gfx11_asm_vop3_dpp8.s +++ b/llvm/test/MC/AMDGPU/gfx11_asm_vop3_dpp8.s @@ -187,11 +187,11 @@ v_alignbit_b32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0] fi:1 v_alignbit_b32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0 // GFX11: v_alignbit_b32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x00,0x16,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00] -v_alignbyte_b32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] -// GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05] +v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l dpp8:[7,6,5,4,3,2,1,0] +// GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05] -v_alignbyte_b32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0] -// GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05] +v_alignbyte_b32_e64_dpp v5, v1, v2, v255.l dpp8:[7,6,5,4,3,2,1,0] +// GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, v255.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05] v_alignbyte_b32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0] // GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05] @@ -220,6 +220,15 @@ v_alignbyte_b32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0] fi:1 v_alignbyte_b32_e64_dpp v255, v255, v255, src_scc 
dpp8:[0,0,0,0,0,0,0,0] fi:0 // GFX11: v_alignbyte_b32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x00,0x17,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00] +v_alignbyte_b32_e64_dpp v5, v1, v2, s3 dpp8:[7,6,5,4,3,2,1,0] +// GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, s3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0x0e,0x00,0x01,0x77,0x39,0x05] + +v_alignbyte_b32_e64_dpp v5, v1, v2, m0 dpp8:[7,6,5,4,3,2,1,0] +// GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, m0 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0xf6,0x01,0x01,0x77,0x39,0x05] + +v_alignbyte_b32_e64_dpp v5, v1, v2, v255.h dpp8:[7,6,5,4,3,2,1,0] +// GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, v255.h op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x20,0x17,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05] + v_and_b16_e64_dpp v5.l, v1.l, v2.l dpp8:[7,6,5,4,3,2,1,0] // GFX11: v_and_b16_e64_dpp v5.l, v1.l, v2.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x62,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05] diff --git a/llvm/test/MC/AMDGPU/gfx12_asm_vop3.s b/llvm/test/MC/AMDGPU/gfx12_asm_vop3.s index 016ad18637bbb..72d201e060df7 100644 --- a/llvm/test/MC/AMDGPU/gfx12_asm_vop3.s +++ b/llvm/test/MC/AMDGPU/gfx12_asm_vop3.s @@ -452,6 +452,9 @@ v_alignbyte_b32 v5, src_scc, vcc_lo, -1 v_alignbyte_b32 v255, 0xaf123456, vcc_hi, null // GFX12: v_alignbyte_b32 v255, 0xaf123456, vcc_hi, null ; encoding: [0xff,0x00,0x17,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf] +v_alignbyte_b32 v5, vcc_hi, 0xaf123456, v255.h +// GFX12: v_alignbyte_b32 v5, vcc_hi, 0xaf123456, v255.h op_sel:[0,0,1,0] ; encoding: [0x05,0x20,0x17,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf] + v_and_b16 v5.l, v1.l, v2.l // GFX12: v_and_b16 v5.l, v1.l, v2.l ; encoding: [0x05,0x00,0x62,0xd7,0x01,0x05,0x02,0x00] diff --git a/llvm/test/MC/AMDGPU/gfx12_asm_vop3_dpp16.s b/llvm/test/MC/AMDGPU/gfx12_asm_vop3_dpp16.s index d21c64f25f6a3..08be9225890c2 100644 --- a/llvm/test/MC/AMDGPU/gfx12_asm_vop3_dpp16.s +++ b/llvm/test/MC/AMDGPU/gfx12_asm_vop3_dpp16.s @@ -485,6 +485,9 @@ v_alignbyte_b32_e64_dpp v5, v1, v2, -1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bo v_alignbyte_b32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 // GFX12: v_alignbyte_b32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x00,0x17,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30] +v_alignbyte_b32_e64_dpp v5, v1, v2, v255.h row_mirror +// GFX12: v_alignbyte_b32_e64_dpp v5, v1, v2, v255.h op_sel:[0,0,1,0] row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x20,0x17,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x40,0x01,0xff] + v_and_b16_e64_dpp v5.l, v1.l, v2.l quad_perm:[3,2,1,0] // GFX12: v_and_b16_e64_dpp v5.l, v1.l, v2.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff] diff --git a/llvm/test/MC/AMDGPU/gfx12_asm_vop3_dpp8.s b/llvm/test/MC/AMDGPU/gfx12_asm_vop3_dpp8.s index cae8423b3a655..0523b0fe8c9a4 100644 --- a/llvm/test/MC/AMDGPU/gfx12_asm_vop3_dpp8.s +++ b/llvm/test/MC/AMDGPU/gfx12_asm_vop3_dpp8.s @@ -288,6 +288,9 @@ v_alignbyte_b32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0] fi:1 v_alignbyte_b32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0 // GFX12: v_alignbyte_b32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x00,0x17,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00] +v_alignbyte_b32_e64_dpp v5, v1, v2, v255.h dpp8:[7,6,5,4,3,2,1,0] +// GFX12: 
v_alignbyte_b32_e64_dpp v5, v1, v2, v255.h op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x20,0x17,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05] + v_and_b16_e64_dpp v5.l, v1.l, v2.l dpp8:[7,6,5,4,3,2,1,0] // GFX12: v_and_b16_e64_dpp v5.l, v1.l, v2.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x62,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05] diff --git a/llvm/test/MC/ARM/vcvt-fixed-point-errors.s b/llvm/test/MC/ARM/vcvt-fixed-point-errors.s new file mode 100644 index 0000000000000..90e9da054a908 --- /dev/null +++ b/llvm/test/MC/ARM/vcvt-fixed-point-errors.s @@ -0,0 +1,51 @@ +// RUN: not llvm-mc -triple=armv8a-none-eabi -mattr=+fullfp16 < %s 2>&1 | FileCheck %s + + vcvt.u16.f16 s0, s1, #1 +// CHECK: [[@LINE-1]]{{.*}}error: source and destination registers must be the same + vcvt.s16.f16 s0, s1, #1 +// CHECK: [[@LINE-1]]{{.*}}error: source and destination registers must be the same + vcvt.u32.f16 s0, s1, #1 +// CHECK: [[@LINE-1]]{{.*}}error: source and destination registers must be the same + vcvt.s32.f16 s0, s1, #1 +// CHECK: [[@LINE-1]]{{.*}}error: source and destination registers must be the same + vcvt.u16.f32 s0, s1, #1 +// CHECK: [[@LINE-1]]{{.*}}error: source and destination registers must be the same + vcvt.s16.f32 s0, s1, #1 +// CHECK: [[@LINE-1]]{{.*}}error: source and destination registers must be the same + vcvt.u32.f32 s0, s1, #1 +// CHECK: [[@LINE-1]]{{.*}}error: source and destination registers must be the same + vcvt.s32.f32 s0, s1, #1 +// CHECK: [[@LINE-1]]{{.*}}error: source and destination registers must be the same + vcvt.u16.f64 d0, d1, #1 +// CHECK: [[@LINE-1]]{{.*}}error: source and destination registers must be the same + vcvt.s16.f64 d0, d1, #1 +// CHECK: [[@LINE-1]]{{.*}}error: source and destination registers must be the same + vcvt.u32.f64 d0, d1, #1 +// CHECK: [[@LINE-1]]{{.*}}error: source and destination registers must be the same + vcvt.s32.f64 d0, d1, #1 +// CHECK: [[@LINE-1]]{{.*}}error: source and destination registers must be the same + vcvt.f16.u16 s0, s1, #1 +// CHECK: [[@LINE-1]]{{.*}}error: source and destination registers must be the same + vcvt.f16.s16 s0, s1, #1 +// CHECK: [[@LINE-1]]{{.*}}error: source and destination registers must be the same + vcvt.f16.u32 s0, s1, #1 +// CHECK: [[@LINE-1]]{{.*}}error: source and destination registers must be the same + vcvt.f16.s32 s0, s1, #1 +// CHECK: [[@LINE-1]]{{.*}}error: source and destination registers must be the same + vcvt.f32.u16 s0, s1, #1 +// CHECK: [[@LINE-1]]{{.*}}error: source and destination registers must be the same + vcvt.f32.s16 s0, s1, #1 +// CHECK: [[@LINE-1]]{{.*}}error: source and destination registers must be the same + vcvt.f32.u32 s0, s1, #1 +// CHECK: [[@LINE-1]]{{.*}}error: source and destination registers must be the same + vcvt.f32.s32 s0, s1, #1 +// CHECK: [[@LINE-1]]{{.*}}error: source and destination registers must be the same + vcvt.f64.u16 d0, d1, #1 +// CHECK: [[@LINE-1]]{{.*}}error: source and destination registers must be the same + vcvt.f64.s16 d0, d1, #1 +// CHECK: [[@LINE-1]]{{.*}}error: source and destination registers must be the same + vcvt.f64.u32 d0, d1, #1 +// CHECK: [[@LINE-1]]{{.*}}error: source and destination registers must be the same + vcvt.f64.s32 d0, d1, #1 +// CHECK: [[@LINE-1]]{{.*}}error: source and destination registers must be the same + diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3.txt index fc0ae333b1745..b74128b21f563 100644 --- 
a/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3.txt +++ b/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3.txt @@ -508,10 +508,16 @@ # GFX11: v_alignbyte_b32 v5, s105, s105, s105 ; encoding: [0x05,0x00,0x17,0xd6,0x69,0xd2,0xa4,0x01] 0x05,0x00,0x17,0xd6,0x6a,0xf6,0x0c,0x04 -# GFX11: v_alignbyte_b32 v5, vcc_lo, ttmp15, v3 ; encoding: [0x05,0x00,0x17,0xd6,0x6a,0xf6,0x0c,0x04] +# W32-REAL16: v_alignbyte_b32 v5, vcc_lo, ttmp15, v3.l ; encoding: [0x05,0x00,0x17,0xd6,0x6a,0xf6,0x0c,0x04] +# W32-FAKE16: v_alignbyte_b32 v5, vcc_lo, ttmp15, v3 ; encoding: [0x05,0x00,0x17,0xd6,0x6a,0xf6,0x0c,0x04] +# W64-REAL16: v_alignbyte_b32 v5, vcc_lo, ttmp15, v3.l ; encoding: [0x05,0x00,0x17,0xd6,0x6a,0xf6,0x0c,0x04] +# W64-FAKE16: v_alignbyte_b32 v5, vcc_lo, ttmp15, v3 ; encoding: [0x05,0x00,0x17,0xd6,0x6a,0xf6,0x0c,0x04] 0x05,0x00,0x17,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf -# GFX11: v_alignbyte_b32 v5, vcc_hi, 0xaf123456, v255 ; encoding: [0x05,0x00,0x17,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf] +# W32-REAL16: v_alignbyte_b32 v5, vcc_hi, 0xaf123456, v255.l ; encoding: [0x05,0x00,0x17,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf] +# W32-FAKE16: v_alignbyte_b32 v5, vcc_hi, 0xaf123456, v255 ; encoding: [0x05,0x00,0x17,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf] +# W64-REAL16: v_alignbyte_b32 v5, vcc_hi, 0xaf123456, v255.l ; encoding: [0x05,0x00,0x17,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf] +# W64-FAKE16: v_alignbyte_b32 v5, vcc_hi, 0xaf123456, v255 ; encoding: [0x05,0x00,0x17,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf] 0x05,0x00,0x17,0xd6,0x7b,0xfa,0xed,0x01 # GFX11: v_alignbyte_b32 v5, ttmp15, src_scc, ttmp15 ; encoding: [0x05,0x00,0x17,0xd6,0x7b,0xfa,0xed,0x01] @@ -540,6 +546,12 @@ 0xff,0x00,0x17,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf # GFX11: v_alignbyte_b32 v255, 0xaf123456, vcc_hi, null ; encoding: [0xff,0x00,0x17,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf] +0x05,0x20,0x17,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf +# W32-REAL16: v_alignbyte_b32 v5, vcc_hi, 0xaf123456, v255.h op_sel:[0,0,1,0] ; encoding: [0x05,0x20,0x17,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf] +# W32-FAKE16: v_alignbyte_b32 v5, vcc_hi, 0xaf123456, v255 op_sel:[0,0,1,0] ; encoding: [0x05,0x20,0x17,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf] +# W64-REAL16: v_alignbyte_b32 v5, vcc_hi, 0xaf123456, v255.h op_sel:[0,0,1,0] ; encoding: [0x05,0x20,0x17,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf] +# W64-FAKE16: v_alignbyte_b32 v5, vcc_hi, 0xaf123456, v255 op_sel:[0,0,1,0] ; encoding: [0x05,0x20,0x17,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf] + 0x05,0x00,0x62,0xd7,0x01,0x05,0x02,0x00 # W32-REAL16: v_and_b16 v5.l, v1.l, v2.l ; encoding: [0x05,0x00,0x62,0xd7,0x01,0x05,0x02,0x00] # W32-FAKE16: v_and_b16 v5, v1, v2 ; encoding: [0x05,0x00,0x62,0xd7,0x01,0x05,0x02,0x00] diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3_dpp16.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3_dpp16.txt index 8a9ab3da5e4e0..8e7122b902326 100644 --- a/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3_dpp16.txt +++ b/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3_dpp16.txt @@ -228,19 +228,34 @@ # GFX11: v_alignbit_b32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x16,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x0d,0x30] 0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff -# GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff] +# 
W32-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff] +# W32-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff] +# W64-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff] +# W64-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff] 0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff -# GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff] +# W32-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff] +# W32-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff] +# W64-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff] +# W64-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff] 0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff -# GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff] +# W32-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff] +# W32-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff] +# W64-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff] +# W64-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff] 0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x41,0x01,0xff -# GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x41,0x01,0xff] +# W32-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x41,0x01,0xff] +# W32-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x41,0x01,0xff] +# W64-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x41,0x01,0xff] +# W64-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x41,0x01,0xff] 0x05,0x00,0x17,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff -# GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, v255 
row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff] +# W32-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff] +# W32-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff] +# W64-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff] +# W64-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff] 0x05,0x00,0x17,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x0f,0x01,0xff # GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, s105 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x0f,0x01,0xff] @@ -269,6 +284,12 @@ 0xff,0x00,0x17,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x0d,0x30 # GFX11: v_alignbyte_b32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x17,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x0d,0x30] +0x05,0x20,0x17,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x40,0x01,0xff +# W32-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255.h op_sel:[0,0,1,0] row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x20,0x17,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x40,0x01,0xff] +# W32-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255 op_sel:[0,0,1,0] row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x20,0x17,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x40,0x01,0xff] +# W64-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255.h op_sel:[0,0,1,0] row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x20,0x17,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x40,0x01,0xff] +# W64-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255 op_sel:[0,0,1,0] row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x20,0x17,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x40,0x01,0xff] + 0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff # W32-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff] # W32-FAKE16: v_and_b16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff] diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3_dpp8.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3_dpp8.txt index 89f7c606152fb..f67eb32385407 100644 --- a/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3_dpp8.txt +++ b/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3_dpp8.txt @@ -135,10 +135,16 @@ # GFX11: v_alignbit_b32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x16,0xd6,0xea,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00] 0x05,0x00,0x17,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05 -# GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05] +# W32-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05] +# W32-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05] +# W64-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l dpp8:[7,6,5,4,3,2,1,0] ; 
encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05] +# W64-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05] 0x05,0x00,0x17,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05 -# GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05] +# W32-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05] +# W32-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05] +# W64-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05] +# W64-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05] 0x05,0x00,0x17,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05 # GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05] @@ -167,6 +173,12 @@ 0xff,0x00,0x17,0xd6,0xea,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00 # GFX11: v_alignbyte_b32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x17,0xd6,0xea,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00] +0x05,0x20,0x17,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05 +# W32-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255.h op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x20,0x17,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05] +# W32-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255 op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x20,0x17,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05] +# W64-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255.h op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x20,0x17,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05] +# W64-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255 op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x20,0x17,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05] + 0x05,0x00,0x62,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05 # W32-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x62,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05] # W32-FAKE16: v_and_b16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x62,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05] diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3.txt index c2e1288c05cc4..3d5e78c86bc22 100644 --- a/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3.txt +++ b/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3.txt @@ -472,10 +472,16 @@ # GFX12: v_alignbyte_b32 v5, s105, s105, s105 ; encoding: [0x05,0x00,0x17,0xd6,0x69,0xd2,0xa4,0x01] 0x05,0x00,0x17,0xd6,0x6a,0xf6,0x0c,0x04 -# GFX12: v_alignbyte_b32 v5, vcc_lo, ttmp15, v3 ; encoding: [0x05,0x00,0x17,0xd6,0x6a,0xf6,0x0c,0x04] +# W32-REAL16: v_alignbyte_b32 v5, vcc_lo, ttmp15, v3.l ; encoding: [0x05,0x00,0x17,0xd6,0x6a,0xf6,0x0c,0x04] +# W32-FAKE16: v_alignbyte_b32 v5, vcc_lo, ttmp15, v3 ; encoding: [0x05,0x00,0x17,0xd6,0x6a,0xf6,0x0c,0x04] +# W64-REAL16: v_alignbyte_b32 v5, vcc_lo, ttmp15, v3.l ; encoding: [0x05,0x00,0x17,0xd6,0x6a,0xf6,0x0c,0x04] +# W64-FAKE16: v_alignbyte_b32 v5, vcc_lo, ttmp15, v3 ; encoding: 
[0x05,0x00,0x17,0xd6,0x6a,0xf6,0x0c,0x04] 0x05,0x00,0x17,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf -# GFX12: v_alignbyte_b32 v5, vcc_hi, 0xaf123456, v255 ; encoding: [0x05,0x00,0x17,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf] +# W32-REAL16: v_alignbyte_b32 v5, vcc_hi, 0xaf123456, v255.l ; encoding: [0x05,0x00,0x17,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf] +# W32-FAKE16: v_alignbyte_b32 v5, vcc_hi, 0xaf123456, v255 ; encoding: [0x05,0x00,0x17,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf] +# W64-REAL16: v_alignbyte_b32 v5, vcc_hi, 0xaf123456, v255.l ; encoding: [0x05,0x00,0x17,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf] +# W64-FAKE16: v_alignbyte_b32 v5, vcc_hi, 0xaf123456, v255 ; encoding: [0x05,0x00,0x17,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf] 0x05,0x00,0x17,0xd6,0x7b,0xfa,0xed,0x01 # GFX12: v_alignbyte_b32 v5, ttmp15, src_scc, ttmp15 ; encoding: [0x05,0x00,0x17,0xd6,0x7b,0xfa,0xed,0x01] @@ -504,6 +510,12 @@ 0xff,0x00,0x17,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf # GFX12: v_alignbyte_b32 v255, 0xaf123456, vcc_hi, null ; encoding: [0xff,0x00,0x17,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf] +0x05,0x20,0x17,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf +# W32-REAL16: v_alignbyte_b32 v5, vcc_hi, 0xaf123456, v255.h op_sel:[0,0,1,0] ; encoding: [0x05,0x20,0x17,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf] +# W32-FAKE16: v_alignbyte_b32 v5, vcc_hi, 0xaf123456, v255 op_sel:[0,0,1,0] ; encoding: [0x05,0x20,0x17,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf] +# W64-REAL16: v_alignbyte_b32 v5, vcc_hi, 0xaf123456, v255.h op_sel:[0,0,1,0] ; encoding: [0x05,0x20,0x17,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf] +# W64-FAKE16: v_alignbyte_b32 v5, vcc_hi, 0xaf123456, v255 op_sel:[0,0,1,0] ; encoding: [0x05,0x20,0x17,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf] + 0x05,0x00,0x62,0xd7,0x01,0x05,0x02,0x00 # W32-REAL16: v_and_b16 v5.l, v1.l, v2.l ; encoding: [0x05,0x00,0x62,0xd7,0x01,0x05,0x02,0x00] # W32-FAKE16: v_and_b16 v5, v1, v2 ; encoding: [0x05,0x00,0x62,0xd7,0x01,0x05,0x02,0x00] diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3_dpp16.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3_dpp16.txt index f0328615aacb0..c63c09b6b24c5 100644 --- a/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3_dpp16.txt +++ b/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3_dpp16.txt @@ -240,22 +240,40 @@ # GFX12: v_alignbit_b32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x16,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x0d,0x30] 0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff -# GFX12: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff] +# W32-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff] +# W32-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff] +# W64-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff] +# W64-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff] 0x05,0x00,0x17,0xd6,0xfa,0x06,0x0c,0x04,0x01,0x1b,0x00,0xff -# GFX12: 
v_alignbyte_b32_e64_dpp v5, v1, s3, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x06,0x0c,0x04,0x01,0x1b,0x00,0xff] +# W32-REAL16: v_alignbyte_b32_e64_dpp v5, v1, s3, v3.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x06,0x0c,0x04,0x01,0x1b,0x00,0xff] +# W32-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, s3, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x06,0x0c,0x04,0x01,0x1b,0x00,0xff] +# W64-REAL16: v_alignbyte_b32_e64_dpp v5, v1, s3, v3.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x06,0x0c,0x04,0x01,0x1b,0x00,0xff] +# W64-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, s3, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x06,0x0c,0x04,0x01,0x1b,0x00,0xff] 0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff -# GFX12: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff] +# W32-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff] +# W32-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff] +# W64-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff] +# W64-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff] 0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff -# GFX12: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff] +# W32-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff] +# W32-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff] +# W64-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff] +# W64-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff] 0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x41,0x01,0xff -# GFX12: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x41,0x01,0xff] +# W32-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x41,0x01,0xff] +# W32-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x41,0x01,0xff] +# W64-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x41,0x01,0xff] +# W64-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: 
[0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x41,0x01,0xff] 0x05,0x00,0x17,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff -# GFX12: v_alignbyte_b32_e64_dpp v5, v1, v2, v255 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff] +# W32-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff] +# W32-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff] +# W64-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff] +# W64-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff] 0x05,0x00,0x17,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x0f,0x01,0xff # GFX12: v_alignbyte_b32_e64_dpp v5, v1, v2, s105 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x0f,0x01,0xff] @@ -284,6 +302,12 @@ 0xff,0x00,0x17,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x0d,0x30 # GFX12: v_alignbyte_b32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x17,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x0d,0x30] +0x05,0x20,0x17,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x40,0x01,0xff +# W32-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255.h op_sel:[0,0,1,0] row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x20,0x17,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x40,0x01,0xff] +# W32-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255 op_sel:[0,0,1,0] row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x20,0x17,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x40,0x01,0xff] +# W64-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255.h op_sel:[0,0,1,0] row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x20,0x17,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x40,0x01,0xff] +# W64-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255 op_sel:[0,0,1,0] row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x20,0x17,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x40,0x01,0xff] + 0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff # W32-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff] # W32-FAKE16: v_and_b16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff] diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3_dpp8.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3_dpp8.txt index fc429adcbefb2..4182cd93f813c 100644 --- a/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3_dpp8.txt +++ b/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3_dpp8.txt @@ -147,13 +147,22 @@ # GFX12: v_alignbit_b32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x16,0xd6,0xea,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00] 0x05,0x00,0x17,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05 -# GFX12: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05] +# W32-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05] +# W32-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 
dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05] +# W64-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05] +# W64-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05] 0x05,0x00,0x17,0xd6,0xe9,0x06,0x0c,0x04,0x01,0x77,0x39,0x05 -# GFX12: v_alignbyte_b32_e64_dpp v5, v1, s3, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x06,0x0c,0x04,0x01,0x77,0x39,0x05] +# W32-REAL16: v_alignbyte_b32_e64_dpp v5, v1, s3, v3.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x06,0x0c,0x04,0x01,0x77,0x39,0x05] +# W32-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, s3, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x06,0x0c,0x04,0x01,0x77,0x39,0x05] +# W64-REAL16: v_alignbyte_b32_e64_dpp v5, v1, s3, v3.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x06,0x0c,0x04,0x01,0x77,0x39,0x05] +# W64-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, s3, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x06,0x0c,0x04,0x01,0x77,0x39,0x05] 0x05,0x00,0x17,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05 -# GFX12: v_alignbyte_b32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05] +# W32-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05] +# W32-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05] +# W64-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05] +# W64-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05] 0x05,0x00,0x17,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05 # GFX12: v_alignbyte_b32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05] @@ -182,6 +191,12 @@ 0xff,0x00,0x17,0xd6,0xea,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00 # GFX12: v_alignbyte_b32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x17,0xd6,0xea,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00] +0x05,0x20,0x17,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05 +# W32-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255.h op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x20,0x17,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05] +# W32-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255 op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x20,0x17,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05] +# W64-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255.h op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x20,0x17,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05] +# W64-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255 op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x20,0x17,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05] + 0x05,0x00,0x62,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05 # W32-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x62,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05] # W32-FAKE16: v_and_b16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: 
[0x05,0x00,0x62,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05] diff --git a/llvm/test/ObjectYAML/DXContainer/RootSignature-Flags.yaml b/llvm/test/ObjectYAML/DXContainer/RootSignature-Flags.yaml new file mode 100644 index 0000000000000..b0a3e6945f454 --- /dev/null +++ b/llvm/test/ObjectYAML/DXContainer/RootSignature-Flags.yaml @@ -0,0 +1,33 @@ +# RUN: yaml2obj %s | obj2yaml | FileCheck %s + +--- !dxcontainer +Header: + Hash: [ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 ] + Version: + Major: 1 + Minor: 0 + PartCount: 1 + PartOffsets: [ 60 ] +Parts: + - Name: RTS0 + Size: 24 + RootSignature: + Version: 2 + NumParameters: 1 + RootParametersOffset: 3 + NumStaticSamplers: 4 + StaticSamplersOffset: 5 + AllowInputAssemblerInputLayout: true + DenyGeometryShaderRootAccess: true + +# CHECK: - Name: RTS0 +# CHECK-NEXT: Size: 24 +# CHECK-NEXT: RootSignature: +# CHECK-NEXT: Version: 2 +# CHECK-NEXT: NumParameters: 1 +# CHECK-NEXT: RootParametersOffset: 3 +# CHECK-NEXT: NumStaticSamplers: 4 +# CHECK-NEXT: StaticSamplersOffset: 5 +# CHECK-NEXT: AllowInputAssemblerInputLayout: true +# CHECK-NEXT: DenyGeometryShaderRootAccess: true diff --git a/llvm/test/Transforms/IndVarSimplify/infer-via-ranges.ll b/llvm/test/Transforms/IndVarSimplify/infer-via-ranges.ll new file mode 100644 index 0000000000000..effae2322dba3 --- /dev/null +++ b/llvm/test/Transforms/IndVarSimplify/infer-via-ranges.ll @@ -0,0 +1,51 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt -passes=indvars -S < %s | FileCheck %s + +define void @infer_via_ranges(ptr %arr, i32 %n) { +; CHECK-LABEL: define void @infer_via_ranges( +; CHECK-SAME: ptr [[ARR:%.*]], i32 [[N:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[FIRST_ITR_CHECK:%.*]] = icmp sgt i32 [[N]], 0 +; CHECK-NEXT: [[START:%.*]] = sub i32 [[N]], 1 +; CHECK-NEXT: br i1 [[FIRST_ITR_CHECK]], label %[[LOOP_PREHEADER:.*]], label %[[EXIT:.*]] +; CHECK: [[LOOP_PREHEADER]]: +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IDX:%.*]] = phi i32 [ [[IDX_DEC:%.*]], %[[IN_BOUNDS:.*]] ], [ [[START]], %[[LOOP_PREHEADER]] ] +; CHECK-NEXT: [[IDX_DEC]] = sub nsw i32 [[IDX]], 1 +; CHECK-NEXT: br i1 true, label %[[IN_BOUNDS]], label %[[OUT_OF_BOUNDS:.*]] +; CHECK: [[IN_BOUNDS]]: +; CHECK-NEXT: [[ADDR:%.*]] = getelementptr i32, ptr [[ARR]], i32 [[IDX]] +; CHECK-NEXT: store i32 0, ptr [[ADDR]], align 4 +; CHECK-NEXT: [[NEXT:%.*]] = icmp sgt i32 [[IDX_DEC]], -1 +; CHECK-NEXT: br i1 [[NEXT]], label %[[LOOP]], label %[[EXIT_LOOPEXIT:.*]] +; CHECK: [[OUT_OF_BOUNDS]]: +; CHECK-NEXT: ret void +; CHECK: [[EXIT_LOOPEXIT]]: +; CHECK-NEXT: br label %[[EXIT]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret void +; + entry: + %first.itr.check = icmp sgt i32 %n, 0 + %start = sub i32 %n, 1 + br i1 %first.itr.check, label %loop, label %exit + + loop: + %idx = phi i32 [ %start, %entry ] , [ %idx.dec, %in.bounds ] + %idx.dec = sub i32 %idx, 1 + %abc = icmp sge i32 %idx, 0 + br i1 %abc, label %in.bounds, label %out.of.bounds + + in.bounds: + %addr = getelementptr i32, ptr %arr, i32 %idx + store i32 0, ptr %addr + %next = icmp sgt i32 %idx.dec, -1 + br i1 %next, label %loop, label %exit + + out.of.bounds: + ret void + + exit: + ret void +} diff --git a/llvm/test/Transforms/IndVarSimplify/pr126012.ll b/llvm/test/Transforms/IndVarSimplify/pr126012.ll new file mode 100644 index 0000000000000..725ea89b8e651 --- /dev/null +++ b/llvm/test/Transforms/IndVarSimplify/pr126012.ll @@ -0,0 +1,49 @@ +; NOTE: Assertions have 
been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt -S -passes=indvars < %s | FileCheck %s + +; FIXME: This is a miscompile. +define i32 @test() { +; CHECK-LABEL: define i32 @test() { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[FOR_PREHEADER:.*]] +; CHECK: [[FOR_PREHEADER]]: +; CHECK-NEXT: [[INDVAR1:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[PHI:%.*]], %[[FOR_INC:.*]] ] +; CHECK-NEXT: [[INDVAR3:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[INC:%.*]], %[[FOR_INC]] ] +; CHECK-NEXT: [[COND1:%.*]] = icmp eq i32 [[INDVAR3]], 0 +; CHECK-NEXT: br i1 [[COND1]], label %[[FOR_INC]], label %[[FOR_END:.*]] +; CHECK: [[FOR_END]]: +; CHECK-NEXT: [[EXT:%.*]] = zext i1 true to i32 +; CHECK-NEXT: br label %[[FOR_INC]] +; CHECK: [[FOR_INC]]: +; CHECK-NEXT: [[PHI]] = phi i32 [ [[EXT]], %[[FOR_END]] ], [ 0, %[[FOR_PREHEADER]] ] +; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[INDVAR3]], 1 +; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INDVAR3]], 2 +; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_EXIT:.*]], label %[[FOR_PREHEADER]] +; CHECK: [[FOR_EXIT]]: +; CHECK-NEXT: [[INDVAR1_LCSSA:%.*]] = phi i32 [ [[INDVAR1]], %[[FOR_INC]] ] +; CHECK-NEXT: ret i32 [[INDVAR1_LCSSA]] +; +entry: + br label %for.preheader + +for.preheader: + %indvar1 = phi i32 [ 0, %entry ], [ %phi, %for.inc ] + %indvar2 = phi i32 [ 1, %entry ], [ %indvar3, %for.inc ] + %indvar3 = phi i32 [ 0, %entry ], [ %inc, %for.inc ] + %cond1 = icmp eq i32 %indvar3, 0 + br i1 %cond1, label %for.inc, label %for.end + +for.end: + %cmp = icmp sgt i32 %indvar2, 0 + %ext = zext i1 %cmp to i32 + br label %for.inc + +for.inc: + %phi = phi i32 [ %ext, %for.end ], [ 0, %for.preheader ] + %inc = add i32 %indvar3, 1 + %exitcond = icmp eq i32 %indvar3, 2 + br i1 %exitcond, label %for.exit, label %for.preheader + +for.exit: + ret i32 %indvar1 +} diff --git a/llvm/test/Transforms/InstCombine/sign-test-and-or.ll b/llvm/test/Transforms/InstCombine/sign-test-and-or.ll index 65363620563be..3e9ff63869d64 100644 --- a/llvm/test/Transforms/InstCombine/sign-test-and-or.ll +++ b/llvm/test/Transforms/InstCombine/sign-test-and-or.ll @@ -349,6 +349,29 @@ define i1 @test9_logical(i32 %a) { ret i1 %or.cond } +define i1 @test9_logical_samesign(i32 %a) { +; CHECK-LABEL: @test9_logical_samesign( +; CHECK-NEXT: [[CMP2:%.*]] = icmp sgt i32 [[A:%.*]], -1 +; CHECK-NEXT: ret i1 [[CMP2]] +; + %masked = and i32 %a, -1073741825 + %cmp1 = icmp eq i32 %masked, 0 + %cmp2 = icmp samesign sgt i32 %a, -1 + %or.cond = select i1 %cmp1, i1 true, i1 %cmp2 + ret i1 %or.cond +} + +define i1 @test_logical_or_icmp_icmp_samesign(i32 %a) { +; CHECK-LABEL: @test_logical_or_icmp_icmp_samesign( +; CHECK-NEXT: [[CMP2:%.*]] = icmp sgt i32 [[A:%.*]], -1 +; CHECK-NEXT: ret i1 [[CMP2]] +; + %cmp1 = icmp eq i32 %a, 0 + %cmp2 = icmp samesign sgt i32 %a, -1 + %or = select i1 %cmp1, i1 true, i1 %cmp2 + ret i1 %or +} + define i1 @test10(i32 %a) { ; CHECK-LABEL: @test10( ; CHECK-NEXT: [[OR_COND:%.*]] = icmp ult i32 [[A:%.*]], 2 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll b/llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll index 61bbae0b3f16a..5b0f0961a6297 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll @@ -39,7 +39,7 @@ define void @test_widen(ptr noalias %a, ptr readnone %b) #4 { ; TFNONE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] ; TFNONE-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[B]], i64 
[[INDVARS_IV]] ; TFNONE-NEXT: [[LOAD:%.*]] = load i64, ptr [[GEP]], align 8 -; TFNONE-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR3:[0-9]+]] +; TFNONE-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR4:[0-9]+]] ; TFNONE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDVARS_IV]] ; TFNONE-NEXT: store i64 [[CALL]], ptr [[ARRAYIDX]], align 8 ; TFNONE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 @@ -177,7 +177,7 @@ define void @test_if_then(ptr noalias %a, ptr readnone %b) #4 { ; TFNONE-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP12]], 50 ; TFNONE-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END]] ; TFNONE: if.then: -; TFNONE-NEXT: [[TMP13:%.*]] = call i64 @foo(i64 [[TMP12]]) #[[ATTR3]] +; TFNONE-NEXT: [[TMP13:%.*]] = call i64 @foo(i64 [[TMP12]]) #[[ATTR4]] ; TFNONE-NEXT: br label [[IF_END]] ; TFNONE: if.end: ; TFNONE-NEXT: [[TMP14:%.*]] = phi i64 [ [[TMP13]], [[IF_THEN]] ], [ 0, [[FOR_BODY]] ] @@ -339,10 +339,10 @@ define void @test_widen_if_then_else(ptr noalias %a, ptr readnone %b) #4 { ; TFNONE-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP13]], 50 ; TFNONE-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]] ; TFNONE: if.then: -; TFNONE-NEXT: [[TMP14:%.*]] = call i64 @foo(i64 [[TMP13]]) #[[ATTR4:[0-9]+]] +; TFNONE-NEXT: [[TMP14:%.*]] = call i64 @foo(i64 [[TMP13]]) #[[ATTR5:[0-9]+]] ; TFNONE-NEXT: br label [[IF_END]] ; TFNONE: if.else: -; TFNONE-NEXT: [[TMP15:%.*]] = call i64 @foo(i64 0) #[[ATTR4]] +; TFNONE-NEXT: [[TMP15:%.*]] = call i64 @foo(i64 0) #[[ATTR5]] ; TFNONE-NEXT: br label [[IF_END]] ; TFNONE: if.end: ; TFNONE-NEXT: [[TMP16:%.*]] = phi i64 [ [[TMP14]], [[IF_THEN]] ], [ [[TMP15]], [[IF_ELSE]] ] @@ -509,7 +509,7 @@ define void @test_widen_nomask(ptr noalias %a, ptr readnone %b) #4 { ; TFNONE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] ; TFNONE-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[INDVARS_IV]] ; TFNONE-NEXT: [[LOAD:%.*]] = load i64, ptr [[GEP]], align 8 -; TFNONE-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR5:[0-9]+]] +; TFNONE-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR6:[0-9]+]] ; TFNONE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDVARS_IV]] ; TFNONE-NEXT: store i64 [[CALL]], ptr [[ARRAYIDX]], align 8 ; TFNONE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 @@ -525,7 +525,7 @@ define void @test_widen_nomask(ptr noalias %a, ptr readnone %b) #4 { ; TFALWAYS-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] ; TFALWAYS-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[B:%.*]], i64 [[INDVARS_IV]] ; TFALWAYS-NEXT: [[LOAD:%.*]] = load i64, ptr [[GEP]], align 8 -; TFALWAYS-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR5:[0-9]+]] +; TFALWAYS-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR6:[0-9]+]] ; TFALWAYS-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDVARS_IV]] ; TFALWAYS-NEXT: store i64 [[CALL]], ptr [[ARRAYIDX]], align 8 ; TFALWAYS-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 @@ -557,7 +557,7 @@ define void @test_widen_nomask(ptr noalias %a, ptr readnone %b) #4 { ; TFFALLBACK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[N_VEC]], [[VECTOR_BODY]] ] ; TFFALLBACK-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[INDVARS_IV]] ; TFFALLBACK-NEXT: [[LOAD:%.*]] = load i64, ptr [[GEP]], align 8 -; TFFALLBACK-NEXT: 
[[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR5:[0-9]+]] +; TFFALLBACK-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR6:[0-9]+]] ; TFFALLBACK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDVARS_IV]] ; TFFALLBACK-NEXT: store i64 [[CALL]], ptr [[ARRAYIDX]], align 8 ; TFFALLBACK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 @@ -573,7 +573,7 @@ define void @test_widen_nomask(ptr noalias %a, ptr readnone %b) #4 { ; TFA_INTERLEAVE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] ; TFA_INTERLEAVE-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[B:%.*]], i64 [[INDVARS_IV]] ; TFA_INTERLEAVE-NEXT: [[LOAD:%.*]] = load i64, ptr [[GEP]], align 8 -; TFA_INTERLEAVE-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR5:[0-9]+]] +; TFA_INTERLEAVE-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR6:[0-9]+]] ; TFA_INTERLEAVE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDVARS_IV]] ; TFA_INTERLEAVE-NEXT: store i64 [[CALL]], ptr [[ARRAYIDX]], align 8 ; TFA_INTERLEAVE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 @@ -634,7 +634,7 @@ define void @test_widen_optmask(ptr noalias %a, ptr readnone %b) #4 { ; TFNONE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] ; TFNONE-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[INDVARS_IV]] ; TFNONE-NEXT: [[LOAD:%.*]] = load i64, ptr [[GEP]], align 8 -; TFNONE-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR6:[0-9]+]] +; TFNONE-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR7:[0-9]+]] ; TFNONE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDVARS_IV]] ; TFNONE-NEXT: store i64 [[CALL]], ptr [[ARRAYIDX]], align 8 ; TFNONE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 @@ -806,7 +806,7 @@ define double @test_widen_fmuladd_and_call(ptr noalias %a, ptr readnone %b, doub ; TFNONE-NEXT: [[LOAD:%.*]] = load double, ptr [[GEP]], align 8 ; TFNONE-NEXT: [[MULADD]] = tail call double @llvm.fmuladd.f64(double [[LOAD]], double [[M]], double [[FMA_SUM]]) ; TFNONE-NEXT: [[TOINT:%.*]] = fptoui double [[LOAD]] to i64 -; TFNONE-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[TOINT]]) #[[ATTR3]] +; TFNONE-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[TOINT]]) #[[ATTR4]] ; TFNONE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDVARS_IV]] ; TFNONE-NEXT: store i64 [[CALL]], ptr [[ARRAYIDX]], align 8 ; TFNONE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 @@ -963,16 +963,266 @@ for.cond.cleanup: ret double %muladd } + +define void @test_widen_exp_v2(ptr noalias %p2, ptr noalias %p, i64 %n) #5 { +; TFNONE-LABEL: @test_widen_exp_v2( +; TFNONE-NEXT: entry: +; TFNONE-NEXT: [[TMP0:%.*]] = add i64 [[N:%.*]], 1 +; TFNONE-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64() +; TFNONE-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 2 +; TFNONE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]] +; TFNONE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; TFNONE: vector.ph: +; TFNONE-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; TFNONE-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 2 +; TFNONE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP4]] +; TFNONE-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]] +; TFNONE-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() +; TFNONE-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 2 +; TFNONE-NEXT: br label [[VECTOR_BODY:%.*]] +; TFNONE: 
vector.body: +; TFNONE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; TFNONE-NEXT: [[TMP7:%.*]] = load double, ptr [[P2:%.*]], align 8 +; TFNONE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, double [[TMP7]], i64 0 +; TFNONE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer +; TFNONE-NEXT: [[TMP8:%.*]] = call @exp_masked_scalable( [[BROADCAST_SPLAT]], splat (i1 true)) +; TFNONE-NEXT: [[TMP9:%.*]] = fcmp ogt [[TMP8]], zeroinitializer +; TFNONE-NEXT: [[TMP10:%.*]] = xor [[TMP9]], splat (i1 true) +; TFNONE-NEXT: [[PREDPHI:%.*]] = select [[TMP10]], splat (double 1.000000e+00), zeroinitializer +; TFNONE-NEXT: [[TMP11:%.*]] = call i32 @llvm.vscale.i32() +; TFNONE-NEXT: [[TMP12:%.*]] = mul i32 [[TMP11]], 2 +; TFNONE-NEXT: [[TMP13:%.*]] = sub i32 [[TMP12]], 1 +; TFNONE-NEXT: [[TMP14:%.*]] = extractelement [[PREDPHI]], i32 [[TMP13]] +; TFNONE-NEXT: store double [[TMP14]], ptr [[P:%.*]], align 8 +; TFNONE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]] +; TFNONE-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; TFNONE-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; TFNONE: middle.block: +; TFNONE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]] +; TFNONE-NEXT: br i1 [[CMP_N]], label [[END:%.*]], label [[SCALAR_PH]] +; TFNONE: scalar.ph: +; TFNONE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; TFNONE-NEXT: br label [[LOOP:%.*]] +; TFNONE: loop: +; TFNONE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP_END:%.*]] ] +; TFNONE-NEXT: [[LD:%.*]] = load double, ptr [[P2]], align 8 +; TFNONE-NEXT: [[EXP:%.*]] = tail call double @llvm.exp.f64(double [[LD]]) #[[ATTR8:[0-9]+]] +; TFNONE-NEXT: [[COND1:%.*]] = fcmp ogt double [[EXP]], 0.000000e+00 +; TFNONE-NEXT: br i1 [[COND1]], label [[LOOP_MIDDLE:%.*]], label [[LOOP_END]] +; TFNONE: loop.middle: +; TFNONE-NEXT: br label [[LOOP_END]] +; TFNONE: loop.end: +; TFNONE-NEXT: [[SINK:%.*]] = phi double [ 0.000000e+00, [[LOOP_MIDDLE]] ], [ 1.000000e+00, [[LOOP]] ] +; TFNONE-NEXT: store double [[SINK]], ptr [[P]], align 8 +; TFNONE-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 +; TFNONE-NEXT: [[COND2:%.*]] = icmp eq i64 [[IV]], [[N]] +; TFNONE-NEXT: br i1 [[COND2]], label [[END]], label [[LOOP]], !llvm.loop [[LOOP15:![0-9]+]] +; TFNONE: end: +; TFNONE-NEXT: ret void +; +; TFALWAYS-LABEL: @test_widen_exp_v2( +; TFALWAYS-NEXT: entry: +; TFALWAYS-NEXT: [[TMP0:%.*]] = add i64 [[N:%.*]], 1 +; TFALWAYS-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP0]], 1 +; TFALWAYS-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 2 +; TFALWAYS-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] +; TFALWAYS-NEXT: [[TMP1:%.*]] = sub i64 [[TMP0]], 2 +; TFALWAYS-NEXT: [[TMP2:%.*]] = icmp ugt i64 [[TMP0]], 2 +; TFALWAYS-NEXT: [[TMP3:%.*]] = select i1 [[TMP2]], i64 [[TMP1]], i64 0 +; TFALWAYS-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <2 x i1> @llvm.get.active.lane.mask.v2i1.i64(i64 0, i64 [[TMP0]]) +; TFALWAYS-NEXT: br label [[VECTOR_BODY:%.*]] +; TFALWAYS: vector.body: +; TFALWAYS-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE2:%.*]] ] +; TFALWAYS-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[PRED_STORE_CONTINUE2]] ] +; TFALWAYS-NEXT: [[TMP4:%.*]] = load double, ptr [[P2:%.*]], align 8 +; TFALWAYS-NEXT: 
[[TMP5:%.*]] = tail call double @llvm.exp.f64(double [[TMP4]]) #[[ATTR7:[0-9]+]] +; TFALWAYS-NEXT: [[TMP6:%.*]] = tail call double @llvm.exp.f64(double [[TMP4]]) #[[ATTR7]] +; TFALWAYS-NEXT: [[TMP7:%.*]] = insertelement <2 x double> poison, double [[TMP5]], i32 0 +; TFALWAYS-NEXT: [[TMP8:%.*]] = insertelement <2 x double> [[TMP7]], double [[TMP6]], i32 1 +; TFALWAYS-NEXT: [[TMP9:%.*]] = fcmp ogt <2 x double> [[TMP8]], zeroinitializer +; TFALWAYS-NEXT: [[TMP10:%.*]] = xor <2 x i1> [[TMP9]], splat (i1 true) +; TFALWAYS-NEXT: [[TMP11:%.*]] = select <2 x i1> [[ACTIVE_LANE_MASK]], <2 x i1> [[TMP10]], <2 x i1> zeroinitializer +; TFALWAYS-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP11]], <2 x double> splat (double 1.000000e+00), <2 x double> zeroinitializer +; TFALWAYS-NEXT: [[TMP12:%.*]] = extractelement <2 x i1> [[ACTIVE_LANE_MASK]], i32 0 +; TFALWAYS-NEXT: br i1 [[TMP12]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]] +; TFALWAYS: pred.store.if: +; TFALWAYS-NEXT: [[TMP13:%.*]] = extractelement <2 x double> [[PREDPHI]], i32 0 +; TFALWAYS-NEXT: store double [[TMP13]], ptr [[P:%.*]], align 8 +; TFALWAYS-NEXT: br label [[PRED_STORE_CONTINUE]] +; TFALWAYS: pred.store.continue: +; TFALWAYS-NEXT: [[TMP14:%.*]] = extractelement <2 x i1> [[ACTIVE_LANE_MASK]], i32 1 +; TFALWAYS-NEXT: br i1 [[TMP14]], label [[PRED_STORE_IF1:%.*]], label [[PRED_STORE_CONTINUE2]] +; TFALWAYS: pred.store.if1: +; TFALWAYS-NEXT: [[TMP15:%.*]] = extractelement <2 x double> [[PREDPHI]], i32 1 +; TFALWAYS-NEXT: store double [[TMP15]], ptr [[P]], align 8 +; TFALWAYS-NEXT: br label [[PRED_STORE_CONTINUE2]] +; TFALWAYS: pred.store.continue2: +; TFALWAYS-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 2 +; TFALWAYS-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <2 x i1> @llvm.get.active.lane.mask.v2i1.i64(i64 [[INDEX]], i64 [[TMP3]]) +; TFALWAYS-NEXT: [[TMP16:%.*]] = xor <2 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true) +; TFALWAYS-NEXT: [[TMP17:%.*]] = extractelement <2 x i1> [[TMP16]], i32 0 +; TFALWAYS-NEXT: br i1 [[TMP17]], label [[END:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; TFALWAYS: end: +; TFALWAYS-NEXT: ret void +; +; TFFALLBACK-LABEL: @test_widen_exp_v2( +; TFFALLBACK-NEXT: entry: +; TFFALLBACK-NEXT: [[TMP0:%.*]] = add i64 [[N:%.*]], 1 +; TFFALLBACK-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP0]], 1 +; TFFALLBACK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 2 +; TFFALLBACK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] +; TFFALLBACK-NEXT: [[TMP1:%.*]] = sub i64 [[TMP0]], 2 +; TFFALLBACK-NEXT: [[TMP2:%.*]] = icmp ugt i64 [[TMP0]], 2 +; TFFALLBACK-NEXT: [[TMP3:%.*]] = select i1 [[TMP2]], i64 [[TMP1]], i64 0 +; TFFALLBACK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <2 x i1> @llvm.get.active.lane.mask.v2i1.i64(i64 0, i64 [[TMP0]]) +; TFFALLBACK-NEXT: br label [[VECTOR_BODY:%.*]] +; TFFALLBACK: vector.body: +; TFFALLBACK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE2:%.*]] ] +; TFFALLBACK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[PRED_STORE_CONTINUE2]] ] +; TFFALLBACK-NEXT: [[TMP4:%.*]] = load double, ptr [[P2:%.*]], align 8 +; TFFALLBACK-NEXT: [[TMP5:%.*]] = tail call double @llvm.exp.f64(double [[TMP4]]) #[[ATTR7:[0-9]+]] +; TFFALLBACK-NEXT: [[TMP6:%.*]] = tail call double @llvm.exp.f64(double [[TMP4]]) #[[ATTR7]] +; TFFALLBACK-NEXT: [[TMP7:%.*]] = insertelement <2 x double> poison, double [[TMP5]], i32 0 +; TFFALLBACK-NEXT: [[TMP8:%.*]] = insertelement <2 x 
double> [[TMP7]], double [[TMP6]], i32 1 +; TFFALLBACK-NEXT: [[TMP9:%.*]] = fcmp ogt <2 x double> [[TMP8]], zeroinitializer +; TFFALLBACK-NEXT: [[TMP10:%.*]] = xor <2 x i1> [[TMP9]], splat (i1 true) +; TFFALLBACK-NEXT: [[TMP11:%.*]] = select <2 x i1> [[ACTIVE_LANE_MASK]], <2 x i1> [[TMP10]], <2 x i1> zeroinitializer +; TFFALLBACK-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP11]], <2 x double> splat (double 1.000000e+00), <2 x double> zeroinitializer +; TFFALLBACK-NEXT: [[TMP12:%.*]] = extractelement <2 x i1> [[ACTIVE_LANE_MASK]], i32 0 +; TFFALLBACK-NEXT: br i1 [[TMP12]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]] +; TFFALLBACK: pred.store.if: +; TFFALLBACK-NEXT: [[TMP13:%.*]] = extractelement <2 x double> [[PREDPHI]], i32 0 +; TFFALLBACK-NEXT: store double [[TMP13]], ptr [[P:%.*]], align 8 +; TFFALLBACK-NEXT: br label [[PRED_STORE_CONTINUE]] +; TFFALLBACK: pred.store.continue: +; TFFALLBACK-NEXT: [[TMP14:%.*]] = extractelement <2 x i1> [[ACTIVE_LANE_MASK]], i32 1 +; TFFALLBACK-NEXT: br i1 [[TMP14]], label [[PRED_STORE_IF1:%.*]], label [[PRED_STORE_CONTINUE2]] +; TFFALLBACK: pred.store.if1: +; TFFALLBACK-NEXT: [[TMP15:%.*]] = extractelement <2 x double> [[PREDPHI]], i32 1 +; TFFALLBACK-NEXT: store double [[TMP15]], ptr [[P]], align 8 +; TFFALLBACK-NEXT: br label [[PRED_STORE_CONTINUE2]] +; TFFALLBACK: pred.store.continue2: +; TFFALLBACK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 2 +; TFFALLBACK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <2 x i1> @llvm.get.active.lane.mask.v2i1.i64(i64 [[INDEX]], i64 [[TMP3]]) +; TFFALLBACK-NEXT: [[TMP16:%.*]] = xor <2 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true) +; TFFALLBACK-NEXT: [[TMP17:%.*]] = extractelement <2 x i1> [[TMP16]], i32 0 +; TFFALLBACK-NEXT: br i1 [[TMP17]], label [[END:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] +; TFFALLBACK: end: +; TFFALLBACK-NEXT: ret void +; +; TFA_INTERLEAVE-LABEL: @test_widen_exp_v2( +; TFA_INTERLEAVE-NEXT: entry: +; TFA_INTERLEAVE-NEXT: [[TMP0:%.*]] = add i64 [[N:%.*]], 1 +; TFA_INTERLEAVE-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP0]], 3 +; TFA_INTERLEAVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 4 +; TFA_INTERLEAVE-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] +; TFA_INTERLEAVE-NEXT: [[TMP1:%.*]] = sub i64 [[TMP0]], 4 +; TFA_INTERLEAVE-NEXT: [[TMP2:%.*]] = icmp ugt i64 [[TMP0]], 4 +; TFA_INTERLEAVE-NEXT: [[TMP3:%.*]] = select i1 [[TMP2]], i64 [[TMP1]], i64 0 +; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <2 x i1> @llvm.get.active.lane.mask.v2i1.i64(i64 0, i64 [[TMP0]]) +; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_ENTRY1:%.*]] = call <2 x i1> @llvm.get.active.lane.mask.v2i1.i64(i64 2, i64 [[TMP0]]) +; TFA_INTERLEAVE-NEXT: br label [[VECTOR_BODY:%.*]] +; TFA_INTERLEAVE: vector.body: +; TFA_INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE9:%.*]] ] +; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[PRED_STORE_CONTINUE9]] ] +; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK2:%.*]] = phi <2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY1]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT10:%.*]], [[PRED_STORE_CONTINUE9]] ] +; TFA_INTERLEAVE-NEXT: [[TMP4:%.*]] = load double, ptr [[P2:%.*]], align 8 +; TFA_INTERLEAVE-NEXT: [[TMP5:%.*]] = tail call double @llvm.exp.f64(double [[TMP4]]) #[[ATTR7:[0-9]+]] +; TFA_INTERLEAVE-NEXT: [[TMP6:%.*]] = tail call double @llvm.exp.f64(double [[TMP4]]) #[[ATTR7]] +; TFA_INTERLEAVE-NEXT: [[TMP7:%.*]] = 
insertelement <2 x double> poison, double [[TMP5]], i32 0 +; TFA_INTERLEAVE-NEXT: [[TMP8:%.*]] = insertelement <2 x double> [[TMP7]], double [[TMP6]], i32 1 +; TFA_INTERLEAVE-NEXT: [[TMP9:%.*]] = tail call double @llvm.exp.f64(double [[TMP4]]) #[[ATTR7]] +; TFA_INTERLEAVE-NEXT: [[TMP10:%.*]] = tail call double @llvm.exp.f64(double [[TMP4]]) #[[ATTR7]] +; TFA_INTERLEAVE-NEXT: [[TMP11:%.*]] = insertelement <2 x double> poison, double [[TMP9]], i32 0 +; TFA_INTERLEAVE-NEXT: [[TMP12:%.*]] = insertelement <2 x double> [[TMP11]], double [[TMP10]], i32 1 +; TFA_INTERLEAVE-NEXT: [[TMP13:%.*]] = fcmp ogt <2 x double> [[TMP8]], zeroinitializer +; TFA_INTERLEAVE-NEXT: [[TMP14:%.*]] = fcmp ogt <2 x double> [[TMP12]], zeroinitializer +; TFA_INTERLEAVE-NEXT: [[TMP15:%.*]] = xor <2 x i1> [[TMP13]], splat (i1 true) +; TFA_INTERLEAVE-NEXT: [[TMP16:%.*]] = xor <2 x i1> [[TMP14]], splat (i1 true) +; TFA_INTERLEAVE-NEXT: [[TMP17:%.*]] = select <2 x i1> [[ACTIVE_LANE_MASK]], <2 x i1> [[TMP15]], <2 x i1> zeroinitializer +; TFA_INTERLEAVE-NEXT: [[TMP18:%.*]] = select <2 x i1> [[ACTIVE_LANE_MASK2]], <2 x i1> [[TMP16]], <2 x i1> zeroinitializer +; TFA_INTERLEAVE-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP17]], <2 x double> splat (double 1.000000e+00), <2 x double> zeroinitializer +; TFA_INTERLEAVE-NEXT: [[PREDPHI3:%.*]] = select <2 x i1> [[TMP18]], <2 x double> splat (double 1.000000e+00), <2 x double> zeroinitializer +; TFA_INTERLEAVE-NEXT: [[TMP19:%.*]] = extractelement <2 x i1> [[ACTIVE_LANE_MASK]], i32 0 +; TFA_INTERLEAVE-NEXT: br i1 [[TMP19]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]] +; TFA_INTERLEAVE: pred.store.if: +; TFA_INTERLEAVE-NEXT: [[TMP20:%.*]] = extractelement <2 x double> [[PREDPHI]], i32 0 +; TFA_INTERLEAVE-NEXT: store double [[TMP20]], ptr [[P:%.*]], align 8 +; TFA_INTERLEAVE-NEXT: br label [[PRED_STORE_CONTINUE]] +; TFA_INTERLEAVE: pred.store.continue: +; TFA_INTERLEAVE-NEXT: [[TMP21:%.*]] = extractelement <2 x i1> [[ACTIVE_LANE_MASK]], i32 1 +; TFA_INTERLEAVE-NEXT: br i1 [[TMP21]], label [[PRED_STORE_IF4:%.*]], label [[PRED_STORE_CONTINUE5:%.*]] +; TFA_INTERLEAVE: pred.store.if4: +; TFA_INTERLEAVE-NEXT: [[TMP22:%.*]] = extractelement <2 x double> [[PREDPHI]], i32 1 +; TFA_INTERLEAVE-NEXT: store double [[TMP22]], ptr [[P]], align 8 +; TFA_INTERLEAVE-NEXT: br label [[PRED_STORE_CONTINUE5]] +; TFA_INTERLEAVE: pred.store.continue5: +; TFA_INTERLEAVE-NEXT: [[TMP23:%.*]] = extractelement <2 x i1> [[ACTIVE_LANE_MASK2]], i32 0 +; TFA_INTERLEAVE-NEXT: br i1 [[TMP23]], label [[PRED_STORE_IF6:%.*]], label [[PRED_STORE_CONTINUE7:%.*]] +; TFA_INTERLEAVE: pred.store.if6: +; TFA_INTERLEAVE-NEXT: [[TMP24:%.*]] = extractelement <2 x double> [[PREDPHI3]], i32 0 +; TFA_INTERLEAVE-NEXT: store double [[TMP24]], ptr [[P]], align 8 +; TFA_INTERLEAVE-NEXT: br label [[PRED_STORE_CONTINUE7]] +; TFA_INTERLEAVE: pred.store.continue7: +; TFA_INTERLEAVE-NEXT: [[TMP25:%.*]] = extractelement <2 x i1> [[ACTIVE_LANE_MASK2]], i32 1 +; TFA_INTERLEAVE-NEXT: br i1 [[TMP25]], label [[PRED_STORE_IF8:%.*]], label [[PRED_STORE_CONTINUE9]] +; TFA_INTERLEAVE: pred.store.if8: +; TFA_INTERLEAVE-NEXT: [[TMP26:%.*]] = extractelement <2 x double> [[PREDPHI3]], i32 1 +; TFA_INTERLEAVE-NEXT: store double [[TMP26]], ptr [[P]], align 8 +; TFA_INTERLEAVE-NEXT: br label [[PRED_STORE_CONTINUE9]] +; TFA_INTERLEAVE: pred.store.continue9: +; TFA_INTERLEAVE-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 4 +; TFA_INTERLEAVE-NEXT: [[TMP27:%.*]] = add i64 [[INDEX]], 2 +; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <2 x i1> 
@llvm.get.active.lane.mask.v2i1.i64(i64 [[INDEX]], i64 [[TMP3]]) +; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_NEXT10]] = call <2 x i1> @llvm.get.active.lane.mask.v2i1.i64(i64 [[TMP27]], i64 [[TMP3]]) +; TFA_INTERLEAVE-NEXT: [[TMP28:%.*]] = xor <2 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true) +; TFA_INTERLEAVE-NEXT: [[TMP29:%.*]] = extractelement <2 x i1> [[TMP28]], i32 0 +; TFA_INTERLEAVE-NEXT: br i1 [[TMP29]], label [[END:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; TFA_INTERLEAVE: end: +; TFA_INTERLEAVE-NEXT: ret void +; +entry: + br label %loop + +loop: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.end ] + %ld = load double, ptr %p2, align 8 + %exp = tail call double @llvm.exp.f64(double %ld) #6 + %cond1 = fcmp ogt double %exp, 0.000000e+00 + br i1 %cond1, label %loop.middle, label %loop.end + +loop.middle: + br label %loop.end + +loop.end: + %sink = phi double [ 0.000000e+00, %loop.middle ], [ 1.000000e+00, %loop ] + store double %sink, ptr %p, align 8 + %iv.next = add i64 %iv, 1 + %cond2 = icmp eq i64 %iv, %n + br i1 %cond2, label %end, label %loop + +end: + ret void +} + + declare i64 @foo(i64) declare double @llvm.fmuladd.f64(double, double, double) +declare double @llvm.exp.f64(double) + +; fixed-width variants of exp +declare <2 x double> @exp_fixed(<2 x double>) -;; scalable vector variants of foo +;; scalable vector variants of foo and exp declare @foo_uniform(i64, ) declare @foo_vector(, ) declare @foo_vector_nomask() +declare @exp_masked_scalable(, ) attributes #0 = { nounwind "vector-function-abi-variant"="_ZGVsMxv_foo(foo_vector),_ZGVsMxu_foo(foo_uniform)" } attributes #1 = { nounwind "vector-function-abi-variant"="_ZGVsMxv_foo(foo_vector)" } attributes #2 = { nounwind "vector-function-abi-variant"="_ZGVsNxv_foo(foo_vector_nomask)" } attributes #3 = { nounwind "vector-function-abi-variant"="_ZGVsNxv_foo(foo_vector_nomask),_ZGVsMxv_foo(foo_vector)" } attributes #4 = { "target-features"="+sve" vscale_range(2,16) "no-trapping-math"="false" } +attributes #5 = { "target-cpu"="neoverse-v2" vscale_range(1,16) } +attributes #6 = { "vector-function-abi-variant"="_ZGV_LLVM_N2v_llvm.exp.f64(exp_fixed),_ZGVsMxv_llvm.exp.f64(exp_masked_scalable)" } diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-epilogue.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-epilogue.ll index 37c489cd0d4cf..bd9a0fafb1e1c 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-epilogue.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-epilogue.ll @@ -230,6 +230,389 @@ while.end.loopexit: ; preds = %while.body ret void } +define i32 @dotp_predicated(i64 %N, ptr %a, ptr %b) { +; CHECK-LABEL: define i32 @dotp_predicated( +; CHECK-SAME: i64 [[N:%.*]], ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR2:[0-9]+]] { +; CHECK-NEXT: entry: +; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; CHECK: vector.ph: +; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], 15 +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 16 +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] +; CHECK-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1 +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i64> [[BROADCAST_SPLATINSERT]], <16 x i64> poison, <16 x i32> zeroinitializer +; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] +; CHECK: vector.body: +; 
CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_LOAD_CONTINUE62:%.*]] ] +; CHECK-NEXT: [[VEC_IND:%.*]] = phi <16 x i64> [ , [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[PRED_LOAD_CONTINUE62]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[PRED_LOAD_CONTINUE62]] ] +; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0 +; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1 +; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 2 +; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 3 +; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[INDEX]], 4 +; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[INDEX]], 5 +; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[INDEX]], 6 +; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[INDEX]], 7 +; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[INDEX]], 8 +; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[INDEX]], 9 +; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[INDEX]], 10 +; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[INDEX]], 11 +; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[INDEX]], 12 +; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[INDEX]], 13 +; CHECK-NEXT: [[TMP14:%.*]] = add i64 [[INDEX]], 14 +; CHECK-NEXT: [[TMP15:%.*]] = add i64 [[INDEX]], 15 +; CHECK-NEXT: [[TMP16:%.*]] = icmp ule <16 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] +; CHECK-NEXT: [[TMP17:%.*]] = extractelement <16 x i1> [[TMP16]], i32 0 +; CHECK-NEXT: br i1 [[TMP17]], label [[PRED_LOAD_IF:%.*]], label [[PRED_LOAD_CONTINUE:%.*]] +; CHECK: pred.load.if: +; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP0]] +; CHECK-NEXT: [[TMP19:%.*]] = load i8, ptr [[TMP18]], align 1 +; CHECK-NEXT: [[TMP20:%.*]] = insertelement <16 x i8> poison, i8 [[TMP19]], i32 0 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE]] +; CHECK: pred.load.continue: +; CHECK-NEXT: [[TMP21:%.*]] = phi <16 x i8> [ poison, [[VECTOR_BODY]] ], [ [[TMP20]], [[PRED_LOAD_IF]] ] +; CHECK-NEXT: [[TMP22:%.*]] = extractelement <16 x i1> [[TMP16]], i32 1 +; CHECK-NEXT: br i1 [[TMP22]], label [[PRED_LOAD_IF1:%.*]], label [[PRED_LOAD_CONTINUE2:%.*]] +; CHECK: pred.load.if1: +; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP1]] +; CHECK-NEXT: [[TMP24:%.*]] = load i8, ptr [[TMP23]], align 1 +; CHECK-NEXT: [[TMP25:%.*]] = insertelement <16 x i8> [[TMP21]], i8 [[TMP24]], i32 1 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE2]] +; CHECK: pred.load.continue2: +; CHECK-NEXT: [[TMP26:%.*]] = phi <16 x i8> [ [[TMP21]], [[PRED_LOAD_CONTINUE]] ], [ [[TMP25]], [[PRED_LOAD_IF1]] ] +; CHECK-NEXT: [[TMP27:%.*]] = extractelement <16 x i1> [[TMP16]], i32 2 +; CHECK-NEXT: br i1 [[TMP27]], label [[PRED_LOAD_IF3:%.*]], label [[PRED_LOAD_CONTINUE4:%.*]] +; CHECK: pred.load.if3: +; CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP2]] +; CHECK-NEXT: [[TMP29:%.*]] = load i8, ptr [[TMP28]], align 1 +; CHECK-NEXT: [[TMP30:%.*]] = insertelement <16 x i8> [[TMP26]], i8 [[TMP29]], i32 2 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE4]] +; CHECK: pred.load.continue4: +; CHECK-NEXT: [[TMP31:%.*]] = phi <16 x i8> [ [[TMP26]], [[PRED_LOAD_CONTINUE2]] ], [ [[TMP30]], [[PRED_LOAD_IF3]] ] +; CHECK-NEXT: [[TMP32:%.*]] = extractelement <16 x i1> [[TMP16]], i32 3 +; CHECK-NEXT: br i1 [[TMP32]], label [[PRED_LOAD_IF5:%.*]], label [[PRED_LOAD_CONTINUE6:%.*]] +; CHECK: pred.load.if5: +; CHECK-NEXT: [[TMP33:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP3]] +; CHECK-NEXT: [[TMP34:%.*]] = load i8, ptr [[TMP33]], align 1 +; CHECK-NEXT: [[TMP35:%.*]] = insertelement <16 x i8> [[TMP31]], i8 [[TMP34]], i32 3 +; 
CHECK-NEXT: br label [[PRED_LOAD_CONTINUE6]] +; CHECK: pred.load.continue6: +; CHECK-NEXT: [[TMP36:%.*]] = phi <16 x i8> [ [[TMP31]], [[PRED_LOAD_CONTINUE4]] ], [ [[TMP35]], [[PRED_LOAD_IF5]] ] +; CHECK-NEXT: [[TMP37:%.*]] = extractelement <16 x i1> [[TMP16]], i32 4 +; CHECK-NEXT: br i1 [[TMP37]], label [[PRED_LOAD_IF7:%.*]], label [[PRED_LOAD_CONTINUE8:%.*]] +; CHECK: pred.load.if7: +; CHECK-NEXT: [[TMP38:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP4]] +; CHECK-NEXT: [[TMP39:%.*]] = load i8, ptr [[TMP38]], align 1 +; CHECK-NEXT: [[TMP40:%.*]] = insertelement <16 x i8> [[TMP36]], i8 [[TMP39]], i32 4 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE8]] +; CHECK: pred.load.continue8: +; CHECK-NEXT: [[TMP41:%.*]] = phi <16 x i8> [ [[TMP36]], [[PRED_LOAD_CONTINUE6]] ], [ [[TMP40]], [[PRED_LOAD_IF7]] ] +; CHECK-NEXT: [[TMP42:%.*]] = extractelement <16 x i1> [[TMP16]], i32 5 +; CHECK-NEXT: br i1 [[TMP42]], label [[PRED_LOAD_IF9:%.*]], label [[PRED_LOAD_CONTINUE10:%.*]] +; CHECK: pred.load.if9: +; CHECK-NEXT: [[TMP43:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP5]] +; CHECK-NEXT: [[TMP44:%.*]] = load i8, ptr [[TMP43]], align 1 +; CHECK-NEXT: [[TMP45:%.*]] = insertelement <16 x i8> [[TMP41]], i8 [[TMP44]], i32 5 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE10]] +; CHECK: pred.load.continue10: +; CHECK-NEXT: [[TMP46:%.*]] = phi <16 x i8> [ [[TMP41]], [[PRED_LOAD_CONTINUE8]] ], [ [[TMP45]], [[PRED_LOAD_IF9]] ] +; CHECK-NEXT: [[TMP47:%.*]] = extractelement <16 x i1> [[TMP16]], i32 6 +; CHECK-NEXT: br i1 [[TMP47]], label [[PRED_LOAD_IF11:%.*]], label [[PRED_LOAD_CONTINUE12:%.*]] +; CHECK: pred.load.if11: +; CHECK-NEXT: [[TMP48:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP6]] +; CHECK-NEXT: [[TMP49:%.*]] = load i8, ptr [[TMP48]], align 1 +; CHECK-NEXT: [[TMP50:%.*]] = insertelement <16 x i8> [[TMP46]], i8 [[TMP49]], i32 6 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE12]] +; CHECK: pred.load.continue12: +; CHECK-NEXT: [[TMP51:%.*]] = phi <16 x i8> [ [[TMP46]], [[PRED_LOAD_CONTINUE10]] ], [ [[TMP50]], [[PRED_LOAD_IF11]] ] +; CHECK-NEXT: [[TMP52:%.*]] = extractelement <16 x i1> [[TMP16]], i32 7 +; CHECK-NEXT: br i1 [[TMP52]], label [[PRED_LOAD_IF13:%.*]], label [[PRED_LOAD_CONTINUE14:%.*]] +; CHECK: pred.load.if13: +; CHECK-NEXT: [[TMP53:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP7]] +; CHECK-NEXT: [[TMP54:%.*]] = load i8, ptr [[TMP53]], align 1 +; CHECK-NEXT: [[TMP55:%.*]] = insertelement <16 x i8> [[TMP51]], i8 [[TMP54]], i32 7 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE14]] +; CHECK: pred.load.continue14: +; CHECK-NEXT: [[TMP56:%.*]] = phi <16 x i8> [ [[TMP51]], [[PRED_LOAD_CONTINUE12]] ], [ [[TMP55]], [[PRED_LOAD_IF13]] ] +; CHECK-NEXT: [[TMP57:%.*]] = extractelement <16 x i1> [[TMP16]], i32 8 +; CHECK-NEXT: br i1 [[TMP57]], label [[PRED_LOAD_IF15:%.*]], label [[PRED_LOAD_CONTINUE16:%.*]] +; CHECK: pred.load.if15: +; CHECK-NEXT: [[TMP58:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP8]] +; CHECK-NEXT: [[TMP59:%.*]] = load i8, ptr [[TMP58]], align 1 +; CHECK-NEXT: [[TMP60:%.*]] = insertelement <16 x i8> [[TMP56]], i8 [[TMP59]], i32 8 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE16]] +; CHECK: pred.load.continue16: +; CHECK-NEXT: [[TMP61:%.*]] = phi <16 x i8> [ [[TMP56]], [[PRED_LOAD_CONTINUE14]] ], [ [[TMP60]], [[PRED_LOAD_IF15]] ] +; CHECK-NEXT: [[TMP62:%.*]] = extractelement <16 x i1> [[TMP16]], i32 9 +; CHECK-NEXT: br i1 [[TMP62]], label [[PRED_LOAD_IF17:%.*]], label [[PRED_LOAD_CONTINUE18:%.*]] +; CHECK: pred.load.if17: +; CHECK-NEXT: [[TMP63:%.*]] = 
getelementptr inbounds i8, ptr [[A]], i64 [[TMP9]] +; CHECK-NEXT: [[TMP64:%.*]] = load i8, ptr [[TMP63]], align 1 +; CHECK-NEXT: [[TMP65:%.*]] = insertelement <16 x i8> [[TMP61]], i8 [[TMP64]], i32 9 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE18]] +; CHECK: pred.load.continue18: +; CHECK-NEXT: [[TMP66:%.*]] = phi <16 x i8> [ [[TMP61]], [[PRED_LOAD_CONTINUE16]] ], [ [[TMP65]], [[PRED_LOAD_IF17]] ] +; CHECK-NEXT: [[TMP67:%.*]] = extractelement <16 x i1> [[TMP16]], i32 10 +; CHECK-NEXT: br i1 [[TMP67]], label [[PRED_LOAD_IF19:%.*]], label [[PRED_LOAD_CONTINUE20:%.*]] +; CHECK: pred.load.if19: +; CHECK-NEXT: [[TMP68:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP10]] +; CHECK-NEXT: [[TMP69:%.*]] = load i8, ptr [[TMP68]], align 1 +; CHECK-NEXT: [[TMP70:%.*]] = insertelement <16 x i8> [[TMP66]], i8 [[TMP69]], i32 10 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE20]] +; CHECK: pred.load.continue20: +; CHECK-NEXT: [[TMP71:%.*]] = phi <16 x i8> [ [[TMP66]], [[PRED_LOAD_CONTINUE18]] ], [ [[TMP70]], [[PRED_LOAD_IF19]] ] +; CHECK-NEXT: [[TMP72:%.*]] = extractelement <16 x i1> [[TMP16]], i32 11 +; CHECK-NEXT: br i1 [[TMP72]], label [[PRED_LOAD_IF21:%.*]], label [[PRED_LOAD_CONTINUE22:%.*]] +; CHECK: pred.load.if21: +; CHECK-NEXT: [[TMP73:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP11]] +; CHECK-NEXT: [[TMP74:%.*]] = load i8, ptr [[TMP73]], align 1 +; CHECK-NEXT: [[TMP75:%.*]] = insertelement <16 x i8> [[TMP71]], i8 [[TMP74]], i32 11 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE22]] +; CHECK: pred.load.continue22: +; CHECK-NEXT: [[TMP76:%.*]] = phi <16 x i8> [ [[TMP71]], [[PRED_LOAD_CONTINUE20]] ], [ [[TMP75]], [[PRED_LOAD_IF21]] ] +; CHECK-NEXT: [[TMP77:%.*]] = extractelement <16 x i1> [[TMP16]], i32 12 +; CHECK-NEXT: br i1 [[TMP77]], label [[PRED_LOAD_IF23:%.*]], label [[PRED_LOAD_CONTINUE24:%.*]] +; CHECK: pred.load.if23: +; CHECK-NEXT: [[TMP78:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP12]] +; CHECK-NEXT: [[TMP79:%.*]] = load i8, ptr [[TMP78]], align 1 +; CHECK-NEXT: [[TMP80:%.*]] = insertelement <16 x i8> [[TMP76]], i8 [[TMP79]], i32 12 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE24]] +; CHECK: pred.load.continue24: +; CHECK-NEXT: [[TMP81:%.*]] = phi <16 x i8> [ [[TMP76]], [[PRED_LOAD_CONTINUE22]] ], [ [[TMP80]], [[PRED_LOAD_IF23]] ] +; CHECK-NEXT: [[TMP82:%.*]] = extractelement <16 x i1> [[TMP16]], i32 13 +; CHECK-NEXT: br i1 [[TMP82]], label [[PRED_LOAD_IF25:%.*]], label [[PRED_LOAD_CONTINUE26:%.*]] +; CHECK: pred.load.if25: +; CHECK-NEXT: [[TMP83:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP13]] +; CHECK-NEXT: [[TMP84:%.*]] = load i8, ptr [[TMP83]], align 1 +; CHECK-NEXT: [[TMP85:%.*]] = insertelement <16 x i8> [[TMP81]], i8 [[TMP84]], i32 13 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE26]] +; CHECK: pred.load.continue26: +; CHECK-NEXT: [[TMP86:%.*]] = phi <16 x i8> [ [[TMP81]], [[PRED_LOAD_CONTINUE24]] ], [ [[TMP85]], [[PRED_LOAD_IF25]] ] +; CHECK-NEXT: [[TMP87:%.*]] = extractelement <16 x i1> [[TMP16]], i32 14 +; CHECK-NEXT: br i1 [[TMP87]], label [[PRED_LOAD_IF27:%.*]], label [[PRED_LOAD_CONTINUE28:%.*]] +; CHECK: pred.load.if27: +; CHECK-NEXT: [[TMP88:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP14]] +; CHECK-NEXT: [[TMP89:%.*]] = load i8, ptr [[TMP88]], align 1 +; CHECK-NEXT: [[TMP90:%.*]] = insertelement <16 x i8> [[TMP86]], i8 [[TMP89]], i32 14 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE28]] +; CHECK: pred.load.continue28: +; CHECK-NEXT: [[TMP91:%.*]] = phi <16 x i8> [ [[TMP86]], [[PRED_LOAD_CONTINUE26]] ], [ [[TMP90]], [[PRED_LOAD_IF27]] 
] +; CHECK-NEXT: [[TMP92:%.*]] = extractelement <16 x i1> [[TMP16]], i32 15 +; CHECK-NEXT: br i1 [[TMP92]], label [[PRED_LOAD_IF29:%.*]], label [[PRED_LOAD_CONTINUE30:%.*]] +; CHECK: pred.load.if29: +; CHECK-NEXT: [[TMP93:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP15]] +; CHECK-NEXT: [[TMP94:%.*]] = load i8, ptr [[TMP93]], align 1 +; CHECK-NEXT: [[TMP95:%.*]] = insertelement <16 x i8> [[TMP91]], i8 [[TMP94]], i32 15 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE30]] +; CHECK: pred.load.continue30: +; CHECK-NEXT: [[TMP96:%.*]] = phi <16 x i8> [ [[TMP91]], [[PRED_LOAD_CONTINUE28]] ], [ [[TMP95]], [[PRED_LOAD_IF29]] ] +; CHECK-NEXT: [[TMP97:%.*]] = sext <16 x i8> [[TMP96]] to <16 x i32> +; CHECK-NEXT: [[TMP98:%.*]] = extractelement <16 x i1> [[TMP16]], i32 0 +; CHECK-NEXT: br i1 [[TMP98]], label [[PRED_LOAD_IF31:%.*]], label [[PRED_LOAD_CONTINUE32:%.*]] +; CHECK: pred.load.if31: +; CHECK-NEXT: [[TMP99:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP0]] +; CHECK-NEXT: [[TMP100:%.*]] = load i8, ptr [[TMP99]], align 1 +; CHECK-NEXT: [[TMP101:%.*]] = insertelement <16 x i8> poison, i8 [[TMP100]], i32 0 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE32]] +; CHECK: pred.load.continue32: +; CHECK-NEXT: [[TMP102:%.*]] = phi <16 x i8> [ poison, [[PRED_LOAD_CONTINUE30]] ], [ [[TMP101]], [[PRED_LOAD_IF31]] ] +; CHECK-NEXT: [[TMP103:%.*]] = extractelement <16 x i1> [[TMP16]], i32 1 +; CHECK-NEXT: br i1 [[TMP103]], label [[PRED_LOAD_IF33:%.*]], label [[PRED_LOAD_CONTINUE34:%.*]] +; CHECK: pred.load.if33: +; CHECK-NEXT: [[TMP104:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP1]] +; CHECK-NEXT: [[TMP105:%.*]] = load i8, ptr [[TMP104]], align 1 +; CHECK-NEXT: [[TMP106:%.*]] = insertelement <16 x i8> [[TMP102]], i8 [[TMP105]], i32 1 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE34]] +; CHECK: pred.load.continue34: +; CHECK-NEXT: [[TMP107:%.*]] = phi <16 x i8> [ [[TMP102]], [[PRED_LOAD_CONTINUE32]] ], [ [[TMP106]], [[PRED_LOAD_IF33]] ] +; CHECK-NEXT: [[TMP108:%.*]] = extractelement <16 x i1> [[TMP16]], i32 2 +; CHECK-NEXT: br i1 [[TMP108]], label [[PRED_LOAD_IF35:%.*]], label [[PRED_LOAD_CONTINUE36:%.*]] +; CHECK: pred.load.if35: +; CHECK-NEXT: [[TMP109:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP2]] +; CHECK-NEXT: [[TMP110:%.*]] = load i8, ptr [[TMP109]], align 1 +; CHECK-NEXT: [[TMP111:%.*]] = insertelement <16 x i8> [[TMP107]], i8 [[TMP110]], i32 2 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE36]] +; CHECK: pred.load.continue36: +; CHECK-NEXT: [[TMP112:%.*]] = phi <16 x i8> [ [[TMP107]], [[PRED_LOAD_CONTINUE34]] ], [ [[TMP111]], [[PRED_LOAD_IF35]] ] +; CHECK-NEXT: [[TMP113:%.*]] = extractelement <16 x i1> [[TMP16]], i32 3 +; CHECK-NEXT: br i1 [[TMP113]], label [[PRED_LOAD_IF37:%.*]], label [[PRED_LOAD_CONTINUE38:%.*]] +; CHECK: pred.load.if37: +; CHECK-NEXT: [[TMP114:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP3]] +; CHECK-NEXT: [[TMP115:%.*]] = load i8, ptr [[TMP114]], align 1 +; CHECK-NEXT: [[TMP116:%.*]] = insertelement <16 x i8> [[TMP112]], i8 [[TMP115]], i32 3 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE38]] +; CHECK: pred.load.continue38: +; CHECK-NEXT: [[TMP117:%.*]] = phi <16 x i8> [ [[TMP112]], [[PRED_LOAD_CONTINUE36]] ], [ [[TMP116]], [[PRED_LOAD_IF37]] ] +; CHECK-NEXT: [[TMP118:%.*]] = extractelement <16 x i1> [[TMP16]], i32 4 +; CHECK-NEXT: br i1 [[TMP118]], label [[PRED_LOAD_IF39:%.*]], label [[PRED_LOAD_CONTINUE40:%.*]] +; CHECK: pred.load.if39: +; CHECK-NEXT: [[TMP119:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP4]] +; CHECK-NEXT: 
[[TMP120:%.*]] = load i8, ptr [[TMP119]], align 1 +; CHECK-NEXT: [[TMP121:%.*]] = insertelement <16 x i8> [[TMP117]], i8 [[TMP120]], i32 4 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE40]] +; CHECK: pred.load.continue40: +; CHECK-NEXT: [[TMP122:%.*]] = phi <16 x i8> [ [[TMP117]], [[PRED_LOAD_CONTINUE38]] ], [ [[TMP121]], [[PRED_LOAD_IF39]] ] +; CHECK-NEXT: [[TMP123:%.*]] = extractelement <16 x i1> [[TMP16]], i32 5 +; CHECK-NEXT: br i1 [[TMP123]], label [[PRED_LOAD_IF41:%.*]], label [[PRED_LOAD_CONTINUE42:%.*]] +; CHECK: pred.load.if41: +; CHECK-NEXT: [[TMP124:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP5]] +; CHECK-NEXT: [[TMP125:%.*]] = load i8, ptr [[TMP124]], align 1 +; CHECK-NEXT: [[TMP126:%.*]] = insertelement <16 x i8> [[TMP122]], i8 [[TMP125]], i32 5 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE42]] +; CHECK: pred.load.continue42: +; CHECK-NEXT: [[TMP127:%.*]] = phi <16 x i8> [ [[TMP122]], [[PRED_LOAD_CONTINUE40]] ], [ [[TMP126]], [[PRED_LOAD_IF41]] ] +; CHECK-NEXT: [[TMP128:%.*]] = extractelement <16 x i1> [[TMP16]], i32 6 +; CHECK-NEXT: br i1 [[TMP128]], label [[PRED_LOAD_IF43:%.*]], label [[PRED_LOAD_CONTINUE44:%.*]] +; CHECK: pred.load.if43: +; CHECK-NEXT: [[TMP129:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP6]] +; CHECK-NEXT: [[TMP130:%.*]] = load i8, ptr [[TMP129]], align 1 +; CHECK-NEXT: [[TMP131:%.*]] = insertelement <16 x i8> [[TMP127]], i8 [[TMP130]], i32 6 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE44]] +; CHECK: pred.load.continue44: +; CHECK-NEXT: [[TMP132:%.*]] = phi <16 x i8> [ [[TMP127]], [[PRED_LOAD_CONTINUE42]] ], [ [[TMP131]], [[PRED_LOAD_IF43]] ] +; CHECK-NEXT: [[TMP133:%.*]] = extractelement <16 x i1> [[TMP16]], i32 7 +; CHECK-NEXT: br i1 [[TMP133]], label [[PRED_LOAD_IF45:%.*]], label [[PRED_LOAD_CONTINUE46:%.*]] +; CHECK: pred.load.if45: +; CHECK-NEXT: [[TMP134:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP7]] +; CHECK-NEXT: [[TMP135:%.*]] = load i8, ptr [[TMP134]], align 1 +; CHECK-NEXT: [[TMP136:%.*]] = insertelement <16 x i8> [[TMP132]], i8 [[TMP135]], i32 7 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE46]] +; CHECK: pred.load.continue46: +; CHECK-NEXT: [[TMP137:%.*]] = phi <16 x i8> [ [[TMP132]], [[PRED_LOAD_CONTINUE44]] ], [ [[TMP136]], [[PRED_LOAD_IF45]] ] +; CHECK-NEXT: [[TMP138:%.*]] = extractelement <16 x i1> [[TMP16]], i32 8 +; CHECK-NEXT: br i1 [[TMP138]], label [[PRED_LOAD_IF47:%.*]], label [[PRED_LOAD_CONTINUE48:%.*]] +; CHECK: pred.load.if47: +; CHECK-NEXT: [[TMP139:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP8]] +; CHECK-NEXT: [[TMP140:%.*]] = load i8, ptr [[TMP139]], align 1 +; CHECK-NEXT: [[TMP141:%.*]] = insertelement <16 x i8> [[TMP137]], i8 [[TMP140]], i32 8 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE48]] +; CHECK: pred.load.continue48: +; CHECK-NEXT: [[TMP142:%.*]] = phi <16 x i8> [ [[TMP137]], [[PRED_LOAD_CONTINUE46]] ], [ [[TMP141]], [[PRED_LOAD_IF47]] ] +; CHECK-NEXT: [[TMP143:%.*]] = extractelement <16 x i1> [[TMP16]], i32 9 +; CHECK-NEXT: br i1 [[TMP143]], label [[PRED_LOAD_IF49:%.*]], label [[PRED_LOAD_CONTINUE50:%.*]] +; CHECK: pred.load.if49: +; CHECK-NEXT: [[TMP144:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP9]] +; CHECK-NEXT: [[TMP145:%.*]] = load i8, ptr [[TMP144]], align 1 +; CHECK-NEXT: [[TMP146:%.*]] = insertelement <16 x i8> [[TMP142]], i8 [[TMP145]], i32 9 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE50]] +; CHECK: pred.load.continue50: +; CHECK-NEXT: [[TMP147:%.*]] = phi <16 x i8> [ [[TMP142]], [[PRED_LOAD_CONTINUE48]] ], [ [[TMP146]], [[PRED_LOAD_IF49]] ] +; CHECK-NEXT: 
[[TMP148:%.*]] = extractelement <16 x i1> [[TMP16]], i32 10 +; CHECK-NEXT: br i1 [[TMP148]], label [[PRED_LOAD_IF51:%.*]], label [[PRED_LOAD_CONTINUE52:%.*]] +; CHECK: pred.load.if51: +; CHECK-NEXT: [[TMP149:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP10]] +; CHECK-NEXT: [[TMP150:%.*]] = load i8, ptr [[TMP149]], align 1 +; CHECK-NEXT: [[TMP151:%.*]] = insertelement <16 x i8> [[TMP147]], i8 [[TMP150]], i32 10 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE52]] +; CHECK: pred.load.continue52: +; CHECK-NEXT: [[TMP152:%.*]] = phi <16 x i8> [ [[TMP147]], [[PRED_LOAD_CONTINUE50]] ], [ [[TMP151]], [[PRED_LOAD_IF51]] ] +; CHECK-NEXT: [[TMP153:%.*]] = extractelement <16 x i1> [[TMP16]], i32 11 +; CHECK-NEXT: br i1 [[TMP153]], label [[PRED_LOAD_IF53:%.*]], label [[PRED_LOAD_CONTINUE54:%.*]] +; CHECK: pred.load.if53: +; CHECK-NEXT: [[TMP154:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP11]] +; CHECK-NEXT: [[TMP155:%.*]] = load i8, ptr [[TMP154]], align 1 +; CHECK-NEXT: [[TMP156:%.*]] = insertelement <16 x i8> [[TMP152]], i8 [[TMP155]], i32 11 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE54]] +; CHECK: pred.load.continue54: +; CHECK-NEXT: [[TMP157:%.*]] = phi <16 x i8> [ [[TMP152]], [[PRED_LOAD_CONTINUE52]] ], [ [[TMP156]], [[PRED_LOAD_IF53]] ] +; CHECK-NEXT: [[TMP158:%.*]] = extractelement <16 x i1> [[TMP16]], i32 12 +; CHECK-NEXT: br i1 [[TMP158]], label [[PRED_LOAD_IF55:%.*]], label [[PRED_LOAD_CONTINUE56:%.*]] +; CHECK: pred.load.if55: +; CHECK-NEXT: [[TMP159:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP12]] +; CHECK-NEXT: [[TMP160:%.*]] = load i8, ptr [[TMP159]], align 1 +; CHECK-NEXT: [[TMP161:%.*]] = insertelement <16 x i8> [[TMP157]], i8 [[TMP160]], i32 12 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE56]] +; CHECK: pred.load.continue56: +; CHECK-NEXT: [[TMP162:%.*]] = phi <16 x i8> [ [[TMP157]], [[PRED_LOAD_CONTINUE54]] ], [ [[TMP161]], [[PRED_LOAD_IF55]] ] +; CHECK-NEXT: [[TMP163:%.*]] = extractelement <16 x i1> [[TMP16]], i32 13 +; CHECK-NEXT: br i1 [[TMP163]], label [[PRED_LOAD_IF57:%.*]], label [[PRED_LOAD_CONTINUE58:%.*]] +; CHECK: pred.load.if57: +; CHECK-NEXT: [[TMP164:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP13]] +; CHECK-NEXT: [[TMP165:%.*]] = load i8, ptr [[TMP164]], align 1 +; CHECK-NEXT: [[TMP166:%.*]] = insertelement <16 x i8> [[TMP162]], i8 [[TMP165]], i32 13 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE58]] +; CHECK: pred.load.continue58: +; CHECK-NEXT: [[TMP167:%.*]] = phi <16 x i8> [ [[TMP162]], [[PRED_LOAD_CONTINUE56]] ], [ [[TMP166]], [[PRED_LOAD_IF57]] ] +; CHECK-NEXT: [[TMP168:%.*]] = extractelement <16 x i1> [[TMP16]], i32 14 +; CHECK-NEXT: br i1 [[TMP168]], label [[PRED_LOAD_IF59:%.*]], label [[PRED_LOAD_CONTINUE60:%.*]] +; CHECK: pred.load.if59: +; CHECK-NEXT: [[TMP169:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP14]] +; CHECK-NEXT: [[TMP170:%.*]] = load i8, ptr [[TMP169]], align 1 +; CHECK-NEXT: [[TMP171:%.*]] = insertelement <16 x i8> [[TMP167]], i8 [[TMP170]], i32 14 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE60]] +; CHECK: pred.load.continue60: +; CHECK-NEXT: [[TMP172:%.*]] = phi <16 x i8> [ [[TMP167]], [[PRED_LOAD_CONTINUE58]] ], [ [[TMP171]], [[PRED_LOAD_IF59]] ] +; CHECK-NEXT: [[TMP173:%.*]] = extractelement <16 x i1> [[TMP16]], i32 15 +; CHECK-NEXT: br i1 [[TMP173]], label [[PRED_LOAD_IF61:%.*]], label [[PRED_LOAD_CONTINUE62]] +; CHECK: pred.load.if61: +; CHECK-NEXT: [[TMP174:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP15]] +; CHECK-NEXT: [[TMP175:%.*]] = load i8, ptr [[TMP174]], align 1 +; CHECK-NEXT: 
[[TMP176:%.*]] = insertelement <16 x i8> [[TMP172]], i8 [[TMP175]], i32 15 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE62]] +; CHECK: pred.load.continue62: +; CHECK-NEXT: [[TMP177:%.*]] = phi <16 x i8> [ [[TMP172]], [[PRED_LOAD_CONTINUE60]] ], [ [[TMP176]], [[PRED_LOAD_IF61]] ] +; CHECK-NEXT: [[TMP178:%.*]] = sext <16 x i8> [[TMP177]] to <16 x i32> +; CHECK-NEXT: [[TMP179:%.*]] = mul nsw <16 x i32> [[TMP178]], [[TMP97]] +; CHECK-NEXT: [[TMP180:%.*]] = select <16 x i1> [[TMP16]], <16 x i32> [[TMP179]], <16 x i32> zeroinitializer +; CHECK-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.experimental.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP180]]) +; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 16 +; CHECK-NEXT: [[VEC_IND_NEXT]] = add <16 x i64> [[VEC_IND]], splat (i64 16) +; CHECK-NEXT: [[TMP181:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP181]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK: middle.block: +; CHECK-NEXT: [[TMP182:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]]) +; CHECK-NEXT: br i1 true, label [[EXIT:%.*]], label [[SCALAR_PH]] +; CHECK: scalar.ph: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP182]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ] +; CHECK-NEXT: br label [[FOR_BODY:%.*]] +; CHECK: for.body: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[ACCUM:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV]] +; CHECK-NEXT: [[LOAD_A:%.*]] = load i8, ptr [[GEP_A]], align 1 +; CHECK-NEXT: [[EXT_A:%.*]] = sext i8 [[LOAD_A]] to i32 +; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[IV]] +; CHECK-NEXT: [[LOAD_B:%.*]] = load i8, ptr [[GEP_B]], align 1 +; CHECK-NEXT: [[EXT_B:%.*]] = sext i8 [[LOAD_B]] to i32 +; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[EXT_B]], [[EXT_A]] +; CHECK-NEXT: [[ADD]] = add nsw i32 [[MUL]], [[ACCUM]] +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK: exit: +; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP182]], [[MIDDLE_BLOCK]] ] +; CHECK-NEXT: ret i32 [[ADD_LCSSA]] +; +entry: + br label %for.body + +for.body: ; preds = %entry, %for.body + %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ] + %accum = phi i32 [ 0, %entry ], [ %add, %for.body ] + %gep.a = getelementptr inbounds i8, ptr %a, i64 %iv + %load.a = load i8, ptr %gep.a, align 1 + %ext.a = sext i8 %load.a to i32 + %gep.b = getelementptr inbounds i8, ptr %b, i64 %iv + %load.b = load i8, ptr %gep.b, align 1 + %ext.b = sext i8 %load.b to i32 + %mul = mul nsw i32 %ext.b, %ext.a + %add = add nsw i32 %mul, %accum + %iv.next = add nuw nsw i64 %iv, 1 + %exitcond.not = icmp eq i64 %iv.next, %N + br i1 %exitcond.not, label %exit, label %for.body, !llvm.loop !7 + +exit: ; preds = %for.body + ret i32 %add +} + +!7 = distinct !{!7, !8, !9, !10} +!8 = !{!"llvm.loop.mustprogress"} +!9 = !{!"llvm.loop.vectorize.predicate.enable", i1 true} +!10 = !{!"llvm.loop.vectorize.enable", i1 true} + attributes #0 = { vscale_range(1,16) "target-features"="+sve" } attributes #1 
= { "target-cpu"="apple-m1" } ;. @@ -241,4 +624,7 @@ attributes #1 = { "target-cpu"="apple-m1" } ; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]], [[META2]]} ; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META2]]} ; CHECK: [[LOOP7]] = distinct !{[[LOOP7]], [[META2]], [[META1]]} +; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META9:![0-9]+]], [[META1]], [[META2]]} +; CHECK: [[META9]] = !{!"llvm.loop.mustprogress"} +; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META9]], [[META2]], [[META1]]} ;. diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-neon.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-neon.ll index 97a5801d88108..54dc6ce5d3a92 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-neon.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-neon.ll @@ -1391,197 +1391,8 @@ exit: ; preds = %for.body ret i32 %result } -define i32 @not_dotp_predicated(i64 %N, ptr %a, ptr %b) { -; CHECK-INTERLEAVE1-LABEL: define i32 @not_dotp_predicated( -; CHECK-INTERLEAVE1-SAME: i64 [[N:%.*]], ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { -; CHECK-INTERLEAVE1-NEXT: entry: -; CHECK-INTERLEAVE1-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 16 -; CHECK-INTERLEAVE1-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] -; CHECK-INTERLEAVE1: vector.ph: -; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 16 -; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]] -; CHECK-INTERLEAVE1: vector.body: -; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0 -; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP0]] -; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 0 -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32> -; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP0]] -; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i32 0 -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32> -; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = mul nsw <16 x i32> [[TMP6]], [[TMP3]] -; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.experimental.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP7]]) -; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 -; CHECK-INTERLEAVE1-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] -; CHECK-INTERLEAVE1: middle.block: -; CHECK-INTERLEAVE1-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]]) -; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] -; CHECK-INTERLEAVE1: scalar.ph: -; CHECK-INTERLEAVE1-NEXT: 
[[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] -; CHECK-INTERLEAVE1-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP9]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ] -; CHECK-INTERLEAVE1-NEXT: br label [[FOR_BODY:%.*]] -; CHECK-INTERLEAVE1: for.body: -; CHECK-INTERLEAVE1-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[ACCUM:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[GEP_A:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV]] -; CHECK-INTERLEAVE1-NEXT: [[LOAD_A:%.*]] = load i8, ptr [[GEP_A]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[EXT_A:%.*]] = sext i8 [[LOAD_A]] to i32 -; CHECK-INTERLEAVE1-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[IV]] -; CHECK-INTERLEAVE1-NEXT: [[LOAD_B:%.*]] = load i8, ptr [[GEP_B]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[EXT_B:%.*]] = sext i8 [[LOAD_B]] to i32 -; CHECK-INTERLEAVE1-NEXT: [[MUL:%.*]] = mul nsw i32 [[EXT_B]], [[EXT_A]] -; CHECK-INTERLEAVE1-NEXT: [[ADD]] = add nsw i32 [[MUL]], [[ACCUM]] -; CHECK-INTERLEAVE1-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-INTERLEAVE1-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] -; CHECK-INTERLEAVE1: exit: -; CHECK-INTERLEAVE1-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP9]], [[MIDDLE_BLOCK]] ] -; CHECK-INTERLEAVE1-NEXT: ret i32 [[ADD_LCSSA]] -; -; CHECK-INTERLEAVED-LABEL: define i32 @not_dotp_predicated( -; CHECK-INTERLEAVED-SAME: i64 [[N:%.*]], ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { -; CHECK-INTERLEAVED-NEXT: entry: -; CHECK-INTERLEAVED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 32 -; CHECK-INTERLEAVED-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] -; CHECK-INTERLEAVED: vector.ph: -; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 32 -; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]] -; CHECK-INTERLEAVED: vector.body: -; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE5:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0 -; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP0]] -; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 0 -; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 16 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP0]] -; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[TMP6]], i32 0 -; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[TMP6]], i32 16 -; 
CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = sext <16 x i8> [[WIDE_LOAD3]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = sext <16 x i8> [[WIDE_LOAD4]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = mul nsw <16 x i32> [[TMP9]], [[TMP4]] -; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = mul nsw <16 x i32> [[TMP10]], [[TMP5]] -; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.experimental.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP11]]) -; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE5]] = call <4 x i32> @llvm.experimental.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP12]]) -; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 -; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] -; CHECK-INTERLEAVED: middle.block: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]] -; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]]) -; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] -; CHECK-INTERLEAVED: scalar.ph: -; CHECK-INTERLEAVED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] -; CHECK-INTERLEAVED-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP14]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ] -; CHECK-INTERLEAVED-NEXT: br label [[FOR_BODY:%.*]] -; CHECK-INTERLEAVED: for.body: -; CHECK-INTERLEAVED-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[ACCUM:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[GEP_A:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV]] -; CHECK-INTERLEAVED-NEXT: [[LOAD_A:%.*]] = load i8, ptr [[GEP_A]], align 1 -; CHECK-INTERLEAVED-NEXT: [[EXT_A:%.*]] = sext i8 [[LOAD_A]] to i32 -; CHECK-INTERLEAVED-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[IV]] -; CHECK-INTERLEAVED-NEXT: [[LOAD_B:%.*]] = load i8, ptr [[GEP_B]], align 1 -; CHECK-INTERLEAVED-NEXT: [[EXT_B:%.*]] = sext i8 [[LOAD_B]] to i32 -; CHECK-INTERLEAVED-NEXT: [[MUL:%.*]] = mul nsw i32 [[EXT_B]], [[EXT_A]] -; CHECK-INTERLEAVED-NEXT: [[ADD]] = add nsw i32 [[MUL]], [[ACCUM]] -; CHECK-INTERLEAVED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-INTERLEAVED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] -; CHECK-INTERLEAVED: exit: -; CHECK-INTERLEAVED-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP14]], [[MIDDLE_BLOCK]] ] -; CHECK-INTERLEAVED-NEXT: ret i32 [[ADD_LCSSA]] -; -; CHECK-MAXBW-LABEL: define i32 @not_dotp_predicated( -; CHECK-MAXBW-SAME: i64 [[N:%.*]], ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { -; CHECK-MAXBW-NEXT: entry: -; CHECK-MAXBW-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 16 -; CHECK-MAXBW-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] -; CHECK-MAXBW: vector.ph: -; CHECK-MAXBW-NEXT: 
[[N_MOD_VF:%.*]] = urem i64 [[N]], 16 -; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; CHECK-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]] -; CHECK-MAXBW: vector.body: -; CHECK-MAXBW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-MAXBW-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] -; CHECK-MAXBW-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0 -; CHECK-MAXBW-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP0]] -; CHECK-MAXBW-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 0 -; CHECK-MAXBW-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1 -; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32> -; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP0]] -; CHECK-MAXBW-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i32 0 -; CHECK-MAXBW-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1 -; CHECK-MAXBW-NEXT: [[TMP6:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32> -; CHECK-MAXBW-NEXT: [[TMP7:%.*]] = mul nsw <16 x i32> [[TMP6]], [[TMP3]] -; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.experimental.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP7]]) -; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 -; CHECK-MAXBW-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-MAXBW-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] -; CHECK-MAXBW: middle.block: -; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]]) -; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] -; CHECK-MAXBW-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] -; CHECK-MAXBW: scalar.ph: -; CHECK-MAXBW-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] -; CHECK-MAXBW-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP9]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ] -; CHECK-MAXBW-NEXT: br label [[FOR_BODY:%.*]] -; CHECK-MAXBW: for.body: -; CHECK-MAXBW-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-MAXBW-NEXT: [[ACCUM:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] -; CHECK-MAXBW-NEXT: [[GEP_A:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV]] -; CHECK-MAXBW-NEXT: [[LOAD_A:%.*]] = load i8, ptr [[GEP_A]], align 1 -; CHECK-MAXBW-NEXT: [[EXT_A:%.*]] = sext i8 [[LOAD_A]] to i32 -; CHECK-MAXBW-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[IV]] -; CHECK-MAXBW-NEXT: [[LOAD_B:%.*]] = load i8, ptr [[GEP_B]], align 1 -; CHECK-MAXBW-NEXT: [[EXT_B:%.*]] = sext i8 [[LOAD_B]] to i32 -; CHECK-MAXBW-NEXT: [[MUL:%.*]] = mul nsw i32 [[EXT_B]], [[EXT_A]] -; CHECK-MAXBW-NEXT: [[ADD]] = add nsw i32 [[MUL]], [[ACCUM]] -; CHECK-MAXBW-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-MAXBW-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-MAXBW-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] -; CHECK-MAXBW: exit: -; CHECK-MAXBW-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP9]], [[MIDDLE_BLOCK]] ] -; CHECK-MAXBW-NEXT: ret i32 [[ADD_LCSSA]] -; -entry: - br label %for.body - -for.body: ; preds = %entry, %for.body - %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ] 
- %accum = phi i32 [ 0, %entry ], [ %add, %for.body ] - %gep.a = getelementptr inbounds i8, ptr %a, i64 %iv - %load.a = load i8, ptr %gep.a, align 1 - %ext.a = sext i8 %load.a to i32 - %gep.b = getelementptr inbounds i8, ptr %b, i64 %iv - %load.b = load i8, ptr %gep.b, align 1 - %ext.b = sext i8 %load.b to i32 - %mul = mul nsw i32 %ext.b, %ext.a - %add = add nsw i32 %mul, %accum - %iv.next = add nuw nsw i64 %iv, 1 - %exitcond.not = icmp eq i64 %iv.next, %N - br i1 %exitcond.not, label %exit, label %for.body - -exit: ; preds = %for.body - ret i32 %add -} - -define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { -; CHECK-INTERLEAVE1-LABEL: define i32 @not_dotp_predicated_pragma( +define i32 @dotp_predicated(i64 %N, ptr %a, ptr %b) { +; CHECK-INTERLEAVE1-LABEL: define i32 @dotp_predicated( ; CHECK-INTERLEAVE1-SAME: i64 [[N:%.*]], ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-INTERLEAVE1-NEXT: entry: ; CHECK-INTERLEAVE1-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] @@ -1596,7 +1407,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVE1: vector.body: ; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_LOAD_CONTINUE62:%.*]] ] ; CHECK-INTERLEAVE1-NEXT: [[VEC_IND:%.*]] = phi <16 x i64> [ , [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[PRED_LOAD_CONTINUE62]] ] -; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <16 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP180:%.*]], [[PRED_LOAD_CONTINUE62]] ] +; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[PRED_LOAD_CONTINUE62]] ] ; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0 ; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1 ; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 2 @@ -1617,7 +1428,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVE1-NEXT: [[TMP17:%.*]] = extractelement <16 x i1> [[TMP16]], i32 0 ; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP17]], label [[PRED_LOAD_IF:%.*]], label [[PRED_LOAD_CONTINUE:%.*]] ; CHECK-INTERLEAVE1: pred.load.if: -; CHECK-INTERLEAVE1-NEXT: [[TMP18:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP0]] +; CHECK-INTERLEAVE1-NEXT: [[TMP18:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP0]] ; CHECK-INTERLEAVE1-NEXT: [[TMP19:%.*]] = load i8, ptr [[TMP18]], align 1 ; CHECK-INTERLEAVE1-NEXT: [[TMP20:%.*]] = insertelement <16 x i8> poison, i8 [[TMP19]], i32 0 ; CHECK-INTERLEAVE1-NEXT: br label [[PRED_LOAD_CONTINUE]] @@ -1626,7 +1437,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVE1-NEXT: [[TMP22:%.*]] = extractelement <16 x i1> [[TMP16]], i32 1 ; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP22]], label [[PRED_LOAD_IF1:%.*]], label [[PRED_LOAD_CONTINUE2:%.*]] ; CHECK-INTERLEAVE1: pred.load.if1: -; CHECK-INTERLEAVE1-NEXT: [[TMP23:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP1]] +; CHECK-INTERLEAVE1-NEXT: [[TMP23:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP1]] ; CHECK-INTERLEAVE1-NEXT: [[TMP24:%.*]] = load i8, ptr [[TMP23]], align 1 ; CHECK-INTERLEAVE1-NEXT: [[TMP25:%.*]] = insertelement <16 x i8> [[TMP21]], i8 [[TMP24]], i32 1 ; CHECK-INTERLEAVE1-NEXT: br label [[PRED_LOAD_CONTINUE2]] @@ -1635,7 +1446,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVE1-NEXT: [[TMP27:%.*]] = extractelement <16 x i1> [[TMP16]], i32 2 ; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP27]], label 
[[PRED_LOAD_IF3:%.*]], label [[PRED_LOAD_CONTINUE4:%.*]] ; CHECK-INTERLEAVE1: pred.load.if3: -; CHECK-INTERLEAVE1-NEXT: [[TMP28:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP2]] +; CHECK-INTERLEAVE1-NEXT: [[TMP28:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP2]] ; CHECK-INTERLEAVE1-NEXT: [[TMP29:%.*]] = load i8, ptr [[TMP28]], align 1 ; CHECK-INTERLEAVE1-NEXT: [[TMP30:%.*]] = insertelement <16 x i8> [[TMP26]], i8 [[TMP29]], i32 2 ; CHECK-INTERLEAVE1-NEXT: br label [[PRED_LOAD_CONTINUE4]] @@ -1644,7 +1455,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVE1-NEXT: [[TMP32:%.*]] = extractelement <16 x i1> [[TMP16]], i32 3 ; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP32]], label [[PRED_LOAD_IF5:%.*]], label [[PRED_LOAD_CONTINUE6:%.*]] ; CHECK-INTERLEAVE1: pred.load.if5: -; CHECK-INTERLEAVE1-NEXT: [[TMP33:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP3]] +; CHECK-INTERLEAVE1-NEXT: [[TMP33:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP3]] ; CHECK-INTERLEAVE1-NEXT: [[TMP34:%.*]] = load i8, ptr [[TMP33]], align 1 ; CHECK-INTERLEAVE1-NEXT: [[TMP35:%.*]] = insertelement <16 x i8> [[TMP31]], i8 [[TMP34]], i32 3 ; CHECK-INTERLEAVE1-NEXT: br label [[PRED_LOAD_CONTINUE6]] @@ -1653,7 +1464,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVE1-NEXT: [[TMP37:%.*]] = extractelement <16 x i1> [[TMP16]], i32 4 ; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP37]], label [[PRED_LOAD_IF7:%.*]], label [[PRED_LOAD_CONTINUE8:%.*]] ; CHECK-INTERLEAVE1: pred.load.if7: -; CHECK-INTERLEAVE1-NEXT: [[TMP38:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP4]] +; CHECK-INTERLEAVE1-NEXT: [[TMP38:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP4]] ; CHECK-INTERLEAVE1-NEXT: [[TMP39:%.*]] = load i8, ptr [[TMP38]], align 1 ; CHECK-INTERLEAVE1-NEXT: [[TMP40:%.*]] = insertelement <16 x i8> [[TMP36]], i8 [[TMP39]], i32 4 ; CHECK-INTERLEAVE1-NEXT: br label [[PRED_LOAD_CONTINUE8]] @@ -1662,7 +1473,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVE1-NEXT: [[TMP42:%.*]] = extractelement <16 x i1> [[TMP16]], i32 5 ; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP42]], label [[PRED_LOAD_IF9:%.*]], label [[PRED_LOAD_CONTINUE10:%.*]] ; CHECK-INTERLEAVE1: pred.load.if9: -; CHECK-INTERLEAVE1-NEXT: [[TMP43:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP5]] +; CHECK-INTERLEAVE1-NEXT: [[TMP43:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP5]] ; CHECK-INTERLEAVE1-NEXT: [[TMP44:%.*]] = load i8, ptr [[TMP43]], align 1 ; CHECK-INTERLEAVE1-NEXT: [[TMP45:%.*]] = insertelement <16 x i8> [[TMP41]], i8 [[TMP44]], i32 5 ; CHECK-INTERLEAVE1-NEXT: br label [[PRED_LOAD_CONTINUE10]] @@ -1671,7 +1482,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVE1-NEXT: [[TMP47:%.*]] = extractelement <16 x i1> [[TMP16]], i32 6 ; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP47]], label [[PRED_LOAD_IF11:%.*]], label [[PRED_LOAD_CONTINUE12:%.*]] ; CHECK-INTERLEAVE1: pred.load.if11: -; CHECK-INTERLEAVE1-NEXT: [[TMP48:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP6]] +; CHECK-INTERLEAVE1-NEXT: [[TMP48:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP6]] ; CHECK-INTERLEAVE1-NEXT: [[TMP49:%.*]] = load i8, ptr [[TMP48]], align 1 ; CHECK-INTERLEAVE1-NEXT: [[TMP50:%.*]] = insertelement <16 x i8> [[TMP46]], i8 [[TMP49]], i32 6 ; CHECK-INTERLEAVE1-NEXT: br label [[PRED_LOAD_CONTINUE12]] @@ -1680,7 +1491,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVE1-NEXT: 
[[TMP52:%.*]] = extractelement <16 x i1> [[TMP16]], i32 7 ; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP52]], label [[PRED_LOAD_IF13:%.*]], label [[PRED_LOAD_CONTINUE14:%.*]] ; CHECK-INTERLEAVE1: pred.load.if13: -; CHECK-INTERLEAVE1-NEXT: [[TMP53:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP7]] +; CHECK-INTERLEAVE1-NEXT: [[TMP53:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP7]] ; CHECK-INTERLEAVE1-NEXT: [[TMP54:%.*]] = load i8, ptr [[TMP53]], align 1 ; CHECK-INTERLEAVE1-NEXT: [[TMP55:%.*]] = insertelement <16 x i8> [[TMP51]], i8 [[TMP54]], i32 7 ; CHECK-INTERLEAVE1-NEXT: br label [[PRED_LOAD_CONTINUE14]] @@ -1689,7 +1500,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVE1-NEXT: [[TMP57:%.*]] = extractelement <16 x i1> [[TMP16]], i32 8 ; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP57]], label [[PRED_LOAD_IF15:%.*]], label [[PRED_LOAD_CONTINUE16:%.*]] ; CHECK-INTERLEAVE1: pred.load.if15: -; CHECK-INTERLEAVE1-NEXT: [[TMP58:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP8]] +; CHECK-INTERLEAVE1-NEXT: [[TMP58:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP8]] ; CHECK-INTERLEAVE1-NEXT: [[TMP59:%.*]] = load i8, ptr [[TMP58]], align 1 ; CHECK-INTERLEAVE1-NEXT: [[TMP60:%.*]] = insertelement <16 x i8> [[TMP56]], i8 [[TMP59]], i32 8 ; CHECK-INTERLEAVE1-NEXT: br label [[PRED_LOAD_CONTINUE16]] @@ -1698,7 +1509,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVE1-NEXT: [[TMP62:%.*]] = extractelement <16 x i1> [[TMP16]], i32 9 ; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP62]], label [[PRED_LOAD_IF17:%.*]], label [[PRED_LOAD_CONTINUE18:%.*]] ; CHECK-INTERLEAVE1: pred.load.if17: -; CHECK-INTERLEAVE1-NEXT: [[TMP63:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP9]] +; CHECK-INTERLEAVE1-NEXT: [[TMP63:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP9]] ; CHECK-INTERLEAVE1-NEXT: [[TMP64:%.*]] = load i8, ptr [[TMP63]], align 1 ; CHECK-INTERLEAVE1-NEXT: [[TMP65:%.*]] = insertelement <16 x i8> [[TMP61]], i8 [[TMP64]], i32 9 ; CHECK-INTERLEAVE1-NEXT: br label [[PRED_LOAD_CONTINUE18]] @@ -1707,7 +1518,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVE1-NEXT: [[TMP67:%.*]] = extractelement <16 x i1> [[TMP16]], i32 10 ; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP67]], label [[PRED_LOAD_IF19:%.*]], label [[PRED_LOAD_CONTINUE20:%.*]] ; CHECK-INTERLEAVE1: pred.load.if19: -; CHECK-INTERLEAVE1-NEXT: [[TMP68:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP10]] +; CHECK-INTERLEAVE1-NEXT: [[TMP68:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP10]] ; CHECK-INTERLEAVE1-NEXT: [[TMP69:%.*]] = load i8, ptr [[TMP68]], align 1 ; CHECK-INTERLEAVE1-NEXT: [[TMP70:%.*]] = insertelement <16 x i8> [[TMP66]], i8 [[TMP69]], i32 10 ; CHECK-INTERLEAVE1-NEXT: br label [[PRED_LOAD_CONTINUE20]] @@ -1716,7 +1527,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVE1-NEXT: [[TMP72:%.*]] = extractelement <16 x i1> [[TMP16]], i32 11 ; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP72]], label [[PRED_LOAD_IF21:%.*]], label [[PRED_LOAD_CONTINUE22:%.*]] ; CHECK-INTERLEAVE1: pred.load.if21: -; CHECK-INTERLEAVE1-NEXT: [[TMP73:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP11]] +; CHECK-INTERLEAVE1-NEXT: [[TMP73:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP11]] ; CHECK-INTERLEAVE1-NEXT: [[TMP74:%.*]] = load i8, ptr [[TMP73]], align 1 ; CHECK-INTERLEAVE1-NEXT: [[TMP75:%.*]] = insertelement <16 x i8> [[TMP71]], i8 [[TMP74]], i32 11 ; CHECK-INTERLEAVE1-NEXT: br label 
[[PRED_LOAD_CONTINUE22]] @@ -1725,7 +1536,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVE1-NEXT: [[TMP77:%.*]] = extractelement <16 x i1> [[TMP16]], i32 12 ; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP77]], label [[PRED_LOAD_IF23:%.*]], label [[PRED_LOAD_CONTINUE24:%.*]] ; CHECK-INTERLEAVE1: pred.load.if23: -; CHECK-INTERLEAVE1-NEXT: [[TMP78:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP12]] +; CHECK-INTERLEAVE1-NEXT: [[TMP78:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP12]] ; CHECK-INTERLEAVE1-NEXT: [[TMP79:%.*]] = load i8, ptr [[TMP78]], align 1 ; CHECK-INTERLEAVE1-NEXT: [[TMP80:%.*]] = insertelement <16 x i8> [[TMP76]], i8 [[TMP79]], i32 12 ; CHECK-INTERLEAVE1-NEXT: br label [[PRED_LOAD_CONTINUE24]] @@ -1734,7 +1545,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVE1-NEXT: [[TMP82:%.*]] = extractelement <16 x i1> [[TMP16]], i32 13 ; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP82]], label [[PRED_LOAD_IF25:%.*]], label [[PRED_LOAD_CONTINUE26:%.*]] ; CHECK-INTERLEAVE1: pred.load.if25: -; CHECK-INTERLEAVE1-NEXT: [[TMP83:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP13]] +; CHECK-INTERLEAVE1-NEXT: [[TMP83:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP13]] ; CHECK-INTERLEAVE1-NEXT: [[TMP84:%.*]] = load i8, ptr [[TMP83]], align 1 ; CHECK-INTERLEAVE1-NEXT: [[TMP85:%.*]] = insertelement <16 x i8> [[TMP81]], i8 [[TMP84]], i32 13 ; CHECK-INTERLEAVE1-NEXT: br label [[PRED_LOAD_CONTINUE26]] @@ -1743,7 +1554,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVE1-NEXT: [[TMP87:%.*]] = extractelement <16 x i1> [[TMP16]], i32 14 ; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP87]], label [[PRED_LOAD_IF27:%.*]], label [[PRED_LOAD_CONTINUE28:%.*]] ; CHECK-INTERLEAVE1: pred.load.if27: -; CHECK-INTERLEAVE1-NEXT: [[TMP88:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP14]] +; CHECK-INTERLEAVE1-NEXT: [[TMP88:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP14]] ; CHECK-INTERLEAVE1-NEXT: [[TMP89:%.*]] = load i8, ptr [[TMP88]], align 1 ; CHECK-INTERLEAVE1-NEXT: [[TMP90:%.*]] = insertelement <16 x i8> [[TMP86]], i8 [[TMP89]], i32 14 ; CHECK-INTERLEAVE1-NEXT: br label [[PRED_LOAD_CONTINUE28]] @@ -1752,7 +1563,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVE1-NEXT: [[TMP92:%.*]] = extractelement <16 x i1> [[TMP16]], i32 15 ; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP92]], label [[PRED_LOAD_IF29:%.*]], label [[PRED_LOAD_CONTINUE30:%.*]] ; CHECK-INTERLEAVE1: pred.load.if29: -; CHECK-INTERLEAVE1-NEXT: [[TMP93:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP15]] +; CHECK-INTERLEAVE1-NEXT: [[TMP93:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP15]] ; CHECK-INTERLEAVE1-NEXT: [[TMP94:%.*]] = load i8, ptr [[TMP93]], align 1 ; CHECK-INTERLEAVE1-NEXT: [[TMP95:%.*]] = insertelement <16 x i8> [[TMP91]], i8 [[TMP94]], i32 15 ; CHECK-INTERLEAVE1-NEXT: br label [[PRED_LOAD_CONTINUE30]] @@ -1762,7 +1573,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVE1-NEXT: [[TMP98:%.*]] = extractelement <16 x i1> [[TMP16]], i32 0 ; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP98]], label [[PRED_LOAD_IF31:%.*]], label [[PRED_LOAD_CONTINUE32:%.*]] ; CHECK-INTERLEAVE1: pred.load.if31: -; CHECK-INTERLEAVE1-NEXT: [[TMP99:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP0]] +; CHECK-INTERLEAVE1-NEXT: [[TMP99:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP0]] ; CHECK-INTERLEAVE1-NEXT: [[TMP100:%.*]] = load i8, ptr [[TMP99]], 
align 1 ; CHECK-INTERLEAVE1-NEXT: [[TMP101:%.*]] = insertelement <16 x i8> poison, i8 [[TMP100]], i32 0 ; CHECK-INTERLEAVE1-NEXT: br label [[PRED_LOAD_CONTINUE32]] @@ -1771,7 +1582,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVE1-NEXT: [[TMP103:%.*]] = extractelement <16 x i1> [[TMP16]], i32 1 ; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP103]], label [[PRED_LOAD_IF33:%.*]], label [[PRED_LOAD_CONTINUE34:%.*]] ; CHECK-INTERLEAVE1: pred.load.if33: -; CHECK-INTERLEAVE1-NEXT: [[TMP104:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP1]] +; CHECK-INTERLEAVE1-NEXT: [[TMP104:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP1]] ; CHECK-INTERLEAVE1-NEXT: [[TMP105:%.*]] = load i8, ptr [[TMP104]], align 1 ; CHECK-INTERLEAVE1-NEXT: [[TMP106:%.*]] = insertelement <16 x i8> [[TMP102]], i8 [[TMP105]], i32 1 ; CHECK-INTERLEAVE1-NEXT: br label [[PRED_LOAD_CONTINUE34]] @@ -1780,7 +1591,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVE1-NEXT: [[TMP108:%.*]] = extractelement <16 x i1> [[TMP16]], i32 2 ; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP108]], label [[PRED_LOAD_IF35:%.*]], label [[PRED_LOAD_CONTINUE36:%.*]] ; CHECK-INTERLEAVE1: pred.load.if35: -; CHECK-INTERLEAVE1-NEXT: [[TMP109:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP2]] +; CHECK-INTERLEAVE1-NEXT: [[TMP109:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP2]] ; CHECK-INTERLEAVE1-NEXT: [[TMP110:%.*]] = load i8, ptr [[TMP109]], align 1 ; CHECK-INTERLEAVE1-NEXT: [[TMP111:%.*]] = insertelement <16 x i8> [[TMP107]], i8 [[TMP110]], i32 2 ; CHECK-INTERLEAVE1-NEXT: br label [[PRED_LOAD_CONTINUE36]] @@ -1789,7 +1600,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVE1-NEXT: [[TMP113:%.*]] = extractelement <16 x i1> [[TMP16]], i32 3 ; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP113]], label [[PRED_LOAD_IF37:%.*]], label [[PRED_LOAD_CONTINUE38:%.*]] ; CHECK-INTERLEAVE1: pred.load.if37: -; CHECK-INTERLEAVE1-NEXT: [[TMP114:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP3]] +; CHECK-INTERLEAVE1-NEXT: [[TMP114:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP3]] ; CHECK-INTERLEAVE1-NEXT: [[TMP115:%.*]] = load i8, ptr [[TMP114]], align 1 ; CHECK-INTERLEAVE1-NEXT: [[TMP116:%.*]] = insertelement <16 x i8> [[TMP112]], i8 [[TMP115]], i32 3 ; CHECK-INTERLEAVE1-NEXT: br label [[PRED_LOAD_CONTINUE38]] @@ -1798,7 +1609,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVE1-NEXT: [[TMP118:%.*]] = extractelement <16 x i1> [[TMP16]], i32 4 ; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP118]], label [[PRED_LOAD_IF39:%.*]], label [[PRED_LOAD_CONTINUE40:%.*]] ; CHECK-INTERLEAVE1: pred.load.if39: -; CHECK-INTERLEAVE1-NEXT: [[TMP119:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP4]] +; CHECK-INTERLEAVE1-NEXT: [[TMP119:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP4]] ; CHECK-INTERLEAVE1-NEXT: [[TMP120:%.*]] = load i8, ptr [[TMP119]], align 1 ; CHECK-INTERLEAVE1-NEXT: [[TMP121:%.*]] = insertelement <16 x i8> [[TMP117]], i8 [[TMP120]], i32 4 ; CHECK-INTERLEAVE1-NEXT: br label [[PRED_LOAD_CONTINUE40]] @@ -1807,7 +1618,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVE1-NEXT: [[TMP123:%.*]] = extractelement <16 x i1> [[TMP16]], i32 5 ; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP123]], label [[PRED_LOAD_IF41:%.*]], label [[PRED_LOAD_CONTINUE42:%.*]] ; CHECK-INTERLEAVE1: pred.load.if41: -; CHECK-INTERLEAVE1-NEXT: [[TMP124:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP5]] 
+; CHECK-INTERLEAVE1-NEXT: [[TMP124:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP5]] ; CHECK-INTERLEAVE1-NEXT: [[TMP125:%.*]] = load i8, ptr [[TMP124]], align 1 ; CHECK-INTERLEAVE1-NEXT: [[TMP126:%.*]] = insertelement <16 x i8> [[TMP122]], i8 [[TMP125]], i32 5 ; CHECK-INTERLEAVE1-NEXT: br label [[PRED_LOAD_CONTINUE42]] @@ -1816,7 +1627,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVE1-NEXT: [[TMP128:%.*]] = extractelement <16 x i1> [[TMP16]], i32 6 ; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP128]], label [[PRED_LOAD_IF43:%.*]], label [[PRED_LOAD_CONTINUE44:%.*]] ; CHECK-INTERLEAVE1: pred.load.if43: -; CHECK-INTERLEAVE1-NEXT: [[TMP129:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP6]] +; CHECK-INTERLEAVE1-NEXT: [[TMP129:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP6]] ; CHECK-INTERLEAVE1-NEXT: [[TMP130:%.*]] = load i8, ptr [[TMP129]], align 1 ; CHECK-INTERLEAVE1-NEXT: [[TMP131:%.*]] = insertelement <16 x i8> [[TMP127]], i8 [[TMP130]], i32 6 ; CHECK-INTERLEAVE1-NEXT: br label [[PRED_LOAD_CONTINUE44]] @@ -1825,7 +1636,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVE1-NEXT: [[TMP133:%.*]] = extractelement <16 x i1> [[TMP16]], i32 7 ; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP133]], label [[PRED_LOAD_IF45:%.*]], label [[PRED_LOAD_CONTINUE46:%.*]] ; CHECK-INTERLEAVE1: pred.load.if45: -; CHECK-INTERLEAVE1-NEXT: [[TMP134:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP7]] +; CHECK-INTERLEAVE1-NEXT: [[TMP134:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP7]] ; CHECK-INTERLEAVE1-NEXT: [[TMP135:%.*]] = load i8, ptr [[TMP134]], align 1 ; CHECK-INTERLEAVE1-NEXT: [[TMP136:%.*]] = insertelement <16 x i8> [[TMP132]], i8 [[TMP135]], i32 7 ; CHECK-INTERLEAVE1-NEXT: br label [[PRED_LOAD_CONTINUE46]] @@ -1834,7 +1645,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVE1-NEXT: [[TMP138:%.*]] = extractelement <16 x i1> [[TMP16]], i32 8 ; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP138]], label [[PRED_LOAD_IF47:%.*]], label [[PRED_LOAD_CONTINUE48:%.*]] ; CHECK-INTERLEAVE1: pred.load.if47: -; CHECK-INTERLEAVE1-NEXT: [[TMP139:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP8]] +; CHECK-INTERLEAVE1-NEXT: [[TMP139:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP8]] ; CHECK-INTERLEAVE1-NEXT: [[TMP140:%.*]] = load i8, ptr [[TMP139]], align 1 ; CHECK-INTERLEAVE1-NEXT: [[TMP141:%.*]] = insertelement <16 x i8> [[TMP137]], i8 [[TMP140]], i32 8 ; CHECK-INTERLEAVE1-NEXT: br label [[PRED_LOAD_CONTINUE48]] @@ -1843,7 +1654,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVE1-NEXT: [[TMP143:%.*]] = extractelement <16 x i1> [[TMP16]], i32 9 ; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP143]], label [[PRED_LOAD_IF49:%.*]], label [[PRED_LOAD_CONTINUE50:%.*]] ; CHECK-INTERLEAVE1: pred.load.if49: -; CHECK-INTERLEAVE1-NEXT: [[TMP144:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP9]] +; CHECK-INTERLEAVE1-NEXT: [[TMP144:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP9]] ; CHECK-INTERLEAVE1-NEXT: [[TMP145:%.*]] = load i8, ptr [[TMP144]], align 1 ; CHECK-INTERLEAVE1-NEXT: [[TMP146:%.*]] = insertelement <16 x i8> [[TMP142]], i8 [[TMP145]], i32 9 ; CHECK-INTERLEAVE1-NEXT: br label [[PRED_LOAD_CONTINUE50]] @@ -1852,7 +1663,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVE1-NEXT: [[TMP148:%.*]] = extractelement <16 x i1> [[TMP16]], i32 10 ; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP148]], label [[PRED_LOAD_IF51:%.*]], 
label [[PRED_LOAD_CONTINUE52:%.*]] ; CHECK-INTERLEAVE1: pred.load.if51: -; CHECK-INTERLEAVE1-NEXT: [[TMP149:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP10]] +; CHECK-INTERLEAVE1-NEXT: [[TMP149:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP10]] ; CHECK-INTERLEAVE1-NEXT: [[TMP150:%.*]] = load i8, ptr [[TMP149]], align 1 ; CHECK-INTERLEAVE1-NEXT: [[TMP151:%.*]] = insertelement <16 x i8> [[TMP147]], i8 [[TMP150]], i32 10 ; CHECK-INTERLEAVE1-NEXT: br label [[PRED_LOAD_CONTINUE52]] @@ -1861,7 +1672,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVE1-NEXT: [[TMP153:%.*]] = extractelement <16 x i1> [[TMP16]], i32 11 ; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP153]], label [[PRED_LOAD_IF53:%.*]], label [[PRED_LOAD_CONTINUE54:%.*]] ; CHECK-INTERLEAVE1: pred.load.if53: -; CHECK-INTERLEAVE1-NEXT: [[TMP154:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP11]] +; CHECK-INTERLEAVE1-NEXT: [[TMP154:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP11]] ; CHECK-INTERLEAVE1-NEXT: [[TMP155:%.*]] = load i8, ptr [[TMP154]], align 1 ; CHECK-INTERLEAVE1-NEXT: [[TMP156:%.*]] = insertelement <16 x i8> [[TMP152]], i8 [[TMP155]], i32 11 ; CHECK-INTERLEAVE1-NEXT: br label [[PRED_LOAD_CONTINUE54]] @@ -1870,7 +1681,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVE1-NEXT: [[TMP158:%.*]] = extractelement <16 x i1> [[TMP16]], i32 12 ; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP158]], label [[PRED_LOAD_IF55:%.*]], label [[PRED_LOAD_CONTINUE56:%.*]] ; CHECK-INTERLEAVE1: pred.load.if55: -; CHECK-INTERLEAVE1-NEXT: [[TMP159:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP12]] +; CHECK-INTERLEAVE1-NEXT: [[TMP159:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP12]] ; CHECK-INTERLEAVE1-NEXT: [[TMP160:%.*]] = load i8, ptr [[TMP159]], align 1 ; CHECK-INTERLEAVE1-NEXT: [[TMP161:%.*]] = insertelement <16 x i8> [[TMP157]], i8 [[TMP160]], i32 12 ; CHECK-INTERLEAVE1-NEXT: br label [[PRED_LOAD_CONTINUE56]] @@ -1879,7 +1690,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVE1-NEXT: [[TMP163:%.*]] = extractelement <16 x i1> [[TMP16]], i32 13 ; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP163]], label [[PRED_LOAD_IF57:%.*]], label [[PRED_LOAD_CONTINUE58:%.*]] ; CHECK-INTERLEAVE1: pred.load.if57: -; CHECK-INTERLEAVE1-NEXT: [[TMP164:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP13]] +; CHECK-INTERLEAVE1-NEXT: [[TMP164:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP13]] ; CHECK-INTERLEAVE1-NEXT: [[TMP165:%.*]] = load i8, ptr [[TMP164]], align 1 ; CHECK-INTERLEAVE1-NEXT: [[TMP166:%.*]] = insertelement <16 x i8> [[TMP162]], i8 [[TMP165]], i32 13 ; CHECK-INTERLEAVE1-NEXT: br label [[PRED_LOAD_CONTINUE58]] @@ -1888,7 +1699,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVE1-NEXT: [[TMP168:%.*]] = extractelement <16 x i1> [[TMP16]], i32 14 ; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP168]], label [[PRED_LOAD_IF59:%.*]], label [[PRED_LOAD_CONTINUE60:%.*]] ; CHECK-INTERLEAVE1: pred.load.if59: -; CHECK-INTERLEAVE1-NEXT: [[TMP169:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP14]] +; CHECK-INTERLEAVE1-NEXT: [[TMP169:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP14]] ; CHECK-INTERLEAVE1-NEXT: [[TMP170:%.*]] = load i8, ptr [[TMP169]], align 1 ; CHECK-INTERLEAVE1-NEXT: [[TMP171:%.*]] = insertelement <16 x i8> [[TMP167]], i8 [[TMP170]], i32 14 ; CHECK-INTERLEAVE1-NEXT: br label [[PRED_LOAD_CONTINUE60]] @@ -1897,7 +1708,7 @@ define i32 
@not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVE1-NEXT: [[TMP173:%.*]] = extractelement <16 x i1> [[TMP16]], i32 15 ; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP173]], label [[PRED_LOAD_IF61:%.*]], label [[PRED_LOAD_CONTINUE62]] ; CHECK-INTERLEAVE1: pred.load.if61: -; CHECK-INTERLEAVE1-NEXT: [[TMP174:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP15]] +; CHECK-INTERLEAVE1-NEXT: [[TMP174:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP15]] ; CHECK-INTERLEAVE1-NEXT: [[TMP175:%.*]] = load i8, ptr [[TMP174]], align 1 ; CHECK-INTERLEAVE1-NEXT: [[TMP176:%.*]] = insertelement <16 x i8> [[TMP172]], i8 [[TMP175]], i32 15 ; CHECK-INTERLEAVE1-NEXT: br label [[PRED_LOAD_CONTINUE62]] @@ -1905,38 +1716,38 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVE1-NEXT: [[TMP177:%.*]] = phi <16 x i8> [ [[TMP172]], [[PRED_LOAD_CONTINUE60]] ], [ [[TMP176]], [[PRED_LOAD_IF61]] ] ; CHECK-INTERLEAVE1-NEXT: [[TMP178:%.*]] = sext <16 x i8> [[TMP177]] to <16 x i32> ; CHECK-INTERLEAVE1-NEXT: [[TMP179:%.*]] = mul nsw <16 x i32> [[TMP178]], [[TMP97]] -; CHECK-INTERLEAVE1-NEXT: [[TMP180]] = add <16 x i32> [[TMP179]], [[VEC_PHI]] -; CHECK-INTERLEAVE1-NEXT: [[TMP181:%.*]] = select <16 x i1> [[TMP16]], <16 x i32> [[TMP180]], <16 x i32> [[VEC_PHI]] +; CHECK-INTERLEAVE1-NEXT: [[TMP180:%.*]] = select <16 x i1> [[TMP16]], <16 x i32> [[TMP179]], <16 x i32> zeroinitializer +; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.experimental.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP180]]) ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 16 ; CHECK-INTERLEAVE1-NEXT: [[VEC_IND_NEXT]] = add <16 x i64> [[VEC_IND]], splat (i64 16) -; CHECK-INTERLEAVE1-NEXT: [[TMP182:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP182]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: [[TMP181:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP181]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: -; CHECK-INTERLEAVE1-NEXT: [[TMP183:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP181]]) +; CHECK-INTERLEAVE1-NEXT: [[TMP182:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]]) ; CHECK-INTERLEAVE1-NEXT: br i1 true, label [[EXIT:%.*]], label [[SCALAR_PH]] ; CHECK-INTERLEAVE1: scalar.ph: ; CHECK-INTERLEAVE1-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] -; CHECK-INTERLEAVE1-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP183]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ] +; CHECK-INTERLEAVE1-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP182]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ] ; CHECK-INTERLEAVE1-NEXT: br label [[FOR_BODY:%.*]] ; CHECK-INTERLEAVE1: for.body: ; CHECK-INTERLEAVE1-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] ; CHECK-INTERLEAVE1-NEXT: [[ACCUM:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[GEP_A:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[IV]] +; CHECK-INTERLEAVE1-NEXT: [[GEP_A:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV]] ; CHECK-INTERLEAVE1-NEXT: [[LOAD_A:%.*]] = load i8, ptr [[GEP_A]], align 1 ; CHECK-INTERLEAVE1-NEXT: [[EXT_A:%.*]] = sext i8 [[LOAD_A]] to i32 -; CHECK-INTERLEAVE1-NEXT: [[GEP_A2:%.*]] = getelementptr 
inbounds i8, ptr [[A]], i64 [[IV]] -; CHECK-INTERLEAVE1-NEXT: [[LOAD_B:%.*]] = load i8, ptr [[GEP_A2]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[IV]] +; CHECK-INTERLEAVE1-NEXT: [[LOAD_B:%.*]] = load i8, ptr [[GEP_B]], align 1 ; CHECK-INTERLEAVE1-NEXT: [[EXT_B:%.*]] = sext i8 [[LOAD_B]] to i32 ; CHECK-INTERLEAVE1-NEXT: [[MUL:%.*]] = mul nsw i32 [[EXT_B]], [[EXT_A]] ; CHECK-INTERLEAVE1-NEXT: [[ADD]] = add nsw i32 [[MUL]], [[ACCUM]] ; CHECK-INTERLEAVE1-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-INTERLEAVE1-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; CHECK-INTERLEAVE1: exit: -; CHECK-INTERLEAVE1-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP183]], [[MIDDLE_BLOCK]] ] +; CHECK-INTERLEAVE1-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP182]], [[MIDDLE_BLOCK]] ] ; CHECK-INTERLEAVE1-NEXT: ret i32 [[ADD_LCSSA]] ; -; CHECK-INTERLEAVED-LABEL: define i32 @not_dotp_predicated_pragma( +; CHECK-INTERLEAVED-LABEL: define i32 @dotp_predicated( ; CHECK-INTERLEAVED-SAME: i64 [[N:%.*]], ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-INTERLEAVED-NEXT: entry: ; CHECK-INTERLEAVED-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] @@ -1951,7 +1762,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVED: vector.body: ; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_LOAD_CONTINUE62:%.*]] ] ; CHECK-INTERLEAVED-NEXT: [[VEC_IND:%.*]] = phi <16 x i64> [ , [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[PRED_LOAD_CONTINUE62]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <16 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP180:%.*]], [[PRED_LOAD_CONTINUE62]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[PRED_LOAD_CONTINUE62]] ] ; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0 ; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1 ; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 2 @@ -1972,7 +1783,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[TMP17:%.*]] = extractelement <16 x i1> [[TMP16]], i32 0 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP17]], label [[PRED_LOAD_IF:%.*]], label [[PRED_LOAD_CONTINUE:%.*]] ; CHECK-INTERLEAVED: pred.load.if: -; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP0]] +; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP0]] ; CHECK-INTERLEAVED-NEXT: [[TMP19:%.*]] = load i8, ptr [[TMP18]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP20:%.*]] = insertelement <16 x i8> poison, i8 [[TMP19]], i32 0 ; CHECK-INTERLEAVED-NEXT: br label [[PRED_LOAD_CONTINUE]] @@ -1981,7 +1792,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[TMP22:%.*]] = extractelement <16 x i1> [[TMP16]], i32 1 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP22]], label [[PRED_LOAD_IF1:%.*]], label [[PRED_LOAD_CONTINUE2:%.*]] ; CHECK-INTERLEAVED: pred.load.if1: -; CHECK-INTERLEAVED-NEXT: [[TMP23:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP1]] +; CHECK-INTERLEAVED-NEXT: [[TMP23:%.*]] = getelementptr inbounds 
i8, ptr [[A]], i64 [[TMP1]] ; CHECK-INTERLEAVED-NEXT: [[TMP24:%.*]] = load i8, ptr [[TMP23]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP25:%.*]] = insertelement <16 x i8> [[TMP21]], i8 [[TMP24]], i32 1 ; CHECK-INTERLEAVED-NEXT: br label [[PRED_LOAD_CONTINUE2]] @@ -1990,7 +1801,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[TMP27:%.*]] = extractelement <16 x i1> [[TMP16]], i32 2 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP27]], label [[PRED_LOAD_IF3:%.*]], label [[PRED_LOAD_CONTINUE4:%.*]] ; CHECK-INTERLEAVED: pred.load.if3: -; CHECK-INTERLEAVED-NEXT: [[TMP28:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP2]] +; CHECK-INTERLEAVED-NEXT: [[TMP28:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP2]] ; CHECK-INTERLEAVED-NEXT: [[TMP29:%.*]] = load i8, ptr [[TMP28]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP30:%.*]] = insertelement <16 x i8> [[TMP26]], i8 [[TMP29]], i32 2 ; CHECK-INTERLEAVED-NEXT: br label [[PRED_LOAD_CONTINUE4]] @@ -1999,7 +1810,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[TMP32:%.*]] = extractelement <16 x i1> [[TMP16]], i32 3 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP32]], label [[PRED_LOAD_IF5:%.*]], label [[PRED_LOAD_CONTINUE6:%.*]] ; CHECK-INTERLEAVED: pred.load.if5: -; CHECK-INTERLEAVED-NEXT: [[TMP33:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP3]] +; CHECK-INTERLEAVED-NEXT: [[TMP33:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP3]] ; CHECK-INTERLEAVED-NEXT: [[TMP34:%.*]] = load i8, ptr [[TMP33]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP35:%.*]] = insertelement <16 x i8> [[TMP31]], i8 [[TMP34]], i32 3 ; CHECK-INTERLEAVED-NEXT: br label [[PRED_LOAD_CONTINUE6]] @@ -2008,7 +1819,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[TMP37:%.*]] = extractelement <16 x i1> [[TMP16]], i32 4 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP37]], label [[PRED_LOAD_IF7:%.*]], label [[PRED_LOAD_CONTINUE8:%.*]] ; CHECK-INTERLEAVED: pred.load.if7: -; CHECK-INTERLEAVED-NEXT: [[TMP38:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP4]] +; CHECK-INTERLEAVED-NEXT: [[TMP38:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP4]] ; CHECK-INTERLEAVED-NEXT: [[TMP39:%.*]] = load i8, ptr [[TMP38]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP40:%.*]] = insertelement <16 x i8> [[TMP36]], i8 [[TMP39]], i32 4 ; CHECK-INTERLEAVED-NEXT: br label [[PRED_LOAD_CONTINUE8]] @@ -2017,7 +1828,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[TMP42:%.*]] = extractelement <16 x i1> [[TMP16]], i32 5 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP42]], label [[PRED_LOAD_IF9:%.*]], label [[PRED_LOAD_CONTINUE10:%.*]] ; CHECK-INTERLEAVED: pred.load.if9: -; CHECK-INTERLEAVED-NEXT: [[TMP43:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP5]] +; CHECK-INTERLEAVED-NEXT: [[TMP43:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP5]] ; CHECK-INTERLEAVED-NEXT: [[TMP44:%.*]] = load i8, ptr [[TMP43]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP45:%.*]] = insertelement <16 x i8> [[TMP41]], i8 [[TMP44]], i32 5 ; CHECK-INTERLEAVED-NEXT: br label [[PRED_LOAD_CONTINUE10]] @@ -2026,7 +1837,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[TMP47:%.*]] = extractelement <16 x i1> [[TMP16]], i32 6 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP47]], label [[PRED_LOAD_IF11:%.*]], label [[PRED_LOAD_CONTINUE12:%.*]] ; CHECK-INTERLEAVED: pred.load.if11: -; CHECK-INTERLEAVED-NEXT: [[TMP48:%.*]] = 
getelementptr inbounds i8, ptr [[B]], i64 [[TMP6]] +; CHECK-INTERLEAVED-NEXT: [[TMP48:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP6]] ; CHECK-INTERLEAVED-NEXT: [[TMP49:%.*]] = load i8, ptr [[TMP48]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP50:%.*]] = insertelement <16 x i8> [[TMP46]], i8 [[TMP49]], i32 6 ; CHECK-INTERLEAVED-NEXT: br label [[PRED_LOAD_CONTINUE12]] @@ -2035,7 +1846,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[TMP52:%.*]] = extractelement <16 x i1> [[TMP16]], i32 7 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP52]], label [[PRED_LOAD_IF13:%.*]], label [[PRED_LOAD_CONTINUE14:%.*]] ; CHECK-INTERLEAVED: pred.load.if13: -; CHECK-INTERLEAVED-NEXT: [[TMP53:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP7]] +; CHECK-INTERLEAVED-NEXT: [[TMP53:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP7]] ; CHECK-INTERLEAVED-NEXT: [[TMP54:%.*]] = load i8, ptr [[TMP53]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP55:%.*]] = insertelement <16 x i8> [[TMP51]], i8 [[TMP54]], i32 7 ; CHECK-INTERLEAVED-NEXT: br label [[PRED_LOAD_CONTINUE14]] @@ -2044,7 +1855,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[TMP57:%.*]] = extractelement <16 x i1> [[TMP16]], i32 8 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP57]], label [[PRED_LOAD_IF15:%.*]], label [[PRED_LOAD_CONTINUE16:%.*]] ; CHECK-INTERLEAVED: pred.load.if15: -; CHECK-INTERLEAVED-NEXT: [[TMP58:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP8]] +; CHECK-INTERLEAVED-NEXT: [[TMP58:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP8]] ; CHECK-INTERLEAVED-NEXT: [[TMP59:%.*]] = load i8, ptr [[TMP58]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP60:%.*]] = insertelement <16 x i8> [[TMP56]], i8 [[TMP59]], i32 8 ; CHECK-INTERLEAVED-NEXT: br label [[PRED_LOAD_CONTINUE16]] @@ -2053,7 +1864,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[TMP62:%.*]] = extractelement <16 x i1> [[TMP16]], i32 9 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP62]], label [[PRED_LOAD_IF17:%.*]], label [[PRED_LOAD_CONTINUE18:%.*]] ; CHECK-INTERLEAVED: pred.load.if17: -; CHECK-INTERLEAVED-NEXT: [[TMP63:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP9]] +; CHECK-INTERLEAVED-NEXT: [[TMP63:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP9]] ; CHECK-INTERLEAVED-NEXT: [[TMP64:%.*]] = load i8, ptr [[TMP63]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP65:%.*]] = insertelement <16 x i8> [[TMP61]], i8 [[TMP64]], i32 9 ; CHECK-INTERLEAVED-NEXT: br label [[PRED_LOAD_CONTINUE18]] @@ -2062,7 +1873,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[TMP67:%.*]] = extractelement <16 x i1> [[TMP16]], i32 10 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP67]], label [[PRED_LOAD_IF19:%.*]], label [[PRED_LOAD_CONTINUE20:%.*]] ; CHECK-INTERLEAVED: pred.load.if19: -; CHECK-INTERLEAVED-NEXT: [[TMP68:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP10]] +; CHECK-INTERLEAVED-NEXT: [[TMP68:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP10]] ; CHECK-INTERLEAVED-NEXT: [[TMP69:%.*]] = load i8, ptr [[TMP68]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP70:%.*]] = insertelement <16 x i8> [[TMP66]], i8 [[TMP69]], i32 10 ; CHECK-INTERLEAVED-NEXT: br label [[PRED_LOAD_CONTINUE20]] @@ -2071,7 +1882,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[TMP72:%.*]] = extractelement <16 x i1> [[TMP16]], i32 11 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP72]], label 
[[PRED_LOAD_IF21:%.*]], label [[PRED_LOAD_CONTINUE22:%.*]] ; CHECK-INTERLEAVED: pred.load.if21: -; CHECK-INTERLEAVED-NEXT: [[TMP73:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP11]] +; CHECK-INTERLEAVED-NEXT: [[TMP73:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP11]] ; CHECK-INTERLEAVED-NEXT: [[TMP74:%.*]] = load i8, ptr [[TMP73]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP75:%.*]] = insertelement <16 x i8> [[TMP71]], i8 [[TMP74]], i32 11 ; CHECK-INTERLEAVED-NEXT: br label [[PRED_LOAD_CONTINUE22]] @@ -2080,7 +1891,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[TMP77:%.*]] = extractelement <16 x i1> [[TMP16]], i32 12 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP77]], label [[PRED_LOAD_IF23:%.*]], label [[PRED_LOAD_CONTINUE24:%.*]] ; CHECK-INTERLEAVED: pred.load.if23: -; CHECK-INTERLEAVED-NEXT: [[TMP78:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP12]] +; CHECK-INTERLEAVED-NEXT: [[TMP78:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP12]] ; CHECK-INTERLEAVED-NEXT: [[TMP79:%.*]] = load i8, ptr [[TMP78]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP80:%.*]] = insertelement <16 x i8> [[TMP76]], i8 [[TMP79]], i32 12 ; CHECK-INTERLEAVED-NEXT: br label [[PRED_LOAD_CONTINUE24]] @@ -2089,7 +1900,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[TMP82:%.*]] = extractelement <16 x i1> [[TMP16]], i32 13 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP82]], label [[PRED_LOAD_IF25:%.*]], label [[PRED_LOAD_CONTINUE26:%.*]] ; CHECK-INTERLEAVED: pred.load.if25: -; CHECK-INTERLEAVED-NEXT: [[TMP83:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP13]] +; CHECK-INTERLEAVED-NEXT: [[TMP83:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP13]] ; CHECK-INTERLEAVED-NEXT: [[TMP84:%.*]] = load i8, ptr [[TMP83]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP85:%.*]] = insertelement <16 x i8> [[TMP81]], i8 [[TMP84]], i32 13 ; CHECK-INTERLEAVED-NEXT: br label [[PRED_LOAD_CONTINUE26]] @@ -2098,7 +1909,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[TMP87:%.*]] = extractelement <16 x i1> [[TMP16]], i32 14 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP87]], label [[PRED_LOAD_IF27:%.*]], label [[PRED_LOAD_CONTINUE28:%.*]] ; CHECK-INTERLEAVED: pred.load.if27: -; CHECK-INTERLEAVED-NEXT: [[TMP88:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP14]] +; CHECK-INTERLEAVED-NEXT: [[TMP88:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP14]] ; CHECK-INTERLEAVED-NEXT: [[TMP89:%.*]] = load i8, ptr [[TMP88]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP90:%.*]] = insertelement <16 x i8> [[TMP86]], i8 [[TMP89]], i32 14 ; CHECK-INTERLEAVED-NEXT: br label [[PRED_LOAD_CONTINUE28]] @@ -2107,7 +1918,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[TMP92:%.*]] = extractelement <16 x i1> [[TMP16]], i32 15 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP92]], label [[PRED_LOAD_IF29:%.*]], label [[PRED_LOAD_CONTINUE30:%.*]] ; CHECK-INTERLEAVED: pred.load.if29: -; CHECK-INTERLEAVED-NEXT: [[TMP93:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP15]] +; CHECK-INTERLEAVED-NEXT: [[TMP93:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP15]] ; CHECK-INTERLEAVED-NEXT: [[TMP94:%.*]] = load i8, ptr [[TMP93]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP95:%.*]] = insertelement <16 x i8> [[TMP91]], i8 [[TMP94]], i32 15 ; CHECK-INTERLEAVED-NEXT: br label [[PRED_LOAD_CONTINUE30]] @@ -2117,7 +1928,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr 
%b) { ; CHECK-INTERLEAVED-NEXT: [[TMP98:%.*]] = extractelement <16 x i1> [[TMP16]], i32 0 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP98]], label [[PRED_LOAD_IF31:%.*]], label [[PRED_LOAD_CONTINUE32:%.*]] ; CHECK-INTERLEAVED: pred.load.if31: -; CHECK-INTERLEAVED-NEXT: [[TMP99:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP0]] +; CHECK-INTERLEAVED-NEXT: [[TMP99:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP0]] ; CHECK-INTERLEAVED-NEXT: [[TMP100:%.*]] = load i8, ptr [[TMP99]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP101:%.*]] = insertelement <16 x i8> poison, i8 [[TMP100]], i32 0 ; CHECK-INTERLEAVED-NEXT: br label [[PRED_LOAD_CONTINUE32]] @@ -2126,7 +1937,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[TMP103:%.*]] = extractelement <16 x i1> [[TMP16]], i32 1 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP103]], label [[PRED_LOAD_IF33:%.*]], label [[PRED_LOAD_CONTINUE34:%.*]] ; CHECK-INTERLEAVED: pred.load.if33: -; CHECK-INTERLEAVED-NEXT: [[TMP104:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP1]] +; CHECK-INTERLEAVED-NEXT: [[TMP104:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP1]] ; CHECK-INTERLEAVED-NEXT: [[TMP105:%.*]] = load i8, ptr [[TMP104]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP106:%.*]] = insertelement <16 x i8> [[TMP102]], i8 [[TMP105]], i32 1 ; CHECK-INTERLEAVED-NEXT: br label [[PRED_LOAD_CONTINUE34]] @@ -2135,7 +1946,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[TMP108:%.*]] = extractelement <16 x i1> [[TMP16]], i32 2 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP108]], label [[PRED_LOAD_IF35:%.*]], label [[PRED_LOAD_CONTINUE36:%.*]] ; CHECK-INTERLEAVED: pred.load.if35: -; CHECK-INTERLEAVED-NEXT: [[TMP109:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP2]] +; CHECK-INTERLEAVED-NEXT: [[TMP109:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP2]] ; CHECK-INTERLEAVED-NEXT: [[TMP110:%.*]] = load i8, ptr [[TMP109]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP111:%.*]] = insertelement <16 x i8> [[TMP107]], i8 [[TMP110]], i32 2 ; CHECK-INTERLEAVED-NEXT: br label [[PRED_LOAD_CONTINUE36]] @@ -2144,7 +1955,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[TMP113:%.*]] = extractelement <16 x i1> [[TMP16]], i32 3 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP113]], label [[PRED_LOAD_IF37:%.*]], label [[PRED_LOAD_CONTINUE38:%.*]] ; CHECK-INTERLEAVED: pred.load.if37: -; CHECK-INTERLEAVED-NEXT: [[TMP114:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP3]] +; CHECK-INTERLEAVED-NEXT: [[TMP114:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP3]] ; CHECK-INTERLEAVED-NEXT: [[TMP115:%.*]] = load i8, ptr [[TMP114]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP116:%.*]] = insertelement <16 x i8> [[TMP112]], i8 [[TMP115]], i32 3 ; CHECK-INTERLEAVED-NEXT: br label [[PRED_LOAD_CONTINUE38]] @@ -2153,7 +1964,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[TMP118:%.*]] = extractelement <16 x i1> [[TMP16]], i32 4 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP118]], label [[PRED_LOAD_IF39:%.*]], label [[PRED_LOAD_CONTINUE40:%.*]] ; CHECK-INTERLEAVED: pred.load.if39: -; CHECK-INTERLEAVED-NEXT: [[TMP119:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP4]] +; CHECK-INTERLEAVED-NEXT: [[TMP119:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP4]] ; CHECK-INTERLEAVED-NEXT: [[TMP120:%.*]] = load i8, ptr [[TMP119]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP121:%.*]] = insertelement <16 x i8> [[TMP117]], i8 
[[TMP120]], i32 4 ; CHECK-INTERLEAVED-NEXT: br label [[PRED_LOAD_CONTINUE40]] @@ -2162,7 +1973,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[TMP123:%.*]] = extractelement <16 x i1> [[TMP16]], i32 5 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP123]], label [[PRED_LOAD_IF41:%.*]], label [[PRED_LOAD_CONTINUE42:%.*]] ; CHECK-INTERLEAVED: pred.load.if41: -; CHECK-INTERLEAVED-NEXT: [[TMP124:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP5]] +; CHECK-INTERLEAVED-NEXT: [[TMP124:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP5]] ; CHECK-INTERLEAVED-NEXT: [[TMP125:%.*]] = load i8, ptr [[TMP124]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP126:%.*]] = insertelement <16 x i8> [[TMP122]], i8 [[TMP125]], i32 5 ; CHECK-INTERLEAVED-NEXT: br label [[PRED_LOAD_CONTINUE42]] @@ -2171,7 +1982,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[TMP128:%.*]] = extractelement <16 x i1> [[TMP16]], i32 6 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP128]], label [[PRED_LOAD_IF43:%.*]], label [[PRED_LOAD_CONTINUE44:%.*]] ; CHECK-INTERLEAVED: pred.load.if43: -; CHECK-INTERLEAVED-NEXT: [[TMP129:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP6]] +; CHECK-INTERLEAVED-NEXT: [[TMP129:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP6]] ; CHECK-INTERLEAVED-NEXT: [[TMP130:%.*]] = load i8, ptr [[TMP129]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP131:%.*]] = insertelement <16 x i8> [[TMP127]], i8 [[TMP130]], i32 6 ; CHECK-INTERLEAVED-NEXT: br label [[PRED_LOAD_CONTINUE44]] @@ -2180,7 +1991,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[TMP133:%.*]] = extractelement <16 x i1> [[TMP16]], i32 7 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP133]], label [[PRED_LOAD_IF45:%.*]], label [[PRED_LOAD_CONTINUE46:%.*]] ; CHECK-INTERLEAVED: pred.load.if45: -; CHECK-INTERLEAVED-NEXT: [[TMP134:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP7]] +; CHECK-INTERLEAVED-NEXT: [[TMP134:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP7]] ; CHECK-INTERLEAVED-NEXT: [[TMP135:%.*]] = load i8, ptr [[TMP134]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP136:%.*]] = insertelement <16 x i8> [[TMP132]], i8 [[TMP135]], i32 7 ; CHECK-INTERLEAVED-NEXT: br label [[PRED_LOAD_CONTINUE46]] @@ -2189,7 +2000,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[TMP138:%.*]] = extractelement <16 x i1> [[TMP16]], i32 8 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP138]], label [[PRED_LOAD_IF47:%.*]], label [[PRED_LOAD_CONTINUE48:%.*]] ; CHECK-INTERLEAVED: pred.load.if47: -; CHECK-INTERLEAVED-NEXT: [[TMP139:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP8]] +; CHECK-INTERLEAVED-NEXT: [[TMP139:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP8]] ; CHECK-INTERLEAVED-NEXT: [[TMP140:%.*]] = load i8, ptr [[TMP139]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP141:%.*]] = insertelement <16 x i8> [[TMP137]], i8 [[TMP140]], i32 8 ; CHECK-INTERLEAVED-NEXT: br label [[PRED_LOAD_CONTINUE48]] @@ -2198,7 +2009,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[TMP143:%.*]] = extractelement <16 x i1> [[TMP16]], i32 9 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP143]], label [[PRED_LOAD_IF49:%.*]], label [[PRED_LOAD_CONTINUE50:%.*]] ; CHECK-INTERLEAVED: pred.load.if49: -; CHECK-INTERLEAVED-NEXT: [[TMP144:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP9]] +; CHECK-INTERLEAVED-NEXT: [[TMP144:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 
[[TMP9]] ; CHECK-INTERLEAVED-NEXT: [[TMP145:%.*]] = load i8, ptr [[TMP144]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP146:%.*]] = insertelement <16 x i8> [[TMP142]], i8 [[TMP145]], i32 9 ; CHECK-INTERLEAVED-NEXT: br label [[PRED_LOAD_CONTINUE50]] @@ -2207,7 +2018,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[TMP148:%.*]] = extractelement <16 x i1> [[TMP16]], i32 10 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP148]], label [[PRED_LOAD_IF51:%.*]], label [[PRED_LOAD_CONTINUE52:%.*]] ; CHECK-INTERLEAVED: pred.load.if51: -; CHECK-INTERLEAVED-NEXT: [[TMP149:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP10]] +; CHECK-INTERLEAVED-NEXT: [[TMP149:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP10]] ; CHECK-INTERLEAVED-NEXT: [[TMP150:%.*]] = load i8, ptr [[TMP149]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP151:%.*]] = insertelement <16 x i8> [[TMP147]], i8 [[TMP150]], i32 10 ; CHECK-INTERLEAVED-NEXT: br label [[PRED_LOAD_CONTINUE52]] @@ -2216,7 +2027,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[TMP153:%.*]] = extractelement <16 x i1> [[TMP16]], i32 11 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP153]], label [[PRED_LOAD_IF53:%.*]], label [[PRED_LOAD_CONTINUE54:%.*]] ; CHECK-INTERLEAVED: pred.load.if53: -; CHECK-INTERLEAVED-NEXT: [[TMP154:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP11]] +; CHECK-INTERLEAVED-NEXT: [[TMP154:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP11]] ; CHECK-INTERLEAVED-NEXT: [[TMP155:%.*]] = load i8, ptr [[TMP154]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP156:%.*]] = insertelement <16 x i8> [[TMP152]], i8 [[TMP155]], i32 11 ; CHECK-INTERLEAVED-NEXT: br label [[PRED_LOAD_CONTINUE54]] @@ -2225,7 +2036,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[TMP158:%.*]] = extractelement <16 x i1> [[TMP16]], i32 12 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP158]], label [[PRED_LOAD_IF55:%.*]], label [[PRED_LOAD_CONTINUE56:%.*]] ; CHECK-INTERLEAVED: pred.load.if55: -; CHECK-INTERLEAVED-NEXT: [[TMP159:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP12]] +; CHECK-INTERLEAVED-NEXT: [[TMP159:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP12]] ; CHECK-INTERLEAVED-NEXT: [[TMP160:%.*]] = load i8, ptr [[TMP159]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP161:%.*]] = insertelement <16 x i8> [[TMP157]], i8 [[TMP160]], i32 12 ; CHECK-INTERLEAVED-NEXT: br label [[PRED_LOAD_CONTINUE56]] @@ -2234,7 +2045,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[TMP163:%.*]] = extractelement <16 x i1> [[TMP16]], i32 13 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP163]], label [[PRED_LOAD_IF57:%.*]], label [[PRED_LOAD_CONTINUE58:%.*]] ; CHECK-INTERLEAVED: pred.load.if57: -; CHECK-INTERLEAVED-NEXT: [[TMP164:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP13]] +; CHECK-INTERLEAVED-NEXT: [[TMP164:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP13]] ; CHECK-INTERLEAVED-NEXT: [[TMP165:%.*]] = load i8, ptr [[TMP164]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP166:%.*]] = insertelement <16 x i8> [[TMP162]], i8 [[TMP165]], i32 13 ; CHECK-INTERLEAVED-NEXT: br label [[PRED_LOAD_CONTINUE58]] @@ -2243,7 +2054,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[TMP168:%.*]] = extractelement <16 x i1> [[TMP16]], i32 14 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP168]], label [[PRED_LOAD_IF59:%.*]], label [[PRED_LOAD_CONTINUE60:%.*]] ; CHECK-INTERLEAVED: 
pred.load.if59: -; CHECK-INTERLEAVED-NEXT: [[TMP169:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP14]] +; CHECK-INTERLEAVED-NEXT: [[TMP169:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP14]] ; CHECK-INTERLEAVED-NEXT: [[TMP170:%.*]] = load i8, ptr [[TMP169]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP171:%.*]] = insertelement <16 x i8> [[TMP167]], i8 [[TMP170]], i32 14 ; CHECK-INTERLEAVED-NEXT: br label [[PRED_LOAD_CONTINUE60]] @@ -2252,7 +2063,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[TMP173:%.*]] = extractelement <16 x i1> [[TMP16]], i32 15 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP173]], label [[PRED_LOAD_IF61:%.*]], label [[PRED_LOAD_CONTINUE62]] ; CHECK-INTERLEAVED: pred.load.if61: -; CHECK-INTERLEAVED-NEXT: [[TMP174:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP15]] +; CHECK-INTERLEAVED-NEXT: [[TMP174:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP15]] ; CHECK-INTERLEAVED-NEXT: [[TMP175:%.*]] = load i8, ptr [[TMP174]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP176:%.*]] = insertelement <16 x i8> [[TMP172]], i8 [[TMP175]], i32 15 ; CHECK-INTERLEAVED-NEXT: br label [[PRED_LOAD_CONTINUE62]] @@ -2260,38 +2071,38 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[TMP177:%.*]] = phi <16 x i8> [ [[TMP172]], [[PRED_LOAD_CONTINUE60]] ], [ [[TMP176]], [[PRED_LOAD_IF61]] ] ; CHECK-INTERLEAVED-NEXT: [[TMP178:%.*]] = sext <16 x i8> [[TMP177]] to <16 x i32> ; CHECK-INTERLEAVED-NEXT: [[TMP179:%.*]] = mul nsw <16 x i32> [[TMP178]], [[TMP97]] -; CHECK-INTERLEAVED-NEXT: [[TMP180]] = add <16 x i32> [[TMP179]], [[VEC_PHI]] -; CHECK-INTERLEAVED-NEXT: [[TMP181:%.*]] = select <16 x i1> [[TMP16]], <16 x i32> [[TMP180]], <16 x i32> [[VEC_PHI]] +; CHECK-INTERLEAVED-NEXT: [[TMP180:%.*]] = select <16 x i1> [[TMP16]], <16 x i32> [[TMP179]], <16 x i32> zeroinitializer +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.experimental.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP180]]) ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 16 ; CHECK-INTERLEAVED-NEXT: [[VEC_IND_NEXT]] = add <16 x i64> [[VEC_IND]], splat (i64 16) -; CHECK-INTERLEAVED-NEXT: [[TMP182:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP182]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: [[TMP181:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP181]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: -; CHECK-INTERLEAVED-NEXT: [[TMP183:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP181]]) +; CHECK-INTERLEAVED-NEXT: [[TMP182:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]]) ; CHECK-INTERLEAVED-NEXT: br i1 true, label [[EXIT:%.*]], label [[SCALAR_PH]] ; CHECK-INTERLEAVED: scalar.ph: ; CHECK-INTERLEAVED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] -; CHECK-INTERLEAVED-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP183]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ] +; CHECK-INTERLEAVED-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP182]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ] ; CHECK-INTERLEAVED-NEXT: br label [[FOR_BODY:%.*]] ; CHECK-INTERLEAVED: for.body: ; CHECK-INTERLEAVED-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] ; 
CHECK-INTERLEAVED-NEXT: [[ACCUM:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[GEP_A:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[IV]] +; CHECK-INTERLEAVED-NEXT: [[GEP_A:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV]] ; CHECK-INTERLEAVED-NEXT: [[LOAD_A:%.*]] = load i8, ptr [[GEP_A]], align 1 ; CHECK-INTERLEAVED-NEXT: [[EXT_A:%.*]] = sext i8 [[LOAD_A]] to i32 -; CHECK-INTERLEAVED-NEXT: [[GEP_A2:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV]] -; CHECK-INTERLEAVED-NEXT: [[LOAD_B:%.*]] = load i8, ptr [[GEP_A2]], align 1 +; CHECK-INTERLEAVED-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[IV]] +; CHECK-INTERLEAVED-NEXT: [[LOAD_B:%.*]] = load i8, ptr [[GEP_B]], align 1 ; CHECK-INTERLEAVED-NEXT: [[EXT_B:%.*]] = sext i8 [[LOAD_B]] to i32 ; CHECK-INTERLEAVED-NEXT: [[MUL:%.*]] = mul nsw i32 [[EXT_B]], [[EXT_A]] ; CHECK-INTERLEAVED-NEXT: [[ADD]] = add nsw i32 [[MUL]], [[ACCUM]] ; CHECK-INTERLEAVED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-INTERLEAVED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; CHECK-INTERLEAVED: exit: -; CHECK-INTERLEAVED-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP183]], [[MIDDLE_BLOCK]] ] +; CHECK-INTERLEAVED-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP182]], [[MIDDLE_BLOCK]] ] ; CHECK-INTERLEAVED-NEXT: ret i32 [[ADD_LCSSA]] ; -; CHECK-MAXBW-LABEL: define i32 @not_dotp_predicated_pragma( +; CHECK-MAXBW-LABEL: define i32 @dotp_predicated( ; CHECK-MAXBW-SAME: i64 [[N:%.*]], ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-MAXBW-NEXT: entry: ; CHECK-MAXBW-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] @@ -2306,7 +2117,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-MAXBW: vector.body: ; CHECK-MAXBW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_LOAD_CONTINUE62:%.*]] ] ; CHECK-MAXBW-NEXT: [[VEC_IND:%.*]] = phi <16 x i64> [ , [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[PRED_LOAD_CONTINUE62]] ] -; CHECK-MAXBW-NEXT: [[VEC_PHI:%.*]] = phi <16 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP180:%.*]], [[PRED_LOAD_CONTINUE62]] ] +; CHECK-MAXBW-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[PRED_LOAD_CONTINUE62]] ] ; CHECK-MAXBW-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0 ; CHECK-MAXBW-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1 ; CHECK-MAXBW-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 2 @@ -2327,7 +2138,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-MAXBW-NEXT: [[TMP17:%.*]] = extractelement <16 x i1> [[TMP16]], i32 0 ; CHECK-MAXBW-NEXT: br i1 [[TMP17]], label [[PRED_LOAD_IF:%.*]], label [[PRED_LOAD_CONTINUE:%.*]] ; CHECK-MAXBW: pred.load.if: -; CHECK-MAXBW-NEXT: [[TMP18:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP0]] +; CHECK-MAXBW-NEXT: [[TMP18:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP0]] ; CHECK-MAXBW-NEXT: [[TMP19:%.*]] = load i8, ptr [[TMP18]], align 1 ; CHECK-MAXBW-NEXT: [[TMP20:%.*]] = insertelement <16 x i8> poison, i8 [[TMP19]], i32 0 ; CHECK-MAXBW-NEXT: br label [[PRED_LOAD_CONTINUE]] @@ -2336,7 +2147,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; 
CHECK-MAXBW-NEXT: [[TMP22:%.*]] = extractelement <16 x i1> [[TMP16]], i32 1 ; CHECK-MAXBW-NEXT: br i1 [[TMP22]], label [[PRED_LOAD_IF1:%.*]], label [[PRED_LOAD_CONTINUE2:%.*]] ; CHECK-MAXBW: pred.load.if1: -; CHECK-MAXBW-NEXT: [[TMP23:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP1]] +; CHECK-MAXBW-NEXT: [[TMP23:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP1]] ; CHECK-MAXBW-NEXT: [[TMP24:%.*]] = load i8, ptr [[TMP23]], align 1 ; CHECK-MAXBW-NEXT: [[TMP25:%.*]] = insertelement <16 x i8> [[TMP21]], i8 [[TMP24]], i32 1 ; CHECK-MAXBW-NEXT: br label [[PRED_LOAD_CONTINUE2]] @@ -2345,7 +2156,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-MAXBW-NEXT: [[TMP27:%.*]] = extractelement <16 x i1> [[TMP16]], i32 2 ; CHECK-MAXBW-NEXT: br i1 [[TMP27]], label [[PRED_LOAD_IF3:%.*]], label [[PRED_LOAD_CONTINUE4:%.*]] ; CHECK-MAXBW: pred.load.if3: -; CHECK-MAXBW-NEXT: [[TMP28:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP2]] +; CHECK-MAXBW-NEXT: [[TMP28:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP2]] ; CHECK-MAXBW-NEXT: [[TMP29:%.*]] = load i8, ptr [[TMP28]], align 1 ; CHECK-MAXBW-NEXT: [[TMP30:%.*]] = insertelement <16 x i8> [[TMP26]], i8 [[TMP29]], i32 2 ; CHECK-MAXBW-NEXT: br label [[PRED_LOAD_CONTINUE4]] @@ -2354,7 +2165,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-MAXBW-NEXT: [[TMP32:%.*]] = extractelement <16 x i1> [[TMP16]], i32 3 ; CHECK-MAXBW-NEXT: br i1 [[TMP32]], label [[PRED_LOAD_IF5:%.*]], label [[PRED_LOAD_CONTINUE6:%.*]] ; CHECK-MAXBW: pred.load.if5: -; CHECK-MAXBW-NEXT: [[TMP33:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP3]] +; CHECK-MAXBW-NEXT: [[TMP33:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP3]] ; CHECK-MAXBW-NEXT: [[TMP34:%.*]] = load i8, ptr [[TMP33]], align 1 ; CHECK-MAXBW-NEXT: [[TMP35:%.*]] = insertelement <16 x i8> [[TMP31]], i8 [[TMP34]], i32 3 ; CHECK-MAXBW-NEXT: br label [[PRED_LOAD_CONTINUE6]] @@ -2363,7 +2174,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-MAXBW-NEXT: [[TMP37:%.*]] = extractelement <16 x i1> [[TMP16]], i32 4 ; CHECK-MAXBW-NEXT: br i1 [[TMP37]], label [[PRED_LOAD_IF7:%.*]], label [[PRED_LOAD_CONTINUE8:%.*]] ; CHECK-MAXBW: pred.load.if7: -; CHECK-MAXBW-NEXT: [[TMP38:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP4]] +; CHECK-MAXBW-NEXT: [[TMP38:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP4]] ; CHECK-MAXBW-NEXT: [[TMP39:%.*]] = load i8, ptr [[TMP38]], align 1 ; CHECK-MAXBW-NEXT: [[TMP40:%.*]] = insertelement <16 x i8> [[TMP36]], i8 [[TMP39]], i32 4 ; CHECK-MAXBW-NEXT: br label [[PRED_LOAD_CONTINUE8]] @@ -2372,7 +2183,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-MAXBW-NEXT: [[TMP42:%.*]] = extractelement <16 x i1> [[TMP16]], i32 5 ; CHECK-MAXBW-NEXT: br i1 [[TMP42]], label [[PRED_LOAD_IF9:%.*]], label [[PRED_LOAD_CONTINUE10:%.*]] ; CHECK-MAXBW: pred.load.if9: -; CHECK-MAXBW-NEXT: [[TMP43:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP5]] +; CHECK-MAXBW-NEXT: [[TMP43:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP5]] ; CHECK-MAXBW-NEXT: [[TMP44:%.*]] = load i8, ptr [[TMP43]], align 1 ; CHECK-MAXBW-NEXT: [[TMP45:%.*]] = insertelement <16 x i8> [[TMP41]], i8 [[TMP44]], i32 5 ; CHECK-MAXBW-NEXT: br label [[PRED_LOAD_CONTINUE10]] @@ -2381,7 +2192,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-MAXBW-NEXT: [[TMP47:%.*]] = extractelement <16 x i1> [[TMP16]], i32 6 ; CHECK-MAXBW-NEXT: br i1 [[TMP47]], label 
[[PRED_LOAD_IF11:%.*]], label [[PRED_LOAD_CONTINUE12:%.*]] ; CHECK-MAXBW: pred.load.if11: -; CHECK-MAXBW-NEXT: [[TMP48:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP6]] +; CHECK-MAXBW-NEXT: [[TMP48:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP6]] ; CHECK-MAXBW-NEXT: [[TMP49:%.*]] = load i8, ptr [[TMP48]], align 1 ; CHECK-MAXBW-NEXT: [[TMP50:%.*]] = insertelement <16 x i8> [[TMP46]], i8 [[TMP49]], i32 6 ; CHECK-MAXBW-NEXT: br label [[PRED_LOAD_CONTINUE12]] @@ -2390,7 +2201,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-MAXBW-NEXT: [[TMP52:%.*]] = extractelement <16 x i1> [[TMP16]], i32 7 ; CHECK-MAXBW-NEXT: br i1 [[TMP52]], label [[PRED_LOAD_IF13:%.*]], label [[PRED_LOAD_CONTINUE14:%.*]] ; CHECK-MAXBW: pred.load.if13: -; CHECK-MAXBW-NEXT: [[TMP53:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP7]] +; CHECK-MAXBW-NEXT: [[TMP53:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP7]] ; CHECK-MAXBW-NEXT: [[TMP54:%.*]] = load i8, ptr [[TMP53]], align 1 ; CHECK-MAXBW-NEXT: [[TMP55:%.*]] = insertelement <16 x i8> [[TMP51]], i8 [[TMP54]], i32 7 ; CHECK-MAXBW-NEXT: br label [[PRED_LOAD_CONTINUE14]] @@ -2399,7 +2210,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-MAXBW-NEXT: [[TMP57:%.*]] = extractelement <16 x i1> [[TMP16]], i32 8 ; CHECK-MAXBW-NEXT: br i1 [[TMP57]], label [[PRED_LOAD_IF15:%.*]], label [[PRED_LOAD_CONTINUE16:%.*]] ; CHECK-MAXBW: pred.load.if15: -; CHECK-MAXBW-NEXT: [[TMP58:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP8]] +; CHECK-MAXBW-NEXT: [[TMP58:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP8]] ; CHECK-MAXBW-NEXT: [[TMP59:%.*]] = load i8, ptr [[TMP58]], align 1 ; CHECK-MAXBW-NEXT: [[TMP60:%.*]] = insertelement <16 x i8> [[TMP56]], i8 [[TMP59]], i32 8 ; CHECK-MAXBW-NEXT: br label [[PRED_LOAD_CONTINUE16]] @@ -2408,7 +2219,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-MAXBW-NEXT: [[TMP62:%.*]] = extractelement <16 x i1> [[TMP16]], i32 9 ; CHECK-MAXBW-NEXT: br i1 [[TMP62]], label [[PRED_LOAD_IF17:%.*]], label [[PRED_LOAD_CONTINUE18:%.*]] ; CHECK-MAXBW: pred.load.if17: -; CHECK-MAXBW-NEXT: [[TMP63:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP9]] +; CHECK-MAXBW-NEXT: [[TMP63:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP9]] ; CHECK-MAXBW-NEXT: [[TMP64:%.*]] = load i8, ptr [[TMP63]], align 1 ; CHECK-MAXBW-NEXT: [[TMP65:%.*]] = insertelement <16 x i8> [[TMP61]], i8 [[TMP64]], i32 9 ; CHECK-MAXBW-NEXT: br label [[PRED_LOAD_CONTINUE18]] @@ -2417,7 +2228,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-MAXBW-NEXT: [[TMP67:%.*]] = extractelement <16 x i1> [[TMP16]], i32 10 ; CHECK-MAXBW-NEXT: br i1 [[TMP67]], label [[PRED_LOAD_IF19:%.*]], label [[PRED_LOAD_CONTINUE20:%.*]] ; CHECK-MAXBW: pred.load.if19: -; CHECK-MAXBW-NEXT: [[TMP68:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP10]] +; CHECK-MAXBW-NEXT: [[TMP68:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP10]] ; CHECK-MAXBW-NEXT: [[TMP69:%.*]] = load i8, ptr [[TMP68]], align 1 ; CHECK-MAXBW-NEXT: [[TMP70:%.*]] = insertelement <16 x i8> [[TMP66]], i8 [[TMP69]], i32 10 ; CHECK-MAXBW-NEXT: br label [[PRED_LOAD_CONTINUE20]] @@ -2426,7 +2237,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-MAXBW-NEXT: [[TMP72:%.*]] = extractelement <16 x i1> [[TMP16]], i32 11 ; CHECK-MAXBW-NEXT: br i1 [[TMP72]], label [[PRED_LOAD_IF21:%.*]], label [[PRED_LOAD_CONTINUE22:%.*]] ; CHECK-MAXBW: pred.load.if21: -; CHECK-MAXBW-NEXT: 
[[TMP73:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP11]] +; CHECK-MAXBW-NEXT: [[TMP73:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP11]] ; CHECK-MAXBW-NEXT: [[TMP74:%.*]] = load i8, ptr [[TMP73]], align 1 ; CHECK-MAXBW-NEXT: [[TMP75:%.*]] = insertelement <16 x i8> [[TMP71]], i8 [[TMP74]], i32 11 ; CHECK-MAXBW-NEXT: br label [[PRED_LOAD_CONTINUE22]] @@ -2435,7 +2246,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-MAXBW-NEXT: [[TMP77:%.*]] = extractelement <16 x i1> [[TMP16]], i32 12 ; CHECK-MAXBW-NEXT: br i1 [[TMP77]], label [[PRED_LOAD_IF23:%.*]], label [[PRED_LOAD_CONTINUE24:%.*]] ; CHECK-MAXBW: pred.load.if23: -; CHECK-MAXBW-NEXT: [[TMP78:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP12]] +; CHECK-MAXBW-NEXT: [[TMP78:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP12]] ; CHECK-MAXBW-NEXT: [[TMP79:%.*]] = load i8, ptr [[TMP78]], align 1 ; CHECK-MAXBW-NEXT: [[TMP80:%.*]] = insertelement <16 x i8> [[TMP76]], i8 [[TMP79]], i32 12 ; CHECK-MAXBW-NEXT: br label [[PRED_LOAD_CONTINUE24]] @@ -2444,7 +2255,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-MAXBW-NEXT: [[TMP82:%.*]] = extractelement <16 x i1> [[TMP16]], i32 13 ; CHECK-MAXBW-NEXT: br i1 [[TMP82]], label [[PRED_LOAD_IF25:%.*]], label [[PRED_LOAD_CONTINUE26:%.*]] ; CHECK-MAXBW: pred.load.if25: -; CHECK-MAXBW-NEXT: [[TMP83:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP13]] +; CHECK-MAXBW-NEXT: [[TMP83:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP13]] ; CHECK-MAXBW-NEXT: [[TMP84:%.*]] = load i8, ptr [[TMP83]], align 1 ; CHECK-MAXBW-NEXT: [[TMP85:%.*]] = insertelement <16 x i8> [[TMP81]], i8 [[TMP84]], i32 13 ; CHECK-MAXBW-NEXT: br label [[PRED_LOAD_CONTINUE26]] @@ -2453,7 +2264,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-MAXBW-NEXT: [[TMP87:%.*]] = extractelement <16 x i1> [[TMP16]], i32 14 ; CHECK-MAXBW-NEXT: br i1 [[TMP87]], label [[PRED_LOAD_IF27:%.*]], label [[PRED_LOAD_CONTINUE28:%.*]] ; CHECK-MAXBW: pred.load.if27: -; CHECK-MAXBW-NEXT: [[TMP88:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP14]] +; CHECK-MAXBW-NEXT: [[TMP88:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP14]] ; CHECK-MAXBW-NEXT: [[TMP89:%.*]] = load i8, ptr [[TMP88]], align 1 ; CHECK-MAXBW-NEXT: [[TMP90:%.*]] = insertelement <16 x i8> [[TMP86]], i8 [[TMP89]], i32 14 ; CHECK-MAXBW-NEXT: br label [[PRED_LOAD_CONTINUE28]] @@ -2462,7 +2273,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-MAXBW-NEXT: [[TMP92:%.*]] = extractelement <16 x i1> [[TMP16]], i32 15 ; CHECK-MAXBW-NEXT: br i1 [[TMP92]], label [[PRED_LOAD_IF29:%.*]], label [[PRED_LOAD_CONTINUE30:%.*]] ; CHECK-MAXBW: pred.load.if29: -; CHECK-MAXBW-NEXT: [[TMP93:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP15]] +; CHECK-MAXBW-NEXT: [[TMP93:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP15]] ; CHECK-MAXBW-NEXT: [[TMP94:%.*]] = load i8, ptr [[TMP93]], align 1 ; CHECK-MAXBW-NEXT: [[TMP95:%.*]] = insertelement <16 x i8> [[TMP91]], i8 [[TMP94]], i32 15 ; CHECK-MAXBW-NEXT: br label [[PRED_LOAD_CONTINUE30]] @@ -2472,7 +2283,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-MAXBW-NEXT: [[TMP98:%.*]] = extractelement <16 x i1> [[TMP16]], i32 0 ; CHECK-MAXBW-NEXT: br i1 [[TMP98]], label [[PRED_LOAD_IF31:%.*]], label [[PRED_LOAD_CONTINUE32:%.*]] ; CHECK-MAXBW: pred.load.if31: -; CHECK-MAXBW-NEXT: [[TMP99:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP0]] +; CHECK-MAXBW-NEXT: 
[[TMP99:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP0]] ; CHECK-MAXBW-NEXT: [[TMP100:%.*]] = load i8, ptr [[TMP99]], align 1 ; CHECK-MAXBW-NEXT: [[TMP101:%.*]] = insertelement <16 x i8> poison, i8 [[TMP100]], i32 0 ; CHECK-MAXBW-NEXT: br label [[PRED_LOAD_CONTINUE32]] @@ -2481,7 +2292,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-MAXBW-NEXT: [[TMP103:%.*]] = extractelement <16 x i1> [[TMP16]], i32 1 ; CHECK-MAXBW-NEXT: br i1 [[TMP103]], label [[PRED_LOAD_IF33:%.*]], label [[PRED_LOAD_CONTINUE34:%.*]] ; CHECK-MAXBW: pred.load.if33: -; CHECK-MAXBW-NEXT: [[TMP104:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP1]] +; CHECK-MAXBW-NEXT: [[TMP104:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP1]] ; CHECK-MAXBW-NEXT: [[TMP105:%.*]] = load i8, ptr [[TMP104]], align 1 ; CHECK-MAXBW-NEXT: [[TMP106:%.*]] = insertelement <16 x i8> [[TMP102]], i8 [[TMP105]], i32 1 ; CHECK-MAXBW-NEXT: br label [[PRED_LOAD_CONTINUE34]] @@ -2490,7 +2301,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-MAXBW-NEXT: [[TMP108:%.*]] = extractelement <16 x i1> [[TMP16]], i32 2 ; CHECK-MAXBW-NEXT: br i1 [[TMP108]], label [[PRED_LOAD_IF35:%.*]], label [[PRED_LOAD_CONTINUE36:%.*]] ; CHECK-MAXBW: pred.load.if35: -; CHECK-MAXBW-NEXT: [[TMP109:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP2]] +; CHECK-MAXBW-NEXT: [[TMP109:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP2]] ; CHECK-MAXBW-NEXT: [[TMP110:%.*]] = load i8, ptr [[TMP109]], align 1 ; CHECK-MAXBW-NEXT: [[TMP111:%.*]] = insertelement <16 x i8> [[TMP107]], i8 [[TMP110]], i32 2 ; CHECK-MAXBW-NEXT: br label [[PRED_LOAD_CONTINUE36]] @@ -2499,7 +2310,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-MAXBW-NEXT: [[TMP113:%.*]] = extractelement <16 x i1> [[TMP16]], i32 3 ; CHECK-MAXBW-NEXT: br i1 [[TMP113]], label [[PRED_LOAD_IF37:%.*]], label [[PRED_LOAD_CONTINUE38:%.*]] ; CHECK-MAXBW: pred.load.if37: -; CHECK-MAXBW-NEXT: [[TMP114:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP3]] +; CHECK-MAXBW-NEXT: [[TMP114:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP3]] ; CHECK-MAXBW-NEXT: [[TMP115:%.*]] = load i8, ptr [[TMP114]], align 1 ; CHECK-MAXBW-NEXT: [[TMP116:%.*]] = insertelement <16 x i8> [[TMP112]], i8 [[TMP115]], i32 3 ; CHECK-MAXBW-NEXT: br label [[PRED_LOAD_CONTINUE38]] @@ -2508,7 +2319,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-MAXBW-NEXT: [[TMP118:%.*]] = extractelement <16 x i1> [[TMP16]], i32 4 ; CHECK-MAXBW-NEXT: br i1 [[TMP118]], label [[PRED_LOAD_IF39:%.*]], label [[PRED_LOAD_CONTINUE40:%.*]] ; CHECK-MAXBW: pred.load.if39: -; CHECK-MAXBW-NEXT: [[TMP119:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP4]] +; CHECK-MAXBW-NEXT: [[TMP119:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP4]] ; CHECK-MAXBW-NEXT: [[TMP120:%.*]] = load i8, ptr [[TMP119]], align 1 ; CHECK-MAXBW-NEXT: [[TMP121:%.*]] = insertelement <16 x i8> [[TMP117]], i8 [[TMP120]], i32 4 ; CHECK-MAXBW-NEXT: br label [[PRED_LOAD_CONTINUE40]] @@ -2517,7 +2328,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-MAXBW-NEXT: [[TMP123:%.*]] = extractelement <16 x i1> [[TMP16]], i32 5 ; CHECK-MAXBW-NEXT: br i1 [[TMP123]], label [[PRED_LOAD_IF41:%.*]], label [[PRED_LOAD_CONTINUE42:%.*]] ; CHECK-MAXBW: pred.load.if41: -; CHECK-MAXBW-NEXT: [[TMP124:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP5]] +; CHECK-MAXBW-NEXT: [[TMP124:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP5]] ; 
CHECK-MAXBW-NEXT: [[TMP125:%.*]] = load i8, ptr [[TMP124]], align 1 ; CHECK-MAXBW-NEXT: [[TMP126:%.*]] = insertelement <16 x i8> [[TMP122]], i8 [[TMP125]], i32 5 ; CHECK-MAXBW-NEXT: br label [[PRED_LOAD_CONTINUE42]] @@ -2526,7 +2337,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-MAXBW-NEXT: [[TMP128:%.*]] = extractelement <16 x i1> [[TMP16]], i32 6 ; CHECK-MAXBW-NEXT: br i1 [[TMP128]], label [[PRED_LOAD_IF43:%.*]], label [[PRED_LOAD_CONTINUE44:%.*]] ; CHECK-MAXBW: pred.load.if43: -; CHECK-MAXBW-NEXT: [[TMP129:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP6]] +; CHECK-MAXBW-NEXT: [[TMP129:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP6]] ; CHECK-MAXBW-NEXT: [[TMP130:%.*]] = load i8, ptr [[TMP129]], align 1 ; CHECK-MAXBW-NEXT: [[TMP131:%.*]] = insertelement <16 x i8> [[TMP127]], i8 [[TMP130]], i32 6 ; CHECK-MAXBW-NEXT: br label [[PRED_LOAD_CONTINUE44]] @@ -2535,7 +2346,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-MAXBW-NEXT: [[TMP133:%.*]] = extractelement <16 x i1> [[TMP16]], i32 7 ; CHECK-MAXBW-NEXT: br i1 [[TMP133]], label [[PRED_LOAD_IF45:%.*]], label [[PRED_LOAD_CONTINUE46:%.*]] ; CHECK-MAXBW: pred.load.if45: -; CHECK-MAXBW-NEXT: [[TMP134:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP7]] +; CHECK-MAXBW-NEXT: [[TMP134:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP7]] ; CHECK-MAXBW-NEXT: [[TMP135:%.*]] = load i8, ptr [[TMP134]], align 1 ; CHECK-MAXBW-NEXT: [[TMP136:%.*]] = insertelement <16 x i8> [[TMP132]], i8 [[TMP135]], i32 7 ; CHECK-MAXBW-NEXT: br label [[PRED_LOAD_CONTINUE46]] @@ -2544,7 +2355,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-MAXBW-NEXT: [[TMP138:%.*]] = extractelement <16 x i1> [[TMP16]], i32 8 ; CHECK-MAXBW-NEXT: br i1 [[TMP138]], label [[PRED_LOAD_IF47:%.*]], label [[PRED_LOAD_CONTINUE48:%.*]] ; CHECK-MAXBW: pred.load.if47: -; CHECK-MAXBW-NEXT: [[TMP139:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP8]] +; CHECK-MAXBW-NEXT: [[TMP139:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP8]] ; CHECK-MAXBW-NEXT: [[TMP140:%.*]] = load i8, ptr [[TMP139]], align 1 ; CHECK-MAXBW-NEXT: [[TMP141:%.*]] = insertelement <16 x i8> [[TMP137]], i8 [[TMP140]], i32 8 ; CHECK-MAXBW-NEXT: br label [[PRED_LOAD_CONTINUE48]] @@ -2553,7 +2364,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-MAXBW-NEXT: [[TMP143:%.*]] = extractelement <16 x i1> [[TMP16]], i32 9 ; CHECK-MAXBW-NEXT: br i1 [[TMP143]], label [[PRED_LOAD_IF49:%.*]], label [[PRED_LOAD_CONTINUE50:%.*]] ; CHECK-MAXBW: pred.load.if49: -; CHECK-MAXBW-NEXT: [[TMP144:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP9]] +; CHECK-MAXBW-NEXT: [[TMP144:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP9]] ; CHECK-MAXBW-NEXT: [[TMP145:%.*]] = load i8, ptr [[TMP144]], align 1 ; CHECK-MAXBW-NEXT: [[TMP146:%.*]] = insertelement <16 x i8> [[TMP142]], i8 [[TMP145]], i32 9 ; CHECK-MAXBW-NEXT: br label [[PRED_LOAD_CONTINUE50]] @@ -2562,7 +2373,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-MAXBW-NEXT: [[TMP148:%.*]] = extractelement <16 x i1> [[TMP16]], i32 10 ; CHECK-MAXBW-NEXT: br i1 [[TMP148]], label [[PRED_LOAD_IF51:%.*]], label [[PRED_LOAD_CONTINUE52:%.*]] ; CHECK-MAXBW: pred.load.if51: -; CHECK-MAXBW-NEXT: [[TMP149:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP10]] +; CHECK-MAXBW-NEXT: [[TMP149:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP10]] ; CHECK-MAXBW-NEXT: [[TMP150:%.*]] = load i8, ptr [[TMP149]], align 1 ; 
CHECK-MAXBW-NEXT: [[TMP151:%.*]] = insertelement <16 x i8> [[TMP147]], i8 [[TMP150]], i32 10 ; CHECK-MAXBW-NEXT: br label [[PRED_LOAD_CONTINUE52]] @@ -2571,7 +2382,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-MAXBW-NEXT: [[TMP153:%.*]] = extractelement <16 x i1> [[TMP16]], i32 11 ; CHECK-MAXBW-NEXT: br i1 [[TMP153]], label [[PRED_LOAD_IF53:%.*]], label [[PRED_LOAD_CONTINUE54:%.*]] ; CHECK-MAXBW: pred.load.if53: -; CHECK-MAXBW-NEXT: [[TMP154:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP11]] +; CHECK-MAXBW-NEXT: [[TMP154:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP11]] ; CHECK-MAXBW-NEXT: [[TMP155:%.*]] = load i8, ptr [[TMP154]], align 1 ; CHECK-MAXBW-NEXT: [[TMP156:%.*]] = insertelement <16 x i8> [[TMP152]], i8 [[TMP155]], i32 11 ; CHECK-MAXBW-NEXT: br label [[PRED_LOAD_CONTINUE54]] @@ -2580,7 +2391,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-MAXBW-NEXT: [[TMP158:%.*]] = extractelement <16 x i1> [[TMP16]], i32 12 ; CHECK-MAXBW-NEXT: br i1 [[TMP158]], label [[PRED_LOAD_IF55:%.*]], label [[PRED_LOAD_CONTINUE56:%.*]] ; CHECK-MAXBW: pred.load.if55: -; CHECK-MAXBW-NEXT: [[TMP159:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP12]] +; CHECK-MAXBW-NEXT: [[TMP159:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP12]] ; CHECK-MAXBW-NEXT: [[TMP160:%.*]] = load i8, ptr [[TMP159]], align 1 ; CHECK-MAXBW-NEXT: [[TMP161:%.*]] = insertelement <16 x i8> [[TMP157]], i8 [[TMP160]], i32 12 ; CHECK-MAXBW-NEXT: br label [[PRED_LOAD_CONTINUE56]] @@ -2589,7 +2400,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-MAXBW-NEXT: [[TMP163:%.*]] = extractelement <16 x i1> [[TMP16]], i32 13 ; CHECK-MAXBW-NEXT: br i1 [[TMP163]], label [[PRED_LOAD_IF57:%.*]], label [[PRED_LOAD_CONTINUE58:%.*]] ; CHECK-MAXBW: pred.load.if57: -; CHECK-MAXBW-NEXT: [[TMP164:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP13]] +; CHECK-MAXBW-NEXT: [[TMP164:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP13]] ; CHECK-MAXBW-NEXT: [[TMP165:%.*]] = load i8, ptr [[TMP164]], align 1 ; CHECK-MAXBW-NEXT: [[TMP166:%.*]] = insertelement <16 x i8> [[TMP162]], i8 [[TMP165]], i32 13 ; CHECK-MAXBW-NEXT: br label [[PRED_LOAD_CONTINUE58]] @@ -2598,7 +2409,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-MAXBW-NEXT: [[TMP168:%.*]] = extractelement <16 x i1> [[TMP16]], i32 14 ; CHECK-MAXBW-NEXT: br i1 [[TMP168]], label [[PRED_LOAD_IF59:%.*]], label [[PRED_LOAD_CONTINUE60:%.*]] ; CHECK-MAXBW: pred.load.if59: -; CHECK-MAXBW-NEXT: [[TMP169:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP14]] +; CHECK-MAXBW-NEXT: [[TMP169:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP14]] ; CHECK-MAXBW-NEXT: [[TMP170:%.*]] = load i8, ptr [[TMP169]], align 1 ; CHECK-MAXBW-NEXT: [[TMP171:%.*]] = insertelement <16 x i8> [[TMP167]], i8 [[TMP170]], i32 14 ; CHECK-MAXBW-NEXT: br label [[PRED_LOAD_CONTINUE60]] @@ -2607,7 +2418,7 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-MAXBW-NEXT: [[TMP173:%.*]] = extractelement <16 x i1> [[TMP16]], i32 15 ; CHECK-MAXBW-NEXT: br i1 [[TMP173]], label [[PRED_LOAD_IF61:%.*]], label [[PRED_LOAD_CONTINUE62]] ; CHECK-MAXBW: pred.load.if61: -; CHECK-MAXBW-NEXT: [[TMP174:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP15]] +; CHECK-MAXBW-NEXT: [[TMP174:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP15]] ; CHECK-MAXBW-NEXT: [[TMP175:%.*]] = load i8, ptr [[TMP174]], align 1 ; CHECK-MAXBW-NEXT: [[TMP176:%.*]] = insertelement <16 x i8> 
[[TMP172]], i8 [[TMP175]], i32 15 ; CHECK-MAXBW-NEXT: br label [[PRED_LOAD_CONTINUE62]] @@ -2615,35 +2426,35 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) { ; CHECK-MAXBW-NEXT: [[TMP177:%.*]] = phi <16 x i8> [ [[TMP172]], [[PRED_LOAD_CONTINUE60]] ], [ [[TMP176]], [[PRED_LOAD_IF61]] ] ; CHECK-MAXBW-NEXT: [[TMP178:%.*]] = sext <16 x i8> [[TMP177]] to <16 x i32> ; CHECK-MAXBW-NEXT: [[TMP179:%.*]] = mul nsw <16 x i32> [[TMP178]], [[TMP97]] -; CHECK-MAXBW-NEXT: [[TMP180]] = add <16 x i32> [[TMP179]], [[VEC_PHI]] -; CHECK-MAXBW-NEXT: [[TMP181:%.*]] = select <16 x i1> [[TMP16]], <16 x i32> [[TMP180]], <16 x i32> [[VEC_PHI]] +; CHECK-MAXBW-NEXT: [[TMP180:%.*]] = select <16 x i1> [[TMP16]], <16 x i32> [[TMP179]], <16 x i32> zeroinitializer +; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.experimental.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP180]]) ; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 16 ; CHECK-MAXBW-NEXT: [[VEC_IND_NEXT]] = add <16 x i64> [[VEC_IND]], splat (i64 16) -; CHECK-MAXBW-NEXT: [[TMP182:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-MAXBW-NEXT: br i1 [[TMP182]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; CHECK-MAXBW-NEXT: [[TMP181:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-MAXBW-NEXT: br i1 [[TMP181]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK-MAXBW: middle.block: -; CHECK-MAXBW-NEXT: [[TMP183:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP181]]) +; CHECK-MAXBW-NEXT: [[TMP182:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]]) ; CHECK-MAXBW-NEXT: br i1 true, label [[EXIT:%.*]], label [[SCALAR_PH]] ; CHECK-MAXBW: scalar.ph: ; CHECK-MAXBW-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] -; CHECK-MAXBW-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP183]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ] +; CHECK-MAXBW-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP182]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ] ; CHECK-MAXBW-NEXT: br label [[FOR_BODY:%.*]] ; CHECK-MAXBW: for.body: ; CHECK-MAXBW-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] ; CHECK-MAXBW-NEXT: [[ACCUM:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] -; CHECK-MAXBW-NEXT: [[GEP_A:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[IV]] +; CHECK-MAXBW-NEXT: [[GEP_A:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV]] ; CHECK-MAXBW-NEXT: [[LOAD_A:%.*]] = load i8, ptr [[GEP_A]], align 1 ; CHECK-MAXBW-NEXT: [[EXT_A:%.*]] = sext i8 [[LOAD_A]] to i32 -; CHECK-MAXBW-NEXT: [[GEP_A2:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV]] -; CHECK-MAXBW-NEXT: [[LOAD_B:%.*]] = load i8, ptr [[GEP_A2]], align 1 +; CHECK-MAXBW-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[IV]] +; CHECK-MAXBW-NEXT: [[LOAD_B:%.*]] = load i8, ptr [[GEP_B]], align 1 ; CHECK-MAXBW-NEXT: [[EXT_B:%.*]] = sext i8 [[LOAD_B]] to i32 ; CHECK-MAXBW-NEXT: [[MUL:%.*]] = mul nsw i32 [[EXT_B]], [[EXT_A]] ; CHECK-MAXBW-NEXT: [[ADD]] = add nsw i32 [[MUL]], [[ACCUM]] ; CHECK-MAXBW-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-MAXBW-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-MAXBW-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop 
[[LOOP14:![0-9]+]] ; CHECK-MAXBW: exit: -; CHECK-MAXBW-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP183]], [[MIDDLE_BLOCK]] ] +; CHECK-MAXBW-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP182]], [[MIDDLE_BLOCK]] ] ; CHECK-MAXBW-NEXT: ret i32 [[ADD_LCSSA]] ; entry: @@ -2652,11 +2463,11 @@ entry: for.body: ; preds = %entry, %for.body %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ] %accum = phi i32 [ 0, %entry ], [ %add, %for.body ] - %gep.a = getelementptr inbounds i8, ptr %b, i64 %iv + %gep.a = getelementptr inbounds i8, ptr %a, i64 %iv %load.a = load i8, ptr %gep.a, align 1 %ext.a = sext i8 %load.a to i32 - %gep.a2 = getelementptr inbounds i8, ptr %a, i64 %iv - %load.b = load i8, ptr %gep.a2, align 1 + %gep.b = getelementptr inbounds i8, ptr %b, i64 %iv + %load.b = load i8, ptr %gep.b, align 1 %ext.b = sext i8 %load.b to i32 %mul = mul nsw i32 %ext.b, %ext.a %add = add nsw i32 %mul, %accum @@ -2691,7 +2502,7 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) { ; CHECK-INTERLEAVE1-NEXT: [[TMP8]] = add <16 x i32> [[TMP7]], [[VEC_PHI]] ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-INTERLEAVE1-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP8]]) ; CHECK-INTERLEAVE1-NEXT: [[TMP11:%.*]] = extractelement <16 x i32> [[TMP6]], i32 15 @@ -2713,7 +2524,7 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) { ; CHECK-INTERLEAVE1-NEXT: [[ADD]] = add i32 [[MUL]], [[ACCUM]] ; CHECK-INTERLEAVE1-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 ; CHECK-INTERLEAVE1-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-INTERLEAVE1-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; CHECK-INTERLEAVE1: for.exit: ; CHECK-INTERLEAVE1-NEXT: [[EXT_B_LCSSA:%.*]] = phi i32 [ [[EXT_B]], [[FOR_BODY]] ], [ [[TMP11]], [[MIDDLE_BLOCK]] ] ; CHECK-INTERLEAVE1-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP10]], [[MIDDLE_BLOCK]] ] @@ -2751,7 +2562,7 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[TMP14]] = add <16 x i32> [[TMP12]], [[VEC_PHI1]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 ; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <16 x i32> [[TMP14]], [[TMP13]] ; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[BIN_RDX]]) @@ -2774,7 +2585,7 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[ADD]] = add i32 [[MUL]], [[ACCUM]] ; CHECK-INTERLEAVED-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 ; CHECK-INTERLEAVED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-INTERLEAVED-NEXT: 
br i1 [[EXITCOND_NOT]], label [[FOR_EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; CHECK-INTERLEAVED: for.exit: ; CHECK-INTERLEAVED-NEXT: [[EXT_B_LCSSA:%.*]] = phi i32 [ [[EXT_B]], [[FOR_BODY]] ], [ [[TMP17]], [[MIDDLE_BLOCK]] ] ; CHECK-INTERLEAVED-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP16]], [[MIDDLE_BLOCK]] ] @@ -2803,7 +2614,7 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) { ; CHECK-MAXBW-NEXT: [[TMP8]] = add <16 x i32> [[TMP7]], [[VEC_PHI]] ; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-MAXBW-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP8]]) ; CHECK-MAXBW-NEXT: [[TMP11:%.*]] = extractelement <16 x i32> [[TMP6]], i32 15 @@ -2825,7 +2636,7 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) { ; CHECK-MAXBW-NEXT: [[ADD]] = add i32 [[MUL]], [[ACCUM]] ; CHECK-MAXBW-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 ; CHECK-MAXBW-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-MAXBW-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; CHECK-MAXBW: for.exit: ; CHECK-MAXBW-NEXT: [[EXT_B_LCSSA:%.*]] = phi i32 [ [[EXT_B]], [[FOR_BODY]] ], [ [[TMP11]], [[MIDDLE_BLOCK]] ] ; CHECK-MAXBW-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP10]], [[MIDDLE_BLOCK]] ] @@ -2872,13 +2683,11 @@ for.exit: ; preds = %for.body ; CHECK-INTERLEAVE1: [[LOOP9]] = distinct !{[[LOOP9]], [[META2]], [[META1]]} ; CHECK-INTERLEAVE1: [[LOOP10]] = distinct !{[[LOOP10]], [[META1]], [[META2]]} ; CHECK-INTERLEAVE1: [[LOOP11]] = distinct !{[[LOOP11]], [[META2]], [[META1]]} -; CHECK-INTERLEAVE1: [[LOOP12]] = distinct !{[[LOOP12]], [[META1]], [[META2]]} -; CHECK-INTERLEAVE1: [[LOOP13]] = distinct !{[[LOOP13]], [[META2]], [[META1]]} -; CHECK-INTERLEAVE1: [[LOOP14]] = distinct !{[[LOOP14]], [[META15:![0-9]+]], [[META1]], [[META2]]} -; CHECK-INTERLEAVE1: [[META15]] = !{!"llvm.loop.mustprogress"} -; CHECK-INTERLEAVE1: [[LOOP16]] = distinct !{[[LOOP16]], [[META15]], [[META2]], [[META1]]} -; CHECK-INTERLEAVE1: [[LOOP17]] = distinct !{[[LOOP17]], [[META1]], [[META2]]} -; CHECK-INTERLEAVE1: [[LOOP18]] = distinct !{[[LOOP18]], [[META2]], [[META1]]} +; CHECK-INTERLEAVE1: [[LOOP12]] = distinct !{[[LOOP12]], [[META13:![0-9]+]], [[META1]], [[META2]]} +; CHECK-INTERLEAVE1: [[META13]] = !{!"llvm.loop.mustprogress"} +; CHECK-INTERLEAVE1: [[LOOP14]] = distinct !{[[LOOP14]], [[META13]], [[META2]], [[META1]]} +; CHECK-INTERLEAVE1: [[LOOP15]] = distinct !{[[LOOP15]], [[META1]], [[META2]]} +; CHECK-INTERLEAVE1: [[LOOP16]] = distinct !{[[LOOP16]], [[META2]], [[META1]]} ;. 
; CHECK-INTERLEAVED: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} ; CHECK-INTERLEAVED: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} @@ -2892,13 +2701,11 @@ for.exit: ; preds = %for.body ; CHECK-INTERLEAVED: [[LOOP9]] = distinct !{[[LOOP9]], [[META2]], [[META1]]} ; CHECK-INTERLEAVED: [[LOOP10]] = distinct !{[[LOOP10]], [[META1]], [[META2]]} ; CHECK-INTERLEAVED: [[LOOP11]] = distinct !{[[LOOP11]], [[META2]], [[META1]]} -; CHECK-INTERLEAVED: [[LOOP12]] = distinct !{[[LOOP12]], [[META1]], [[META2]]} -; CHECK-INTERLEAVED: [[LOOP13]] = distinct !{[[LOOP13]], [[META2]], [[META1]]} -; CHECK-INTERLEAVED: [[LOOP14]] = distinct !{[[LOOP14]], [[META15:![0-9]+]], [[META1]], [[META2]]} -; CHECK-INTERLEAVED: [[META15]] = !{!"llvm.loop.mustprogress"} -; CHECK-INTERLEAVED: [[LOOP16]] = distinct !{[[LOOP16]], [[META15]], [[META2]], [[META1]]} -; CHECK-INTERLEAVED: [[LOOP17]] = distinct !{[[LOOP17]], [[META1]], [[META2]]} -; CHECK-INTERLEAVED: [[LOOP18]] = distinct !{[[LOOP18]], [[META2]], [[META1]]} +; CHECK-INTERLEAVED: [[LOOP12]] = distinct !{[[LOOP12]], [[META13:![0-9]+]], [[META1]], [[META2]]} +; CHECK-INTERLEAVED: [[META13]] = !{!"llvm.loop.mustprogress"} +; CHECK-INTERLEAVED: [[LOOP14]] = distinct !{[[LOOP14]], [[META13]], [[META2]], [[META1]]} +; CHECK-INTERLEAVED: [[LOOP15]] = distinct !{[[LOOP15]], [[META1]], [[META2]]} +; CHECK-INTERLEAVED: [[LOOP16]] = distinct !{[[LOOP16]], [[META2]], [[META1]]} ;. ; CHECK-MAXBW: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} ; CHECK-MAXBW: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} @@ -2912,11 +2719,9 @@ for.exit: ; preds = %for.body ; CHECK-MAXBW: [[LOOP9]] = distinct !{[[LOOP9]], [[META2]], [[META1]]} ; CHECK-MAXBW: [[LOOP10]] = distinct !{[[LOOP10]], [[META1]], [[META2]]} ; CHECK-MAXBW: [[LOOP11]] = distinct !{[[LOOP11]], [[META2]], [[META1]]} -; CHECK-MAXBW: [[LOOP12]] = distinct !{[[LOOP12]], [[META1]], [[META2]]} -; CHECK-MAXBW: [[LOOP13]] = distinct !{[[LOOP13]], [[META2]], [[META1]]} -; CHECK-MAXBW: [[LOOP14]] = distinct !{[[LOOP14]], [[META15:![0-9]+]], [[META1]], [[META2]]} -; CHECK-MAXBW: [[META15]] = !{!"llvm.loop.mustprogress"} -; CHECK-MAXBW: [[LOOP16]] = distinct !{[[LOOP16]], [[META15]], [[META2]], [[META1]]} -; CHECK-MAXBW: [[LOOP17]] = distinct !{[[LOOP17]], [[META1]], [[META2]]} -; CHECK-MAXBW: [[LOOP18]] = distinct !{[[LOOP18]], [[META2]], [[META1]]} +; CHECK-MAXBW: [[LOOP12]] = distinct !{[[LOOP12]], [[META13:![0-9]+]], [[META1]], [[META2]]} +; CHECK-MAXBW: [[META13]] = !{!"llvm.loop.mustprogress"} +; CHECK-MAXBW: [[LOOP14]] = distinct !{[[LOOP14]], [[META13]], [[META2]], [[META1]]} +; CHECK-MAXBW: [[LOOP15]] = distinct !{[[LOOP15]], [[META1]], [[META2]]} +; CHECK-MAXBW: [[LOOP16]] = distinct !{[[LOOP16]], [[META2]], [[META1]]} ;. 
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll index 9d0d30abce6c9..2aaea965cb645 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll @@ -1625,259 +1625,48 @@ exit: ; preds = %for.body ret i32 %result } -define i32 @not_dotp_predicated(i64 %N, ptr %a, ptr %b) #0 { -; CHECK-INTERLEAVE1-LABEL: define i32 @not_dotp_predicated( -; CHECK-INTERLEAVE1-SAME: i64 [[N:%.*]], ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { -; CHECK-INTERLEAVE1-NEXT: entry: -; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = mul i64 [[TMP6]], 4 -; CHECK-INTERLEAVE1-NEXT: [[MIN_ITERS_CHECK1:%.*]] = icmp ult i64 [[N]], [[TMP7]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[MIN_ITERS_CHECK1]], label [[VEC_EPILOG_PH:%.*]], label [[VECTOR_PH:%.*]] -; CHECK-INTERLEAVE1: vector.ph: -; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = mul i64 [[TMP10]], 4 -; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] -; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; CHECK-INTERLEAVE1-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVE1-NEXT: [[TMP15:%.*]] = mul i64 [[TMP11]], 4 -; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]] -; CHECK-INTERLEAVE1: vector.body: -; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0 -; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP0]] -; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 0 -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP2]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP9:%.*]] = sext [[WIDE_LOAD]] to -; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP0]] -; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i32 0 -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD1:%.*]] = load , ptr [[TMP5]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP12:%.*]] = sext [[WIDE_LOAD1]] to -; CHECK-INTERLEAVE1-NEXT: [[TMP13:%.*]] = mul nsw [[TMP12]], [[TMP9]] -; CHECK-INTERLEAVE1-NEXT: [[TMP14]] = add [[TMP13]], [[VEC_PHI]] -; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP15]] -; CHECK-INTERLEAVE1-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] -; CHECK-INTERLEAVE1: middle.block: -; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[TMP14]]) -; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_PH]] -; CHECK-INTERLEAVE1: scalar.ph: -; CHECK-INTERLEAVE1-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] -; CHECK-INTERLEAVE1-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP16]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ] -; CHECK-INTERLEAVE1-NEXT: br label [[FOR_BODY:%.*]] -; CHECK-INTERLEAVE1: for.body: -; CHECK-INTERLEAVE1-NEXT: 
[[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[ACCUM:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[VEC_EPILOG_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[GEP_A:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV]] -; CHECK-INTERLEAVE1-NEXT: [[LOAD_A:%.*]] = load i8, ptr [[GEP_A]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[EXT_A:%.*]] = sext i8 [[LOAD_A]] to i32 -; CHECK-INTERLEAVE1-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[IV]] -; CHECK-INTERLEAVE1-NEXT: [[LOAD_B:%.*]] = load i8, ptr [[GEP_B]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[EXT_B:%.*]] = sext i8 [[LOAD_B]] to i32 -; CHECK-INTERLEAVE1-NEXT: [[MUL:%.*]] = mul nsw i32 [[EXT_B]], [[EXT_A]] -; CHECK-INTERLEAVE1-NEXT: [[ADD]] = add nsw i32 [[MUL]], [[ACCUM]] -; CHECK-INTERLEAVE1-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-INTERLEAVE1-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] -; CHECK-INTERLEAVE1: exit: -; CHECK-INTERLEAVE1-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP16]], [[MIDDLE_BLOCK]] ] -; CHECK-INTERLEAVE1-NEXT: ret i32 [[ADD_LCSSA]] -; -; CHECK-INTERLEAVED-LABEL: define i32 @not_dotp_predicated( -; CHECK-INTERLEAVED-SAME: i64 [[N:%.*]], ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { -; CHECK-INTERLEAVED-NEXT: entry: -; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = mul i64 [[TMP8]], 8 -; CHECK-INTERLEAVED-NEXT: [[MIN_ITERS_CHECK1:%.*]] = icmp ult i64 [[N]], [[TMP14]] -; CHECK-INTERLEAVED-NEXT: br i1 [[MIN_ITERS_CHECK1]], label [[VEC_EPILOG_PH:%.*]], label [[VECTOR_PH:%.*]] -; CHECK-INTERLEAVED: vector.ph: -; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = mul i64 [[TMP15]], 8 -; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] -; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 8 -; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]] -; CHECK-INTERLEAVED: vector.body: -; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP23:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP24:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0 -; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP0]] -; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 0 -; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = mul i64 [[TMP9]], 4 -; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 [[TMP10]] -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP2]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load , ptr [[TMP11]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = sext [[WIDE_LOAD]] to -; CHECK-INTERLEAVED-NEXT: [[TMP25:%.*]] = sext [[WIDE_LOAD2]] to -; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP0]] -; 
CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[TMP6]], i32 0 -; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP17:%.*]] = mul i64 [[TMP16]], 4 -; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = getelementptr inbounds i8, ptr [[TMP6]], i64 [[TMP17]] -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load , ptr [[TMP7]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load , ptr [[TMP18]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP19:%.*]] = sext [[WIDE_LOAD3]] to -; CHECK-INTERLEAVED-NEXT: [[TMP20:%.*]] = sext [[WIDE_LOAD4]] to -; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = mul nsw [[TMP19]], [[TMP12]] -; CHECK-INTERLEAVED-NEXT: [[TMP22:%.*]] = mul nsw [[TMP20]], [[TMP25]] -; CHECK-INTERLEAVED-NEXT: [[TMP23]] = add [[TMP21]], [[VEC_PHI]] -; CHECK-INTERLEAVED-NEXT: [[TMP24]] = add [[TMP22]], [[VEC_PHI1]] -; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] -; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] -; CHECK-INTERLEAVED: middle.block: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add [[TMP24]], [[TMP23]] -; CHECK-INTERLEAVED-NEXT: [[TMP26:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[BIN_RDX]]) -; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_PH]] -; CHECK-INTERLEAVED: scalar.ph: -; CHECK-INTERLEAVED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] -; CHECK-INTERLEAVED-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP26]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ] -; CHECK-INTERLEAVED-NEXT: br label [[FOR_BODY:%.*]] -; CHECK-INTERLEAVED: for.body: -; CHECK-INTERLEAVED-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[ACCUM:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[VEC_EPILOG_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[GEP_A:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV]] -; CHECK-INTERLEAVED-NEXT: [[LOAD_A:%.*]] = load i8, ptr [[GEP_A]], align 1 -; CHECK-INTERLEAVED-NEXT: [[EXT_A:%.*]] = sext i8 [[LOAD_A]] to i32 -; CHECK-INTERLEAVED-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[IV]] -; CHECK-INTERLEAVED-NEXT: [[LOAD_B:%.*]] = load i8, ptr [[GEP_B]], align 1 -; CHECK-INTERLEAVED-NEXT: [[EXT_B:%.*]] = sext i8 [[LOAD_B]] to i32 -; CHECK-INTERLEAVED-NEXT: [[MUL:%.*]] = mul nsw i32 [[EXT_B]], [[EXT_A]] -; CHECK-INTERLEAVED-NEXT: [[ADD]] = add nsw i32 [[MUL]], [[ACCUM]] -; CHECK-INTERLEAVED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-INTERLEAVED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] -; CHECK-INTERLEAVED: exit: -; CHECK-INTERLEAVED-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP26]], [[MIDDLE_BLOCK]] ] -; CHECK-INTERLEAVED-NEXT: ret i32 [[ADD_LCSSA]] -; -; CHECK-MAXBW-LABEL: define i32 @not_dotp_predicated( -; CHECK-MAXBW-SAME: i64 [[N:%.*]], ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { -; CHECK-MAXBW-NEXT: entry: -; CHECK-MAXBW-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-MAXBW-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 8 -; CHECK-MAXBW-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]] -; 
CHECK-MAXBW-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] -; CHECK-MAXBW: vector.ph: -; CHECK-MAXBW-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 8 -; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] -; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-MAXBW-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 8 -; CHECK-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]] -; CHECK-MAXBW: vector.body: -; CHECK-MAXBW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-MAXBW-NEXT: [[VEC_PHI1:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE5:%.*]], [[VECTOR_BODY]] ] -; CHECK-MAXBW-NEXT: [[TMP6:%.*]] = add i64 [[INDEX]], 0 -; CHECK-MAXBW-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP6]] -; CHECK-MAXBW-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[TMP7]], i32 0 -; CHECK-MAXBW-NEXT: [[WIDE_LOAD2:%.*]] = load , ptr [[TMP8]], align 1 -; CHECK-MAXBW-NEXT: [[TMP13:%.*]] = sext [[WIDE_LOAD2]] to -; CHECK-MAXBW-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP6]] -; CHECK-MAXBW-NEXT: [[TMP15:%.*]] = getelementptr inbounds i8, ptr [[TMP14]], i32 0 -; CHECK-MAXBW-NEXT: [[WIDE_LOAD4:%.*]] = load , ptr [[TMP15]], align 1 -; CHECK-MAXBW-NEXT: [[TMP20:%.*]] = sext [[WIDE_LOAD4]] to -; CHECK-MAXBW-NEXT: [[TMP22:%.*]] = mul nsw [[TMP20]], [[TMP13]] -; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE5]] = call @llvm.experimental.vector.partial.reduce.add.nxv2i32.nxv8i32( [[VEC_PHI1]], [[TMP22]]) -; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] -; CHECK-MAXBW-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-MAXBW-NEXT: br i1 [[TMP23]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] -; CHECK-MAXBW: middle.block: -; CHECK-MAXBW-NEXT: [[TMP16:%.*]] = call i32 @llvm.vector.reduce.add.nxv2i32( [[PARTIAL_REDUCE5]]) -; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] -; CHECK-MAXBW-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] -; CHECK-MAXBW: scalar.ph: -; CHECK-MAXBW-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] -; CHECK-MAXBW-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP16]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ] -; CHECK-MAXBW-NEXT: br label [[FOR_BODY:%.*]] -; CHECK-MAXBW: for.body: -; CHECK-MAXBW-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-MAXBW-NEXT: [[ACCUM:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] -; CHECK-MAXBW-NEXT: [[GEP_A:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV]] -; CHECK-MAXBW-NEXT: [[LOAD_A:%.*]] = load i8, ptr [[GEP_A]], align 1 -; CHECK-MAXBW-NEXT: [[EXT_A:%.*]] = sext i8 [[LOAD_A]] to i32 -; CHECK-MAXBW-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[IV]] -; CHECK-MAXBW-NEXT: [[LOAD_B:%.*]] = load i8, ptr [[GEP_B]], align 1 -; CHECK-MAXBW-NEXT: [[EXT_B:%.*]] = sext i8 [[LOAD_B]] to i32 -; CHECK-MAXBW-NEXT: [[MUL:%.*]] = mul nsw i32 [[EXT_B]], [[EXT_A]] -; CHECK-MAXBW-NEXT: [[ADD]] = add nsw i32 [[MUL]], [[ACCUM]] -; CHECK-MAXBW-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-MAXBW-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-MAXBW-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] 
-; CHECK-MAXBW: exit: -; CHECK-MAXBW-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP16]], [[MIDDLE_BLOCK]] ] -; CHECK-MAXBW-NEXT: ret i32 [[ADD_LCSSA]] -; -entry: - br label %for.body - -for.body: ; preds = %entry, %for.body - %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ] - %accum = phi i32 [ 0, %entry ], [ %add, %for.body ] - %gep.a = getelementptr inbounds i8, ptr %a, i64 %iv - %load.a = load i8, ptr %gep.a, align 1 - %ext.a = sext i8 %load.a to i32 - %gep.b = getelementptr inbounds i8, ptr %b, i64 %iv - %load.b = load i8, ptr %gep.b, align 1 - %ext.b = sext i8 %load.b to i32 - %mul = mul nsw i32 %ext.b, %ext.a - %add = add nsw i32 %mul, %accum - %iv.next = add nuw nsw i64 %iv, 1 - %exitcond.not = icmp eq i64 %iv.next, %N - br i1 %exitcond.not, label %exit, label %for.body - -exit: ; preds = %for.body - ret i32 %add -} - -define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) #0 { -; CHECK-INTERLEAVE1-LABEL: define i32 @not_dotp_predicated_pragma( +define i32 @dotp_predicated(i64 %N, ptr %a, ptr %b) #0 { +; CHECK-INTERLEAVE1-LABEL: define i32 @dotp_predicated( ; CHECK-INTERLEAVE1-SAME: i64 [[N:%.*]], ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-INTERLEAVE1-NEXT: entry: ; CHECK-INTERLEAVE1-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVE1: vector.ph: -; CHECK-INTERLEAVE1-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = mul i64 [[TMP9]], 4 -; CHECK-INTERLEAVE1-NEXT: [[TMP11:%.*]] = sub i64 [[TMP10]], 1 -; CHECK-INTERLEAVE1-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP11]] -; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP10]] +; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4 +; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1 +; CHECK-INTERLEAVE1-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP2]] +; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] ; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] -; CHECK-INTERLEAVE1-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVE1-NEXT: [[TMP14:%.*]] = mul i64 [[TMP12]], 4 -; CHECK-INTERLEAVE1-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = mul i64 [[TMP15]], 4 -; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = sub i64 [[N]], [[TMP6]] -; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = icmp ugt i64 [[N]], [[TMP6]] -; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i64 [[TMP0]], i64 0 +; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 4 +; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 4 +; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = sub i64 [[N]], [[TMP6]] +; CHECK-INTERLEAVE1-NEXT: [[TMP8:%.*]] = icmp ugt i64 [[N]], [[TMP6]] +; CHECK-INTERLEAVE1-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], i64 [[TMP7]], i64 0 ; CHECK-INTERLEAVE1-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 [[N]]) ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVE1: vector.body: ; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVE1-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], 
[[VECTOR_BODY]] ] ; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP19:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 0 -; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP3]] -; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i32 0 -; CHECK-INTERLEAVE1-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv4i8.p0(ptr [[TMP5]], i32 1, [[ACTIVE_LANE_MASK]], poison) +; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = add i64 [[INDEX]], 0 +; CHECK-INTERLEAVE1-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP10]] +; CHECK-INTERLEAVE1-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[TMP11]], i32 0 +; CHECK-INTERLEAVE1-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv4i8.p0(ptr [[TMP12]], i32 1, [[ACTIVE_LANE_MASK]], poison) ; CHECK-INTERLEAVE1-NEXT: [[TMP13:%.*]] = sext [[WIDE_MASKED_LOAD]] to -; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP3]] -; CHECK-INTERLEAVE1-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[TMP7]], i32 0 -; CHECK-INTERLEAVE1-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call @llvm.masked.load.nxv4i8.p0(ptr [[TMP8]], i32 1, [[ACTIVE_LANE_MASK]], poison) +; CHECK-INTERLEAVE1-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP10]] +; CHECK-INTERLEAVE1-NEXT: [[TMP15:%.*]] = getelementptr inbounds i8, ptr [[TMP14]], i32 0 +; CHECK-INTERLEAVE1-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call @llvm.masked.load.nxv4i8.p0(ptr [[TMP15]], i32 1, [[ACTIVE_LANE_MASK]], poison) ; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = sext [[WIDE_MASKED_LOAD1]] to ; CHECK-INTERLEAVE1-NEXT: [[TMP17:%.*]] = mul nsw [[TMP16]], [[TMP13]] ; CHECK-INTERLEAVE1-NEXT: [[TMP18:%.*]] = add [[TMP17]], [[VEC_PHI]] ; CHECK-INTERLEAVE1-NEXT: [[TMP19]] = select [[ACTIVE_LANE_MASK]], [[TMP18]], [[VEC_PHI]] -; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP14]] -; CHECK-INTERLEAVE1-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP2]]) +; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP4]] +; CHECK-INTERLEAVE1-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP9]]) ; CHECK-INTERLEAVE1-NEXT: [[TMP20:%.*]] = xor [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true) ; CHECK-INTERLEAVE1-NEXT: [[TMP21:%.*]] = extractelement [[TMP20]], i32 0 -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP22:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[TMP19]]) ; CHECK-INTERLEAVE1-NEXT: br i1 true, label [[EXIT:%.*]], label [[SCALAR_PH]] @@ -1888,62 +1677,62 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVE1: for.body: ; CHECK-INTERLEAVE1-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] ; CHECK-INTERLEAVE1-NEXT: [[ACCUM:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[GEP_A:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[IV]] +; CHECK-INTERLEAVE1-NEXT: [[GEP_A:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV]] ; CHECK-INTERLEAVE1-NEXT: [[LOAD_A:%.*]] = load i8, ptr 
[[GEP_A]], align 1 ; CHECK-INTERLEAVE1-NEXT: [[EXT_A:%.*]] = sext i8 [[LOAD_A]] to i32 -; CHECK-INTERLEAVE1-NEXT: [[GEP_A2:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV]] -; CHECK-INTERLEAVE1-NEXT: [[LOAD_B:%.*]] = load i8, ptr [[GEP_A2]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[IV]] +; CHECK-INTERLEAVE1-NEXT: [[LOAD_B:%.*]] = load i8, ptr [[GEP_B]], align 1 ; CHECK-INTERLEAVE1-NEXT: [[EXT_B:%.*]] = sext i8 [[LOAD_B]] to i32 ; CHECK-INTERLEAVE1-NEXT: [[MUL:%.*]] = mul nsw i32 [[EXT_B]], [[EXT_A]] ; CHECK-INTERLEAVE1-NEXT: [[ADD]] = add nsw i32 [[MUL]], [[ACCUM]] ; CHECK-INTERLEAVE1-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-INTERLEAVE1-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; CHECK-INTERLEAVE1: exit: ; CHECK-INTERLEAVE1-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP22]], [[MIDDLE_BLOCK]] ] ; CHECK-INTERLEAVE1-NEXT: ret i32 [[ADD_LCSSA]] ; -; CHECK-INTERLEAVED-LABEL: define i32 @not_dotp_predicated_pragma( +; CHECK-INTERLEAVED-LABEL: define i32 @dotp_predicated( ; CHECK-INTERLEAVED-SAME: i64 [[N:%.*]], ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-INTERLEAVED-NEXT: entry: ; CHECK-INTERLEAVED-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVED: vector.ph: -; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = mul i64 [[TMP9]], 4 -; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = sub i64 [[TMP10]], 1 -; CHECK-INTERLEAVED-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP11]] -; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP10]] +; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4 +; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1 +; CHECK-INTERLEAVED-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP2]] +; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] ; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] -; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = mul i64 [[TMP12]], 4 -; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = mul i64 [[TMP15]], 4 -; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = sub i64 [[N]], [[TMP6]] -; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = icmp ugt i64 [[N]], [[TMP6]] -; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i64 [[TMP0]], i64 0 +; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 4 +; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 4 +; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = sub i64 [[N]], [[TMP6]] +; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = icmp ugt i64 [[N]], [[TMP6]] +; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], i64 [[TMP7]], i64 0 ; CHECK-INTERLEAVED-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 [[N]]) ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVED: vector.body: ; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = 
phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP19:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 0 -; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP3]] -; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i32 0 -; CHECK-INTERLEAVED-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv4i8.p0(ptr [[TMP5]], i32 1, [[ACTIVE_LANE_MASK]], poison) +; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = add i64 [[INDEX]], 0 +; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP10]] +; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[TMP11]], i32 0 +; CHECK-INTERLEAVED-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv4i8.p0(ptr [[TMP12]], i32 1, [[ACTIVE_LANE_MASK]], poison) ; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = sext [[WIDE_MASKED_LOAD]] to -; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP3]] -; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[TMP7]], i32 0 -; CHECK-INTERLEAVED-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call @llvm.masked.load.nxv4i8.p0(ptr [[TMP8]], i32 1, [[ACTIVE_LANE_MASK]], poison) +; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP10]] +; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = getelementptr inbounds i8, ptr [[TMP14]], i32 0 +; CHECK-INTERLEAVED-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call @llvm.masked.load.nxv4i8.p0(ptr [[TMP15]], i32 1, [[ACTIVE_LANE_MASK]], poison) ; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = sext [[WIDE_MASKED_LOAD1]] to ; CHECK-INTERLEAVED-NEXT: [[TMP17:%.*]] = mul nsw [[TMP16]], [[TMP13]] ; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = add [[TMP17]], [[VEC_PHI]] ; CHECK-INTERLEAVED-NEXT: [[TMP19]] = select [[ACTIVE_LANE_MASK]], [[TMP18]], [[VEC_PHI]] -; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP14]] -; CHECK-INTERLEAVED-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP2]]) +; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP4]] +; CHECK-INTERLEAVED-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP9]]) ; CHECK-INTERLEAVED-NEXT: [[TMP20:%.*]] = xor [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true) ; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = extractelement [[TMP20]], i32 0 -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[TMP22:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[TMP19]]) ; CHECK-INTERLEAVED-NEXT: br i1 true, label [[EXIT:%.*]], label [[SCALAR_PH]] @@ -1954,85 +1743,85 @@ define i32 @not_dotp_predicated_pragma(i64 %N, ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVED: for.body: ; CHECK-INTERLEAVED-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[ACCUM:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: 
[[GEP_A:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[IV]] +; CHECK-INTERLEAVED-NEXT: [[GEP_A:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV]] ; CHECK-INTERLEAVED-NEXT: [[LOAD_A:%.*]] = load i8, ptr [[GEP_A]], align 1 ; CHECK-INTERLEAVED-NEXT: [[EXT_A:%.*]] = sext i8 [[LOAD_A]] to i32 -; CHECK-INTERLEAVED-NEXT: [[GEP_A2:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV]] -; CHECK-INTERLEAVED-NEXT: [[LOAD_B:%.*]] = load i8, ptr [[GEP_A2]], align 1 +; CHECK-INTERLEAVED-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[IV]] +; CHECK-INTERLEAVED-NEXT: [[LOAD_B:%.*]] = load i8, ptr [[GEP_B]], align 1 ; CHECK-INTERLEAVED-NEXT: [[EXT_B:%.*]] = sext i8 [[LOAD_B]] to i32 ; CHECK-INTERLEAVED-NEXT: [[MUL:%.*]] = mul nsw i32 [[EXT_B]], [[EXT_A]] ; CHECK-INTERLEAVED-NEXT: [[ADD]] = add nsw i32 [[MUL]], [[ACCUM]] ; CHECK-INTERLEAVED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-INTERLEAVED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; CHECK-INTERLEAVED: exit: ; CHECK-INTERLEAVED-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP22]], [[MIDDLE_BLOCK]] ] ; CHECK-INTERLEAVED-NEXT: ret i32 [[ADD_LCSSA]] ; -; CHECK-MAXBW-LABEL: define i32 @not_dotp_predicated_pragma( +; CHECK-MAXBW-LABEL: define i32 @dotp_predicated( ; CHECK-MAXBW-SAME: i64 [[N:%.*]], ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-MAXBW-NEXT: entry: ; CHECK-MAXBW-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-MAXBW: vector.ph: ; CHECK-MAXBW-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-MAXBW-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4 +; CHECK-MAXBW-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 16 ; CHECK-MAXBW-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1 ; CHECK-MAXBW-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP2]] ; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] ; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 4 +; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 16 ; CHECK-MAXBW-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-MAXBW-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 4 +; CHECK-MAXBW-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 16 ; CHECK-MAXBW-NEXT: [[TMP7:%.*]] = sub i64 [[N]], [[TMP6]] ; CHECK-MAXBW-NEXT: [[TMP8:%.*]] = icmp ugt i64 [[N]], [[TMP6]] ; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], i64 [[TMP7]], i64 0 -; CHECK-MAXBW-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 [[N]]) +; CHECK-MAXBW-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv16i1.i64(i64 0, i64 [[N]]) ; CHECK-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-MAXBW: vector.body: ; CHECK-MAXBW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-MAXBW-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-MAXBW-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP19:%.*]], [[VECTOR_BODY]] ] +; CHECK-MAXBW-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] 
] +; CHECK-MAXBW-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] ; CHECK-MAXBW-NEXT: [[TMP10:%.*]] = add i64 [[INDEX]], 0 -; CHECK-MAXBW-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP10]] +; CHECK-MAXBW-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP10]] ; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[TMP11]], i32 0 -; CHECK-MAXBW-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv4i8.p0(ptr [[TMP12]], i32 1, [[ACTIVE_LANE_MASK]], poison) -; CHECK-MAXBW-NEXT: [[TMP13:%.*]] = sext [[WIDE_MASKED_LOAD]] to -; CHECK-MAXBW-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP10]] +; CHECK-MAXBW-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv16i8.p0(ptr [[TMP12]], i32 1, [[ACTIVE_LANE_MASK]], poison) +; CHECK-MAXBW-NEXT: [[TMP13:%.*]] = sext [[WIDE_MASKED_LOAD]] to +; CHECK-MAXBW-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP10]] ; CHECK-MAXBW-NEXT: [[TMP15:%.*]] = getelementptr inbounds i8, ptr [[TMP14]], i32 0 -; CHECK-MAXBW-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call @llvm.masked.load.nxv4i8.p0(ptr [[TMP15]], i32 1, [[ACTIVE_LANE_MASK]], poison) -; CHECK-MAXBW-NEXT: [[TMP16:%.*]] = sext [[WIDE_MASKED_LOAD1]] to -; CHECK-MAXBW-NEXT: [[TMP17:%.*]] = mul nsw [[TMP16]], [[TMP13]] -; CHECK-MAXBW-NEXT: [[TMP18:%.*]] = add [[TMP17]], [[VEC_PHI]] -; CHECK-MAXBW-NEXT: [[TMP19]] = select [[ACTIVE_LANE_MASK]], [[TMP18]], [[VEC_PHI]] +; CHECK-MAXBW-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call @llvm.masked.load.nxv16i8.p0(ptr [[TMP15]], i32 1, [[ACTIVE_LANE_MASK]], poison) +; CHECK-MAXBW-NEXT: [[TMP16:%.*]] = sext [[WIDE_MASKED_LOAD1]] to +; CHECK-MAXBW-NEXT: [[TMP17:%.*]] = mul nsw [[TMP16]], [[TMP13]] +; CHECK-MAXBW-NEXT: [[TMP18:%.*]] = select [[ACTIVE_LANE_MASK]], [[TMP17]], zeroinitializer +; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE]] = call @llvm.experimental.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI]], [[TMP18]]) ; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP4]] -; CHECK-MAXBW-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP9]]) -; CHECK-MAXBW-NEXT: [[TMP20:%.*]] = xor [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true) -; CHECK-MAXBW-NEXT: [[TMP21:%.*]] = extractelement [[TMP20]], i32 0 -; CHECK-MAXBW-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; CHECK-MAXBW-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv16i1.i64(i64 [[INDEX]], i64 [[TMP9]]) +; CHECK-MAXBW-NEXT: [[TMP19:%.*]] = xor [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true) +; CHECK-MAXBW-NEXT: [[TMP20:%.*]] = extractelement [[TMP19]], i32 0 +; CHECK-MAXBW-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK-MAXBW: middle.block: -; CHECK-MAXBW-NEXT: [[TMP22:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[TMP19]]) +; CHECK-MAXBW-NEXT: [[TMP21:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[PARTIAL_REDUCE]]) ; CHECK-MAXBW-NEXT: br i1 true, label [[EXIT:%.*]], label [[SCALAR_PH]] ; CHECK-MAXBW: scalar.ph: ; CHECK-MAXBW-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] -; CHECK-MAXBW-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP22]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ] +; CHECK-MAXBW-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP21]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ] ; CHECK-MAXBW-NEXT: br label [[FOR_BODY:%.*]] ; CHECK-MAXBW: 
for.body: ; CHECK-MAXBW-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] ; CHECK-MAXBW-NEXT: [[ACCUM:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] -; CHECK-MAXBW-NEXT: [[GEP_A:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[IV]] +; CHECK-MAXBW-NEXT: [[GEP_A:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV]] ; CHECK-MAXBW-NEXT: [[LOAD_A:%.*]] = load i8, ptr [[GEP_A]], align 1 ; CHECK-MAXBW-NEXT: [[EXT_A:%.*]] = sext i8 [[LOAD_A]] to i32 -; CHECK-MAXBW-NEXT: [[GEP_A2:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV]] -; CHECK-MAXBW-NEXT: [[LOAD_B:%.*]] = load i8, ptr [[GEP_A2]], align 1 +; CHECK-MAXBW-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[IV]] +; CHECK-MAXBW-NEXT: [[LOAD_B:%.*]] = load i8, ptr [[GEP_B]], align 1 ; CHECK-MAXBW-NEXT: [[EXT_B:%.*]] = sext i8 [[LOAD_B]] to i32 ; CHECK-MAXBW-NEXT: [[MUL:%.*]] = mul nsw i32 [[EXT_B]], [[EXT_A]] ; CHECK-MAXBW-NEXT: [[ADD]] = add nsw i32 [[MUL]], [[ACCUM]] ; CHECK-MAXBW-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-MAXBW-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-MAXBW-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; CHECK-MAXBW: exit: -; CHECK-MAXBW-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP22]], [[MIDDLE_BLOCK]] ] +; CHECK-MAXBW-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP21]], [[MIDDLE_BLOCK]] ] ; CHECK-MAXBW-NEXT: ret i32 [[ADD_LCSSA]] ; entry: @@ -2041,11 +1830,11 @@ entry: for.body: ; preds = %entry, %for.body %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ] %accum = phi i32 [ 0, %entry ], [ %add, %for.body ] - %gep.a = getelementptr inbounds i8, ptr %b, i64 %iv + %gep.a = getelementptr inbounds i8, ptr %a, i64 %iv %load.a = load i8, ptr %gep.a, align 1 %ext.a = sext i8 %load.a to i32 - %gep.a2 = getelementptr inbounds i8, ptr %a, i64 %iv - %load.b = load i8, ptr %gep.a2, align 1 + %gep.b = getelementptr inbounds i8, ptr %b, i64 %iv + %load.b = load i8, ptr %gep.b, align 1 %ext.b = sext i8 %load.b to i32 %mul = mul nsw i32 %ext.b, %ext.a %add = add nsw i32 %mul, %accum @@ -2088,7 +1877,7 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVE1-NEXT: [[TMP14]] = add [[TMP13]], [[VEC_PHI]] ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] ; CHECK-INTERLEAVE1-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[TMP14]]) ; CHECK-INTERLEAVE1-NEXT: [[TMP17:%.*]] = call i32 @llvm.vscale.i32() @@ -2114,7 +1903,7 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVE1-NEXT: [[ADD]] = add i32 [[MUL]], [[ACCUM]] ; CHECK-INTERLEAVE1-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 ; CHECK-INTERLEAVE1-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-INTERLEAVE1-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_EXIT]], label 
[[FOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; CHECK-INTERLEAVE1: for.exit: ; CHECK-INTERLEAVE1-NEXT: [[EXT_B_LCSSA:%.*]] = phi i32 [ [[EXT_B]], [[FOR_BODY]] ], [ [[TMP20]], [[MIDDLE_BLOCK]] ] ; CHECK-INTERLEAVE1-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP16]], [[MIDDLE_BLOCK]] ] @@ -2164,7 +1953,7 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVED-NEXT: [[TMP24]] = add [[TMP22]], [[VEC_PHI1]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] ; CHECK-INTERLEAVED-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add [[TMP24]], [[TMP23]] ; CHECK-INTERLEAVED-NEXT: [[TMP26:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[BIN_RDX]]) @@ -2191,7 +1980,7 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVED-NEXT: [[ADD]] = add i32 [[MUL]], [[ACCUM]] ; CHECK-INTERLEAVED-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 ; CHECK-INTERLEAVED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; CHECK-INTERLEAVED: for.exit: ; CHECK-INTERLEAVED-NEXT: [[EXT_B_LCSSA:%.*]] = phi i32 [ [[EXT_B]], [[FOR_BODY]] ], [ [[TMP30]], [[MIDDLE_BLOCK]] ] ; CHECK-INTERLEAVED-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP26]], [[MIDDLE_BLOCK]] ] @@ -2228,7 +2017,7 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) #0 { ; CHECK-MAXBW-NEXT: [[TMP24]] = add [[TMP22]], [[VEC_PHI1]] ; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] ; CHECK-MAXBW-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-MAXBW-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP16:%.*]] = call i32 @llvm.vector.reduce.add.nxv8i32( [[TMP24]]) ; CHECK-MAXBW-NEXT: [[TMP17:%.*]] = call i32 @llvm.vscale.i32() @@ -2254,7 +2043,7 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) #0 { ; CHECK-MAXBW-NEXT: [[ADD]] = add i32 [[MUL]], [[ACCUM]] ; CHECK-MAXBW-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 ; CHECK-MAXBW-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-MAXBW-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; CHECK-MAXBW: for.exit: ; CHECK-MAXBW-NEXT: [[EXT_B_LCSSA:%.*]] = phi i32 [ [[EXT_B]], [[FOR_BODY]] ], [ [[TMP21]], [[MIDDLE_BLOCK]] ] ; CHECK-MAXBW-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP16]], [[MIDDLE_BLOCK]] ] @@ -2317,7 +2106,7 @@ define i64 @dotp_cost_disagreement(ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVE1-NEXT: [[TMP15]] = add [[VEC_PHI]], [[TMP14]] ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] ; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 
[[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP17:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64( [[TMP15]]) ; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 16, [[N_VEC]] @@ -2339,7 +2128,7 @@ define i64 @dotp_cost_disagreement(ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVE1-NEXT: [[MUL:%.*]] = mul nuw nsw i64 [[CONV3]], [[CONV]] ; CHECK-INTERLEAVE1-NEXT: [[ADD]] = add i64 [[SUM]], [[MUL]] ; CHECK-INTERLEAVE1-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[I_IV_NEXT]], 16 -; CHECK-INTERLEAVE1-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; CHECK-INTERLEAVE1: exit: ; CHECK-INTERLEAVE1-NEXT: [[ADD_LCSSA:%.*]] = phi i64 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP17]], [[MIDDLE_BLOCK]] ] ; CHECK-INTERLEAVE1-NEXT: ret i64 [[ADD_LCSSA]] @@ -2389,7 +2178,7 @@ define i64 @dotp_cost_disagreement(ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVED-NEXT: [[TMP25]] = add [[VEC_PHI1]], [[TMP23]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] ; CHECK-INTERLEAVED-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add [[TMP25]], [[TMP24]] ; CHECK-INTERLEAVED-NEXT: [[TMP27:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64( [[BIN_RDX]]) @@ -2412,7 +2201,7 @@ define i64 @dotp_cost_disagreement(ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVED-NEXT: [[MUL:%.*]] = mul nuw nsw i64 [[CONV3]], [[CONV]] ; CHECK-INTERLEAVED-NEXT: [[ADD]] = add i64 [[SUM]], [[MUL]] ; CHECK-INTERLEAVED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[I_IV_NEXT]], 16 -; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; CHECK-INTERLEAVED: exit: ; CHECK-INTERLEAVED-NEXT: [[ADD_LCSSA:%.*]] = phi i64 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP27]], [[MIDDLE_BLOCK]] ] ; CHECK-INTERLEAVED-NEXT: ret i64 [[ADD_LCSSA]] @@ -2449,7 +2238,7 @@ define i64 @dotp_cost_disagreement(ptr %a, ptr %b) #0 { ; CHECK-MAXBW-NEXT: [[TMP19]] = add [[VEC_PHI]], [[TMP14]] ; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] ; CHECK-MAXBW-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-MAXBW-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP16:%.*]] = call i64 @llvm.vector.reduce.add.nxv8i64( [[TMP19]]) ; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 16, [[N_VEC]] @@ -2471,7 +2260,7 @@ define i64 @dotp_cost_disagreement(ptr %a, ptr %b) #0 { ; CHECK-MAXBW-NEXT: [[MUL:%.*]] = mul nuw nsw i64 [[CONV3]], [[CONV]] ; CHECK-MAXBW-NEXT: [[ADD]] = add i64 [[SUM]], [[MUL]] ; 
CHECK-MAXBW-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[I_IV_NEXT]], 16 -; CHECK-MAXBW-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; CHECK-MAXBW: exit: ; CHECK-MAXBW-NEXT: [[ADD_LCSSA:%.*]] = phi i64 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP16]], [[MIDDLE_BLOCK]] ] ; CHECK-MAXBW-NEXT: ret i64 [[ADD_LCSSA]] @@ -2588,7 +2377,7 @@ define void @not_dotp_not_phi2(ptr %matrix, i32 %n) #0 { ; CHECK-INTERLEAVED-NEXT: [[TMP23]] = add i32 [[TMP21]], [[TMP15]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-INTERLEAVED-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP23]], [[TMP22]] ; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]] @@ -2615,7 +2404,7 @@ define void @not_dotp_not_phi2(ptr %matrix, i32 %n) #0 { ; CHECK-INTERLEAVED-NEXT: [[SCEVGEP]] = getelementptr i8, ptr [[PTR]], i64 16 ; CHECK-INTERLEAVED-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1 ; CHECK-INTERLEAVED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[IV_NEXT]], [[N]] -; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; CHECK-INTERLEAVED: for.exit: ; CHECK-INTERLEAVED-NEXT: [[ADD_1_LCSSA:%.*]] = phi i32 [ [[ADD_1]], [[FOR_BODY]] ], [ [[BIN_RDX]], [[MIDDLE_BLOCK]] ] ; CHECK-INTERLEAVED-NEXT: [[ADD_FLOAT:%.*]] = sitofp i32 [[ADD_1_LCSSA]] to float @@ -2732,7 +2521,7 @@ define i64 @not_dotp_ext_outside_plan(ptr %a, i16 %b, i64 %n) #0 { ; CHECK-INTERLEAVE1-NEXT: [[TMP5]] = add <8 x i64> [[TMP4]], [[VEC_PHI]] ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 ; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[TMP5]]) ; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] @@ -2751,7 +2540,7 @@ define i64 @not_dotp_ext_outside_plan(ptr %a, i16 %b, i64 %n) #0 { ; CHECK-INTERLEAVE1-NEXT: [[ADD]] = add i64 [[MUL]], [[ACCUM]] ; CHECK-INTERLEAVE1-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-INTERLEAVE1-NEXT: [[CMP_1:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[CMP_1]], label [[EXIT_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[CMP_1]], label [[EXIT_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; CHECK-INTERLEAVE1: exit.loopexit: ; CHECK-INTERLEAVE1-NEXT: [[ADD_LCSSA:%.*]] = phi i64 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ] ; CHECK-INTERLEAVE1-NEXT: br label [[EXIT]] @@ -2792,7 +2581,7 @@ define i64 @not_dotp_ext_outside_plan(ptr %a, i16 %b, i64 %n) #0 { ; 
CHECK-INTERLEAVED-NEXT: [[TMP9]] = add <8 x i64> [[TMP7]], [[VEC_PHI1]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <8 x i64> [[TMP9]], [[TMP8]] ; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[BIN_RDX]]) @@ -2812,7 +2601,7 @@ define i64 @not_dotp_ext_outside_plan(ptr %a, i16 %b, i64 %n) #0 { ; CHECK-INTERLEAVED-NEXT: [[ADD]] = add i64 [[MUL]], [[ACCUM]] ; CHECK-INTERLEAVED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-INTERLEAVED-NEXT: [[CMP_1:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-INTERLEAVED-NEXT: br i1 [[CMP_1]], label [[EXIT_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[CMP_1]], label [[EXIT_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] ; CHECK-INTERLEAVED: exit.loopexit: ; CHECK-INTERLEAVED-NEXT: [[ADD_LCSSA:%.*]] = phi i64 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP11]], [[MIDDLE_BLOCK]] ] ; CHECK-INTERLEAVED-NEXT: br label [[EXIT]] @@ -2853,7 +2642,7 @@ define i64 @not_dotp_ext_outside_plan(ptr %a, i16 %b, i64 %n) #0 { ; CHECK-MAXBW-NEXT: [[TMP11]] = add [[TMP10]], [[VEC_PHI]] ; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] ; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-MAXBW-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP13:%.*]] = call i64 @llvm.vector.reduce.add.nxv4i64( [[TMP11]]) ; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] @@ -2872,7 +2661,7 @@ define i64 @not_dotp_ext_outside_plan(ptr %a, i16 %b, i64 %n) #0 { ; CHECK-MAXBW-NEXT: [[ADD]] = add i64 [[MUL]], [[ACCUM]] ; CHECK-MAXBW-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-MAXBW-NEXT: [[CMP_1:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-MAXBW-NEXT: br i1 [[CMP_1]], label [[EXIT_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[CMP_1]], label [[EXIT_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; CHECK-MAXBW: exit.loopexit: ; CHECK-MAXBW-NEXT: [[ADD_LCSSA:%.*]] = phi i64 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP13]], [[MIDDLE_BLOCK]] ] ; CHECK-MAXBW-NEXT: br label [[EXIT]] @@ -2933,7 +2722,7 @@ define i64 @not_dotp_ext_outside_plan2(ptr %a, i16 %b, i64 %n) #0 { ; CHECK-INTERLEAVE1-NEXT: [[TMP5]] = add <8 x i64> [[TMP4]], [[VEC_PHI]] ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 ; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[TMP5]]) ; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], 
[[N_VEC]] @@ -2952,7 +2741,7 @@ define i64 @not_dotp_ext_outside_plan2(ptr %a, i16 %b, i64 %n) #0 { ; CHECK-INTERLEAVE1-NEXT: [[ADD]] = add i64 [[MUL]], [[ACCUM]] ; CHECK-INTERLEAVE1-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-INTERLEAVE1-NEXT: [[CMP_1:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[CMP_1]], label [[EXIT_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[CMP_1]], label [[EXIT_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] ; CHECK-INTERLEAVE1: exit.loopexit: ; CHECK-INTERLEAVE1-NEXT: [[ADD_LCSSA:%.*]] = phi i64 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ] ; CHECK-INTERLEAVE1-NEXT: br label [[EXIT]] @@ -2993,7 +2782,7 @@ define i64 @not_dotp_ext_outside_plan2(ptr %a, i16 %b, i64 %n) #0 { ; CHECK-INTERLEAVED-NEXT: [[TMP9]] = add <8 x i64> [[TMP7]], [[VEC_PHI1]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <8 x i64> [[TMP9]], [[TMP8]] ; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[BIN_RDX]]) @@ -3013,7 +2802,7 @@ define i64 @not_dotp_ext_outside_plan2(ptr %a, i16 %b, i64 %n) #0 { ; CHECK-INTERLEAVED-NEXT: [[ADD]] = add i64 [[MUL]], [[ACCUM]] ; CHECK-INTERLEAVED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-INTERLEAVED-NEXT: [[CMP_1:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-INTERLEAVED-NEXT: br i1 [[CMP_1]], label [[EXIT_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[CMP_1]], label [[EXIT_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] ; CHECK-INTERLEAVED: exit.loopexit: ; CHECK-INTERLEAVED-NEXT: [[ADD_LCSSA:%.*]] = phi i64 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP11]], [[MIDDLE_BLOCK]] ] ; CHECK-INTERLEAVED-NEXT: br label [[EXIT]] @@ -3054,7 +2843,7 @@ define i64 @not_dotp_ext_outside_plan2(ptr %a, i16 %b, i64 %n) #0 { ; CHECK-MAXBW-NEXT: [[TMP11]] = add [[TMP10]], [[VEC_PHI]] ; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] ; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-MAXBW-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP13:%.*]] = call i64 @llvm.vector.reduce.add.nxv4i64( [[TMP11]]) ; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] @@ -3073,7 +2862,7 @@ define i64 @not_dotp_ext_outside_plan2(ptr %a, i16 %b, i64 %n) #0 { ; CHECK-MAXBW-NEXT: [[ADD]] = add i64 [[MUL]], [[ACCUM]] ; CHECK-MAXBW-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-MAXBW-NEXT: [[CMP_1:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-MAXBW-NEXT: br i1 [[CMP_1]], label [[EXIT_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[CMP_1]], label [[EXIT_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] ; CHECK-MAXBW: exit.loopexit: ; CHECK-MAXBW-NEXT: [[ADD_LCSSA:%.*]] = phi i64 [ [[ADD]], [[FOR_BODY]] ], [ 
[[TMP13]], [[MIDDLE_BLOCK]] ] ; CHECK-MAXBW-NEXT: br label [[EXIT]] diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-inloop-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-inloop-reduction.ll index 46340dea4bf8c..6e7f634ce9459 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-inloop-reduction.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-inloop-reduction.ll @@ -132,18 +132,49 @@ for.end: define i32 @mul(ptr %a, i64 %n, i32 %start) { ; IF-EVL-LABEL: @mul( ; IF-EVL-NEXT: entry: +; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]] +; IF-EVL: vector.ph: +; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], 3 +; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 4 +; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] +; IF-EVL-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1 +; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <4 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0 +; IF-EVL-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT1]], <4 x i64> poison, <4 x i32> zeroinitializer ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; IF-EVL: vector.body: +; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START:%.*]], [[ENTRY]] ], [ [[MUL:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[IV]] +; IF-EVL-NEXT: [[TMP6:%.*]] = add i64 [[IV]], 0 +; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[IV]], i64 0 +; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer +; IF-EVL-NEXT: [[VEC_IV:%.*]] = add <4 x i64> [[BROADCAST_SPLAT]], +; IF-EVL-NEXT: [[TMP1:%.*]] = icmp ule <4 x i64> [[VEC_IV]], [[BROADCAST_SPLAT2]] +; IF-EVL-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP6]] +; IF-EVL-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 0 +; IF-EVL-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[TMP3]], i32 4, <4 x i1> [[TMP1]], <4 x i32> poison) +; IF-EVL-NEXT: [[TMP4:%.*]] = select <4 x i1> [[TMP1]], <4 x i32> [[WIDE_MASKED_LOAD]], <4 x i32> splat (i32 1) +; IF-EVL-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[TMP4]]) +; IF-EVL-NEXT: [[MUL]] = mul i32 [[TMP5]], [[RDX]] +; IF-EVL-NEXT: [[IV_NEXT]] = add i64 [[IV]], 4 +; IF-EVL-NEXT: [[TMP7:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]] +; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; IF-EVL: middle.block: +; IF-EVL-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]] +; IF-EVL: scalar.ph: +; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY1:%.*]] ] +; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[MUL]], [[MIDDLE_BLOCK]] ], [ [[START]], [[ENTRY1]] ] +; IF-EVL-NEXT: br label [[FOR_BODY1:%.*]] +; IF-EVL: for.body: +; IF-EVL-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], [[FOR_BODY1]] ] +; IF-EVL-NEXT: [[RDX1:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[MUL1:%.*]], [[FOR_BODY1]] ] +; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr 
inbounds i32, ptr [[A]], i64 [[IV1]] ; IF-EVL-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[MUL]] = mul nsw i32 [[TMP0]], [[RDX]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N:%.*]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; IF-EVL-NEXT: [[MUL1]] = mul nsw i32 [[TMP0]], [[RDX1]] +; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 +; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP5:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[MUL_LCSSA:%.*]] = phi i32 [ [[MUL]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[MUL_LCSSA:%.*]] = phi i32 [ [[MUL1]], [[FOR_BODY1]] ], [ [[MUL]], [[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret i32 [[MUL_LCSSA]] ; ; NO-VP-LABEL: @mul( @@ -1161,18 +1192,49 @@ for.end: define float @fmul(ptr %a, i64 %n, float %start) { ; IF-EVL-LABEL: @fmul( ; IF-EVL-NEXT: entry: +; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]] +; IF-EVL: vector.ph: +; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], 3 +; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 4 +; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] +; IF-EVL-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1 +; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <4 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0 +; IF-EVL-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT1]], <4 x i64> poison, <4 x i32> zeroinitializer ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; IF-EVL: vector.body: +; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START:%.*]], [[ENTRY]] ], [ [[MUL:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[IV]] +; IF-EVL-NEXT: [[TMP6:%.*]] = add i64 [[IV]], 0 +; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[IV]], i64 0 +; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer +; IF-EVL-NEXT: [[VEC_IV:%.*]] = add <4 x i64> [[BROADCAST_SPLAT]], +; IF-EVL-NEXT: [[TMP1:%.*]] = icmp ule <4 x i64> [[VEC_IV]], [[BROADCAST_SPLAT2]] +; IF-EVL-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[TMP6]] +; IF-EVL-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 0 +; IF-EVL-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr [[TMP3]], i32 4, <4 x i1> [[TMP1]], <4 x float> poison) +; IF-EVL-NEXT: [[TMP4:%.*]] = select reassoc <4 x i1> [[TMP1]], <4 x float> [[WIDE_MASKED_LOAD]], <4 x float> splat (float 1.000000e+00) +; IF-EVL-NEXT: [[TMP5:%.*]] = call reassoc float @llvm.vector.reduce.fmul.v4f32(float 1.000000e+00, <4 x float> [[TMP4]]) +; IF-EVL-NEXT: [[MUL]] = fmul reassoc float [[TMP5]], [[RDX]] +; IF-EVL-NEXT: [[IV_NEXT]] = add i64 [[IV]], 4 +; IF-EVL-NEXT: [[TMP7:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]] +; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] +; IF-EVL: middle.block: +; IF-EVL-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]] +; IF-EVL: scalar.ph: +; 
IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY1:%.*]] ] +; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[MUL]], [[MIDDLE_BLOCK]] ], [ [[START]], [[ENTRY1]] ] +; IF-EVL-NEXT: br label [[FOR_BODY1:%.*]] +; IF-EVL: for.body: +; IF-EVL-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], [[FOR_BODY1]] ] +; IF-EVL-NEXT: [[RDX1:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[MUL1:%.*]], [[FOR_BODY1]] ] +; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV1]] ; IF-EVL-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[MUL]] = fmul reassoc float [[TMP0]], [[RDX]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N:%.*]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP4]] +; IF-EVL-NEXT: [[MUL1]] = fmul reassoc float [[TMP0]], [[RDX1]] +; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 +; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP23:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[MUL_LCSSA:%.*]] = phi float [ [[MUL]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[MUL_LCSSA:%.*]] = phi float [ [[MUL1]], [[FOR_BODY1]] ], [ [[MUL]], [[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret float [[MUL_LCSSA]] ; ; NO-VP-LABEL: @fmul( @@ -1273,7 +1335,7 @@ define float @fmin(ptr %a, i64 %n, float %start) #0 { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]] ; IF-EVL-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; IF-EVL-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]] ; IF-EVL: scalar.ph: @@ -1289,7 +1351,7 @@ define float @fmin(ptr %a, i64 %n, float %start) #0 { ; IF-EVL-NEXT: [[MIN]] = select i1 [[CMP]], float [[TMP17]], float [[RDX]] ; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]] ; IF-EVL: for.end: ; IF-EVL-NEXT: [[MIN_LCSSA:%.*]] = phi float [ [[MIN]], [[FOR_BODY]] ], [ [[RDX_MINMAX_SELECT]], [[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret float [[MIN_LCSSA]] @@ -1395,7 +1457,7 @@ define float @fmax(ptr %a, i64 %n, float %start) #0 { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]] ; IF-EVL-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; IF-EVL-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]] ; IF-EVL: scalar.ph: @@ -1411,7 +1473,7 @@ define float @fmax(ptr %a, i64 %n, float %start) #0 { ; IF-EVL-NEXT: [[MAX]] = select i1 [[CMP]], float [[TMP17]], 
float [[RDX]] ; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]] ; IF-EVL: for.end: ; IF-EVL-NEXT: [[MAX_LCSSA:%.*]] = phi float [ [[MAX]], [[FOR_BODY]] ], [ [[RDX_MINMAX_SELECT]], [[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret float [[MAX_LCSSA]] @@ -1485,18 +1547,51 @@ for.end: define float @fminimum(ptr %a, i64 %n, float %start) { ; IF-EVL-LABEL: @fminimum( ; IF-EVL-NEXT: entry: +; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]] +; IF-EVL: vector.ph: +; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], 7 +; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 8 +; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] +; IF-EVL-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1 +; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <8 x float> poison, float [[START:%.*]], i64 0 +; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <8 x float> [[BROADCAST_SPLATINSERT]], <8 x float> poison, <8 x i32> zeroinitializer +; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <8 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0 +; IF-EVL-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <8 x i64> [[BROADCAST_SPLATINSERT3]], <8 x i64> poison, <8 x i32> zeroinitializer ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] +; IF-EVL: vector.body: +; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <8 x float> [ [[BROADCAST_SPLAT]], [[ENTRY]] ], [ [[TMP4:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[TMP8:%.*]] = add i64 [[IV]], 0 +; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <8 x i64> poison, i64 [[IV]], i64 0 +; IF-EVL-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <8 x i64> [[BROADCAST_SPLATINSERT1]], <8 x i64> poison, <8 x i32> zeroinitializer +; IF-EVL-NEXT: [[VEC_IV:%.*]] = add <8 x i64> [[BROADCAST_SPLAT2]], +; IF-EVL-NEXT: [[TMP1:%.*]] = icmp ule <8 x i64> [[VEC_IV]], [[BROADCAST_SPLAT4]] +; IF-EVL-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[TMP8]] +; IF-EVL-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 0 +; IF-EVL-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <8 x float> @llvm.masked.load.v8f32.p0(ptr [[TMP3]], i32 4, <8 x i1> [[TMP1]], <8 x float> poison) +; IF-EVL-NEXT: [[TMP4]] = call <8 x float> @llvm.minimum.v8f32(<8 x float> [[VEC_PHI]], <8 x float> [[WIDE_MASKED_LOAD]]) +; IF-EVL-NEXT: [[TMP5:%.*]] = select <8 x i1> [[TMP1]], <8 x float> [[TMP4]], <8 x float> [[VEC_PHI]] +; IF-EVL-NEXT: [[IV_NEXT]] = add i64 [[IV]], 8 +; IF-EVL-NEXT: [[TMP6:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]] +; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]] +; IF-EVL: middle.block: +; IF-EVL-NEXT: [[TMP7:%.*]] = call float @llvm.vector.reduce.fminimum.v8f32(<8 x float> [[TMP5]]) +; IF-EVL-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]] +; IF-EVL: scalar.ph: +; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY1:%.*]] ] +; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP7]], [[MIDDLE_BLOCK]] ], [ [[START]], [[ENTRY1]] ] +; IF-EVL-NEXT: br label [[FOR_BODY1:%.*]] ; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ 
[[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START:%.*]], [[ENTRY]] ], [ [[MIN:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[IV]] +; IF-EVL-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], [[FOR_BODY1]] ] +; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[MIN:%.*]], [[FOR_BODY1]] ] +; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV1]] ; IF-EVL-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 ; IF-EVL-NEXT: [[MIN]] = tail call float @llvm.minimum.f32(float [[RDX]], float [[TMP0]]) -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N:%.*]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP4]] +; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 +; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP29:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[MIN_LCSSA:%.*]] = phi float [ [[MIN]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[MIN_LCSSA:%.*]] = phi float [ [[MIN]], [[FOR_BODY1]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret float [[MIN_LCSSA]] ; ; NO-VP-LABEL: @fminimum( @@ -1566,18 +1661,51 @@ for.end: define float @fmaximum(ptr %a, i64 %n, float %start) { ; IF-EVL-LABEL: @fmaximum( ; IF-EVL-NEXT: entry: +; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]] +; IF-EVL: vector.ph: +; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], 7 +; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 8 +; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] +; IF-EVL-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1 +; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <8 x float> poison, float [[START:%.*]], i64 0 +; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <8 x float> [[BROADCAST_SPLATINSERT]], <8 x float> poison, <8 x i32> zeroinitializer +; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <8 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0 +; IF-EVL-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <8 x i64> [[BROADCAST_SPLATINSERT3]], <8 x i64> poison, <8 x i32> zeroinitializer ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] +; IF-EVL: vector.body: +; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <8 x float> [ [[BROADCAST_SPLAT]], [[ENTRY]] ], [ [[TMP4:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[TMP8:%.*]] = add i64 [[IV]], 0 +; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <8 x i64> poison, i64 [[IV]], i64 0 +; IF-EVL-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <8 x i64> [[BROADCAST_SPLATINSERT1]], <8 x i64> poison, <8 x i32> zeroinitializer +; IF-EVL-NEXT: [[VEC_IV:%.*]] = add <8 x i64> [[BROADCAST_SPLAT2]], +; IF-EVL-NEXT: [[TMP1:%.*]] = icmp ule <8 x i64> [[VEC_IV]], [[BROADCAST_SPLAT4]] +; IF-EVL-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[TMP8]] +; IF-EVL-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 0 +; IF-EVL-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <8 x float> @llvm.masked.load.v8f32.p0(ptr [[TMP3]], i32 4, <8 x i1> [[TMP1]], <8 x float> poison) +; IF-EVL-NEXT: [[TMP4]] = call <8 x float> @llvm.maximum.v8f32(<8 x float> [[VEC_PHI]], <8 x float> 
[[WIDE_MASKED_LOAD]]) +; IF-EVL-NEXT: [[TMP5:%.*]] = select <8 x i1> [[TMP1]], <8 x float> [[TMP4]], <8 x float> [[VEC_PHI]] +; IF-EVL-NEXT: [[IV_NEXT]] = add i64 [[IV]], 8 +; IF-EVL-NEXT: [[TMP6:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]] +; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]] +; IF-EVL: middle.block: +; IF-EVL-NEXT: [[TMP7:%.*]] = call float @llvm.vector.reduce.fmaximum.v8f32(<8 x float> [[TMP5]]) +; IF-EVL-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]] +; IF-EVL: scalar.ph: +; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY1:%.*]] ] +; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP7]], [[MIDDLE_BLOCK]] ], [ [[START]], [[ENTRY1]] ] +; IF-EVL-NEXT: br label [[FOR_BODY1:%.*]] ; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START:%.*]], [[ENTRY]] ], [ [[MAX:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[IV]] +; IF-EVL-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], [[FOR_BODY1]] ] +; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[MAX:%.*]], [[FOR_BODY1]] ] +; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV1]] ; IF-EVL-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 ; IF-EVL-NEXT: [[MAX]] = tail call float @llvm.maximum.f32(float [[RDX]], float [[TMP0]]) -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N:%.*]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP4]] +; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 +; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP31:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[MAX_LCSSA:%.*]] = phi float [ [[MAX]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[MAX_LCSSA:%.*]] = phi float [ [[MAX]], [[FOR_BODY1]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret float [[MAX_LCSSA]] ; ; NO-VP-LABEL: @fmaximum( @@ -1682,7 +1810,7 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n, float %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP19]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]] ; IF-EVL-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; IF-EVL-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]] ; IF-EVL: scalar.ph: @@ -1699,7 +1827,7 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n, float %start) { ; IF-EVL-NEXT: [[MULADD]] = tail call reassoc float @llvm.fmuladd.f32(float [[TMP21]], float [[TMP22]], float [[RDX]]) ; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP33:![0-9]+]] ; IF-EVL: for.end: ; IF-EVL-NEXT: [[MULADD_LCSSA:%.*]] = 
phi float [ [[MULADD]], [[FOR_BODY]] ], [ [[TMP18]], [[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret float [[MULADD_LCSSA]] @@ -1810,7 +1938,7 @@ define i32 @anyof_icmp(ptr %a, i64 %n, i32 %start, i32 %inv) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP17]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]] ; IF-EVL-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; IF-EVL-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP34:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP19:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1( [[TMP16]]) ; IF-EVL-NEXT: [[TMP20:%.*]] = freeze i1 [[TMP19]] @@ -1829,7 +1957,7 @@ define i32 @anyof_icmp(ptr %a, i64 %n, i32 %start, i32 %inv) { ; IF-EVL-NEXT: [[ANYOF]] = select i1 [[CMP_I]], i32 [[INV]], i32 [[RDX]] ; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP35:![0-9]+]] ; IF-EVL: for.end: ; IF-EVL-NEXT: [[ANYOF_LCSSA:%.*]] = phi i32 [ [[ANYOF]], [[FOR_BODY]] ], [ [[RDX_SELECT]], [[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret i32 [[ANYOF_LCSSA]] @@ -1937,7 +2065,7 @@ define i32 @anyof_fcmp(ptr %a, i64 %n, i32 %start, i32 %inv) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP17]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]] ; IF-EVL-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; IF-EVL-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP36:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP19:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1( [[TMP16]]) ; IF-EVL-NEXT: [[TMP20:%.*]] = freeze i1 [[TMP19]] @@ -1956,7 +2084,7 @@ define i32 @anyof_fcmp(ptr %a, i64 %n, i32 %start, i32 %inv) { ; IF-EVL-NEXT: [[ANYOF]] = select i1 [[CMP_I]], i32 [[INV]], i32 [[RDX]] ; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP31:![0-9]+]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP37:![0-9]+]] ; IF-EVL: for.end: ; IF-EVL-NEXT: [[ANYOF_LCSSA:%.*]] = phi i32 [ [[ANYOF]], [[FOR_BODY]] ], [ [[RDX_SELECT]], [[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret i32 [[ANYOF_LCSSA]] diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-reduction.ll index 7557c10892d6d..b81805d473002 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-reduction.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-reduction.ll @@ -133,18 +133,50 @@ for.end: define i32 @mul(ptr %a, i64 %n, i32 %start) { ; IF-EVL-LABEL: @mul( ; IF-EVL-NEXT: entry: +; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]] +; IF-EVL: vector.ph: +; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], 7 +; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 8 +; IF-EVL-NEXT: 
[[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] +; IF-EVL-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1 +; IF-EVL-NEXT: [[TMP9:%.*]] = insertelement <8 x i32> splat (i32 1), i32 [[START:%.*]], i32 0 +; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <8 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0 +; IF-EVL-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <8 x i64> [[BROADCAST_SPLATINSERT1]], <8 x i64> poison, <8 x i32> zeroinitializer ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] +; IF-EVL: vector.body: +; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <8 x i32> [ [[TMP9]], [[ENTRY]] ], [ [[TMP5:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[TMP1:%.*]] = add i64 [[IV]], 0 +; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[IV]], i64 0 +; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <8 x i64> [[BROADCAST_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer +; IF-EVL-NEXT: [[VEC_IV:%.*]] = add <8 x i64> [[BROADCAST_SPLAT]], +; IF-EVL-NEXT: [[TMP2:%.*]] = icmp ule <8 x i64> [[VEC_IV]], [[BROADCAST_SPLAT2]] +; IF-EVL-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP1]] +; IF-EVL-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i32 0 +; IF-EVL-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p0(ptr [[TMP4]], i32 4, <8 x i1> [[TMP2]], <8 x i32> poison) +; IF-EVL-NEXT: [[TMP5]] = mul <8 x i32> [[WIDE_MASKED_LOAD]], [[VEC_PHI]] +; IF-EVL-NEXT: [[TMP6:%.*]] = select <8 x i1> [[TMP2]], <8 x i32> [[TMP5]], <8 x i32> [[VEC_PHI]] +; IF-EVL-NEXT: [[IV_NEXT]] = add i64 [[IV]], 8 +; IF-EVL-NEXT: [[TMP7:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]] +; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; IF-EVL: middle.block: +; IF-EVL-NEXT: [[TMP8:%.*]] = call i32 @llvm.vector.reduce.mul.v8i32(<8 x i32> [[TMP6]]) +; IF-EVL-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]] +; IF-EVL: scalar.ph: +; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY1:%.*]] ] +; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP8]], [[MIDDLE_BLOCK]] ], [ [[START]], [[ENTRY1]] ] +; IF-EVL-NEXT: br label [[FOR_BODY1:%.*]] ; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START:%.*]], [[ENTRY]] ], [ [[MUL:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[IV]] +; IF-EVL-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], [[FOR_BODY1]] ] +; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[MUL:%.*]], [[FOR_BODY1]] ] +; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV1]] ; IF-EVL-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 ; IF-EVL-NEXT: [[MUL]] = mul nsw i32 [[TMP0]], [[RDX]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N:%.*]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 +; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP5:![0-9]+]] ; IF-EVL: for.end: -; 
IF-EVL-NEXT: [[MUL_LCSSA:%.*]] = phi i32 [ [[MUL]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[MUL_LCSSA:%.*]] = phi i32 [ [[MUL]], [[FOR_BODY1]] ], [ [[TMP8]], [[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret i32 [[MUL_LCSSA]] ; ; NO-VP-LABEL: @mul( @@ -1202,18 +1234,50 @@ for.end: define float @fmul(ptr %a, i64 %n, float %start) { ; IF-EVL-LABEL: @fmul( ; IF-EVL-NEXT: entry: +; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]] +; IF-EVL: vector.ph: +; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], 7 +; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 8 +; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] +; IF-EVL-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1 +; IF-EVL-NEXT: [[TMP9:%.*]] = insertelement <8 x float> splat (float 1.000000e+00), float [[START:%.*]], i32 0 +; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <8 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0 +; IF-EVL-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <8 x i64> [[BROADCAST_SPLATINSERT1]], <8 x i64> poison, <8 x i32> zeroinitializer ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] +; IF-EVL: vector.body: +; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <8 x float> [ [[TMP9]], [[ENTRY]] ], [ [[TMP5:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[TMP1:%.*]] = add i64 [[IV]], 0 +; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[IV]], i64 0 +; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <8 x i64> [[BROADCAST_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer +; IF-EVL-NEXT: [[VEC_IV:%.*]] = add <8 x i64> [[BROADCAST_SPLAT]], +; IF-EVL-NEXT: [[TMP2:%.*]] = icmp ule <8 x i64> [[VEC_IV]], [[BROADCAST_SPLAT2]] +; IF-EVL-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[TMP1]] +; IF-EVL-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[TMP3]], i32 0 +; IF-EVL-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <8 x float> @llvm.masked.load.v8f32.p0(ptr [[TMP4]], i32 4, <8 x i1> [[TMP2]], <8 x float> poison) +; IF-EVL-NEXT: [[TMP5]] = fmul reassoc <8 x float> [[WIDE_MASKED_LOAD]], [[VEC_PHI]] +; IF-EVL-NEXT: [[TMP6:%.*]] = select reassoc <8 x i1> [[TMP2]], <8 x float> [[TMP5]], <8 x float> [[VEC_PHI]] +; IF-EVL-NEXT: [[IV_NEXT]] = add i64 [[IV]], 8 +; IF-EVL-NEXT: [[TMP7:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]] +; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] +; IF-EVL: middle.block: +; IF-EVL-NEXT: [[TMP8:%.*]] = call reassoc float @llvm.vector.reduce.fmul.v8f32(float 1.000000e+00, <8 x float> [[TMP6]]) +; IF-EVL-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]] +; IF-EVL: scalar.ph: +; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY1:%.*]] ] +; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP8]], [[MIDDLE_BLOCK]] ], [ [[START]], [[ENTRY1]] ] +; IF-EVL-NEXT: br label [[FOR_BODY1:%.*]] ; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START:%.*]], [[ENTRY]] ], [ [[MUL:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[IV]] +; IF-EVL-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], [[FOR_BODY1]] ] +; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[MUL:%.*]], [[FOR_BODY1]] ] +; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = 
getelementptr inbounds float, ptr [[A]], i64 [[IV1]] ; IF-EVL-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 ; IF-EVL-NEXT: [[MUL]] = fmul reassoc float [[TMP0]], [[RDX]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N:%.*]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP4]] +; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 +; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP23:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[MUL_LCSSA:%.*]] = phi float [ [[MUL]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[MUL_LCSSA:%.*]] = phi float [ [[MUL]], [[FOR_BODY1]] ], [ [[TMP8]], [[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret float [[MUL_LCSSA]] ; ; NO-VP-LABEL: @fmul( @@ -1316,7 +1380,7 @@ define float @fmin(ptr %a, i64 %n, float %start) #0 { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]] ; IF-EVL-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; IF-EVL-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP18:%.*]] = call fast float @llvm.vector.reduce.fmin.nxv4f32( [[TMP15]]) ; IF-EVL-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]] @@ -1333,7 +1397,7 @@ define float @fmin(ptr %a, i64 %n, float %start) #0 { ; IF-EVL-NEXT: [[MIN]] = select i1 [[CMP]], float [[TMP19]], float [[RDX]] ; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]] ; IF-EVL: for.end: ; IF-EVL-NEXT: [[MIN_LCSSA:%.*]] = phi float [ [[MIN]], [[FOR_BODY]] ], [ [[TMP18]], [[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret float [[MIN_LCSSA]] @@ -1443,7 +1507,7 @@ define float @fmax(ptr %a, i64 %n, float %start) #0 { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]] ; IF-EVL-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; IF-EVL-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP18:%.*]] = call fast float @llvm.vector.reduce.fmax.nxv4f32( [[TMP15]]) ; IF-EVL-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]] @@ -1460,7 +1524,7 @@ define float @fmax(ptr %a, i64 %n, float %start) #0 { ; IF-EVL-NEXT: [[MAX]] = select i1 [[CMP]], float [[TMP19]], float [[RDX]] ; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]] ; IF-EVL: for.end: ; IF-EVL-NEXT: [[MAX_LCSSA:%.*]] = phi float [ [[MAX]], [[FOR_BODY]] ], [ [[TMP18]], 
[[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret float [[MAX_LCSSA]] @@ -1536,18 +1600,51 @@ for.end: define float @fminimum(ptr %a, i64 %n, float %start) { ; IF-EVL-LABEL: @fminimum( ; IF-EVL-NEXT: entry: +; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]] +; IF-EVL: vector.ph: +; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], 7 +; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 8 +; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] +; IF-EVL-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1 +; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <8 x float> poison, float [[START:%.*]], i64 0 +; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <8 x float> [[BROADCAST_SPLATINSERT]], <8 x float> poison, <8 x i32> zeroinitializer +; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <8 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0 +; IF-EVL-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <8 x i64> [[BROADCAST_SPLATINSERT3]], <8 x i64> poison, <8 x i32> zeroinitializer ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] +; IF-EVL: vector.body: +; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <8 x float> [ [[BROADCAST_SPLAT]], [[ENTRY]] ], [ [[TMP4:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[TMP8:%.*]] = add i64 [[IV]], 0 +; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <8 x i64> poison, i64 [[IV]], i64 0 +; IF-EVL-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <8 x i64> [[BROADCAST_SPLATINSERT1]], <8 x i64> poison, <8 x i32> zeroinitializer +; IF-EVL-NEXT: [[VEC_IV:%.*]] = add <8 x i64> [[BROADCAST_SPLAT2]], +; IF-EVL-NEXT: [[TMP1:%.*]] = icmp ule <8 x i64> [[VEC_IV]], [[BROADCAST_SPLAT4]] +; IF-EVL-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[TMP8]] +; IF-EVL-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 0 +; IF-EVL-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <8 x float> @llvm.masked.load.v8f32.p0(ptr [[TMP3]], i32 4, <8 x i1> [[TMP1]], <8 x float> poison) +; IF-EVL-NEXT: [[TMP4]] = call <8 x float> @llvm.minimum.v8f32(<8 x float> [[VEC_PHI]], <8 x float> [[WIDE_MASKED_LOAD]]) +; IF-EVL-NEXT: [[TMP5:%.*]] = select <8 x i1> [[TMP1]], <8 x float> [[TMP4]], <8 x float> [[VEC_PHI]] +; IF-EVL-NEXT: [[IV_NEXT]] = add i64 [[IV]], 8 +; IF-EVL-NEXT: [[TMP6:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]] +; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]] +; IF-EVL: middle.block: +; IF-EVL-NEXT: [[TMP7:%.*]] = call float @llvm.vector.reduce.fminimum.v8f32(<8 x float> [[TMP5]]) +; IF-EVL-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]] +; IF-EVL: scalar.ph: +; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY1:%.*]] ] +; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP7]], [[MIDDLE_BLOCK]] ], [ [[START]], [[ENTRY1]] ] +; IF-EVL-NEXT: br label [[FOR_BODY1:%.*]] ; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START:%.*]], [[ENTRY]] ], [ [[MIN:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[IV]] +; IF-EVL-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], [[FOR_BODY1]] ] +; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[MIN:%.*]], [[FOR_BODY1]] ] +; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = 
getelementptr inbounds float, ptr [[A]], i64 [[IV1]] ; IF-EVL-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 ; IF-EVL-NEXT: [[MIN]] = tail call float @llvm.minimum.f32(float [[RDX]], float [[TMP0]]) -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N:%.*]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP4]] +; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 +; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP29:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[MIN_LCSSA:%.*]] = phi float [ [[MIN]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[MIN_LCSSA:%.*]] = phi float [ [[MIN]], [[FOR_BODY1]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret float [[MIN_LCSSA]] ; ; NO-VP-LABEL: @fminimum( @@ -1617,18 +1714,51 @@ for.end: define float @fmaximum(ptr %a, i64 %n, float %start) { ; IF-EVL-LABEL: @fmaximum( ; IF-EVL-NEXT: entry: +; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]] +; IF-EVL: vector.ph: +; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], 7 +; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 8 +; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] +; IF-EVL-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1 +; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <8 x float> poison, float [[START:%.*]], i64 0 +; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <8 x float> [[BROADCAST_SPLATINSERT]], <8 x float> poison, <8 x i32> zeroinitializer +; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <8 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0 +; IF-EVL-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <8 x i64> [[BROADCAST_SPLATINSERT3]], <8 x i64> poison, <8 x i32> zeroinitializer ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] +; IF-EVL: vector.body: +; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <8 x float> [ [[BROADCAST_SPLAT]], [[ENTRY]] ], [ [[TMP4:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[TMP8:%.*]] = add i64 [[IV]], 0 +; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <8 x i64> poison, i64 [[IV]], i64 0 +; IF-EVL-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <8 x i64> [[BROADCAST_SPLATINSERT1]], <8 x i64> poison, <8 x i32> zeroinitializer +; IF-EVL-NEXT: [[VEC_IV:%.*]] = add <8 x i64> [[BROADCAST_SPLAT2]], +; IF-EVL-NEXT: [[TMP1:%.*]] = icmp ule <8 x i64> [[VEC_IV]], [[BROADCAST_SPLAT4]] +; IF-EVL-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[TMP8]] +; IF-EVL-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 0 +; IF-EVL-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <8 x float> @llvm.masked.load.v8f32.p0(ptr [[TMP3]], i32 4, <8 x i1> [[TMP1]], <8 x float> poison) +; IF-EVL-NEXT: [[TMP4]] = call <8 x float> @llvm.maximum.v8f32(<8 x float> [[VEC_PHI]], <8 x float> [[WIDE_MASKED_LOAD]]) +; IF-EVL-NEXT: [[TMP5:%.*]] = select <8 x i1> [[TMP1]], <8 x float> [[TMP4]], <8 x float> [[VEC_PHI]] +; IF-EVL-NEXT: [[IV_NEXT]] = add i64 [[IV]], 8 +; IF-EVL-NEXT: [[TMP6:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]] +; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]] +; IF-EVL: middle.block: +; IF-EVL-NEXT: [[TMP7:%.*]] = call float @llvm.vector.reduce.fmaximum.v8f32(<8 x float> [[TMP5]]) +; IF-EVL-NEXT: br i1 
true, label [[FOR_END:%.*]], label [[SCALAR_PH]] +; IF-EVL: scalar.ph: +; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY1:%.*]] ] +; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP7]], [[MIDDLE_BLOCK]] ], [ [[START]], [[ENTRY1]] ] +; IF-EVL-NEXT: br label [[FOR_BODY1:%.*]] ; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START:%.*]], [[ENTRY]] ], [ [[MAX:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[IV]] +; IF-EVL-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], [[FOR_BODY1]] ] +; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[MAX:%.*]], [[FOR_BODY1]] ] +; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV1]] ; IF-EVL-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 ; IF-EVL-NEXT: [[MAX]] = tail call float @llvm.maximum.f32(float [[RDX]], float [[TMP0]]) -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N:%.*]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP4]] +; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 +; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP31:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[MAX_LCSSA:%.*]] = phi float [ [[MAX]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[MAX_LCSSA:%.*]] = phi float [ [[MAX]], [[FOR_BODY1]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret float [[MAX_LCSSA]] ; ; NO-VP-LABEL: @fmaximum( @@ -1733,7 +1863,7 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n, float %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP18]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]] ; IF-EVL-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; IF-EVL-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP20:%.*]] = call reassoc float @llvm.vector.reduce.fadd.nxv4f32(float -0.000000e+00, [[TMP17]]) ; IF-EVL-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]] @@ -1751,7 +1881,7 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n, float %start) { ; IF-EVL-NEXT: [[MULADD]] = tail call reassoc float @llvm.fmuladd.f32(float [[TMP21]], float [[TMP22]], float [[RDX]]) ; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP33:![0-9]+]] ; IF-EVL: for.end: ; IF-EVL-NEXT: [[MULADD_LCSSA:%.*]] = phi float [ [[MULADD]], [[FOR_BODY]] ], [ [[TMP20]], [[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret float [[MULADD_LCSSA]] @@ -1862,7 +1992,7 @@ define i32 @anyof_icmp(ptr %a, i64 %n, i32 %start, i32 %inv) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]] ; IF-EVL-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] 
-; IF-EVL-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP34:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP18:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1( [[TMP15]]) ; IF-EVL-NEXT: [[TMP19:%.*]] = freeze i1 [[TMP18]] @@ -1881,7 +2011,7 @@ define i32 @anyof_icmp(ptr %a, i64 %n, i32 %start, i32 %inv) { ; IF-EVL-NEXT: [[ANYOF]] = select i1 [[CMP_I]], i32 [[INV]], i32 [[RDX]] ; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP35:![0-9]+]] ; IF-EVL: for.end: ; IF-EVL-NEXT: [[ANYOF_LCSSA:%.*]] = phi i32 [ [[ANYOF]], [[FOR_BODY]] ], [ [[RDX_SELECT]], [[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret i32 [[ANYOF_LCSSA]] @@ -1989,7 +2119,7 @@ define i32 @anyof_fcmp(ptr %a, i64 %n, i32 %start, i32 %inv) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]] ; IF-EVL-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; IF-EVL-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP36:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP18:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1( [[TMP15]]) ; IF-EVL-NEXT: [[TMP19:%.*]] = freeze i1 [[TMP18]] @@ -2008,7 +2138,7 @@ define i32 @anyof_fcmp(ptr %a, i64 %n, i32 %start, i32 %inv) { ; IF-EVL-NEXT: [[ANYOF]] = select i1 [[CMP_I]], i32 [[INV]], i32 [[RDX]] ; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP31:![0-9]+]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP37:![0-9]+]] ; IF-EVL: for.end: ; IF-EVL-NEXT: [[ANYOF_LCSSA:%.*]] = phi i32 [ [[ANYOF]], [[FOR_BODY]] ], [ [[RDX_SELECT]], [[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret i32 [[ANYOF_LCSSA]] diff --git a/llvm/test/Transforms/LoopVectorize/X86/CostModel/vpinstruction-cost.ll b/llvm/test/Transforms/LoopVectorize/X86/CostModel/vpinstruction-cost.ll new file mode 100644 index 0000000000000..bb85b88f181f7 --- /dev/null +++ b/llvm/test/Transforms/LoopVectorize/X86/CostModel/vpinstruction-cost.ll @@ -0,0 +1,74 @@ +; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py UTC_ARGS: --filter "Cost of" +; RUN: opt -S -passes=loop-vectorize -mcpu=skylake-avx512 -mtriple=x86_64-apple-macosx -debug -disable-output -S %s 2>&1 | FileCheck %s + +; REQUIRES: asserts + +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128" + +define void @wide_or_replaced_with_add_vpinstruction(ptr %src, ptr noalias %dst) { +; CHECK-LABEL: 'wide_or_replaced_with_add_vpinstruction' +; CHECK: Cost of 1 for VF 2: induction instruction %iv.next = add nuw nsw i64 %iv, 1 +; CHECK: Cost of 0 for VF 2: induction instruction %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ] +; CHECK: Cost of 1 for VF 2: exit condition instruction %exitcond = icmp eq i64 %iv.next, 32 +; CHECK: Cost of 0 for VF 2: 
EMIT vp<%3> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK: Cost of 0 for VF 2: ir<%iv> = WIDEN-INDUCTION ir<0>, ir<1>, vp<%0> +; CHECK: Cost of 0 for VF 2: vp<%4> = SCALAR-STEPS vp<%3>, ir<1> +; CHECK: Cost of 0 for VF 2: CLONE ir<%g.src> = getelementptr inbounds ir<%src>, vp<%4> +; CHECK: Cost of 0 for VF 2: vp<%5> = vector-pointer ir<%g.src> +; CHECK: Cost of 1 for VF 2: WIDEN ir<%l> = load vp<%5> +; CHECK: Cost of 1 for VF 2: WIDEN ir<%iv.4> = add ir<%iv>, ir<4> +; CHECK: Cost of 1 for VF 2: WIDEN ir<%c> = icmp ule ir<%l>, ir<128> +; CHECK: Cost of 1 for VF 2: EMIT ir<%or> = add ir<%iv.4>, ir<1> +; CHECK: Cost of 0 for VF 2: CLONE ir<%g.dst> = getelementptr ir<%dst>, ir<%or> +; CHECK: Cost of 0 for VF 2: vp<%6> = vector-pointer ir<%g.dst> +; CHECK: Cost of 1 for VF 2: WIDEN store vp<%6>, ir<%iv.4>, ir<%c> +; CHECK: Cost of 0 for VF 2: EMIT vp<%index.next> = add nuw vp<%3>, vp<%1> +; CHECK: Cost of 0 for VF 2: EMIT branch-on-count vp<%index.next>, vp<%2> +; CHECK: Cost of 0 for VF 2: vector loop backedge +; CHECK: Cost of 1 for VF 4: induction instruction %iv.next = add nuw nsw i64 %iv, 1 +; CHECK: Cost of 0 for VF 4: induction instruction %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ] +; CHECK: Cost of 1 for VF 4: exit condition instruction %exitcond = icmp eq i64 %iv.next, 32 +; CHECK: Cost of 0 for VF 4: EMIT vp<%3> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK: Cost of 0 for VF 4: ir<%iv> = WIDEN-INDUCTION ir<0>, ir<1>, vp<%0> +; CHECK: Cost of 0 for VF 4: vp<%4> = SCALAR-STEPS vp<%3>, ir<1> +; CHECK: Cost of 0 for VF 4: CLONE ir<%g.src> = getelementptr inbounds ir<%src>, vp<%4> +; CHECK: Cost of 0 for VF 4: vp<%5> = vector-pointer ir<%g.src> +; CHECK: Cost of 1 for VF 4: WIDEN ir<%l> = load vp<%5> +; CHECK: Cost of 1 for VF 4: WIDEN ir<%iv.4> = add ir<%iv>, ir<4> +; CHECK: Cost of 1 for VF 4: WIDEN ir<%c> = icmp ule ir<%l>, ir<128> +; CHECK: Cost of 1 for VF 4: EMIT ir<%or> = add ir<%iv.4>, ir<1> +; CHECK: Cost of 0 for VF 4: CLONE ir<%g.dst> = getelementptr ir<%dst>, ir<%or> +; CHECK: Cost of 0 for VF 4: vp<%6> = vector-pointer ir<%g.dst> +; CHECK: Cost of 1 for VF 4: WIDEN store vp<%6>, ir<%iv.4>, ir<%c> +; CHECK: Cost of 0 for VF 4: EMIT vp<%index.next> = add nuw vp<%3>, vp<%1> +; CHECK: Cost of 0 for VF 4: EMIT branch-on-count vp<%index.next>, vp<%2> +; CHECK: Cost of 0 for VF 4: vector loop backedge +; CHECK: Cost of 1 for VF 4: induction instruction %iv.next = add nuw nsw i64 %iv, 1 +; CHECK: Cost of 0 for VF 4: induction instruction %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ] +; CHECK: Cost of 1 for VF 4: exit condition instruction %exitcond = icmp eq i64 %iv.next, 32 +; +entry: + br label %loop.header + +loop.header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ] + %g.src = getelementptr inbounds i64, ptr %src, i64 %iv + %l = load i64, ptr %g.src + %iv.4 = add nuw nsw i64 %iv, 4 + %c = icmp ule i64 %l, 128 + br i1 %c, label %loop.then, label %loop.latch + +loop.then: + %or = or disjoint i64 %iv.4, 1 + %g.dst = getelementptr inbounds i64, ptr %dst, i64 %or + store i64 %iv.4, ptr %g.dst, align 4 + br label %loop.latch + +loop.latch: + %iv.next = add nuw nsw i64 %iv, 1 + %exitcond = icmp eq i64 %iv.next, 32 + br i1 %exitcond, label %exit, label %loop.header + +exit: + ret void +} diff --git a/llvm/test/Transforms/LoopVectorize/outer-loop-wide-phis.ll b/llvm/test/Transforms/LoopVectorize/outer-loop-wide-phis.ll new file mode 100644 index 0000000000000..3f81c0f5c822a --- /dev/null +++ 
b/llvm/test/Transforms/LoopVectorize/outer-loop-wide-phis.ll @@ -0,0 +1,224 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 5 +; RUN: opt -passes=loop-vectorize -enable-vplan-native-path -S %s | FileCheck %s + +define void @wide_phi_2_predecessors(ptr noalias %A, ptr noalias %B, i32 %c, i1 %cond) { +; CHECK-LABEL: define void @wide_phi_2_predecessors( +; CHECK-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i32 [[C:%.*]], i1 [[COND:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[C]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_LATCH:.*]] ] +; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ , %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_LATCH]] ] +; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i64, ptr [[A]], <4 x i64> [[VEC_IND]] +; CHECK-NEXT: call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> [[BROADCAST_SPLAT]], <4 x ptr> [[TMP0]], i32 4, <4 x i1> splat (i1 true)) +; CHECK-NEXT: br label %[[INNER_HEADER1:.*]] +; CHECK: [[INNER_HEADER1]]: +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i64> [ zeroinitializer, %[[VECTOR_BODY]] ], [ [[TMP4:%.*]], %[[INNER_LATCH4:.*]] ] +; CHECK-NEXT: [[VEC_PHI2:%.*]] = phi <4 x i64> [ zeroinitializer, %[[VECTOR_BODY]] ], [ [[TMP3:%.*]], %[[INNER_LATCH4]] ] +; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[B]], <4 x i64> [[VEC_PHI]] +; CHECK-NEXT: br i1 [[COND]], label %[[THEN3:.*]], label %[[INNER_LATCH4]] +; CHECK: [[THEN3]]: +; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <4 x i64> @llvm.masked.gather.v4i64.v4p0(<4 x ptr> [[TMP1]], i32 8, <4 x i1> splat (i1 true), <4 x i64> poison) +; CHECK-NEXT: br label %[[INNER_LATCH4]] +; CHECK: [[INNER_LATCH4]]: +; CHECK-NEXT: [[VEC_PHI5:%.*]] = phi <4 x i64> [ [[WIDE_MASKED_GATHER]], %[[THEN3]] ], [ zeroinitializer, %[[INNER_HEADER1]] ] +; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i64> [[VEC_PHI5]], [[VEC_IND]] +; CHECK-NEXT: [[TMP3]] = add nsw <4 x i64> [[TMP2]], [[VEC_PHI2]] +; CHECK-NEXT: [[TMP4]] = add nuw nsw <4 x i64> [[VEC_PHI]], splat (i64 1) +; CHECK-NEXT: [[TMP5:%.*]] = icmp eq <4 x i64> [[TMP4]], splat (i64 1000) +; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x i1> [[TMP5]], i32 0 +; CHECK-NEXT: br i1 [[TMP6]], label %[[VECTOR_LATCH]], label %[[INNER_HEADER1]] +; CHECK: [[VECTOR_LATCH]]: +; CHECK-NEXT: [[VEC_PHI6:%.*]] = phi <4 x i64> [ [[TMP3]], %[[INNER_LATCH4]] ] +; CHECK-NEXT: call void @llvm.masked.scatter.v4i64.v4p0(<4 x i64> [[VEC_PHI6]], <4 x ptr> [[TMP0]], i32 8, <4 x i1> splat (i1 true)) +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4) +; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 +; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: br i1 true, label %[[EXIT:.*]], label %[[SCALAR_PH]] +; CHECK: [[SCALAR_PH]]: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 1000, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: br label %[[OUTER_HEADER:.*]] +; CHECK: [[OUTER_HEADER]]: 
+; CHECK-NEXT: [[OUTER_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[OUTER_IV_NEXT:%.*]], %[[OUTER_LATCH:.*]] ] +; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[OUTER_IV]] +; CHECK-NEXT: store i32 [[C]], ptr [[GEP_A]], align 4 +; CHECK-NEXT: br label %[[INNER_HEADER:.*]] +; CHECK: [[INNER_HEADER]]: +; CHECK-NEXT: [[INNER_IV:%.*]] = phi i64 [ 0, %[[OUTER_HEADER]] ], [ [[INNER_IV_NEXT:%.*]], %[[INNER_LATCH:.*]] ] +; CHECK-NEXT: [[RED:%.*]] = phi i64 [ 0, %[[OUTER_HEADER]] ], [ [[RED_NEXT:%.*]], %[[INNER_LATCH]] ] +; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[INNER_IV]] +; CHECK-NEXT: br i1 [[COND]], label %[[THEN:.*]], label %[[INNER_LATCH]] +; CHECK: [[THEN]]: +; CHECK-NEXT: [[L_B:%.*]] = load i64, ptr [[GEP_B]], align 8 +; CHECK-NEXT: br label %[[INNER_LATCH]] +; CHECK: [[INNER_LATCH]]: +; CHECK-NEXT: [[P:%.*]] = phi i64 [ [[L_B]], %[[THEN]] ], [ 0, %[[INNER_HEADER]] ] +; CHECK-NEXT: [[ADD_1:%.*]] = add nsw i64 [[P]], [[OUTER_IV]] +; CHECK-NEXT: [[RED_NEXT]] = add nsw i64 [[ADD_1]], [[RED]] +; CHECK-NEXT: [[INNER_IV_NEXT]] = add nuw nsw i64 [[INNER_IV]], 1 +; CHECK-NEXT: [[INNER_EC:%.*]] = icmp eq i64 [[INNER_IV_NEXT]], 1000 +; CHECK-NEXT: br i1 [[INNER_EC]], label %[[OUTER_LATCH]], label %[[INNER_HEADER]] +; CHECK: [[OUTER_LATCH]]: +; CHECK-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi i64 [ [[RED_NEXT]], %[[INNER_LATCH]] ] +; CHECK-NEXT: store i64 [[RED_NEXT_LCSSA]], ptr [[GEP_A]], align 8 +; CHECK-NEXT: [[OUTER_IV_NEXT]] = add nuw nsw i64 [[OUTER_IV]], 1 +; CHECK-NEXT: [[OUTER_EC:%.*]] = icmp eq i64 [[OUTER_IV_NEXT]], 1000 +; CHECK-NEXT: br i1 [[OUTER_EC]], label %[[EXIT]], label %[[OUTER_HEADER]], !llvm.loop [[LOOP3:![0-9]+]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret void +; +entry: + br label %outer.header + +outer.header: ; preds = %outer.latch, %outer.header.lr.ph + %outer.iv = phi i64 [ 0, %entry ], [ %outer.iv.next, %outer.latch ] + %gep.A = getelementptr inbounds i64, ptr %A, i64 %outer.iv + store i32 %c, ptr %gep.A, align 4 + br label %inner.header + +inner.header: + %inner.iv = phi i64 [ 0, %outer.header ], [ %inner.iv.next, %inner.latch ] + %red = phi i64 [ 0, %outer.header ], [ %red.next, %inner.latch ] + %gep.B = getelementptr inbounds i64, ptr %B, i64 %inner.iv + br i1 %cond, label %then, label %inner.latch + +then: + %l.b = load i64, ptr %gep.B, align 8 + br label %inner.latch + +inner.latch: + %p = phi i64 [ %l.b, %then ], [ 0, %inner.header ] + %add.1 = add nsw i64 %p, %outer.iv + %red.next = add nsw i64 %add.1, %red + %inner.iv.next = add nuw nsw i64 %inner.iv, 1 + %inner.ec = icmp eq i64 %inner.iv.next, 1000 + br i1 %inner.ec, label %outer.latch, label %inner.header + +outer.latch: + store i64 %red.next, ptr %gep.A, align 8 + %outer.iv.next = add nuw nsw i64 %outer.iv, 1 + %outer.ec = icmp eq i64 %outer.iv.next, 1000 + br i1 %outer.ec, label %exit, label %outer.header, !llvm.loop !1 + +exit: + ret void +} + +define void @wide_phi_2_predecessors_phi_ops_swapped(ptr noalias %A, ptr noalias %B, i32 %c, i1 %cond) { +; CHECK-LABEL: define void @wide_phi_2_predecessors_phi_ops_swapped( +; CHECK-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i32 [[C:%.*]], i1 [[COND:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[C]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x 
i32> zeroinitializer +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_LATCH:.*]] ] +; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_LATCH]] ] +; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i64, ptr [[A]], <4 x i64> [[VEC_IND]] +; CHECK-NEXT: call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> [[BROADCAST_SPLAT]], <4 x ptr> [[TMP0]], i32 4, <4 x i1> splat (i1 true)) +; CHECK-NEXT: br label %[[INNER_HEADER1:.*]] +; CHECK: [[INNER_HEADER1]]: +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i64> [ zeroinitializer, %[[VECTOR_BODY]] ], [ [[TMP4:%.*]], %[[INNER_LATCH4:.*]] ] +; CHECK-NEXT: [[VEC_PHI2:%.*]] = phi <4 x i64> [ zeroinitializer, %[[VECTOR_BODY]] ], [ [[TMP3:%.*]], %[[INNER_LATCH4]] ] +; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[B]], <4 x i64> [[VEC_PHI]] +; CHECK-NEXT: br i1 [[COND]], label %[[THEN3:.*]], label %[[INNER_LATCH4]] +; CHECK: [[THEN3]]: +; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <4 x i64> @llvm.masked.gather.v4i64.v4p0(<4 x ptr> [[TMP1]], i32 8, <4 x i1> splat (i1 true), <4 x i64> poison) +; CHECK-NEXT: br label %[[INNER_LATCH4]] +; CHECK: [[INNER_LATCH4]]: +; CHECK-NEXT: [[VEC_PHI5:%.*]] = phi <4 x i64> [ zeroinitializer, %[[INNER_HEADER1]] ], [ [[WIDE_MASKED_GATHER]], %[[THEN3]] ] +; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i64> [[VEC_PHI5]], [[VEC_IND]] +; CHECK-NEXT: [[TMP3]] = add nsw <4 x i64> [[TMP2]], [[VEC_PHI2]] +; CHECK-NEXT: [[TMP4]] = add nuw nsw <4 x i64> [[VEC_PHI]], splat (i64 1) +; CHECK-NEXT: [[TMP5:%.*]] = icmp eq <4 x i64> [[TMP4]], splat (i64 1000) +; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x i1> [[TMP5]], i32 0 +; CHECK-NEXT: br i1 [[TMP6]], label %[[VECTOR_LATCH]], label %[[INNER_HEADER1]] +; CHECK: [[VECTOR_LATCH]]: +; CHECK-NEXT: [[VEC_PHI6:%.*]] = phi <4 x i64> [ [[TMP3]], %[[INNER_LATCH4]] ] +; CHECK-NEXT: call void @llvm.masked.scatter.v4i64.v4p0(<4 x i64> [[VEC_PHI6]], <4 x ptr> [[TMP0]], i32 8, <4 x i1> splat (i1 true)) +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4) +; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 +; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: br i1 true, label %[[EXIT:.*]], label %[[SCALAR_PH]] +; CHECK: [[SCALAR_PH]]: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 1000, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: br label %[[OUTER_HEADER:.*]] +; CHECK: [[OUTER_HEADER]]: +; CHECK-NEXT: [[OUTER_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[OUTER_IV_NEXT:%.*]], %[[OUTER_LATCH:.*]] ] +; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[OUTER_IV]] +; CHECK-NEXT: store i32 [[C]], ptr [[GEP_A]], align 4 +; CHECK-NEXT: br label %[[INNER_HEADER:.*]] +; CHECK: [[INNER_HEADER]]: +; CHECK-NEXT: [[INNER_IV:%.*]] = phi i64 [ 0, %[[OUTER_HEADER]] ], [ [[INNER_IV_NEXT:%.*]], %[[INNER_LATCH:.*]] ] +; CHECK-NEXT: [[RED:%.*]] = phi i64 [ 0, %[[OUTER_HEADER]] ], [ [[RED_NEXT:%.*]], %[[INNER_LATCH]] ] +; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[INNER_IV]] +; CHECK-NEXT: br i1 [[COND]], label %[[THEN:.*]], label %[[INNER_LATCH]] +; CHECK: [[THEN]]: +; CHECK-NEXT: [[L_B:%.*]] = load i64, ptr [[GEP_B]], align 8 +; CHECK-NEXT: br label %[[INNER_LATCH]] +; CHECK:
[[INNER_LATCH]]: +; CHECK-NEXT: [[P:%.*]] = phi i64 [ 0, %[[INNER_HEADER]] ], [ [[L_B]], %[[THEN]] ] +; CHECK-NEXT: [[ADD_1:%.*]] = add nsw i64 [[P]], [[OUTER_IV]] +; CHECK-NEXT: [[RED_NEXT]] = add nsw i64 [[ADD_1]], [[RED]] +; CHECK-NEXT: [[INNER_IV_NEXT]] = add nuw nsw i64 [[INNER_IV]], 1 +; CHECK-NEXT: [[INNER_EC:%.*]] = icmp eq i64 [[INNER_IV_NEXT]], 1000 +; CHECK-NEXT: br i1 [[INNER_EC]], label %[[OUTER_LATCH]], label %[[INNER_HEADER]] +; CHECK: [[OUTER_LATCH]]: +; CHECK-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi i64 [ [[RED_NEXT]], %[[INNER_LATCH]] ] +; CHECK-NEXT: store i64 [[RED_NEXT_LCSSA]], ptr [[GEP_A]], align 8 +; CHECK-NEXT: [[OUTER_IV_NEXT]] = add nuw nsw i64 [[OUTER_IV]], 1 +; CHECK-NEXT: [[OUTER_EC:%.*]] = icmp eq i64 [[OUTER_IV_NEXT]], 1000 +; CHECK-NEXT: br i1 [[OUTER_EC]], label %[[EXIT]], label %[[OUTER_HEADER]], !llvm.loop [[LOOP5:![0-9]+]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret void +; +entry: + br label %outer.header + +outer.header: ; preds = %outer.latch, %outer.header.lr.ph + %outer.iv = phi i64 [ 0, %entry ], [ %outer.iv.next, %outer.latch ] + %gep.A = getelementptr inbounds i64, ptr %A, i64 %outer.iv + store i32 %c, ptr %gep.A, align 4 + br label %inner.header + +inner.header: + %inner.iv = phi i64 [ 0, %outer.header ], [ %inner.iv.next, %inner.latch ] + %red = phi i64 [ 0, %outer.header ], [ %red.next, %inner.latch ] + %gep.B = getelementptr inbounds i64, ptr %B, i64 %inner.iv + br i1 %cond, label %then, label %inner.latch + +then: + %l.b = load i64, ptr %gep.B, align 8 + br label %inner.latch + +inner.latch: + %p = phi i64 [ 0, %inner.header ], [ %l.b, %then ] + %add.1 = add nsw i64 %p, %outer.iv + %red.next = add nsw i64 %add.1, %red + %inner.iv.next = add nuw nsw i64 %inner.iv, 1 + %inner.ec = icmp eq i64 %inner.iv.next, 1000 + br i1 %inner.ec, label %outer.latch, label %inner.header + +outer.latch: + store i64 %red.next, ptr %gep.A, align 8 + %outer.iv.next = add nuw nsw i64 %outer.iv, 1 + %outer.ec = icmp eq i64 %outer.iv.next, 1000 + br i1 %outer.ec, label %exit, label %outer.header, !llvm.loop !1 + +exit: + ret void +} + +!1 = distinct !{!1, !2, !3} +!2 = !{!"llvm.loop.vectorize.width", i32 4} +!3 = !{!"llvm.loop.vectorize.enable", i1 true} diff --git a/llvm/test/Transforms/LoopVectorize/strict-fadd-interleave-only.ll b/llvm/test/Transforms/LoopVectorize/strict-fadd-interleave-only.ll index 9e7c667f9c8ad..760bdbf227fff 100644 --- a/llvm/test/Transforms/LoopVectorize/strict-fadd-interleave-only.ll +++ b/llvm/test/Transforms/LoopVectorize/strict-fadd-interleave-only.ll @@ -217,3 +217,94 @@ exit: %.lcssa = phi float [ %rdx.next, %loop ] ret float %.lcssa } + +define float @fadd_reduction_with_live_in(float %inc) { +; CHECK-LABEL: define float @fadd_reduction_with_live_in( +; CHECK-SAME: float [[INC:%.*]]) { +; CHECK-NEXT: entry: +; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; CHECK: vector.ph: +; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] +; CHECK: vector.body: +; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi float [ 0.000000e+00, [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_IV:%.*]] = add i32 [[INDEX]], 0 +; CHECK-NEXT: [[VEC_IV1:%.*]] = add i32 [[INDEX]], 1 +; CHECK-NEXT: [[TMP0:%.*]] = icmp ule i32 [[VEC_IV]], 1000 +; CHECK-NEXT: [[TMP1:%.*]] = icmp ule i32 [[VEC_IV1]], 1000 +; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP0]], float [[INC]], float -0.000000e+00 +; CHECK-NEXT: [[TMP3:%.*]] = fadd float [[VEC_PHI]], 
[[TMP2]] +; CHECK-NEXT: [[TMP4:%.*]] = select i1 [[TMP1]], float [[INC]], float -0.000000e+00 +; CHECK-NEXT: [[TMP5]] = fadd float [[TMP3]], [[TMP4]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 2 +; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1002 +; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK: middle.block: +; CHECK-NEXT: br i1 true, label [[EXIT:%.*]], label [[SCALAR_PH]] +; CHECK: scalar.ph: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 1002, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP5]], [[MIDDLE_BLOCK]] ], [ 0.000000e+00, [[ENTRY]] ] +; CHECK-NEXT: br label [[LOOP:%.*]] +; CHECK: loop: +; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[SUM:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[SUM_NEXT:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[SUM_NEXT]] = fadd float [[SUM]], [[INC]] +; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1 +; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV]], 1000 +; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP7:![0-9]+]] +; CHECK: exit: +; CHECK-NEXT: [[LCSSA:%.*]] = phi float [ [[SUM_NEXT]], [[LOOP]] ], [ [[TMP5]], [[MIDDLE_BLOCK]] ] +; CHECK-NEXT: ret float [[LCSSA]] +; +; CHECK-ALM-LABEL: define float @fadd_reduction_with_live_in( +; CHECK-ALM-SAME: float [[INC:%.*]]) { +; CHECK-ALM-NEXT: entry: +; CHECK-ALM-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; CHECK-ALM: vector.ph: +; CHECK-ALM-NEXT: br label [[VECTOR_BODY:%.*]] +; CHECK-ALM: vector.body: +; CHECK-ALM-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-ALM-NEXT: [[VEC_PHI:%.*]] = phi float [ 0.000000e+00, [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ] +; CHECK-ALM-NEXT: [[TMP0:%.*]] = add i32 [[INDEX]], 0 +; CHECK-ALM-NEXT: [[TMP1:%.*]] = add i32 [[INDEX]], 1 +; CHECK-ALM-NEXT: [[ACTIVE_LANE_MASK:%.*]] = icmp ult i32 [[TMP0]], 1001 +; CHECK-ALM-NEXT: [[ACTIVE_LANE_MASK1:%.*]] = icmp ult i32 [[TMP1]], 1001 +; CHECK-ALM-NEXT: [[TMP2:%.*]] = select i1 [[ACTIVE_LANE_MASK]], float [[INC]], float -0.000000e+00 +; CHECK-ALM-NEXT: [[TMP3:%.*]] = fadd float [[VEC_PHI]], [[TMP2]] +; CHECK-ALM-NEXT: [[TMP4:%.*]] = select i1 [[ACTIVE_LANE_MASK1]], float [[INC]], float -0.000000e+00 +; CHECK-ALM-NEXT: [[TMP5]] = fadd float [[TMP3]], [[TMP4]] +; CHECK-ALM-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 2 +; CHECK-ALM-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1002 +; CHECK-ALM-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-ALM: middle.block: +; CHECK-ALM-NEXT: br i1 true, label [[EXIT:%.*]], label [[SCALAR_PH]] +; CHECK-ALM: scalar.ph: +; CHECK-ALM-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 1002, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; CHECK-ALM-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP5]], [[MIDDLE_BLOCK]] ], [ 0.000000e+00, [[ENTRY]] ] +; CHECK-ALM-NEXT: br label [[LOOP:%.*]] +; CHECK-ALM: loop: +; CHECK-ALM-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] +; CHECK-ALM-NEXT: [[SUM:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[SUM_NEXT:%.*]], [[LOOP]] ] +; CHECK-ALM-NEXT: [[SUM_NEXT]] = fadd float [[SUM]], [[INC]] +; CHECK-ALM-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1 +; CHECK-ALM-NEXT: [[EC:%.*]] = icmp eq i32 [[IV]], 1000 +; CHECK-ALM-NEXT: br i1 [[EC]], label 
[[EXIT]], label [[LOOP]], !llvm.loop [[LOOP7:![0-9]+]] +; CHECK-ALM: exit: +; CHECK-ALM-NEXT: [[LCSSA:%.*]] = phi float [ [[SUM_NEXT]], [[LOOP]] ], [ [[TMP5]], [[MIDDLE_BLOCK]] ] +; CHECK-ALM-NEXT: ret float [[LCSSA]] +; +entry: + br label %loop + +loop: + %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ] + %sum = phi float [ 0.000000e+00, %entry ], [ %sum.next, %loop ] + %sum.next = fadd float %sum, %inc + %iv.next = add i32 %iv, 1 + %ec = icmp eq i32 %iv, 1000 + br i1 %ec, label %exit, label %loop + +exit: + %lcssa = phi float [ %sum.next, %loop ] + ret float %lcssa +} diff --git a/llvm/test/Transforms/OpenMP/spmdization.ll b/llvm/test/Transforms/OpenMP/spmdization.ll index a0596ad26046c..1a629ecfee06d 100644 --- a/llvm/test/Transforms/OpenMP/spmdization.ll +++ b/llvm/test/Transforms/OpenMP/spmdization.ll @@ -150,19 +150,14 @@ ;. ; AMDGPU-DISABLED2: @[[GLOB0:[0-9]+]] = private unnamed_addr constant [23 x i8] c" ; AMDGPU-DISABLED2: @[[GLOB1:[0-9]+]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 0, i32 0, ptr @[[GLOB0]] }, align 8 -; AMDGPU-DISABLED2: @__omp_offloading_fd02_2044372e_sequential_loop_l5_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 0, i8 1, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null } -; AMDGPU-DISABLED2: @__omp_offloading_fd02_2044372e_sequential_loop_to_stack_var_l20_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 0, i8 1, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null } -; AMDGPU-DISABLED2: @__omp_offloading_fd02_2044372e_sequential_loop_to_shared_var_l35_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 0, i8 1, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null } -; AMDGPU-DISABLED2: @__omp_offloading_fd02_2044372e_sequential_loop_to_shared_var_guarded_l50_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 0, i8 1, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null } -; AMDGPU-DISABLED2: @__omp_offloading_fd02_2044372e_do_not_spmdize_target_l65_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 0, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null } -; AMDGPU-DISABLED2: @__omp_offloading_fd02_2044372e_do_not_spmdize_task_l74_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 0, i8 1, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null } +; AMDGPU-DISABLED2: @__omp_offloading_fd02_2044372e_sequential_loop_l5_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 1, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null } +; AMDGPU-DISABLED2: @__omp_offloading_fd02_2044372e_sequential_loop_to_stack_var_l20_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 1, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null } +; AMDGPU-DISABLED2: @__omp_offloading_fd02_2044372e_sequential_loop_to_shared_var_l35_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { 
%struct.ConfigurationEnvironmentTy { i8 1, i8 1, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null } +; AMDGPU-DISABLED2: @__omp_offloading_fd02_2044372e_sequential_loop_to_shared_var_guarded_l50_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 1, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null } +; AMDGPU-DISABLED2: @__omp_offloading_fd02_2044372e_do_not_spmdize_target_l65_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null } +; AMDGPU-DISABLED2: @__omp_offloading_fd02_2044372e_do_not_spmdize_task_l74_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 1, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null } ; AMDGPU-DISABLED2: @x_shared = internal addrspace(3) global [4 x i8] poison, align 4 ; AMDGPU-DISABLED2: @x_shared.1 = internal addrspace(3) global [4 x i8] poison, align 4 -; AMDGPU-DISABLED2: @__omp_outlined__1_wrapper.ID = private constant i8 undef -; AMDGPU-DISABLED2: @__omp_outlined__3_wrapper.ID = private constant i8 undef -; AMDGPU-DISABLED2: @__omp_outlined__5_wrapper.ID = private constant i8 undef -; AMDGPU-DISABLED2: @__omp_outlined__7_wrapper.ID = private constant i8 undef -; AMDGPU-DISABLED2: @__omp_outlined__9_wrapper.ID = private constant i8 undef ;. ; NVPTX-DISABLED1: @[[GLOB0:[0-9]+]] = private unnamed_addr constant [23 x i8] c" ; NVPTX-DISABLED1: @[[GLOB1:[0-9]+]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 0, i32 0, ptr @[[GLOB0]] }, align 8 @@ -182,19 +177,14 @@ ;. 
; NVPTX-DISABLED2: @[[GLOB0:[0-9]+]] = private unnamed_addr constant [23 x i8] c" ; NVPTX-DISABLED2: @[[GLOB1:[0-9]+]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 0, i32 0, ptr @[[GLOB0]] }, align 8 -; NVPTX-DISABLED2: @__omp_offloading_fd02_2044372e_sequential_loop_l5_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 0, i8 1, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null } -; NVPTX-DISABLED2: @__omp_offloading_fd02_2044372e_sequential_loop_to_stack_var_l20_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 0, i8 1, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null } -; NVPTX-DISABLED2: @__omp_offloading_fd02_2044372e_sequential_loop_to_shared_var_l35_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 0, i8 1, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null } -; NVPTX-DISABLED2: @__omp_offloading_fd02_2044372e_sequential_loop_to_shared_var_guarded_l50_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 0, i8 1, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null } -; NVPTX-DISABLED2: @__omp_offloading_fd02_2044372e_do_not_spmdize_target_l65_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 0, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null } -; NVPTX-DISABLED2: @__omp_offloading_fd02_2044372e_do_not_spmdize_task_l74_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 0, i8 1, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null } +; NVPTX-DISABLED2: @__omp_offloading_fd02_2044372e_sequential_loop_l5_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 1, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null } +; NVPTX-DISABLED2: @__omp_offloading_fd02_2044372e_sequential_loop_to_stack_var_l20_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 1, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null } +; NVPTX-DISABLED2: @__omp_offloading_fd02_2044372e_sequential_loop_to_shared_var_l35_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 1, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null } +; NVPTX-DISABLED2: @__omp_offloading_fd02_2044372e_sequential_loop_to_shared_var_guarded_l50_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 1, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null } +; NVPTX-DISABLED2: @__omp_offloading_fd02_2044372e_do_not_spmdize_target_l65_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null } +; NVPTX-DISABLED2: @__omp_offloading_fd02_2044372e_do_not_spmdize_task_l74_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { 
%struct.ConfigurationEnvironmentTy { i8 1, i8 1, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null } ; NVPTX-DISABLED2: @x_shared = internal addrspace(3) global [4 x i8] poison, align 4 ; NVPTX-DISABLED2: @x_shared1 = internal addrspace(3) global [4 x i8] poison, align 4 -; NVPTX-DISABLED2: @__omp_outlined__1_wrapper.ID = private constant i8 undef -; NVPTX-DISABLED2: @__omp_outlined__3_wrapper.ID = private constant i8 undef -; NVPTX-DISABLED2: @__omp_outlined__5_wrapper.ID = private constant i8 undef -; NVPTX-DISABLED2: @__omp_outlined__7_wrapper.ID = private constant i8 undef -; NVPTX-DISABLED2: @__omp_outlined__9_wrapper.ID = private constant i8 undef ;. define weak ptx_kernel void @__omp_offloading_fd02_2044372e_sequential_loop_l5() #0 { ; AMDGPU-LABEL: define {{[^@]+}}@__omp_offloading_fd02_2044372e_sequential_loop_l5 @@ -226,6 +216,7 @@ define weak ptx_kernel void @__omp_offloading_fd02_2044372e_sequential_loop_l5() ; NVPTX-DISABLED2-SAME: () #[[ATTR0:[0-9]+]] { ; NVPTX-DISABLED2-NEXT: call void @__omp_offloading_fd02_2044372e_sequential_loop_l5__debug() ; NVPTX-DISABLED2-NEXT: ret void +; call void @__omp_offloading_fd02_2044372e_sequential_loop_l5__debug() ret void } @@ -319,43 +310,9 @@ define internal void @__omp_offloading_fd02_2044372e_sequential_loop_l5__debug() ; AMDGPU-DISABLED2-LABEL: define {{[^@]+}}@__omp_offloading_fd02_2044372e_sequential_loop_l5__debug ; AMDGPU-DISABLED2-SAME: () #[[ATTR1:[0-9]+]] { ; AMDGPU-DISABLED2-NEXT: entry: -; AMDGPU-DISABLED2-NEXT: [[WORKER_WORK_FN_ADDR:%.*]] = alloca ptr, align 8, addrspace(5) ; AMDGPU-DISABLED2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4 ; AMDGPU-DISABLED2-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4 ; AMDGPU-DISABLED2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_fd02_2044372e_sequential_loop_l5_kernel_environment, ptr null) -; AMDGPU-DISABLED2-NEXT: [[THREAD_IS_WORKER:%.*]] = icmp ne i32 [[TMP0]], -1 -; AMDGPU-DISABLED2-NEXT: br i1 [[THREAD_IS_WORKER]], label [[IS_WORKER_CHECK:%.*]], label [[THREAD_USER_CODE_CHECK:%.*]] -; AMDGPU-DISABLED2: is_worker_check: -; AMDGPU-DISABLED2-NEXT: [[BLOCK_HW_SIZE:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block() -; AMDGPU-DISABLED2-NEXT: [[WARP_SIZE:%.*]] = call i32 @__kmpc_get_warp_size() -; AMDGPU-DISABLED2-NEXT: [[BLOCK_SIZE:%.*]] = sub i32 [[BLOCK_HW_SIZE]], [[WARP_SIZE]] -; AMDGPU-DISABLED2-NEXT: [[THREAD_IS_MAIN_OR_WORKER:%.*]] = icmp slt i32 [[TMP0]], [[BLOCK_SIZE]] -; AMDGPU-DISABLED2-NEXT: br i1 [[THREAD_IS_MAIN_OR_WORKER]], label [[WORKER_STATE_MACHINE_BEGIN:%.*]], label [[WORKER_STATE_MACHINE_FINISHED:%.*]] -; AMDGPU-DISABLED2: worker_state_machine.begin: -; AMDGPU-DISABLED2-NEXT: call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]]) -; AMDGPU-DISABLED2-NEXT: [[WORKER_WORK_FN_ADDR_GENERIC:%.*]] = addrspacecast ptr addrspace(5) [[WORKER_WORK_FN_ADDR]] to ptr -; AMDGPU-DISABLED2-NEXT: [[WORKER_IS_ACTIVE:%.*]] = call i1 @__kmpc_kernel_parallel(ptr [[WORKER_WORK_FN_ADDR_GENERIC]]) -; AMDGPU-DISABLED2-NEXT: [[WORKER_WORK_FN:%.*]] = load ptr, ptr [[WORKER_WORK_FN_ADDR_GENERIC]], align 8 -; AMDGPU-DISABLED2-NEXT: [[WORKER_IS_DONE:%.*]] = icmp eq ptr [[WORKER_WORK_FN]], null -; AMDGPU-DISABLED2-NEXT: br i1 [[WORKER_IS_DONE]], label [[WORKER_STATE_MACHINE_FINISHED]], label [[WORKER_STATE_MACHINE_IS_ACTIVE_CHECK:%.*]] -; AMDGPU-DISABLED2: worker_state_machine.finished: -; AMDGPU-DISABLED2-NEXT: ret void -; AMDGPU-DISABLED2: worker_state_machine.is_active.check: -; AMDGPU-DISABLED2-NEXT: br i1 
[[WORKER_IS_ACTIVE]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK:%.*]], label [[WORKER_STATE_MACHINE_DONE_BARRIER:%.*]] -; AMDGPU-DISABLED2: worker_state_machine.parallel_region.check: -; AMDGPU-DISABLED2-NEXT: br i1 true, label [[WORKER_STATE_MACHINE_PARALLEL_REGION_EXECUTE:%.*]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK1:%.*]] -; AMDGPU-DISABLED2: worker_state_machine.parallel_region.execute: -; AMDGPU-DISABLED2-NEXT: call void @__omp_outlined__1_wrapper(i16 0, i32 [[TMP0]]) -; AMDGPU-DISABLED2-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END:%.*]] -; AMDGPU-DISABLED2: worker_state_machine.parallel_region.check1: -; AMDGPU-DISABLED2-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END]] -; AMDGPU-DISABLED2: worker_state_machine.parallel_region.end: -; AMDGPU-DISABLED2-NEXT: call void @__kmpc_kernel_end_parallel() -; AMDGPU-DISABLED2-NEXT: br label [[WORKER_STATE_MACHINE_DONE_BARRIER]] -; AMDGPU-DISABLED2: worker_state_machine.done.barrier: -; AMDGPU-DISABLED2-NEXT: call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]]) -; AMDGPU-DISABLED2-NEXT: br label [[WORKER_STATE_MACHINE_BEGIN]] -; AMDGPU-DISABLED2: thread.user_code.check: ; AMDGPU-DISABLED2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1 ; AMDGPU-DISABLED2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[COMMON_RET:%.*]] ; AMDGPU-DISABLED2: common.ret: @@ -420,42 +377,9 @@ define internal void @__omp_offloading_fd02_2044372e_sequential_loop_l5__debug() ; NVPTX-DISABLED2-LABEL: define {{[^@]+}}@__omp_offloading_fd02_2044372e_sequential_loop_l5__debug ; NVPTX-DISABLED2-SAME: () #[[ATTR1:[0-9]+]] { ; NVPTX-DISABLED2-NEXT: entry: -; NVPTX-DISABLED2-NEXT: [[WORKER_WORK_FN_ADDR:%.*]] = alloca ptr, align 8 ; NVPTX-DISABLED2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4 ; NVPTX-DISABLED2-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4 ; NVPTX-DISABLED2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_fd02_2044372e_sequential_loop_l5_kernel_environment, ptr null) -; NVPTX-DISABLED2-NEXT: [[THREAD_IS_WORKER:%.*]] = icmp ne i32 [[TMP0]], -1 -; NVPTX-DISABLED2-NEXT: br i1 [[THREAD_IS_WORKER]], label [[IS_WORKER_CHECK:%.*]], label [[THREAD_USER_CODE_CHECK:%.*]] -; NVPTX-DISABLED2: is_worker_check: -; NVPTX-DISABLED2-NEXT: [[BLOCK_HW_SIZE:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block() -; NVPTX-DISABLED2-NEXT: [[WARP_SIZE:%.*]] = call i32 @__kmpc_get_warp_size() -; NVPTX-DISABLED2-NEXT: [[BLOCK_SIZE:%.*]] = sub i32 [[BLOCK_HW_SIZE]], [[WARP_SIZE]] -; NVPTX-DISABLED2-NEXT: [[THREAD_IS_MAIN_OR_WORKER:%.*]] = icmp slt i32 [[TMP0]], [[BLOCK_SIZE]] -; NVPTX-DISABLED2-NEXT: br i1 [[THREAD_IS_MAIN_OR_WORKER]], label [[WORKER_STATE_MACHINE_BEGIN:%.*]], label [[WORKER_STATE_MACHINE_FINISHED:%.*]] -; NVPTX-DISABLED2: worker_state_machine.begin: -; NVPTX-DISABLED2-NEXT: call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]]) -; NVPTX-DISABLED2-NEXT: [[WORKER_IS_ACTIVE:%.*]] = call i1 @__kmpc_kernel_parallel(ptr [[WORKER_WORK_FN_ADDR]]) -; NVPTX-DISABLED2-NEXT: [[WORKER_WORK_FN:%.*]] = load ptr, ptr [[WORKER_WORK_FN_ADDR]], align 8 -; NVPTX-DISABLED2-NEXT: [[WORKER_IS_DONE:%.*]] = icmp eq ptr [[WORKER_WORK_FN]], null -; NVPTX-DISABLED2-NEXT: br i1 [[WORKER_IS_DONE]], label [[WORKER_STATE_MACHINE_FINISHED]], label [[WORKER_STATE_MACHINE_IS_ACTIVE_CHECK:%.*]] -; NVPTX-DISABLED2: worker_state_machine.finished: -; NVPTX-DISABLED2-NEXT: ret void -; NVPTX-DISABLED2: worker_state_machine.is_active.check: -; NVPTX-DISABLED2-NEXT: 
br i1 [[WORKER_IS_ACTIVE]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK:%.*]], label [[WORKER_STATE_MACHINE_DONE_BARRIER:%.*]] -; NVPTX-DISABLED2: worker_state_machine.parallel_region.check: -; NVPTX-DISABLED2-NEXT: br i1 true, label [[WORKER_STATE_MACHINE_PARALLEL_REGION_EXECUTE:%.*]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK1:%.*]] -; NVPTX-DISABLED2: worker_state_machine.parallel_region.execute: -; NVPTX-DISABLED2-NEXT: call void @__omp_outlined__1_wrapper(i16 0, i32 [[TMP0]]) -; NVPTX-DISABLED2-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END:%.*]] -; NVPTX-DISABLED2: worker_state_machine.parallel_region.check1: -; NVPTX-DISABLED2-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END]] -; NVPTX-DISABLED2: worker_state_machine.parallel_region.end: -; NVPTX-DISABLED2-NEXT: call void @__kmpc_kernel_end_parallel() -; NVPTX-DISABLED2-NEXT: br label [[WORKER_STATE_MACHINE_DONE_BARRIER]] -; NVPTX-DISABLED2: worker_state_machine.done.barrier: -; NVPTX-DISABLED2-NEXT: call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]]) -; NVPTX-DISABLED2-NEXT: br label [[WORKER_STATE_MACHINE_BEGIN]] -; NVPTX-DISABLED2: thread.user_code.check: ; NVPTX-DISABLED2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1 ; NVPTX-DISABLED2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[COMMON_RET:%.*]] ; NVPTX-DISABLED2: common.ret: @@ -466,6 +390,7 @@ define internal void @__omp_offloading_fd02_2044372e_sequential_loop_l5__debug() ; NVPTX-DISABLED2-NEXT: call void @__omp_outlined__(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR4]] ; NVPTX-DISABLED2-NEXT: call void @__kmpc_target_deinit() ; NVPTX-DISABLED2-NEXT: br label [[COMMON_RET]] +; entry: %.zero.addr = alloca i32, align 4 %.threadid_temp. = alloca i32, align 4 @@ -555,7 +480,7 @@ define internal void @__omp_outlined__(ptr noalias %.global_tid., ptr noalias %. ; AMDGPU-DISABLED2-NEXT: ret void ; AMDGPU-DISABLED2: for.body: ; AMDGPU-DISABLED2-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTGLOBAL_TID_]], align 4, !tbaa [[TBAA12]] -; AMDGPU-DISABLED2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__1, ptr @__omp_outlined__1_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS]], i64 0) +; AMDGPU-DISABLED2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__1, ptr @__omp_outlined__1_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0) ; AMDGPU-DISABLED2-NEXT: [[INC]] = add nsw i32 [[I_0]], 1 ; AMDGPU-DISABLED2-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]] ; @@ -591,9 +516,10 @@ define internal void @__omp_outlined__(ptr noalias %.global_tid., ptr noalias %. 
; NVPTX-DISABLED2-NEXT: ret void ; NVPTX-DISABLED2: for.body: ; NVPTX-DISABLED2-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTGLOBAL_TID_]], align 4, !tbaa [[TBAA12]] -; NVPTX-DISABLED2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__1, ptr @__omp_outlined__1_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS]], i64 0) +; NVPTX-DISABLED2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__1, ptr @__omp_outlined__1_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0) ; NVPTX-DISABLED2-NEXT: [[INC]] = add nsw i32 [[I_0]], 1 ; NVPTX-DISABLED2-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]] +; entry: %captured_vars_addrs = alloca [0 x ptr], align 8 br label %for.cond @@ -651,6 +577,7 @@ define internal void @__omp_outlined__1(ptr noalias %.global_tid., ptr noalias % ; NVPTX-DISABLED2-NEXT: entry: ; NVPTX-DISABLED2-NEXT: call void @unknown() #[[ATTR8:[0-9]+]] ; NVPTX-DISABLED2-NEXT: ret void +; entry: call void @unknown() #11 ret void @@ -717,6 +644,7 @@ define internal void @__omp_outlined__1_wrapper(i16 zeroext %0, i32 %1) #3 { ; NVPTX-DISABLED2-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]]) ; NVPTX-DISABLED2-NEXT: call void @__omp_outlined__1(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR4]] ; NVPTX-DISABLED2-NEXT: ret void +; entry: %.addr1 = alloca i32, align 4 %.zero.addr = alloca i32, align 4 @@ -818,43 +746,9 @@ define weak ptx_kernel void @__omp_offloading_fd02_2044372e_sequential_loop_to_s ; AMDGPU-DISABLED2-LABEL: define {{[^@]+}}@__omp_offloading_fd02_2044372e_sequential_loop_to_stack_var_l20 ; AMDGPU-DISABLED2-SAME: () #[[ATTR0]] { ; AMDGPU-DISABLED2-NEXT: entry: -; AMDGPU-DISABLED2-NEXT: [[WORKER_WORK_FN_ADDR:%.*]] = alloca ptr, align 8, addrspace(5) ; AMDGPU-DISABLED2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4 ; AMDGPU-DISABLED2-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4 ; AMDGPU-DISABLED2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_fd02_2044372e_sequential_loop_to_stack_var_l20_kernel_environment, ptr null) -; AMDGPU-DISABLED2-NEXT: [[THREAD_IS_WORKER:%.*]] = icmp ne i32 [[TMP0]], -1 -; AMDGPU-DISABLED2-NEXT: br i1 [[THREAD_IS_WORKER]], label [[IS_WORKER_CHECK:%.*]], label [[THREAD_USER_CODE_CHECK:%.*]] -; AMDGPU-DISABLED2: is_worker_check: -; AMDGPU-DISABLED2-NEXT: [[BLOCK_HW_SIZE:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block() -; AMDGPU-DISABLED2-NEXT: [[WARP_SIZE:%.*]] = call i32 @__kmpc_get_warp_size() -; AMDGPU-DISABLED2-NEXT: [[BLOCK_SIZE:%.*]] = sub i32 [[BLOCK_HW_SIZE]], [[WARP_SIZE]] -; AMDGPU-DISABLED2-NEXT: [[THREAD_IS_MAIN_OR_WORKER:%.*]] = icmp slt i32 [[TMP0]], [[BLOCK_SIZE]] -; AMDGPU-DISABLED2-NEXT: br i1 [[THREAD_IS_MAIN_OR_WORKER]], label [[WORKER_STATE_MACHINE_BEGIN:%.*]], label [[WORKER_STATE_MACHINE_FINISHED:%.*]] -; AMDGPU-DISABLED2: worker_state_machine.begin: -; AMDGPU-DISABLED2-NEXT: call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]]) -; AMDGPU-DISABLED2-NEXT: [[WORKER_WORK_FN_ADDR_GENERIC:%.*]] = addrspacecast ptr addrspace(5) [[WORKER_WORK_FN_ADDR]] to ptr -; AMDGPU-DISABLED2-NEXT: [[WORKER_IS_ACTIVE:%.*]] = call i1 @__kmpc_kernel_parallel(ptr [[WORKER_WORK_FN_ADDR_GENERIC]]) -; AMDGPU-DISABLED2-NEXT: [[WORKER_WORK_FN:%.*]] = load ptr, ptr [[WORKER_WORK_FN_ADDR_GENERIC]], align 8 -; AMDGPU-DISABLED2-NEXT: [[WORKER_IS_DONE:%.*]] = icmp eq ptr [[WORKER_WORK_FN]], null -; AMDGPU-DISABLED2-NEXT: br i1 [[WORKER_IS_DONE]], label 
[[WORKER_STATE_MACHINE_FINISHED]], label [[WORKER_STATE_MACHINE_IS_ACTIVE_CHECK:%.*]] -; AMDGPU-DISABLED2: worker_state_machine.finished: -; AMDGPU-DISABLED2-NEXT: ret void -; AMDGPU-DISABLED2: worker_state_machine.is_active.check: -; AMDGPU-DISABLED2-NEXT: br i1 [[WORKER_IS_ACTIVE]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK:%.*]], label [[WORKER_STATE_MACHINE_DONE_BARRIER:%.*]] -; AMDGPU-DISABLED2: worker_state_machine.parallel_region.check: -; AMDGPU-DISABLED2-NEXT: br i1 true, label [[WORKER_STATE_MACHINE_PARALLEL_REGION_EXECUTE:%.*]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK1:%.*]] -; AMDGPU-DISABLED2: worker_state_machine.parallel_region.execute: -; AMDGPU-DISABLED2-NEXT: call void @__omp_outlined__3_wrapper(i16 0, i32 [[TMP0]]) -; AMDGPU-DISABLED2-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END:%.*]] -; AMDGPU-DISABLED2: worker_state_machine.parallel_region.check1: -; AMDGPU-DISABLED2-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END]] -; AMDGPU-DISABLED2: worker_state_machine.parallel_region.end: -; AMDGPU-DISABLED2-NEXT: call void @__kmpc_kernel_end_parallel() -; AMDGPU-DISABLED2-NEXT: br label [[WORKER_STATE_MACHINE_DONE_BARRIER]] -; AMDGPU-DISABLED2: worker_state_machine.done.barrier: -; AMDGPU-DISABLED2-NEXT: call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]]) -; AMDGPU-DISABLED2-NEXT: br label [[WORKER_STATE_MACHINE_BEGIN]] -; AMDGPU-DISABLED2: thread.user_code.check: ; AMDGPU-DISABLED2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1 ; AMDGPU-DISABLED2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[COMMON_RET:%.*]] ; AMDGPU-DISABLED2: common.ret: @@ -919,42 +813,9 @@ define weak ptx_kernel void @__omp_offloading_fd02_2044372e_sequential_loop_to_s ; NVPTX-DISABLED2-LABEL: define {{[^@]+}}@__omp_offloading_fd02_2044372e_sequential_loop_to_stack_var_l20 ; NVPTX-DISABLED2-SAME: () #[[ATTR0]] { ; NVPTX-DISABLED2-NEXT: entry: -; NVPTX-DISABLED2-NEXT: [[WORKER_WORK_FN_ADDR:%.*]] = alloca ptr, align 8 ; NVPTX-DISABLED2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4 ; NVPTX-DISABLED2-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4 ; NVPTX-DISABLED2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_fd02_2044372e_sequential_loop_to_stack_var_l20_kernel_environment, ptr null) -; NVPTX-DISABLED2-NEXT: [[THREAD_IS_WORKER:%.*]] = icmp ne i32 [[TMP0]], -1 -; NVPTX-DISABLED2-NEXT: br i1 [[THREAD_IS_WORKER]], label [[IS_WORKER_CHECK:%.*]], label [[THREAD_USER_CODE_CHECK:%.*]] -; NVPTX-DISABLED2: is_worker_check: -; NVPTX-DISABLED2-NEXT: [[BLOCK_HW_SIZE:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block() -; NVPTX-DISABLED2-NEXT: [[WARP_SIZE:%.*]] = call i32 @__kmpc_get_warp_size() -; NVPTX-DISABLED2-NEXT: [[BLOCK_SIZE:%.*]] = sub i32 [[BLOCK_HW_SIZE]], [[WARP_SIZE]] -; NVPTX-DISABLED2-NEXT: [[THREAD_IS_MAIN_OR_WORKER:%.*]] = icmp slt i32 [[TMP0]], [[BLOCK_SIZE]] -; NVPTX-DISABLED2-NEXT: br i1 [[THREAD_IS_MAIN_OR_WORKER]], label [[WORKER_STATE_MACHINE_BEGIN:%.*]], label [[WORKER_STATE_MACHINE_FINISHED:%.*]] -; NVPTX-DISABLED2: worker_state_machine.begin: -; NVPTX-DISABLED2-NEXT: call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]]) -; NVPTX-DISABLED2-NEXT: [[WORKER_IS_ACTIVE:%.*]] = call i1 @__kmpc_kernel_parallel(ptr [[WORKER_WORK_FN_ADDR]]) -; NVPTX-DISABLED2-NEXT: [[WORKER_WORK_FN:%.*]] = load ptr, ptr [[WORKER_WORK_FN_ADDR]], align 8 -; NVPTX-DISABLED2-NEXT: [[WORKER_IS_DONE:%.*]] = icmp eq ptr [[WORKER_WORK_FN]], null -; NVPTX-DISABLED2-NEXT: br i1 
[[WORKER_IS_DONE]], label [[WORKER_STATE_MACHINE_FINISHED]], label [[WORKER_STATE_MACHINE_IS_ACTIVE_CHECK:%.*]] -; NVPTX-DISABLED2: worker_state_machine.finished: -; NVPTX-DISABLED2-NEXT: ret void -; NVPTX-DISABLED2: worker_state_machine.is_active.check: -; NVPTX-DISABLED2-NEXT: br i1 [[WORKER_IS_ACTIVE]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK:%.*]], label [[WORKER_STATE_MACHINE_DONE_BARRIER:%.*]] -; NVPTX-DISABLED2: worker_state_machine.parallel_region.check: -; NVPTX-DISABLED2-NEXT: br i1 true, label [[WORKER_STATE_MACHINE_PARALLEL_REGION_EXECUTE:%.*]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK1:%.*]] -; NVPTX-DISABLED2: worker_state_machine.parallel_region.execute: -; NVPTX-DISABLED2-NEXT: call void @__omp_outlined__3_wrapper(i16 0, i32 [[TMP0]]) -; NVPTX-DISABLED2-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END:%.*]] -; NVPTX-DISABLED2: worker_state_machine.parallel_region.check1: -; NVPTX-DISABLED2-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END]] -; NVPTX-DISABLED2: worker_state_machine.parallel_region.end: -; NVPTX-DISABLED2-NEXT: call void @__kmpc_kernel_end_parallel() -; NVPTX-DISABLED2-NEXT: br label [[WORKER_STATE_MACHINE_DONE_BARRIER]] -; NVPTX-DISABLED2: worker_state_machine.done.barrier: -; NVPTX-DISABLED2-NEXT: call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]]) -; NVPTX-DISABLED2-NEXT: br label [[WORKER_STATE_MACHINE_BEGIN]] -; NVPTX-DISABLED2: thread.user_code.check: ; NVPTX-DISABLED2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1 ; NVPTX-DISABLED2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[COMMON_RET:%.*]] ; NVPTX-DISABLED2: common.ret: @@ -965,6 +826,7 @@ define weak ptx_kernel void @__omp_offloading_fd02_2044372e_sequential_loop_to_s ; NVPTX-DISABLED2-NEXT: call void @__omp_outlined__2(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR4]] ; NVPTX-DISABLED2-NEXT: call void @__kmpc_target_deinit() ; NVPTX-DISABLED2-NEXT: br label [[COMMON_RET]] +; entry: %.zero.addr = alloca i32, align 4 %.threadid_temp. 
= alloca i32, align 4 @@ -1065,7 +927,7 @@ define internal void @__omp_outlined__2(ptr noalias %.global_tid., ptr noalias % ; AMDGPU-DISABLED2-NEXT: ret void ; AMDGPU-DISABLED2: for.body: ; AMDGPU-DISABLED2-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTGLOBAL_TID_]], align 4, !tbaa [[TBAA12]] -; AMDGPU-DISABLED2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__3, ptr @__omp_outlined__3_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS]], i64 0) +; AMDGPU-DISABLED2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__3, ptr @__omp_outlined__3_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0) ; AMDGPU-DISABLED2-NEXT: [[INC]] = add nsw i32 [[I_0]], 1 ; AMDGPU-DISABLED2-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]] ; @@ -1105,9 +967,10 @@ define internal void @__omp_outlined__2(ptr noalias %.global_tid., ptr noalias % ; NVPTX-DISABLED2-NEXT: ret void ; NVPTX-DISABLED2: for.body: ; NVPTX-DISABLED2-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTGLOBAL_TID_]], align 4, !tbaa [[TBAA12]] -; NVPTX-DISABLED2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__3, ptr @__omp_outlined__3_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS]], i64 0) +; NVPTX-DISABLED2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__3, ptr @__omp_outlined__3_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0) ; NVPTX-DISABLED2-NEXT: [[INC]] = add nsw i32 [[I_0]], 1 ; NVPTX-DISABLED2-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]] +; entry: %captured_vars_addrs = alloca [0 x ptr], align 8 %x = call align 4 ptr @__kmpc_alloc_shared(i64 4) @@ -1167,6 +1030,7 @@ define internal void @__omp_outlined__3(ptr noalias %.global_tid., ptr noalias % ; NVPTX-DISABLED2-NEXT: entry: ; NVPTX-DISABLED2-NEXT: call void @unknown() #[[ATTR8]] ; NVPTX-DISABLED2-NEXT: ret void +; entry: call void @unknown() #11 ret void @@ -1233,6 +1097,7 @@ define internal void @__omp_outlined__3_wrapper(i16 zeroext %0, i32 %1) #3 { ; NVPTX-DISABLED2-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]]) ; NVPTX-DISABLED2-NEXT: call void @__omp_outlined__3(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR4]] ; NVPTX-DISABLED2-NEXT: ret void +; entry: %.addr1 = alloca i32, align 4 %.zero.addr = alloca i32, align 4 @@ -1335,43 +1200,9 @@ define weak ptx_kernel void @__omp_offloading_fd02_2044372e_sequential_loop_to_s ; AMDGPU-DISABLED2-LABEL: define {{[^@]+}}@__omp_offloading_fd02_2044372e_sequential_loop_to_shared_var_l35 ; AMDGPU-DISABLED2-SAME: () #[[ATTR0]] { ; AMDGPU-DISABLED2-NEXT: entry: -; AMDGPU-DISABLED2-NEXT: [[WORKER_WORK_FN_ADDR:%.*]] = alloca ptr, align 8, addrspace(5) ; AMDGPU-DISABLED2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4 ; AMDGPU-DISABLED2-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4 ; AMDGPU-DISABLED2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_fd02_2044372e_sequential_loop_to_shared_var_l35_kernel_environment, ptr null) -; AMDGPU-DISABLED2-NEXT: [[THREAD_IS_WORKER:%.*]] = icmp ne i32 [[TMP0]], -1 -; AMDGPU-DISABLED2-NEXT: br i1 [[THREAD_IS_WORKER]], label [[IS_WORKER_CHECK:%.*]], label [[THREAD_USER_CODE_CHECK:%.*]] -; AMDGPU-DISABLED2: is_worker_check: -; AMDGPU-DISABLED2-NEXT: [[BLOCK_HW_SIZE:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block() -; AMDGPU-DISABLED2-NEXT: [[WARP_SIZE:%.*]] = call i32 @__kmpc_get_warp_size() -; AMDGPU-DISABLED2-NEXT: [[BLOCK_SIZE:%.*]] = 
sub i32 [[BLOCK_HW_SIZE]], [[WARP_SIZE]] -; AMDGPU-DISABLED2-NEXT: [[THREAD_IS_MAIN_OR_WORKER:%.*]] = icmp slt i32 [[TMP0]], [[BLOCK_SIZE]] -; AMDGPU-DISABLED2-NEXT: br i1 [[THREAD_IS_MAIN_OR_WORKER]], label [[WORKER_STATE_MACHINE_BEGIN:%.*]], label [[WORKER_STATE_MACHINE_FINISHED:%.*]] -; AMDGPU-DISABLED2: worker_state_machine.begin: -; AMDGPU-DISABLED2-NEXT: call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]]) -; AMDGPU-DISABLED2-NEXT: [[WORKER_WORK_FN_ADDR_GENERIC:%.*]] = addrspacecast ptr addrspace(5) [[WORKER_WORK_FN_ADDR]] to ptr -; AMDGPU-DISABLED2-NEXT: [[WORKER_IS_ACTIVE:%.*]] = call i1 @__kmpc_kernel_parallel(ptr [[WORKER_WORK_FN_ADDR_GENERIC]]) -; AMDGPU-DISABLED2-NEXT: [[WORKER_WORK_FN:%.*]] = load ptr, ptr [[WORKER_WORK_FN_ADDR_GENERIC]], align 8 -; AMDGPU-DISABLED2-NEXT: [[WORKER_IS_DONE:%.*]] = icmp eq ptr [[WORKER_WORK_FN]], null -; AMDGPU-DISABLED2-NEXT: br i1 [[WORKER_IS_DONE]], label [[WORKER_STATE_MACHINE_FINISHED]], label [[WORKER_STATE_MACHINE_IS_ACTIVE_CHECK:%.*]] -; AMDGPU-DISABLED2: worker_state_machine.finished: -; AMDGPU-DISABLED2-NEXT: ret void -; AMDGPU-DISABLED2: worker_state_machine.is_active.check: -; AMDGPU-DISABLED2-NEXT: br i1 [[WORKER_IS_ACTIVE]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK:%.*]], label [[WORKER_STATE_MACHINE_DONE_BARRIER:%.*]] -; AMDGPU-DISABLED2: worker_state_machine.parallel_region.check: -; AMDGPU-DISABLED2-NEXT: br i1 true, label [[WORKER_STATE_MACHINE_PARALLEL_REGION_EXECUTE:%.*]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK1:%.*]] -; AMDGPU-DISABLED2: worker_state_machine.parallel_region.execute: -; AMDGPU-DISABLED2-NEXT: call void @__omp_outlined__5_wrapper(i16 0, i32 [[TMP0]]) -; AMDGPU-DISABLED2-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END:%.*]] -; AMDGPU-DISABLED2: worker_state_machine.parallel_region.check1: -; AMDGPU-DISABLED2-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END]] -; AMDGPU-DISABLED2: worker_state_machine.parallel_region.end: -; AMDGPU-DISABLED2-NEXT: call void @__kmpc_kernel_end_parallel() -; AMDGPU-DISABLED2-NEXT: br label [[WORKER_STATE_MACHINE_DONE_BARRIER]] -; AMDGPU-DISABLED2: worker_state_machine.done.barrier: -; AMDGPU-DISABLED2-NEXT: call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]]) -; AMDGPU-DISABLED2-NEXT: br label [[WORKER_STATE_MACHINE_BEGIN]] -; AMDGPU-DISABLED2: thread.user_code.check: ; AMDGPU-DISABLED2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1 ; AMDGPU-DISABLED2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[COMMON_RET:%.*]] ; AMDGPU-DISABLED2: common.ret: @@ -1436,42 +1267,9 @@ define weak ptx_kernel void @__omp_offloading_fd02_2044372e_sequential_loop_to_s ; NVPTX-DISABLED2-LABEL: define {{[^@]+}}@__omp_offloading_fd02_2044372e_sequential_loop_to_shared_var_l35 ; NVPTX-DISABLED2-SAME: () #[[ATTR0]] { ; NVPTX-DISABLED2-NEXT: entry: -; NVPTX-DISABLED2-NEXT: [[WORKER_WORK_FN_ADDR:%.*]] = alloca ptr, align 8 ; NVPTX-DISABLED2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4 ; NVPTX-DISABLED2-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4 ; NVPTX-DISABLED2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_fd02_2044372e_sequential_loop_to_shared_var_l35_kernel_environment, ptr null) -; NVPTX-DISABLED2-NEXT: [[THREAD_IS_WORKER:%.*]] = icmp ne i32 [[TMP0]], -1 -; NVPTX-DISABLED2-NEXT: br i1 [[THREAD_IS_WORKER]], label [[IS_WORKER_CHECK:%.*]], label [[THREAD_USER_CODE_CHECK:%.*]] -; NVPTX-DISABLED2: is_worker_check: -; NVPTX-DISABLED2-NEXT: [[BLOCK_HW_SIZE:%.*]] 
= call i32 @__kmpc_get_hardware_num_threads_in_block() -; NVPTX-DISABLED2-NEXT: [[WARP_SIZE:%.*]] = call i32 @__kmpc_get_warp_size() -; NVPTX-DISABLED2-NEXT: [[BLOCK_SIZE:%.*]] = sub i32 [[BLOCK_HW_SIZE]], [[WARP_SIZE]] -; NVPTX-DISABLED2-NEXT: [[THREAD_IS_MAIN_OR_WORKER:%.*]] = icmp slt i32 [[TMP0]], [[BLOCK_SIZE]] -; NVPTX-DISABLED2-NEXT: br i1 [[THREAD_IS_MAIN_OR_WORKER]], label [[WORKER_STATE_MACHINE_BEGIN:%.*]], label [[WORKER_STATE_MACHINE_FINISHED:%.*]] -; NVPTX-DISABLED2: worker_state_machine.begin: -; NVPTX-DISABLED2-NEXT: call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]]) -; NVPTX-DISABLED2-NEXT: [[WORKER_IS_ACTIVE:%.*]] = call i1 @__kmpc_kernel_parallel(ptr [[WORKER_WORK_FN_ADDR]]) -; NVPTX-DISABLED2-NEXT: [[WORKER_WORK_FN:%.*]] = load ptr, ptr [[WORKER_WORK_FN_ADDR]], align 8 -; NVPTX-DISABLED2-NEXT: [[WORKER_IS_DONE:%.*]] = icmp eq ptr [[WORKER_WORK_FN]], null -; NVPTX-DISABLED2-NEXT: br i1 [[WORKER_IS_DONE]], label [[WORKER_STATE_MACHINE_FINISHED]], label [[WORKER_STATE_MACHINE_IS_ACTIVE_CHECK:%.*]] -; NVPTX-DISABLED2: worker_state_machine.finished: -; NVPTX-DISABLED2-NEXT: ret void -; NVPTX-DISABLED2: worker_state_machine.is_active.check: -; NVPTX-DISABLED2-NEXT: br i1 [[WORKER_IS_ACTIVE]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK:%.*]], label [[WORKER_STATE_MACHINE_DONE_BARRIER:%.*]] -; NVPTX-DISABLED2: worker_state_machine.parallel_region.check: -; NVPTX-DISABLED2-NEXT: br i1 true, label [[WORKER_STATE_MACHINE_PARALLEL_REGION_EXECUTE:%.*]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK1:%.*]] -; NVPTX-DISABLED2: worker_state_machine.parallel_region.execute: -; NVPTX-DISABLED2-NEXT: call void @__omp_outlined__5_wrapper(i16 0, i32 [[TMP0]]) -; NVPTX-DISABLED2-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END:%.*]] -; NVPTX-DISABLED2: worker_state_machine.parallel_region.check1: -; NVPTX-DISABLED2-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END]] -; NVPTX-DISABLED2: worker_state_machine.parallel_region.end: -; NVPTX-DISABLED2-NEXT: call void @__kmpc_kernel_end_parallel() -; NVPTX-DISABLED2-NEXT: br label [[WORKER_STATE_MACHINE_DONE_BARRIER]] -; NVPTX-DISABLED2: worker_state_machine.done.barrier: -; NVPTX-DISABLED2-NEXT: call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]]) -; NVPTX-DISABLED2-NEXT: br label [[WORKER_STATE_MACHINE_BEGIN]] -; NVPTX-DISABLED2: thread.user_code.check: ; NVPTX-DISABLED2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1 ; NVPTX-DISABLED2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[COMMON_RET:%.*]] ; NVPTX-DISABLED2: common.ret: @@ -1482,6 +1280,7 @@ define weak ptx_kernel void @__omp_offloading_fd02_2044372e_sequential_loop_to_s ; NVPTX-DISABLED2-NEXT: call void @__omp_outlined__4(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR4]] ; NVPTX-DISABLED2-NEXT: call void @__kmpc_target_deinit() ; NVPTX-DISABLED2-NEXT: br label [[COMMON_RET]] +; entry: %.zero.addr = alloca i32, align 4 %.threadid_temp. 
= alloca i32, align 4 @@ -1575,7 +1374,7 @@ define internal void @__omp_outlined__4(ptr noalias %.global_tid., ptr noalias % ; AMDGPU-DISABLED2: for.body: ; AMDGPU-DISABLED2-NEXT: store ptr addrspacecast (ptr addrspace(3) @x_shared to ptr), ptr [[CAPTURED_VARS_ADDRS]], align 8, !tbaa [[TBAA20:![0-9]+]] ; AMDGPU-DISABLED2-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTGLOBAL_TID_]], align 4, !tbaa [[TBAA12]] -; AMDGPU-DISABLED2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__5, ptr @__omp_outlined__5_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS]], i64 1) +; AMDGPU-DISABLED2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__5, ptr @__omp_outlined__5_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 1) ; AMDGPU-DISABLED2-NEXT: [[INC]] = add nsw i32 [[I_0]], 1 ; AMDGPU-DISABLED2-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]] ; @@ -1613,9 +1412,10 @@ define internal void @__omp_outlined__4(ptr noalias %.global_tid., ptr noalias % ; NVPTX-DISABLED2: for.body: ; NVPTX-DISABLED2-NEXT: store ptr addrspacecast (ptr addrspace(3) @x_shared to ptr), ptr [[CAPTURED_VARS_ADDRS]], align 8, !tbaa [[TBAA20:![0-9]+]] ; NVPTX-DISABLED2-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTGLOBAL_TID_]], align 4, !tbaa [[TBAA12]] -; NVPTX-DISABLED2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__5, ptr @__omp_outlined__5_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS]], i64 1) +; NVPTX-DISABLED2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__5, ptr @__omp_outlined__5_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 1) ; NVPTX-DISABLED2-NEXT: [[INC]] = add nsw i32 [[I_0]], 1 ; NVPTX-DISABLED2-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]] +; entry: %captured_vars_addrs = alloca [1 x ptr], align 8 %x = call align 4 ptr @__kmpc_alloc_shared(i64 4) @@ -1694,6 +1494,7 @@ define internal void @__omp_outlined__5(ptr noalias %.global_tid., ptr noalias % ; NVPTX-DISABLED2-NEXT: store i32 [[INC]], ptr [[X]], align 4, !tbaa [[TBAA12]] ; NVPTX-DISABLED2-NEXT: call void @unknown() #[[ATTR8]] ; NVPTX-DISABLED2-NEXT: ret void +; entry: %0 = load i32, ptr %x, align 4, !tbaa !18 %inc = add nsw i32 %0, 1 @@ -1775,6 +1576,7 @@ define internal void @__omp_outlined__5_wrapper(i16 zeroext %0, i32 %1) #3 { ; NVPTX-DISABLED2-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TMP2]], align 8, !tbaa [[TBAA20]] ; NVPTX-DISABLED2-NEXT: call void @__omp_outlined__5(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]], ptr [[TMP3]]) #[[ATTR4]] ; NVPTX-DISABLED2-NEXT: ret void +; entry: %.addr1 = alloca i32, align 4 %.zero.addr = alloca i32, align 4 @@ -1878,43 +1680,9 @@ define weak ptx_kernel void @__omp_offloading_fd02_2044372e_sequential_loop_to_s ; AMDGPU-DISABLED2-LABEL: define {{[^@]+}}@__omp_offloading_fd02_2044372e_sequential_loop_to_shared_var_guarded_l50 ; AMDGPU-DISABLED2-SAME: () #[[ATTR0]] { ; AMDGPU-DISABLED2-NEXT: entry: -; AMDGPU-DISABLED2-NEXT: [[WORKER_WORK_FN_ADDR:%.*]] = alloca ptr, align 8, addrspace(5) ; AMDGPU-DISABLED2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4 ; AMDGPU-DISABLED2-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4 ; AMDGPU-DISABLED2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_fd02_2044372e_sequential_loop_to_shared_var_guarded_l50_kernel_environment, ptr null) -; AMDGPU-DISABLED2-NEXT: [[THREAD_IS_WORKER:%.*]] = icmp ne i32 [[TMP0]], -1 -; AMDGPU-DISABLED2-NEXT: br i1 
[[THREAD_IS_WORKER]], label [[IS_WORKER_CHECK:%.*]], label [[THREAD_USER_CODE_CHECK:%.*]] -; AMDGPU-DISABLED2: is_worker_check: -; AMDGPU-DISABLED2-NEXT: [[BLOCK_HW_SIZE:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block() -; AMDGPU-DISABLED2-NEXT: [[WARP_SIZE:%.*]] = call i32 @__kmpc_get_warp_size() -; AMDGPU-DISABLED2-NEXT: [[BLOCK_SIZE:%.*]] = sub i32 [[BLOCK_HW_SIZE]], [[WARP_SIZE]] -; AMDGPU-DISABLED2-NEXT: [[THREAD_IS_MAIN_OR_WORKER:%.*]] = icmp slt i32 [[TMP0]], [[BLOCK_SIZE]] -; AMDGPU-DISABLED2-NEXT: br i1 [[THREAD_IS_MAIN_OR_WORKER]], label [[WORKER_STATE_MACHINE_BEGIN:%.*]], label [[WORKER_STATE_MACHINE_FINISHED:%.*]] -; AMDGPU-DISABLED2: worker_state_machine.begin: -; AMDGPU-DISABLED2-NEXT: call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]]) -; AMDGPU-DISABLED2-NEXT: [[WORKER_WORK_FN_ADDR_GENERIC:%.*]] = addrspacecast ptr addrspace(5) [[WORKER_WORK_FN_ADDR]] to ptr -; AMDGPU-DISABLED2-NEXT: [[WORKER_IS_ACTIVE:%.*]] = call i1 @__kmpc_kernel_parallel(ptr [[WORKER_WORK_FN_ADDR_GENERIC]]) -; AMDGPU-DISABLED2-NEXT: [[WORKER_WORK_FN:%.*]] = load ptr, ptr [[WORKER_WORK_FN_ADDR_GENERIC]], align 8 -; AMDGPU-DISABLED2-NEXT: [[WORKER_IS_DONE:%.*]] = icmp eq ptr [[WORKER_WORK_FN]], null -; AMDGPU-DISABLED2-NEXT: br i1 [[WORKER_IS_DONE]], label [[WORKER_STATE_MACHINE_FINISHED]], label [[WORKER_STATE_MACHINE_IS_ACTIVE_CHECK:%.*]] -; AMDGPU-DISABLED2: worker_state_machine.finished: -; AMDGPU-DISABLED2-NEXT: ret void -; AMDGPU-DISABLED2: worker_state_machine.is_active.check: -; AMDGPU-DISABLED2-NEXT: br i1 [[WORKER_IS_ACTIVE]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK:%.*]], label [[WORKER_STATE_MACHINE_DONE_BARRIER:%.*]] -; AMDGPU-DISABLED2: worker_state_machine.parallel_region.check: -; AMDGPU-DISABLED2-NEXT: br i1 true, label [[WORKER_STATE_MACHINE_PARALLEL_REGION_EXECUTE:%.*]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK1:%.*]] -; AMDGPU-DISABLED2: worker_state_machine.parallel_region.execute: -; AMDGPU-DISABLED2-NEXT: call void @__omp_outlined__7_wrapper(i16 0, i32 [[TMP0]]) -; AMDGPU-DISABLED2-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END:%.*]] -; AMDGPU-DISABLED2: worker_state_machine.parallel_region.check1: -; AMDGPU-DISABLED2-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END]] -; AMDGPU-DISABLED2: worker_state_machine.parallel_region.end: -; AMDGPU-DISABLED2-NEXT: call void @__kmpc_kernel_end_parallel() -; AMDGPU-DISABLED2-NEXT: br label [[WORKER_STATE_MACHINE_DONE_BARRIER]] -; AMDGPU-DISABLED2: worker_state_machine.done.barrier: -; AMDGPU-DISABLED2-NEXT: call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]]) -; AMDGPU-DISABLED2-NEXT: br label [[WORKER_STATE_MACHINE_BEGIN]] -; AMDGPU-DISABLED2: thread.user_code.check: ; AMDGPU-DISABLED2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1 ; AMDGPU-DISABLED2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[COMMON_RET:%.*]] ; AMDGPU-DISABLED2: common.ret: @@ -1979,42 +1747,9 @@ define weak ptx_kernel void @__omp_offloading_fd02_2044372e_sequential_loop_to_s ; NVPTX-DISABLED2-LABEL: define {{[^@]+}}@__omp_offloading_fd02_2044372e_sequential_loop_to_shared_var_guarded_l50 ; NVPTX-DISABLED2-SAME: () #[[ATTR0]] { ; NVPTX-DISABLED2-NEXT: entry: -; NVPTX-DISABLED2-NEXT: [[WORKER_WORK_FN_ADDR:%.*]] = alloca ptr, align 8 ; NVPTX-DISABLED2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4 ; NVPTX-DISABLED2-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4 ; NVPTX-DISABLED2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr 
@__omp_offloading_fd02_2044372e_sequential_loop_to_shared_var_guarded_l50_kernel_environment, ptr null) -; NVPTX-DISABLED2-NEXT: [[THREAD_IS_WORKER:%.*]] = icmp ne i32 [[TMP0]], -1 -; NVPTX-DISABLED2-NEXT: br i1 [[THREAD_IS_WORKER]], label [[IS_WORKER_CHECK:%.*]], label [[THREAD_USER_CODE_CHECK:%.*]] -; NVPTX-DISABLED2: is_worker_check: -; NVPTX-DISABLED2-NEXT: [[BLOCK_HW_SIZE:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block() -; NVPTX-DISABLED2-NEXT: [[WARP_SIZE:%.*]] = call i32 @__kmpc_get_warp_size() -; NVPTX-DISABLED2-NEXT: [[BLOCK_SIZE:%.*]] = sub i32 [[BLOCK_HW_SIZE]], [[WARP_SIZE]] -; NVPTX-DISABLED2-NEXT: [[THREAD_IS_MAIN_OR_WORKER:%.*]] = icmp slt i32 [[TMP0]], [[BLOCK_SIZE]] -; NVPTX-DISABLED2-NEXT: br i1 [[THREAD_IS_MAIN_OR_WORKER]], label [[WORKER_STATE_MACHINE_BEGIN:%.*]], label [[WORKER_STATE_MACHINE_FINISHED:%.*]] -; NVPTX-DISABLED2: worker_state_machine.begin: -; NVPTX-DISABLED2-NEXT: call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]]) -; NVPTX-DISABLED2-NEXT: [[WORKER_IS_ACTIVE:%.*]] = call i1 @__kmpc_kernel_parallel(ptr [[WORKER_WORK_FN_ADDR]]) -; NVPTX-DISABLED2-NEXT: [[WORKER_WORK_FN:%.*]] = load ptr, ptr [[WORKER_WORK_FN_ADDR]], align 8 -; NVPTX-DISABLED2-NEXT: [[WORKER_IS_DONE:%.*]] = icmp eq ptr [[WORKER_WORK_FN]], null -; NVPTX-DISABLED2-NEXT: br i1 [[WORKER_IS_DONE]], label [[WORKER_STATE_MACHINE_FINISHED]], label [[WORKER_STATE_MACHINE_IS_ACTIVE_CHECK:%.*]] -; NVPTX-DISABLED2: worker_state_machine.finished: -; NVPTX-DISABLED2-NEXT: ret void -; NVPTX-DISABLED2: worker_state_machine.is_active.check: -; NVPTX-DISABLED2-NEXT: br i1 [[WORKER_IS_ACTIVE]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK:%.*]], label [[WORKER_STATE_MACHINE_DONE_BARRIER:%.*]] -; NVPTX-DISABLED2: worker_state_machine.parallel_region.check: -; NVPTX-DISABLED2-NEXT: br i1 true, label [[WORKER_STATE_MACHINE_PARALLEL_REGION_EXECUTE:%.*]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK1:%.*]] -; NVPTX-DISABLED2: worker_state_machine.parallel_region.execute: -; NVPTX-DISABLED2-NEXT: call void @__omp_outlined__7_wrapper(i16 0, i32 [[TMP0]]) -; NVPTX-DISABLED2-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END:%.*]] -; NVPTX-DISABLED2: worker_state_machine.parallel_region.check1: -; NVPTX-DISABLED2-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END]] -; NVPTX-DISABLED2: worker_state_machine.parallel_region.end: -; NVPTX-DISABLED2-NEXT: call void @__kmpc_kernel_end_parallel() -; NVPTX-DISABLED2-NEXT: br label [[WORKER_STATE_MACHINE_DONE_BARRIER]] -; NVPTX-DISABLED2: worker_state_machine.done.barrier: -; NVPTX-DISABLED2-NEXT: call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]]) -; NVPTX-DISABLED2-NEXT: br label [[WORKER_STATE_MACHINE_BEGIN]] -; NVPTX-DISABLED2: thread.user_code.check: ; NVPTX-DISABLED2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1 ; NVPTX-DISABLED2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[COMMON_RET:%.*]] ; NVPTX-DISABLED2: common.ret: @@ -2025,6 +1760,7 @@ define weak ptx_kernel void @__omp_offloading_fd02_2044372e_sequential_loop_to_s ; NVPTX-DISABLED2-NEXT: call void @__omp_outlined__6(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR4]] ; NVPTX-DISABLED2-NEXT: call void @__kmpc_target_deinit() ; NVPTX-DISABLED2-NEXT: br label [[COMMON_RET]] +; entry: %.zero.addr = alloca i32, align 4 %.threadid_temp. 
= alloca i32, align 4 @@ -2148,7 +1884,7 @@ define internal void @__omp_outlined__6(ptr noalias %.global_tid., ptr noalias % ; AMDGPU-DISABLED2: for.body: ; AMDGPU-DISABLED2-NEXT: store ptr addrspacecast (ptr addrspace(3) @x_shared.1 to ptr), ptr [[CAPTURED_VARS_ADDRS]], align 8, !tbaa [[TBAA20]] ; AMDGPU-DISABLED2-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTGLOBAL_TID_]], align 4, !tbaa [[TBAA12]] -; AMDGPU-DISABLED2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__7, ptr @__omp_outlined__7_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS]], i64 1) +; AMDGPU-DISABLED2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__7, ptr @__omp_outlined__7_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 1) ; AMDGPU-DISABLED2-NEXT: [[INC]] = add nsw i32 [[I_0]], 1 ; AMDGPU-DISABLED2-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]] ; @@ -2188,9 +1924,10 @@ define internal void @__omp_outlined__6(ptr noalias %.global_tid., ptr noalias % ; NVPTX-DISABLED2: for.body: ; NVPTX-DISABLED2-NEXT: store ptr addrspacecast (ptr addrspace(3) @x_shared1 to ptr), ptr [[CAPTURED_VARS_ADDRS]], align 8, !tbaa [[TBAA20]] ; NVPTX-DISABLED2-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTGLOBAL_TID_]], align 4, !tbaa [[TBAA12]] -; NVPTX-DISABLED2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__7, ptr @__omp_outlined__7_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS]], i64 1) +; NVPTX-DISABLED2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__7, ptr @__omp_outlined__7_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 1) ; NVPTX-DISABLED2-NEXT: [[INC]] = add nsw i32 [[I_0]], 1 ; NVPTX-DISABLED2-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]] +; entry: %captured_vars_addrs = alloca [1 x ptr], align 8 %x = call align 4 ptr @__kmpc_alloc_shared(i64 4) @@ -2270,6 +2007,7 @@ define internal void @__omp_outlined__7(ptr noalias %.global_tid., ptr noalias % ; NVPTX-DISABLED2-NEXT: store i32 [[INC]], ptr [[X]], align 4, !tbaa [[TBAA12]] ; NVPTX-DISABLED2-NEXT: call void @unknowni32p(ptr [[X]]) #[[ATTR8]] ; NVPTX-DISABLED2-NEXT: ret void +; entry: %0 = load i32, ptr %x, align 4, !tbaa !18 %inc = add nsw i32 %0, 1 @@ -2351,6 +2089,7 @@ define internal void @__omp_outlined__7_wrapper(i16 zeroext %0, i32 %1) #3 { ; NVPTX-DISABLED2-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TMP2]], align 8, !tbaa [[TBAA20]] ; NVPTX-DISABLED2-NEXT: call void @__omp_outlined__7(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]], ptr [[TMP3]]) #[[ATTR4]] ; NVPTX-DISABLED2-NEXT: ret void +; entry: %.addr1 = alloca i32, align 4 %.zero.addr = alloca i32, align 4 @@ -2506,39 +2245,9 @@ define weak ptx_kernel void @__omp_offloading_fd02_2044372e_do_not_spmdize_targe ; AMDGPU-DISABLED2-LABEL: define {{[^@]+}}@__omp_offloading_fd02_2044372e_do_not_spmdize_target_l65 ; AMDGPU-DISABLED2-SAME: () #[[ATTR0]] { ; AMDGPU-DISABLED2-NEXT: entry: -; AMDGPU-DISABLED2-NEXT: [[WORKER_WORK_FN_ADDR:%.*]] = alloca ptr, align 8, addrspace(5) ; AMDGPU-DISABLED2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4 ; AMDGPU-DISABLED2-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4 ; AMDGPU-DISABLED2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_fd02_2044372e_do_not_spmdize_target_l65_kernel_environment, ptr null) -; AMDGPU-DISABLED2-NEXT: [[THREAD_IS_WORKER:%.*]] = icmp ne i32 [[TMP0]], -1 -; AMDGPU-DISABLED2-NEXT: br i1 [[THREAD_IS_WORKER]], label 
[[IS_WORKER_CHECK:%.*]], label [[THREAD_USER_CODE_CHECK:%.*]] -; AMDGPU-DISABLED2: is_worker_check: -; AMDGPU-DISABLED2-NEXT: [[BLOCK_HW_SIZE:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block() -; AMDGPU-DISABLED2-NEXT: [[WARP_SIZE:%.*]] = call i32 @__kmpc_get_warp_size() -; AMDGPU-DISABLED2-NEXT: [[BLOCK_SIZE:%.*]] = sub i32 [[BLOCK_HW_SIZE]], [[WARP_SIZE]] -; AMDGPU-DISABLED2-NEXT: [[THREAD_IS_MAIN_OR_WORKER:%.*]] = icmp slt i32 [[TMP0]], [[BLOCK_SIZE]] -; AMDGPU-DISABLED2-NEXT: br i1 [[THREAD_IS_MAIN_OR_WORKER]], label [[WORKER_STATE_MACHINE_BEGIN:%.*]], label [[WORKER_STATE_MACHINE_FINISHED:%.*]] -; AMDGPU-DISABLED2: worker_state_machine.begin: -; AMDGPU-DISABLED2-NEXT: call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]]) -; AMDGPU-DISABLED2-NEXT: [[WORKER_WORK_FN_ADDR_GENERIC:%.*]] = addrspacecast ptr addrspace(5) [[WORKER_WORK_FN_ADDR]] to ptr -; AMDGPU-DISABLED2-NEXT: [[WORKER_IS_ACTIVE:%.*]] = call i1 @__kmpc_kernel_parallel(ptr [[WORKER_WORK_FN_ADDR_GENERIC]]) -; AMDGPU-DISABLED2-NEXT: [[WORKER_WORK_FN:%.*]] = load ptr, ptr [[WORKER_WORK_FN_ADDR_GENERIC]], align 8 -; AMDGPU-DISABLED2-NEXT: [[WORKER_IS_DONE:%.*]] = icmp eq ptr [[WORKER_WORK_FN]], null -; AMDGPU-DISABLED2-NEXT: br i1 [[WORKER_IS_DONE]], label [[WORKER_STATE_MACHINE_FINISHED]], label [[WORKER_STATE_MACHINE_IS_ACTIVE_CHECK:%.*]] -; AMDGPU-DISABLED2: worker_state_machine.finished: -; AMDGPU-DISABLED2-NEXT: ret void -; AMDGPU-DISABLED2: worker_state_machine.is_active.check: -; AMDGPU-DISABLED2-NEXT: br i1 [[WORKER_IS_ACTIVE]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_FALLBACK_EXECUTE:%.*]], label [[WORKER_STATE_MACHINE_DONE_BARRIER:%.*]] -; AMDGPU-DISABLED2: worker_state_machine.parallel_region.fallback.execute: -; AMDGPU-DISABLED2-NEXT: call void [[WORKER_WORK_FN]](i16 0, i32 [[TMP0]]) -; AMDGPU-DISABLED2-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END:%.*]] -; AMDGPU-DISABLED2: worker_state_machine.parallel_region.end: -; AMDGPU-DISABLED2-NEXT: call void @__kmpc_kernel_end_parallel() -; AMDGPU-DISABLED2-NEXT: br label [[WORKER_STATE_MACHINE_DONE_BARRIER]] -; AMDGPU-DISABLED2: worker_state_machine.done.barrier: -; AMDGPU-DISABLED2-NEXT: call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]]) -; AMDGPU-DISABLED2-NEXT: br label [[WORKER_STATE_MACHINE_BEGIN]] -; AMDGPU-DISABLED2: thread.user_code.check: ; AMDGPU-DISABLED2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1 ; AMDGPU-DISABLED2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[COMMON_RET:%.*]] ; AMDGPU-DISABLED2: common.ret: @@ -2597,38 +2306,9 @@ define weak ptx_kernel void @__omp_offloading_fd02_2044372e_do_not_spmdize_targe ; NVPTX-DISABLED2-LABEL: define {{[^@]+}}@__omp_offloading_fd02_2044372e_do_not_spmdize_target_l65 ; NVPTX-DISABLED2-SAME: () #[[ATTR0]] { ; NVPTX-DISABLED2-NEXT: entry: -; NVPTX-DISABLED2-NEXT: [[WORKER_WORK_FN_ADDR:%.*]] = alloca ptr, align 8 ; NVPTX-DISABLED2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4 ; NVPTX-DISABLED2-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4 ; NVPTX-DISABLED2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_fd02_2044372e_do_not_spmdize_target_l65_kernel_environment, ptr null) -; NVPTX-DISABLED2-NEXT: [[THREAD_IS_WORKER:%.*]] = icmp ne i32 [[TMP0]], -1 -; NVPTX-DISABLED2-NEXT: br i1 [[THREAD_IS_WORKER]], label [[IS_WORKER_CHECK:%.*]], label [[THREAD_USER_CODE_CHECK:%.*]] -; NVPTX-DISABLED2: is_worker_check: -; NVPTX-DISABLED2-NEXT: [[BLOCK_HW_SIZE:%.*]] = call i32 
@__kmpc_get_hardware_num_threads_in_block() -; NVPTX-DISABLED2-NEXT: [[WARP_SIZE:%.*]] = call i32 @__kmpc_get_warp_size() -; NVPTX-DISABLED2-NEXT: [[BLOCK_SIZE:%.*]] = sub i32 [[BLOCK_HW_SIZE]], [[WARP_SIZE]] -; NVPTX-DISABLED2-NEXT: [[THREAD_IS_MAIN_OR_WORKER:%.*]] = icmp slt i32 [[TMP0]], [[BLOCK_SIZE]] -; NVPTX-DISABLED2-NEXT: br i1 [[THREAD_IS_MAIN_OR_WORKER]], label [[WORKER_STATE_MACHINE_BEGIN:%.*]], label [[WORKER_STATE_MACHINE_FINISHED:%.*]] -; NVPTX-DISABLED2: worker_state_machine.begin: -; NVPTX-DISABLED2-NEXT: call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]]) -; NVPTX-DISABLED2-NEXT: [[WORKER_IS_ACTIVE:%.*]] = call i1 @__kmpc_kernel_parallel(ptr [[WORKER_WORK_FN_ADDR]]) -; NVPTX-DISABLED2-NEXT: [[WORKER_WORK_FN:%.*]] = load ptr, ptr [[WORKER_WORK_FN_ADDR]], align 8 -; NVPTX-DISABLED2-NEXT: [[WORKER_IS_DONE:%.*]] = icmp eq ptr [[WORKER_WORK_FN]], null -; NVPTX-DISABLED2-NEXT: br i1 [[WORKER_IS_DONE]], label [[WORKER_STATE_MACHINE_FINISHED]], label [[WORKER_STATE_MACHINE_IS_ACTIVE_CHECK:%.*]] -; NVPTX-DISABLED2: worker_state_machine.finished: -; NVPTX-DISABLED2-NEXT: ret void -; NVPTX-DISABLED2: worker_state_machine.is_active.check: -; NVPTX-DISABLED2-NEXT: br i1 [[WORKER_IS_ACTIVE]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_FALLBACK_EXECUTE:%.*]], label [[WORKER_STATE_MACHINE_DONE_BARRIER:%.*]] -; NVPTX-DISABLED2: worker_state_machine.parallel_region.fallback.execute: -; NVPTX-DISABLED2-NEXT: call void [[WORKER_WORK_FN]](i16 0, i32 [[TMP0]]) -; NVPTX-DISABLED2-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END:%.*]] -; NVPTX-DISABLED2: worker_state_machine.parallel_region.end: -; NVPTX-DISABLED2-NEXT: call void @__kmpc_kernel_end_parallel() -; NVPTX-DISABLED2-NEXT: br label [[WORKER_STATE_MACHINE_DONE_BARRIER]] -; NVPTX-DISABLED2: worker_state_machine.done.barrier: -; NVPTX-DISABLED2-NEXT: call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]]) -; NVPTX-DISABLED2-NEXT: br label [[WORKER_STATE_MACHINE_BEGIN]] -; NVPTX-DISABLED2: thread.user_code.check: ; NVPTX-DISABLED2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1 ; NVPTX-DISABLED2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[COMMON_RET:%.*]] ; NVPTX-DISABLED2: common.ret: @@ -2638,6 +2318,7 @@ define weak ptx_kernel void @__omp_offloading_fd02_2044372e_do_not_spmdize_targe ; NVPTX-DISABLED2-NEXT: call void @__omp_outlined__8(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR4]] ; NVPTX-DISABLED2-NEXT: call void @__kmpc_target_deinit() ; NVPTX-DISABLED2-NEXT: br label [[COMMON_RET]] +; entry: %.zero.addr = alloca i32, align 4 %.threadid_temp. 
= alloca i32, align 4 @@ -2694,6 +2375,7 @@ define internal void @__omp_outlined__8(ptr noalias %.global_tid., ptr noalias % ; NVPTX-DISABLED2-NEXT: entry: ; NVPTX-DISABLED2-NEXT: call void @unknown() #[[ATTR8]] ; NVPTX-DISABLED2-NEXT: ret void +; entry: call void @unknown() #11 ret void @@ -2862,44 +2544,8 @@ define weak ptx_kernel void @__omp_offloading_fd02_2044372e_do_not_spmdize_task_ ; AMDGPU-DISABLED2-LABEL: define {{[^@]+}}@__omp_offloading_fd02_2044372e_do_not_spmdize_task_l74 ; AMDGPU-DISABLED2-SAME: () #[[ATTR0]] { ; AMDGPU-DISABLED2-NEXT: entry: -; AMDGPU-DISABLED2-NEXT: [[WORKER_WORK_FN_ADDR:%.*]] = alloca ptr, align 8, addrspace(5) ; AMDGPU-DISABLED2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8 ; AMDGPU-DISABLED2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_fd02_2044372e_do_not_spmdize_task_l74_kernel_environment, ptr null) -; AMDGPU-DISABLED2-NEXT: [[THREAD_IS_WORKER:%.*]] = icmp ne i32 [[TMP0]], -1 -; AMDGPU-DISABLED2-NEXT: br i1 [[THREAD_IS_WORKER]], label [[IS_WORKER_CHECK:%.*]], label [[THREAD_USER_CODE_CHECK:%.*]] -; AMDGPU-DISABLED2: is_worker_check: -; AMDGPU-DISABLED2-NEXT: [[BLOCK_HW_SIZE:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block() -; AMDGPU-DISABLED2-NEXT: [[WARP_SIZE:%.*]] = call i32 @__kmpc_get_warp_size() -; AMDGPU-DISABLED2-NEXT: [[BLOCK_SIZE:%.*]] = sub i32 [[BLOCK_HW_SIZE]], [[WARP_SIZE]] -; AMDGPU-DISABLED2-NEXT: [[THREAD_IS_MAIN_OR_WORKER:%.*]] = icmp slt i32 [[TMP0]], [[BLOCK_SIZE]] -; AMDGPU-DISABLED2-NEXT: br i1 [[THREAD_IS_MAIN_OR_WORKER]], label [[WORKER_STATE_MACHINE_BEGIN:%.*]], label [[WORKER_STATE_MACHINE_FINISHED:%.*]] -; AMDGPU-DISABLED2: worker_state_machine.begin: -; AMDGPU-DISABLED2-NEXT: call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]]) -; AMDGPU-DISABLED2-NEXT: [[WORKER_WORK_FN_ADDR_GENERIC:%.*]] = addrspacecast ptr addrspace(5) [[WORKER_WORK_FN_ADDR]] to ptr -; AMDGPU-DISABLED2-NEXT: [[WORKER_IS_ACTIVE:%.*]] = call i1 @__kmpc_kernel_parallel(ptr [[WORKER_WORK_FN_ADDR_GENERIC]]) -; AMDGPU-DISABLED2-NEXT: [[WORKER_WORK_FN:%.*]] = load ptr, ptr [[WORKER_WORK_FN_ADDR_GENERIC]], align 8 -; AMDGPU-DISABLED2-NEXT: [[WORKER_IS_DONE:%.*]] = icmp eq ptr [[WORKER_WORK_FN]], null -; AMDGPU-DISABLED2-NEXT: br i1 [[WORKER_IS_DONE]], label [[WORKER_STATE_MACHINE_FINISHED]], label [[WORKER_STATE_MACHINE_IS_ACTIVE_CHECK:%.*]] -; AMDGPU-DISABLED2: worker_state_machine.finished: -; AMDGPU-DISABLED2-NEXT: ret void -; AMDGPU-DISABLED2: worker_state_machine.is_active.check: -; AMDGPU-DISABLED2-NEXT: br i1 [[WORKER_IS_ACTIVE]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK:%.*]], label [[WORKER_STATE_MACHINE_DONE_BARRIER:%.*]] -; AMDGPU-DISABLED2: worker_state_machine.parallel_region.check: -; AMDGPU-DISABLED2-NEXT: [[WORKER_CHECK_PARALLEL_REGION:%.*]] = icmp eq ptr [[WORKER_WORK_FN]], @__omp_outlined__9_wrapper.ID -; AMDGPU-DISABLED2-NEXT: br i1 [[WORKER_CHECK_PARALLEL_REGION]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_EXECUTE:%.*]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_FALLBACK_EXECUTE:%.*]] -; AMDGPU-DISABLED2: worker_state_machine.parallel_region.execute: -; AMDGPU-DISABLED2-NEXT: call void @__omp_outlined__9_wrapper(i16 0, i32 [[TMP0]]) -; AMDGPU-DISABLED2-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END:%.*]] -; AMDGPU-DISABLED2: worker_state_machine.parallel_region.fallback.execute: -; AMDGPU-DISABLED2-NEXT: call void [[WORKER_WORK_FN]](i16 0, i32 [[TMP0]]) -; AMDGPU-DISABLED2-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END]] -; 
AMDGPU-DISABLED2: worker_state_machine.parallel_region.end: -; AMDGPU-DISABLED2-NEXT: call void @__kmpc_kernel_end_parallel() -; AMDGPU-DISABLED2-NEXT: br label [[WORKER_STATE_MACHINE_DONE_BARRIER]] -; AMDGPU-DISABLED2: worker_state_machine.done.barrier: -; AMDGPU-DISABLED2-NEXT: call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]]) -; AMDGPU-DISABLED2-NEXT: br label [[WORKER_STATE_MACHINE_BEGIN]] -; AMDGPU-DISABLED2: thread.user_code.check: ; AMDGPU-DISABLED2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1 ; AMDGPU-DISABLED2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[COMMON_RET:%.*]] ; AMDGPU-DISABLED2: common.ret: @@ -2908,7 +2554,7 @@ define weak ptx_kernel void @__omp_offloading_fd02_2044372e_do_not_spmdize_task_ ; AMDGPU-DISABLED2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR4]] ; AMDGPU-DISABLED2-NEXT: [[TMP2:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i64 40, i64 0, ptr @"_omp_task_entry$") #[[ATTR4]] ; AMDGPU-DISABLED2-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP1]], ptr [[TMP2]]) #[[ATTR4]] -; AMDGPU-DISABLED2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__9, ptr @__omp_outlined__9_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS]], i64 0) +; AMDGPU-DISABLED2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__9, ptr @__omp_outlined__9_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0) ; AMDGPU-DISABLED2-NEXT: call void @__kmpc_target_deinit() ; AMDGPU-DISABLED2-NEXT: br label [[COMMON_RET]] ; @@ -2967,43 +2613,8 @@ define weak ptx_kernel void @__omp_offloading_fd02_2044372e_do_not_spmdize_task_ ; NVPTX-DISABLED2-LABEL: define {{[^@]+}}@__omp_offloading_fd02_2044372e_do_not_spmdize_task_l74 ; NVPTX-DISABLED2-SAME: () #[[ATTR0]] { ; NVPTX-DISABLED2-NEXT: entry: -; NVPTX-DISABLED2-NEXT: [[WORKER_WORK_FN_ADDR:%.*]] = alloca ptr, align 8 ; NVPTX-DISABLED2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8 ; NVPTX-DISABLED2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_fd02_2044372e_do_not_spmdize_task_l74_kernel_environment, ptr null) -; NVPTX-DISABLED2-NEXT: [[THREAD_IS_WORKER:%.*]] = icmp ne i32 [[TMP0]], -1 -; NVPTX-DISABLED2-NEXT: br i1 [[THREAD_IS_WORKER]], label [[IS_WORKER_CHECK:%.*]], label [[THREAD_USER_CODE_CHECK:%.*]] -; NVPTX-DISABLED2: is_worker_check: -; NVPTX-DISABLED2-NEXT: [[BLOCK_HW_SIZE:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block() -; NVPTX-DISABLED2-NEXT: [[WARP_SIZE:%.*]] = call i32 @__kmpc_get_warp_size() -; NVPTX-DISABLED2-NEXT: [[BLOCK_SIZE:%.*]] = sub i32 [[BLOCK_HW_SIZE]], [[WARP_SIZE]] -; NVPTX-DISABLED2-NEXT: [[THREAD_IS_MAIN_OR_WORKER:%.*]] = icmp slt i32 [[TMP0]], [[BLOCK_SIZE]] -; NVPTX-DISABLED2-NEXT: br i1 [[THREAD_IS_MAIN_OR_WORKER]], label [[WORKER_STATE_MACHINE_BEGIN:%.*]], label [[WORKER_STATE_MACHINE_FINISHED:%.*]] -; NVPTX-DISABLED2: worker_state_machine.begin: -; NVPTX-DISABLED2-NEXT: call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]]) -; NVPTX-DISABLED2-NEXT: [[WORKER_IS_ACTIVE:%.*]] = call i1 @__kmpc_kernel_parallel(ptr [[WORKER_WORK_FN_ADDR]]) -; NVPTX-DISABLED2-NEXT: [[WORKER_WORK_FN:%.*]] = load ptr, ptr [[WORKER_WORK_FN_ADDR]], align 8 -; NVPTX-DISABLED2-NEXT: [[WORKER_IS_DONE:%.*]] = icmp eq ptr [[WORKER_WORK_FN]], null -; NVPTX-DISABLED2-NEXT: br i1 [[WORKER_IS_DONE]], label [[WORKER_STATE_MACHINE_FINISHED]], label 
[[WORKER_STATE_MACHINE_IS_ACTIVE_CHECK:%.*]] -; NVPTX-DISABLED2: worker_state_machine.finished: -; NVPTX-DISABLED2-NEXT: ret void -; NVPTX-DISABLED2: worker_state_machine.is_active.check: -; NVPTX-DISABLED2-NEXT: br i1 [[WORKER_IS_ACTIVE]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK:%.*]], label [[WORKER_STATE_MACHINE_DONE_BARRIER:%.*]] -; NVPTX-DISABLED2: worker_state_machine.parallel_region.check: -; NVPTX-DISABLED2-NEXT: [[WORKER_CHECK_PARALLEL_REGION:%.*]] = icmp eq ptr [[WORKER_WORK_FN]], @__omp_outlined__9_wrapper.ID -; NVPTX-DISABLED2-NEXT: br i1 [[WORKER_CHECK_PARALLEL_REGION]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_EXECUTE:%.*]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_FALLBACK_EXECUTE:%.*]] -; NVPTX-DISABLED2: worker_state_machine.parallel_region.execute: -; NVPTX-DISABLED2-NEXT: call void @__omp_outlined__9_wrapper(i16 0, i32 [[TMP0]]) -; NVPTX-DISABLED2-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END:%.*]] -; NVPTX-DISABLED2: worker_state_machine.parallel_region.fallback.execute: -; NVPTX-DISABLED2-NEXT: call void [[WORKER_WORK_FN]](i16 0, i32 [[TMP0]]) -; NVPTX-DISABLED2-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END]] -; NVPTX-DISABLED2: worker_state_machine.parallel_region.end: -; NVPTX-DISABLED2-NEXT: call void @__kmpc_kernel_end_parallel() -; NVPTX-DISABLED2-NEXT: br label [[WORKER_STATE_MACHINE_DONE_BARRIER]] -; NVPTX-DISABLED2: worker_state_machine.done.barrier: -; NVPTX-DISABLED2-NEXT: call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]]) -; NVPTX-DISABLED2-NEXT: br label [[WORKER_STATE_MACHINE_BEGIN]] -; NVPTX-DISABLED2: thread.user_code.check: ; NVPTX-DISABLED2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1 ; NVPTX-DISABLED2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[COMMON_RET:%.*]] ; NVPTX-DISABLED2: common.ret: @@ -3012,9 +2623,10 @@ define weak ptx_kernel void @__omp_offloading_fd02_2044372e_do_not_spmdize_task_ ; NVPTX-DISABLED2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR4]] ; NVPTX-DISABLED2-NEXT: [[TMP2:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i64 40, i64 0, ptr @"_omp_task_entry$") #[[ATTR4]] ; NVPTX-DISABLED2-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP1]], ptr [[TMP2]]) #[[ATTR4]] -; NVPTX-DISABLED2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__9, ptr @__omp_outlined__9_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS]], i64 0) +; NVPTX-DISABLED2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__9, ptr @__omp_outlined__9_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0) ; NVPTX-DISABLED2-NEXT: call void @__kmpc_target_deinit() ; NVPTX-DISABLED2-NEXT: br label [[COMMON_RET]] +; entry: %captured_vars_addrs = alloca [0 x ptr], align 8 %0 = call i32 @__kmpc_target_init(ptr @__omp_offloading_fd02_2044372e_do_not_spmdize_task_l74_kernel_environment, ptr null) @@ -3070,6 +2682,7 @@ define internal void @.omp_outlined.(i32 %.global_tid., ptr noalias %.part_id., ; NVPTX-DISABLED2-NEXT: entry: ; NVPTX-DISABLED2-NEXT: call void @spmd_amenable() #[[ATTR7]] ; NVPTX-DISABLED2-NEXT: ret void +; entry: call void @spmd_amenable() #10 ret void @@ -3131,6 +2744,7 @@ define weak i32 @__kmpc_target_init(ptr, ptr) { ; NVPTX-DISABLED2-LABEL: define {{[^@]+}}@__kmpc_target_init ; NVPTX-DISABLED2-SAME: (ptr [[TMP0:%.*]], ptr [[TMP1:%.*]]) { ; NVPTX-DISABLED2-NEXT: ret i32 0 +; ret i32 
0 } @@ -3255,6 +2869,7 @@ define internal void @__omp_outlined__9_wrapper(i16 zeroext %0, i32 %1) #3 { ; NVPTX-DISABLED2-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]]) ; NVPTX-DISABLED2-NEXT: call void @__omp_outlined__9(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR4]] ; NVPTX-DISABLED2-NEXT: ret void +; entry: %.addr1 = alloca i32, align 4 %.zero.addr = alloca i32, align 4 @@ -3363,7 +2978,6 @@ attributes #11 = { convergent } ; AMDGPU-DISABLED2: attributes #[[ATTR8]] = { convergent } ; AMDGPU-DISABLED2: attributes #[[ATTR9:[0-9]+]] = { nocallback nofree nosync nounwind willreturn memory(argmem: readwrite) } ; AMDGPU-DISABLED2: attributes #[[ATTR10:[0-9]+]] = { alwaysinline } -; AMDGPU-DISABLED2: attributes #[[ATTR11:[0-9]+]] = { convergent nounwind } ;. ; NVPTX-DISABLED1: attributes #[[ATTR0]] = { alwaysinline convergent norecurse nounwind "kernel" } ; NVPTX-DISABLED1: attributes #[[ATTR1]] = { norecurse } @@ -3389,7 +3003,6 @@ attributes #11 = { convergent } ; NVPTX-DISABLED2: attributes #[[ATTR8]] = { convergent } ; NVPTX-DISABLED2: attributes #[[ATTR9:[0-9]+]] = { nocallback nofree nosync nounwind willreturn memory(argmem: readwrite) } ; NVPTX-DISABLED2: attributes #[[ATTR10:[0-9]+]] = { alwaysinline } -; NVPTX-DISABLED2: attributes #[[ATTR11:[0-9]+]] = { convergent nounwind } ;. ; AMDGPU: [[META0:![0-9]+]] = !{i32 0, i32 64770, i32 541341486, !"do_not_spmdize_task", i32 74, i32 5} ; AMDGPU: [[META1:![0-9]+]] = !{i32 0, i32 64770, i32 541341486, !"sequential_loop_to_stack_var", i32 20, i32 1} diff --git a/llvm/test/Transforms/PGOProfile/memprof-dump-matched-call-sites.ll b/llvm/test/Transforms/PGOProfile/memprof-dump-matched-call-sites.ll new file mode 100644 index 0000000000000..a5302895d0593 --- /dev/null +++ b/llvm/test/Transforms/PGOProfile/memprof-dump-matched-call-sites.ll @@ -0,0 +1,114 @@ +; Tests that the compiler dumps call site matches upon request. +; +; The test case is generated from: +; +; // main +; // | +; // f1 (noinline) +; // | +; // f2 +; // | +; // f3 (noinline) +; // | +; // new +; +; __attribute__((noinline)) char *f3() { return ::new char[4]; } +; +; static char *f2() { return f3(); } +; +; __attribute__((noinline)) static char *f1() { return f2(); } +; +; int main() { +; f1(); +; return 0; +; } +; +; Here we expect to match two inline call stacks: +; +; - [main] +; - [f1, f2] +; +; Note that f3 is considered to be an allocation site, not a call site, because +; it directly calls new after inlining. 
+ +; REQUIRES: x86_64-linux +; RUN: split-file %s %t +; RUN: llvm-profdata merge %t/memprof-dump-matched-call-site.yaml -o %t/memprof-dump-matched-call-site.memprofdata +; RUN: opt < %t/memprof-dump-matched-call-site.ll -passes='memprof-use' -memprof-print-match-info -S 2>&1 | FileCheck %s + +;--- memprof-dump-matched-call-site.yaml +--- +HeapProfileRecords: + - GUID: main + AllocSites: [] + CallSites: + - - { Function: main, LineOffset: 1, Column: 3, IsInlineFrame: false } + - GUID: _ZL2f1v + AllocSites: [] + CallSites: + - - { Function: _ZL2f2v, LineOffset: 0, Column: 28, IsInlineFrame: true } + - { Function: _ZL2f1v, LineOffset: 0, Column: 54, IsInlineFrame: false } + - GUID: _ZL2f2v + AllocSites: [] + CallSites: + - - { Function: _ZL2f2v, LineOffset: 0, Column: 28, IsInlineFrame: true } + - { Function: _ZL2f1v, LineOffset: 0, Column: 54, IsInlineFrame: false } + - GUID: _Z2f3v + AllocSites: + - Callstack: + - { Function: _Z2f3v, LineOffset: 0, Column: 47, IsInlineFrame: false } + - { Function: _ZL2f2v, LineOffset: 0, Column: 28, IsInlineFrame: true } + - { Function: _ZL2f1v, LineOffset: 0, Column: 54, IsInlineFrame: false } + - { Function: main, LineOffset: 1, Column: 3, IsInlineFrame: false } + MemInfoBlock: + AllocCount: 1 + TotalSize: 4 + TotalLifetime: 0 + TotalLifetimeAccessDensity: 0 + CallSites: [] +... +;--- memprof-dump-matched-call-site.ll +; CHECK: MemProf notcold context with id 3894143216621363392 has total profiled size 4 is matched +; CHECK: MemProf callsite match for inline call stack 4745611964195289084 10616861955219347331 +; CHECK: MemProf callsite match for inline call stack 5401059281181789382 + +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128" +target triple = "x86_64-unknown-linux-gnu" + +define ptr @_Z2f3v() { +entry: + %call = call ptr @_Znam(i64 0), !dbg !3 + ret ptr null +} + +declare ptr @_Znam(i64) + +define i32 @main() { +entry: + call void @_ZL2f1v(), !dbg !7 + ret i32 0 +} + +define void @_ZL2f1v() { +entry: + %call.i = call ptr @_Z2f3v(), !dbg !9 + ret void +} + +!llvm.dbg.cu = !{!0} +!llvm.module.flags = !{!2} + +!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus_14, file: !1) +!1 = !DIFile(filename: "match.cc", directory: "/") +!2 = !{i32 2, !"Debug Info Version", i32 3} +!3 = !DILocation(line: 11, column: 47, scope: !4) +!4 = distinct !DISubprogram(name: "f3", linkageName: "_Z2f3v", scope: !1, file: !1, line: 11, type: !5, scopeLine: 11, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0) +!5 = !DISubroutineType(types: !6) +!6 = !{} +!7 = !DILocation(line: 18, column: 3, scope: !8) +!8 = distinct !DISubprogram(name: "main", scope: !1, file: !1, line: 17, type: !5, scopeLine: 17, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0) +!9 = !DILocation(line: 13, column: 28, scope: !10, inlinedAt: !11) +!10 = distinct !DISubprogram(name: "f2", linkageName: "_ZL2f2v", scope: !1, file: !1, line: 13, type: !5, scopeLine: 13, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagLocalToUnit | DISPFlagDefinition | DISPFlagOptimized, unit: !0) +!11 = distinct !DILocation(line: 15, column: 54, scope: !12) +!12 = distinct !DISubprogram(name: "f1", linkageName: "_ZL2f1v", scope: !1, file: !1, line: 15, type: !13, scopeLine: 15, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagLocalToUnit | DISPFlagDefinition | DISPFlagOptimized, unit: !0) +!13 = 
!DISubroutineType(cc: DW_CC_nocall, types: !6) diff --git a/llvm/test/Transforms/PGOProfile/memprof.ll b/llvm/test/Transforms/PGOProfile/memprof.ll index f0421ba60cffc..5a958de5f7f8d 100644 --- a/llvm/test/Transforms/PGOProfile/memprof.ll +++ b/llvm/test/Transforms/PGOProfile/memprof.ll @@ -101,6 +101,16 @@ ; MEMPROFMATCHINFO: MemProf cold context with id 15737101490731057601 has total profiled size 10 is matched ; MEMPROFMATCHINFO: MemProf cold context with id 16342802530253093571 has total profiled size 10 is matched ; MEMPROFMATCHINFO: MemProf cold context with id 18254812774972004394 has total profiled size 10 is matched +; MEMPROFMATCHINFO: MemProf callsite match for inline call stack 748269490701775343 +; MEMPROFMATCHINFO: MemProf callsite match for inline call stack 1544787832369987002 +; MEMPROFMATCHINFO: MemProf callsite match for inline call stack 2061451396820446691 +; MEMPROFMATCHINFO: MemProf callsite match for inline call stack 2104812325165620841 +; MEMPROFMATCHINFO: MemProf callsite match for inline call stack 6281715513834610934 +; MEMPROFMATCHINFO: MemProf callsite match for inline call stack 8467819354083268568 +; MEMPROFMATCHINFO: MemProf callsite match for inline call stack 8690657650969109624 +; MEMPROFMATCHINFO: MemProf callsite match for inline call stack 9086428284934609951 +; MEMPROFMATCHINFO: MemProf callsite match for inline call stack 12481870273128938184 +; MEMPROFMATCHINFO: MemProf callsite match for inline call stack 12699492813229484831 ; ModuleID = 'memprof.cc' source_filename = "memprof.cc" diff --git a/llvm/test/Transforms/SandboxVectorizer/X86/lit.local.cfg b/llvm/test/Transforms/SandboxVectorizer/X86/lit.local.cfg new file mode 100644 index 0000000000000..42bf50dcc13c3 --- /dev/null +++ b/llvm/test/Transforms/SandboxVectorizer/X86/lit.local.cfg @@ -0,0 +1,2 @@ +if not "X86" in config.root.targets: + config.unsupported = True diff --git a/llvm/test/Transforms/SandboxVectorizer/X86/simple_cost_test.ll b/llvm/test/Transforms/SandboxVectorizer/X86/simple_cost_test.ll new file mode 100644 index 0000000000000..f1df52bd88ad7 --- /dev/null +++ b/llvm/test/Transforms/SandboxVectorizer/X86/simple_cost_test.ll @@ -0,0 +1,91 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt -passes=sandbox-vectorizer -mtriple=x86_64-- -mattr=+sse4.1 %s -S -sbvec-cost-threshold=0 | FileCheck %s --check-prefix=THRESHOLD_0 +; RUN: opt -passes=sandbox-vectorizer -mtriple=x86_64-- -mattr=+sse4.1 %s -S -sbvec-cost-threshold=99 | FileCheck %s --check-prefix=THRESHOLD_99 + +define void @simple_cost_test(ptr %ptr) { +; THRESHOLD_0-LABEL: define void @simple_cost_test( +; THRESHOLD_0-SAME: ptr [[PTR:%.*]]) #[[ATTR0:[0-9]+]] { +; THRESHOLD_0-NEXT: [[PTR0:%.*]] = getelementptr double, ptr [[PTR]], i32 0 +; THRESHOLD_0-NEXT: [[VECL:%.*]] = load <2 x double>, ptr [[PTR0]], align 8, !sandboxvec [[META0:![0-9]+]] +; THRESHOLD_0-NEXT: store <2 x double> [[VECL]], ptr [[PTR0]], align 8, !sandboxvec [[META0]] +; THRESHOLD_0-NEXT: ret void +; +; THRESHOLD_99-LABEL: define void @simple_cost_test( +; THRESHOLD_99-SAME: ptr [[PTR:%.*]]) #[[ATTR0:[0-9]+]] { +; THRESHOLD_99-NEXT: [[PTR0:%.*]] = getelementptr double, ptr [[PTR]], i32 0 +; THRESHOLD_99-NEXT: [[PTR1:%.*]] = getelementptr double, ptr [[PTR]], i32 1, !sandboxvec [[META0:![0-9]+]] +; THRESHOLD_99-NEXT: [[LD0:%.*]] = load double, ptr [[PTR0]], align 8, !sandboxvec [[META0]] +; THRESHOLD_99-NEXT: [[LD1:%.*]] = load double, ptr [[PTR1]], align 8, !sandboxvec [[META0]] +; 
THRESHOLD_99-NEXT: store double [[LD0]], ptr [[PTR0]], align 8, !sandboxvec [[META0]] +; THRESHOLD_99-NEXT: store double [[LD1]], ptr [[PTR1]], align 8, !sandboxvec [[META0]] +; THRESHOLD_99-NEXT: ret void +; + %ptr0 = getelementptr double, ptr %ptr, i32 0 + %ptr1 = getelementptr double, ptr %ptr, i32 1 + %ld0 = load double, ptr %ptr0 + %ld1 = load double, ptr %ptr1 + store double %ld0, ptr %ptr0 + store double %ld1, ptr %ptr1 + ret void +} + +define void @pack_cost_test_(ptr %ptr) { +; THRESHOLD_0-LABEL: define void @pack_cost_test_( +; THRESHOLD_0-SAME: ptr [[PTR:%.*]]) #[[ATTR0]] { +; THRESHOLD_0-NEXT: [[PTR0:%.*]] = getelementptr float, ptr [[PTR]], i32 0 +; THRESHOLD_0-NEXT: [[PTR1:%.*]] = getelementptr float, ptr [[PTR]], i32 1 +; THRESHOLD_0-NEXT: [[LD0:%.*]] = load float, ptr [[PTR0]], align 4 +; THRESHOLD_0-NEXT: [[LD1:%.*]] = load float, ptr [[PTR1]], align 4 +; THRESHOLD_0-NEXT: [[PACK4:%.*]] = insertelement <4 x float> poison, float [[LD0]], i32 0, !sandboxvec [[META1:![0-9]+]] +; THRESHOLD_0-NEXT: [[PACK5:%.*]] = insertelement <4 x float> [[PACK4]], float [[LD1]], i32 1, !sandboxvec [[META1]] +; THRESHOLD_0-NEXT: [[PACK6:%.*]] = insertelement <4 x float> [[PACK5]], float [[LD0]], i32 2, !sandboxvec [[META1]] +; THRESHOLD_0-NEXT: [[PACK7:%.*]] = insertelement <4 x float> [[PACK6]], float [[LD1]], i32 3, !sandboxvec [[META1]] +; THRESHOLD_0-NEXT: [[PACK:%.*]] = insertelement <4 x float> poison, float [[LD0]], i32 0, !sandboxvec [[META1]] +; THRESHOLD_0-NEXT: [[PACK1:%.*]] = insertelement <4 x float> [[PACK]], float [[LD1]], i32 1, !sandboxvec [[META1]] +; THRESHOLD_0-NEXT: [[PACK2:%.*]] = insertelement <4 x float> [[PACK1]], float [[LD0]], i32 2, !sandboxvec [[META1]] +; THRESHOLD_0-NEXT: [[PACK3:%.*]] = insertelement <4 x float> [[PACK2]], float [[LD1]], i32 3, !sandboxvec [[META1]] +; THRESHOLD_0-NEXT: [[VEC:%.*]] = fmul <4 x float> [[PACK3]], [[PACK7]], !sandboxvec [[META1]] +; THRESHOLD_0-NEXT: store <4 x float> [[VEC]], ptr [[PTR0]], align 4, !sandboxvec [[META1]] +; THRESHOLD_0-NEXT: ret void +; +; THRESHOLD_99-LABEL: define void @pack_cost_test_( +; THRESHOLD_99-SAME: ptr [[PTR:%.*]]) #[[ATTR0]] { +; THRESHOLD_99-NEXT: [[PTR0:%.*]] = getelementptr float, ptr [[PTR]], i32 0 +; THRESHOLD_99-NEXT: [[PTR1:%.*]] = getelementptr float, ptr [[PTR]], i32 1 +; THRESHOLD_99-NEXT: [[PTR2:%.*]] = getelementptr float, ptr [[PTR]], i32 2, !sandboxvec [[META1:![0-9]+]] +; THRESHOLD_99-NEXT: [[PTR3:%.*]] = getelementptr float, ptr [[PTR]], i32 3, !sandboxvec [[META1]] +; THRESHOLD_99-NEXT: [[LD0:%.*]] = load float, ptr [[PTR0]], align 4 +; THRESHOLD_99-NEXT: [[LD1:%.*]] = load float, ptr [[PTR1]], align 4 +; THRESHOLD_99-NEXT: [[MUL0:%.*]] = fmul float [[LD0]], [[LD0]], !sandboxvec [[META1]] +; THRESHOLD_99-NEXT: [[MUL1:%.*]] = fmul float [[LD1]], [[LD1]], !sandboxvec [[META1]] +; THRESHOLD_99-NEXT: [[MUL2:%.*]] = fmul float [[LD0]], [[LD0]], !sandboxvec [[META1]] +; THRESHOLD_99-NEXT: [[MUL3:%.*]] = fmul float [[LD1]], [[LD1]], !sandboxvec [[META1]] +; THRESHOLD_99-NEXT: store float [[MUL0]], ptr [[PTR0]], align 4, !sandboxvec [[META1]] +; THRESHOLD_99-NEXT: store float [[MUL1]], ptr [[PTR1]], align 4, !sandboxvec [[META1]] +; THRESHOLD_99-NEXT: store float [[MUL2]], ptr [[PTR2]], align 4, !sandboxvec [[META1]] +; THRESHOLD_99-NEXT: store float [[MUL3]], ptr [[PTR3]], align 4, !sandboxvec [[META1]] +; THRESHOLD_99-NEXT: ret void +; + %ptr0 = getelementptr float, ptr %ptr, i32 0 + %ptr1 = getelementptr float, ptr %ptr, i32 1 + %ptr2 = getelementptr float, ptr %ptr, i32 2 + %ptr3 = 
getelementptr float, ptr %ptr, i32 3 + %ld0 = load float, ptr %ptr0 + %ld1 = load float, ptr %ptr1 + %mul0 = fmul float %ld0, %ld0 + %mul1 = fmul float %ld1, %ld1 + %mul2 = fmul float %ld0, %ld0 + %mul3 = fmul float %ld1, %ld1 + store float %mul0, ptr %ptr0 + store float %mul1, ptr %ptr1 + store float %mul2, ptr %ptr2 + store float %mul3, ptr %ptr3 + ret void +} +;. +; THRESHOLD_0: [[META0]] = distinct !{!"sandboxregion"} +; THRESHOLD_0: [[META1]] = distinct !{!"sandboxregion"} +;. +; THRESHOLD_99: [[META0]] = distinct !{!"sandboxregion"} +; THRESHOLD_99: [[META1]] = distinct !{!"sandboxregion"} +;. diff --git a/llvm/test/Transforms/SandboxVectorizer/bottomup_basic.ll b/llvm/test/Transforms/SandboxVectorizer/bottomup_basic.ll index ee5a3a514b3c5..ee8592c04b62c 100644 --- a/llvm/test/Transforms/SandboxVectorizer/bottomup_basic.ll +++ b/llvm/test/Transforms/SandboxVectorizer/bottomup_basic.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 -; RUN: opt -passes=sandbox-vectorizer -sbvec-vec-reg-bits=1024 -sbvec-allow-non-pow2 -sbvec-passes="bottom-up-vec<>" %s -S | FileCheck %s +; RUN: opt -passes=sandbox-vectorizer -sbvec-vec-reg-bits=1024 -sbvec-allow-non-pow2 -sbvec-passes="bottom-up-vec" %s -S | FileCheck %s define void @store_load(ptr %ptr) { ; CHECK-LABEL: define void @store_load( diff --git a/llvm/test/Transforms/SandboxVectorizer/bottomup_seed_slice.ll b/llvm/test/Transforms/SandboxVectorizer/bottomup_seed_slice.ll index 8459c3addaa83..202b5a6fbd6c9 100644 --- a/llvm/test/Transforms/SandboxVectorizer/bottomup_seed_slice.ll +++ b/llvm/test/Transforms/SandboxVectorizer/bottomup_seed_slice.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 -; RUN: opt -passes=sandbox-vectorizer -sbvec-vec-reg-bits=1024 -sbvec-allow-non-pow2 -sbvec-passes="bottom-up-vec<>" %s -S | FileCheck %s +; RUN: opt -passes=sandbox-vectorizer -sbvec-vec-reg-bits=1024 -sbvec-allow-non-pow2 -sbvec-passes="bottom-up-vec" %s -S | FileCheck %s declare void @foo() diff --git a/llvm/test/Transforms/SandboxVectorizer/bottomup_seed_slice_pow2.ll b/llvm/test/Transforms/SandboxVectorizer/bottomup_seed_slice_pow2.ll index e186d5fa86e4a..f1c6e3297d79c 100644 --- a/llvm/test/Transforms/SandboxVectorizer/bottomup_seed_slice_pow2.ll +++ b/llvm/test/Transforms/SandboxVectorizer/bottomup_seed_slice_pow2.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 -; RUN: opt -passes=sandbox-vectorizer -sbvec-vec-reg-bits=1024 -sbvec-allow-non-pow2=false -sbvec-passes="bottom-up-vec<>" %s -S | FileCheck %s --check-prefix=POW2 -; RUN: opt -passes=sandbox-vectorizer -sbvec-vec-reg-bits=1024 -sbvec-allow-non-pow2=true -sbvec-passes="bottom-up-vec<>" %s -S | FileCheck %s --check-prefix=NON-POW2 +; RUN: opt -passes=sandbox-vectorizer -sbvec-vec-reg-bits=1024 -sbvec-allow-non-pow2=false -sbvec-passes="bottom-up-vec" %s -S | FileCheck %s --check-prefix=POW2 +; RUN: opt -passes=sandbox-vectorizer -sbvec-vec-reg-bits=1024 -sbvec-allow-non-pow2=true -sbvec-passes="bottom-up-vec" %s -S | FileCheck %s --check-prefix=NON-POW2 define void @pow2(ptr %ptr, float %val) { ; POW2-LABEL: define void @pow2( diff --git a/llvm/test/Transforms/SandboxVectorizer/cross_bbs.ll b/llvm/test/Transforms/SandboxVectorizer/cross_bbs.ll index 6ec31060d7e0f..ff1604173c317 100644 --- a/llvm/test/Transforms/SandboxVectorizer/cross_bbs.ll +++ 
b/llvm/test/Transforms/SandboxVectorizer/cross_bbs.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 -; RUN: opt -passes=sandbox-vectorizer -sbvec-vec-reg-bits=1024 -sbvec-allow-non-pow2 -sbvec-passes="bottom-up-vec<>" %s -S | FileCheck %s +; RUN: opt -passes=sandbox-vectorizer -sbvec-vec-reg-bits=1024 -sbvec-allow-non-pow2 -sbvec-passes="bottom-up-vec" %s -S | FileCheck %s define void @cross_bbs(ptr %ptr) { ; CHECK-LABEL: define void @cross_bbs( diff --git a/llvm/test/Transforms/SandboxVectorizer/default_pass_pipeline.ll b/llvm/test/Transforms/SandboxVectorizer/default_pass_pipeline.ll index 1d7be43336c87..10de4338caf23 100644 --- a/llvm/test/Transforms/SandboxVectorizer/default_pass_pipeline.ll +++ b/llvm/test/Transforms/SandboxVectorizer/default_pass_pipeline.ll @@ -4,8 +4,10 @@ ; This checks the default pass pipeline for the sandbox vectorizer. define void @pipeline() { +; CHECK: fpm ; CHECK: bottom-up-vec ; CHECK: rpm +; CHECK: tr-accept-or-revert ; CHECK-EMPTY: ret void } diff --git a/llvm/test/Transforms/SandboxVectorizer/pack.ll b/llvm/test/Transforms/SandboxVectorizer/pack.ll index ec6e61a90c0fb..da41036e3a58b 100644 --- a/llvm/test/Transforms/SandboxVectorizer/pack.ll +++ b/llvm/test/Transforms/SandboxVectorizer/pack.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 -; RUN: opt -passes=sandbox-vectorizer -sbvec-vec-reg-bits=1024 -sbvec-allow-non-pow2 -sbvec-passes="bottom-up-vec<>" %s -S | FileCheck %s +; RUN: opt -passes=sandbox-vectorizer -sbvec-vec-reg-bits=1024 -sbvec-allow-non-pow2 -sbvec-passes="bottom-up-vec" %s -S | FileCheck %s define void @pack_constants(ptr %ptr) { ; CHECK-LABEL: define void @pack_constants( diff --git a/llvm/test/Transforms/SandboxVectorizer/repeated_instrs.ll b/llvm/test/Transforms/SandboxVectorizer/repeated_instrs.ll index 6026e92ef9a82..25d9d79154d35 100644 --- a/llvm/test/Transforms/SandboxVectorizer/repeated_instrs.ll +++ b/llvm/test/Transforms/SandboxVectorizer/repeated_instrs.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 -; RUN: opt -passes=sandbox-vectorizer -sbvec-vec-reg-bits=1024 -sbvec-allow-non-pow2 -sbvec-passes="bottom-up-vec<>" %s -S | FileCheck %s +; RUN: opt -passes=sandbox-vectorizer -sbvec-vec-reg-bits=1024 -sbvec-allow-non-pow2 -sbvec-passes="bottom-up-vec" %s -S | FileCheck %s define i32 @repeated_splat(ptr %ptr, i32 %v) #0 { ; CHECK-LABEL: define i32 @repeated_splat( diff --git a/llvm/test/Transforms/SandboxVectorizer/scheduler.ll b/llvm/test/Transforms/SandboxVectorizer/scheduler.ll index 847c978aa4912..92a78a979192b 100644 --- a/llvm/test/Transforms/SandboxVectorizer/scheduler.ll +++ b/llvm/test/Transforms/SandboxVectorizer/scheduler.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 -; RUN: opt -passes=sandbox-vectorizer -sbvec-vec-reg-bits=1024 -sbvec-allow-non-pow2 -sbvec-passes="bottom-up-vec<>" %s -S | FileCheck %s +; RUN: opt -passes=sandbox-vectorizer -sbvec-vec-reg-bits=1024 -sbvec-allow-non-pow2 -sbvec-passes="bottom-up-vec" %s -S | FileCheck %s ; This used to crash because the newly added pack instructions would not update ; the DAG and scheduler, leading to def-after-use. 
diff --git a/llvm/test/Transforms/SandboxVectorizer/special_opcodes.ll b/llvm/test/Transforms/SandboxVectorizer/special_opcodes.ll index fe3a2067d481d..e8fe8b4fa88e3 100644 --- a/llvm/test/Transforms/SandboxVectorizer/special_opcodes.ll +++ b/llvm/test/Transforms/SandboxVectorizer/special_opcodes.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 -; RUN: opt -passes=sandbox-vectorizer -sbvec-vec-reg-bits=1024 -sbvec-allow-non-pow2 -sbvec-passes="bottom-up-vec<>" %s -S | FileCheck %s +; RUN: opt -passes=sandbox-vectorizer -sbvec-vec-reg-bits=1024 -sbvec-allow-non-pow2 -sbvec-passes="bottom-up-vec" %s -S | FileCheck %s ; This file includes tests for opcodes that need special checks. diff --git a/llvm/test/tools/llvm-mca/ARM/m55-fp.s b/llvm/test/tools/llvm-mca/ARM/m55-fp.s index 6318cfa9d6e9c..1668f58c7937f 100644 --- a/llvm/test/tools/llvm-mca/ARM/m55-fp.s +++ b/llvm/test/tools/llvm-mca/ARM/m55-fp.s @@ -21,30 +21,30 @@ vcmpe.f32 s1, #0.0 vcmpe.f64 d1, #0.0 vcvt.f32.f64 s1, d2 vcvt.f64.f32 d1, s1 -vcvt.f16.u16 s1, s2, #8 -vcvt.f16.s16 s1, s2, #8 -vcvt.f16.u32 s1, s2, #8 -vcvt.f16.s32 s1, s2, #8 -vcvt.u16.f16 s1, s2, #8 -vcvt.s16.f16 s1, s2, #8 -vcvt.u32.f16 s1, s2, #8 -vcvt.s32.f16 s1, s2, #8 -vcvt.f32.u16 s1, s2, #8 -vcvt.f32.s16 s1, s2, #8 -vcvt.f32.u32 s1, s2, #8 -vcvt.f32.s32 s1, s2, #8 -vcvt.u16.f32 s1, s2, #8 -vcvt.s16.f32 s1, s2, #8 -vcvt.u32.f32 s1, s2, #8 -vcvt.s32.f32 s1, s2, #8 -vcvt.f64.u16 d1, d2, #8 -vcvt.f64.s16 d1, d2, #8 -vcvt.f64.u32 d1, d2, #8 -vcvt.f64.s32 d1, d2, #8 -vcvt.u16.f64 d1, d2, #8 -vcvt.s16.f64 d1, d2, #8 -vcvt.u32.f64 d1, d2, #8 -vcvt.s32.f64 d1, d2, #8 +vcvt.f16.u16 s1, s1, #8 +vcvt.f16.s16 s1, s1, #8 +vcvt.f16.u32 s1, s1, #8 +vcvt.f16.s32 s1, s1, #8 +vcvt.u16.f16 s1, s1, #8 +vcvt.s16.f16 s1, s1, #8 +vcvt.u32.f16 s1, s1, #8 +vcvt.s32.f16 s1, s1, #8 +vcvt.f32.u16 s1, s1, #8 +vcvt.f32.s16 s1, s1, #8 +vcvt.f32.u32 s1, s1, #8 +vcvt.f32.s32 s1, s1, #8 +vcvt.u16.f32 s1, s1, #8 +vcvt.s16.f32 s1, s1, #8 +vcvt.u32.f32 s1, s1, #8 +vcvt.s32.f32 s1, s1, #8 +vcvt.f64.u16 d1, d1, #8 +vcvt.f64.s16 d1, d1, #8 +vcvt.f64.u32 d1, d1, #8 +vcvt.f64.s32 d1, d1, #8 +vcvt.u16.f64 d1, d1, #8 +vcvt.s16.f64 d1, d1, #8 +vcvt.u32.f64 d1, d1, #8 +vcvt.s32.f64 d1, d1, #8 vcvt.u32.f16 s1, s2 vcvt.s32.f16 s1, s2 vcvt.u32.f32 s1, s2 diff --git a/llvm/test/tools/llvm-mca/ARM/m7-fp.s b/llvm/test/tools/llvm-mca/ARM/m7-fp.s index dcf9723461dec..dba7ff92f30cb 100644 --- a/llvm/test/tools/llvm-mca/ARM/m7-fp.s +++ b/llvm/test/tools/llvm-mca/ARM/m7-fp.s @@ -9,22 +9,22 @@ vcmp.f32 s1, s2 vcmp.f64 d1, d2 vcvt.f32.f64 s1, d2 vcvt.f64.f32 d1, s1 -vcvt.f32.u16 s1, s2, #8 -vcvt.f32.s16 s1, s2, #8 -vcvt.f32.u32 s1, s2, #8 -vcvt.f32.s32 s1, s2, #8 -vcvt.u16.f32 s1, s2, #8 -vcvt.s16.f32 s1, s2, #8 -vcvt.u32.f32 s1, s2, #8 -vcvt.s32.f32 s1, s2, #8 -vcvt.f64.u16 d1, d2, #8 -vcvt.f64.s16 d1, d2, #8 -vcvt.f64.u32 d1, d2, #8 -vcvt.f64.s32 d1, d2, #8 -vcvt.u16.f64 d1, d2, #8 -vcvt.s16.f64 d1, d2, #8 -vcvt.u32.f64 d1, d2, #8 -vcvt.s32.f64 d1, d2, #8 +vcvt.f32.u16 s1, s1, #8 +vcvt.f32.s16 s1, s1, #8 +vcvt.f32.u32 s1, s1, #8 +vcvt.f32.s32 s1, s1, #8 +vcvt.u16.f32 s1, s1, #8 +vcvt.s16.f32 s1, s1, #8 +vcvt.u32.f32 s1, s1, #8 +vcvt.s32.f32 s1, s1, #8 +vcvt.f64.u16 d1, d1, #8 +vcvt.f64.s16 d1, d1, #8 +vcvt.f64.u32 d1, d1, #8 +vcvt.f64.s32 d1, d1, #8 +vcvt.u16.f64 d1, d1, #8 +vcvt.s16.f64 d1, d1, #8 +vcvt.u32.f64 d1, d1, #8 +vcvt.s32.f64 d1, d1, #8 vcvt.u32.f32 s1, s2 vcvt.s32.f32 s1, s2 vcvt.u32.f64 s1, d2 diff --git a/llvm/test/tools/llvm-mca/ARM/m85-fp.s 
b/llvm/test/tools/llvm-mca/ARM/m85-fp.s index edc46060fe0f3..0fc1b394de2dc 100644 --- a/llvm/test/tools/llvm-mca/ARM/m85-fp.s +++ b/llvm/test/tools/llvm-mca/ARM/m85-fp.s @@ -21,30 +21,30 @@ vcmpe.f32 s1, #0.0 vcmpe.f64 d1, #0.0 vcvt.f32.f64 s1, d2 vcvt.f64.f32 d1, s1 -vcvt.f16.u16 s1, s2, #8 -vcvt.f16.s16 s1, s2, #8 -vcvt.f16.u32 s1, s2, #8 -vcvt.f16.s32 s1, s2, #8 -vcvt.u16.f16 s1, s2, #8 -vcvt.s16.f16 s1, s2, #8 -vcvt.u32.f16 s1, s2, #8 -vcvt.s32.f16 s1, s2, #8 -vcvt.f32.u16 s1, s2, #8 -vcvt.f32.s16 s1, s2, #8 -vcvt.f32.u32 s1, s2, #8 -vcvt.f32.s32 s1, s2, #8 -vcvt.u16.f32 s1, s2, #8 -vcvt.s16.f32 s1, s2, #8 -vcvt.u32.f32 s1, s2, #8 -vcvt.s32.f32 s1, s2, #8 -vcvt.f64.u16 d1, d2, #8 -vcvt.f64.s16 d1, d2, #8 -vcvt.f64.u32 d1, d2, #8 -vcvt.f64.s32 d1, d2, #8 -vcvt.u16.f64 d1, d2, #8 -vcvt.s16.f64 d1, d2, #8 -vcvt.u32.f64 d1, d2, #8 -vcvt.s32.f64 d1, d2, #8 +vcvt.f16.u16 s1, s1, #8 +vcvt.f16.s16 s1, s1, #8 +vcvt.f16.u32 s1, s1, #8 +vcvt.f16.s32 s1, s1, #8 +vcvt.u16.f16 s1, s1, #8 +vcvt.s16.f16 s1, s1, #8 +vcvt.u32.f16 s1, s1, #8 +vcvt.s32.f16 s1, s1, #8 +vcvt.f32.u16 s1, s1, #8 +vcvt.f32.s16 s1, s1, #8 +vcvt.f32.u32 s1, s1, #8 +vcvt.f32.s32 s1, s1, #8 +vcvt.u16.f32 s1, s1, #8 +vcvt.s16.f32 s1, s1, #8 +vcvt.u32.f32 s1, s1, #8 +vcvt.s32.f32 s1, s1, #8 +vcvt.f64.u16 d1, d1, #8 +vcvt.f64.s16 d1, d1, #8 +vcvt.f64.u32 d1, d1, #8 +vcvt.f64.s32 d1, d1, #8 +vcvt.u16.f64 d1, d1, #8 +vcvt.s16.f64 d1, d1, #8 +vcvt.u32.f64 d1, d1, #8 +vcvt.s32.f64 d1, d1, #8 vcvt.u32.f16 s1, s2 vcvt.s32.f16 s1, s2 vcvt.u32.f32 s1, s2 diff --git a/llvm/test/tools/llvm-objcopy/ELF/dump-section.test b/llvm/test/tools/llvm-objcopy/ELF/dump-section.test index 037ec86090e55..2dbbcc0ca568e 100644 --- a/llvm/test/tools/llvm-objcopy/ELF/dump-section.test +++ b/llvm/test/tools/llvm-objcopy/ELF/dump-section.test @@ -64,3 +64,7 @@ ProgramHeaders: # RUN: not llvm-objcopy --dump-section .text= %t /dev/null 2>&1 | FileCheck %s --check-prefix=ERR2 # ERR2: error: bad format for --dump-section, expected section=file + +# RUN: not llvm-objcopy --dump-section .text=not_exists/text-section %t 2>&1 \ +# RUN: | FileCheck -DMSG=%errc_ENOENT %s -DINPUT=%t --check-prefix=NO-SUCH-PATH +# NO-SUCH-PATH: error: 'not_exists/text-section': [[MSG]] diff --git a/llvm/test/tools/llvm-objcopy/MachO/dump-section.test b/llvm/test/tools/llvm-objcopy/MachO/dump-section.test index 9a1227cdbbda1..d54a50b557bb7 100644 --- a/llvm/test/tools/llvm-objcopy/MachO/dump-section.test +++ b/llvm/test/tools/llvm-objcopy/MachO/dump-section.test @@ -21,6 +21,10 @@ # RUN: | FileCheck %s -DINPUT=%t --check-prefix=NO-SUCH-SECTION # NO-SUCH-SECTION: error: '[[INPUT]]': section '__TEXT,__foo' not found +# RUN: not llvm-objcopy --dump-section __TEXT,__text=not_exists/text-section %t 2>&1 \ +# RUN: | FileCheck -DMSG=%errc_ENOENT %s -DINPUT=%t --check-prefix=NO-SUCH-PATH +# NO-SUCH-PATH: error: 'not_exists/text-section': [[MSG]] + --- !mach-o FileHeader: magic: 0xFEEDFACF diff --git a/llvm/test/tools/llvm-objcopy/wasm/dump-section.test b/llvm/test/tools/llvm-objcopy/wasm/dump-section.test index 983a581e03fe2..2d1533f06df10 100644 --- a/llvm/test/tools/llvm-objcopy/wasm/dump-section.test +++ b/llvm/test/tools/llvm-objcopy/wasm/dump-section.test @@ -28,6 +28,10 @@ # REMOVED-NOT: producers +# RUN: not llvm-objcopy --dump-section producers=not_exists/text-section %t 2>&1 \ +# RUN: | FileCheck -DMSG=%errc_ENOENT %s -DINPUT=%t --check-prefix=NO-SUCH-PATH +# NO-SUCH-PATH: error: 'not_exists/text-section': [[MSG]] + --- !WASM FileHeader: Version: 0x00000001 diff --git 
a/llvm/test/tools/llvm-readobj/ELF/RISCV/note-gnu-property.yaml b/llvm/test/tools/llvm-readobj/ELF/RISCV/note-gnu-property.yaml new file mode 100644 index 0000000000000..f87ed762038c1 --- /dev/null +++ b/llvm/test/tools/llvm-readobj/ELF/RISCV/note-gnu-property.yaml @@ -0,0 +1,45 @@ +# RUN: yaml2obj %s -DBITS=32 -DPR_PADDING= -o %t1 +# RUN: llvm-readelf --notes %t1 | FileCheck %s --check-prefix=GNU +# RUN: llvm-readobj --notes %t1 | FileCheck %s --check-prefix=LLVM + +# RUN: yaml2obj %s -DBITS=64 -DPR_PADDING=00000000 -o %t2 +# RUN: llvm-readelf --notes %t2 | FileCheck %s --check-prefix=GNU +# RUN: llvm-readobj --notes %t2 | FileCheck %s --check-prefix=LLVM + +# GNU: Displaying notes found in: .note.gnu.property +# GNU-NEXT: Owner Data size Description +# GNU-NEXT: GNU 0x{{([0-9a-z]{8})}} NT_GNU_PROPERTY_TYPE_0 (property note) +# GNU-NEXT: Properties: RISC-V feature: ZICFILP-unlabeled, ZICFISS, ZICFILP-func-sig + +# LLVM: NoteSections [ +# LLVM-NEXT: NoteSection { +# LLVM-NEXT: Name: .note.gnu.property +# LLVM-NEXT: Offset: +# LLVM-NEXT: Size: +# LLVM-NEXT: Notes [ +# LLVM-NEXT: { +# LLVM-NEXT: Owner: GNU +# LLVM-NEXT: Data size: +# LLVM-NEXT: Type: NT_GNU_PROPERTY_TYPE_0 (property note) +# LLVM-NEXT: Property [ +# LLVM-NEXT: RISC-V feature: ZICFILP-unlabeled, ZICFISS, ZICFILP-func-sig +# LLVM-NEXT: ] +# LLVM-NEXT: } +# LLVM-NEXT: ] +# LLVM-NEXT: } +# LLVM-NEXT: ] + +--- !ELF +FileHeader: + Class: ELFCLASS[[BITS]] + Data: ELFDATA2LSB + Type: ET_REL + Machine: EM_RISCV +Sections: + - Name: .note.gnu.property + Type: SHT_NOTE + Flags: [ SHF_ALLOC ] + Notes: + - Name: 'GNU' + Desc: '000000c00400000007000000[[PR_PADDING]]' + Type: 5 # NT_GNU_PROPERTY_TYPE_0 diff --git a/llvm/tools/dsymutil/BinaryHolder.cpp b/llvm/tools/dsymutil/BinaryHolder.cpp index 5daaa6755b295..7588a33eb46b2 100644 --- a/llvm/tools/dsymutil/BinaryHolder.cpp +++ b/llvm/tools/dsymutil/BinaryHolder.cpp @@ -176,8 +176,8 @@ BinaryHolder::ArchiveEntry::getObjectEntry(StringRef Filename, // Try the cache first. std::lock_guard Lock(MemberCacheMutex); - if (MemberCache.count(Key)) - return *MemberCache[Key]; + if (auto It = MemberCache.find(Key); It != MemberCache.end()) + return *It->second; // Create a new ObjectEntry, but don't add it to the cache yet. 
Loading of // the archive members might fail and we don't want to lock the whole archive @@ -228,8 +228,7 @@ BinaryHolder::ArchiveEntry::getObjectEntry(StringRef Filename, if (OE->Objects.empty()) return errorCodeToError(errc::no_such_file_or_directory); - MemberCache[Key] = std::move(OE); - return *MemberCache[Key]; + return *(MemberCache[Key] = std::move(OE)); } Expected diff --git a/llvm/tools/llvm-jitlink/llvm-jitlink-executor/llvm-jitlink-executor.cpp b/llvm/tools/llvm-jitlink/llvm-jitlink-executor/llvm-jitlink-executor.cpp index 86b89a38c1760..dbdec77327774 100644 --- a/llvm/tools/llvm-jitlink/llvm-jitlink-executor/llvm-jitlink-executor.cpp +++ b/llvm/tools/llvm-jitlink/llvm-jitlink-executor/llvm-jitlink-executor.cpp @@ -12,6 +12,7 @@ #include "llvm/ADT/StringRef.h" #include "llvm/Config/llvm-config.h" // for LLVM_ON_UNIX, LLVM_ENABLE_THREADS +#include "llvm/ExecutionEngine/Orc/TargetProcess/DefaultHostBootstrapValues.h" #include "llvm/ExecutionEngine/Orc/TargetProcess/ExecutorSharedMemoryMapperService.h" #include "llvm/ExecutionEngine/Orc/TargetProcess/JITLoaderGDB.h" #include "llvm/ExecutionEngine/Orc/TargetProcess/RegisterEHFrames.h" @@ -187,6 +188,8 @@ int main(int argc, char *argv[]) { std::make_unique()); S.bootstrapSymbols() = SimpleRemoteEPCServer::defaultBootstrapSymbols(); + addDefaultBootstrapValuesForHostProcess(S.bootstrapMap(), + S.bootstrapSymbols()); S.services().push_back( std::make_unique()); S.services().push_back( diff --git a/llvm/tools/llvm-readobj/ELFDumper.cpp b/llvm/tools/llvm-readobj/ELFDumper.cpp index bfca65aad52b4..7806eea6a0c52 100644 --- a/llvm/tools/llvm-readobj/ELFDumper.cpp +++ b/llvm/tools/llvm-readobj/ELFDumper.cpp @@ -5333,7 +5333,8 @@ static bool printAArch64PAuthABICoreInfo(raw_ostream &OS, uint32_t DataSize, template static std::string getGNUProperty(uint32_t Type, uint32_t DataSize, - ArrayRef Data) { + ArrayRef Data, + typename ELFT::Half EMachine) { std::string str; raw_string_ostream OS(str); uint32_t PrData; @@ -5366,8 +5367,25 @@ static std::string getGNUProperty(uint32_t Type, uint32_t DataSize, return str; case GNU_PROPERTY_AARCH64_FEATURE_1_AND: case GNU_PROPERTY_X86_FEATURE_1_AND: - OS << ((Type == GNU_PROPERTY_AARCH64_FEATURE_1_AND) ? 
"aarch64 feature: " - : "x86 feature: "); + static_assert(GNU_PROPERTY_AARCH64_FEATURE_1_AND == + GNU_PROPERTY_RISCV_FEATURE_1_AND, + "GNU_PROPERTY_RISCV_FEATURE_1_AND should equal " + "GNU_PROPERTY_AARCH64_FEATURE_1_AND, otherwise " + "GNU_PROPERTY_RISCV_FEATURE_1_AND would be skipped!"); + + if (EMachine == EM_AARCH64 && Type == GNU_PROPERTY_AARCH64_FEATURE_1_AND) { + OS << "aarch64 feature: "; + } else if (EMachine == EM_RISCV && + Type == GNU_PROPERTY_RISCV_FEATURE_1_AND) { + OS << "RISC-V feature: "; + } else if ((EMachine == EM_386 || EMachine == EM_X86_64) && + Type == GNU_PROPERTY_X86_FEATURE_1_AND) { + OS << "x86 feature: "; + } else { + OS << format("", Type); + return str; + } + if (DataSize != 4) { OS << format("", DataSize); return str; @@ -5377,10 +5395,16 @@ static std::string getGNUProperty(uint32_t Type, uint32_t DataSize, OS << ""; return str; } - if (Type == GNU_PROPERTY_AARCH64_FEATURE_1_AND) { + + if (EMachine == EM_AARCH64) { DumpBit(GNU_PROPERTY_AARCH64_FEATURE_1_BTI, "BTI"); DumpBit(GNU_PROPERTY_AARCH64_FEATURE_1_PAC, "PAC"); DumpBit(GNU_PROPERTY_AARCH64_FEATURE_1_GCS, "GCS"); + } else if (EMachine == EM_RISCV) { + DumpBit(GNU_PROPERTY_RISCV_FEATURE_1_CFI_LP_UNLABELED, + "ZICFILP-unlabeled"); + DumpBit(GNU_PROPERTY_RISCV_FEATURE_1_CFI_SS, "ZICFISS"); + DumpBit(GNU_PROPERTY_RISCV_FEATURE_1_CFI_LP_FUNC_SIG, "ZICFILP-func-sig"); } else { DumpBit(GNU_PROPERTY_X86_FEATURE_1_IBT, "IBT"); DumpBit(GNU_PROPERTY_X86_FEATURE_1_SHSTK, "SHSTK"); @@ -5441,7 +5465,8 @@ static std::string getGNUProperty(uint32_t Type, uint32_t DataSize, } template -static SmallVector getGNUPropertyList(ArrayRef Arr) { +static SmallVector +getGNUPropertyList(ArrayRef Arr, typename ELFT::Half EMachine) { using Elf_Word = typename ELFT::Word; SmallVector Properties; @@ -5459,8 +5484,8 @@ static SmallVector getGNUPropertyList(ArrayRef Arr) { Properties.push_back(str); break; } - Properties.push_back( - getGNUProperty(Type, DataSize, Arr.take_front(PaddedSize))); + Properties.push_back(getGNUProperty( + Type, DataSize, Arr.take_front(PaddedSize), EMachine)); Arr = Arr.drop_front(PaddedSize); } @@ -5512,7 +5537,7 @@ static StringRef getDescAsStringRef(ArrayRef Desc) { template static bool printGNUNote(raw_ostream &OS, uint32_t NoteType, - ArrayRef Desc) { + ArrayRef Desc, typename ELFT::Half EMachine) { // Return true if we were able to pretty-print the note, false otherwise. switch (NoteType) { default: @@ -5534,7 +5559,7 @@ static bool printGNUNote(raw_ostream &OS, uint32_t NoteType, break; case ELF::NT_GNU_PROPERTY_TYPE_0: OS << " Properties:"; - for (const std::string &Property : getGNUPropertyList(Desc)) + for (const std::string &Property : getGNUPropertyList(Desc, EMachine)) OS << " " << Property << "\n"; break; } @@ -6223,10 +6248,12 @@ template void GNUELFDumper::printNotes() { else OS << "Unknown note type: (" << format_hex(Type, 10) << ")\n"; + const typename ELFT::Half EMachine = this->Obj.getHeader().e_machine; + // Print the description, or fallback to printing raw bytes for unknown // owners/if we fail to pretty-print the contents. 
if (Name == "GNU") { - if (printGNUNote(OS, Type, Descriptor)) + if (printGNUNote(OS, Type, Descriptor, EMachine)) return Error::success(); } else if (Name == "FreeBSD") { if (std::optional N = @@ -7913,7 +7940,8 @@ template void LLVMELFDumper::printAddrsig() { template static bool printGNUNoteLLVMStyle(uint32_t NoteType, ArrayRef Desc, - ScopedPrinter &W) { + ScopedPrinter &W, + typename ELFT::Half EMachine) { // Return true if we were able to pretty-print the note, false otherwise. switch (NoteType) { default: @@ -7938,7 +7966,7 @@ static bool printGNUNoteLLVMStyle(uint32_t NoteType, ArrayRef Desc, break; case ELF::NT_GNU_PROPERTY_TYPE_0: ListScope D(W, "Property"); - for (const std::string &Property : getGNUPropertyList(Desc)) + for (const std::string &Property : getGNUPropertyList(Desc, EMachine)) W.printString(Property); break; } @@ -8057,10 +8085,11 @@ template void LLVMELFDumper::printNotes() { W.printString("Type", "Unknown (" + to_string(format_hex(Type, 10)) + ")"); + const typename ELFT::Half EMachine = this->Obj.getHeader().e_machine; // Print the description, or fallback to printing raw bytes for unknown // owners/if we fail to pretty-print the contents. if (Name == "GNU") { - if (printGNUNoteLLVMStyle(Type, Descriptor, W)) + if (printGNUNoteLLVMStyle(Type, Descriptor, W, EMachine)) return Error::success(); } else if (Name == "FreeBSD") { if (std::optional N = diff --git a/llvm/tools/obj2yaml/dxcontainer2yaml.cpp b/llvm/tools/obj2yaml/dxcontainer2yaml.cpp index 06966b1883586..54a912d9438af 100644 --- a/llvm/tools/obj2yaml/dxcontainer2yaml.cpp +++ b/llvm/tools/obj2yaml/dxcontainer2yaml.cpp @@ -153,6 +153,11 @@ dumpDXContainer(MemoryBufferRef Source) { break; case dxbc::PartType::Unknown: break; + case dxbc::PartType::RTS0: + std::optional RS = Container.getRootSignature(); + if (RS.has_value()) + NewPart.RootSignature = DXContainerYAML::RootSignatureDesc(*RS); + break; } } diff --git a/llvm/unittests/Object/DXContainerTest.cpp b/llvm/unittests/Object/DXContainerTest.cpp index 5a2c852d6aef9..e7b491103d2d0 100644 --- a/llvm/unittests/Object/DXContainerTest.cpp +++ b/llvm/unittests/Object/DXContainerTest.cpp @@ -821,3 +821,73 @@ TEST(DXCFile, MalformedSignature) { "the end of the part data")); } } + +TEST(RootSignature, ParseRootFlags) { + { + uint8_t Buffer[] = { + 0x44, 0x58, 0x42, 0x43, 0x32, 0x9A, 0x53, 0xD8, 0xEC, 0xBE, 0x35, 0x6F, + 0x05, 0x39, 0xE1, 0xFE, 0x31, 0x20, 0xF0, 0xC1, 0x01, 0x00, 0x00, 0x00, + 0x44, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, + 0x52, 0x54, 0x53, 0x30, 0x18, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, + }; + DXContainer C = + llvm::cantFail(DXContainer::create(getMemoryBuffer<68>(Buffer))); + + const auto &RS = C.getRootSignature(); + ASSERT_TRUE(RS.has_value()); + ASSERT_EQ(RS->getVersion(), 2u); + ASSERT_EQ(RS->getNumParameters(), 0u); + ASSERT_EQ(RS->getRootParametersOffset(), 0u); + ASSERT_EQ(RS->getNumStaticSamplers(), 0u); + ASSERT_EQ(RS->getStaticSamplersOffset(), 0u); + ASSERT_EQ(RS->getFlags(), 0x01u); + } + + { + // this parameter has the root signature definition missing some values. 
+ uint8_t Buffer[] = { + 0x44, 0x58, 0x42, 0x43, 0x32, 0x9A, 0x53, 0xD8, 0xEC, 0xBE, 0x35, + 0x6F, 0x05, 0x39, 0xE1, 0xFE, 0x31, 0x20, 0xF0, 0xC1, 0x01, 0x00, + 0x00, 0x00, 0x44, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x24, + 0x00, 0x00, 0x00, 0x52, 0x54, 0x53, 0x30, 0x18, 0x00, 0x00, 0x00, + 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }; + EXPECT_THAT_EXPECTED( + DXContainer::create(getMemoryBuffer<64>(Buffer)), + FailedWithMessage( + "Invalid root signature, insufficient space for header.")); + } + { + // Version has been changed to an invalid number. + uint8_t Buffer[] = { + 0x44, 0x58, 0x42, 0x43, 0x32, 0x9A, 0x53, 0xD8, 0xEC, 0xBE, 0x35, 0x6F, + 0x05, 0x39, 0xE1, 0xFE, 0x31, 0x20, 0xF0, 0xC1, 0x01, 0x00, 0x00, 0x00, + 0x44, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, + 0x52, 0x54, 0x53, 0x30, 0x18, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, + }; + EXPECT_THAT_EXPECTED( + DXContainer::create(getMemoryBuffer<68>(Buffer)), + FailedWithMessage("Stream Error: An unspecified error has occurred. " + "Invalid Root Signature Version")); + } + { + // Flag has been set to an invalid value + uint8_t Buffer[] = { + 0x44, 0x58, 0x42, 0x43, 0x32, 0x9A, 0x53, 0xD8, 0xEC, 0xBE, 0x35, 0x6F, + 0x05, 0x39, 0xE1, 0xFE, 0x31, 0x20, 0xF0, 0xC1, 0x01, 0x00, 0x00, 0x00, + 0x44, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, + 0x52, 0x54, 0x53, 0x30, 0x18, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0xFF, + }; + EXPECT_THAT_EXPECTED( + DXContainer::create(getMemoryBuffer<68>(Buffer)), + FailedWithMessage("Stream Error: An unspecified error has occurred. 
" + "Invalid Root Signature flag")); + } +} diff --git a/llvm/unittests/ObjectYAML/DXContainerYAMLTest.cpp b/llvm/unittests/ObjectYAML/DXContainerYAMLTest.cpp index d4232295c8584..b48cd9ce53987 100644 --- a/llvm/unittests/ObjectYAML/DXContainerYAMLTest.cpp +++ b/llvm/unittests/ObjectYAML/DXContainerYAMLTest.cpp @@ -107,3 +107,42 @@ TEST(DXCFile, ParseEmptyParts) { EXPECT_EQ(Storage.size(), 116u); EXPECT_TRUE(memcmp(Buffer, Storage.data(), 116) == 0); } + +TEST(RootSignature, ParseRootFlags) { + SmallString<128> Storage; + + // First read a fully explicit yaml with all sizes and offsets provided + ASSERT_TRUE(convert(Storage, R"(--- !dxcontainer + Header: + Hash: [ 0x32, 0x9A, 0x53, 0xD8, 0xEC, 0xBE, 0x35, 0x6F, 0x5, + 0x39, 0xE1, 0xFE, 0x31, 0x20, 0xF0, 0xC1 ] + Version: + Major: 1 + Minor: 0 + FileSize: 68 + PartCount: 1 + PartOffsets: [ 36 ] + Parts: + - Name: RTS0 + Size: 24 + RootSignature: + Version: 2 + NumParameters: 0 + RootParametersOffset: 0 + NumStaticSamplers: 0 + StaticSamplersOffset: 0 + AllowInputAssemblerInputLayout: true + )")); + + uint8_t Buffer[] = { + 0x44, 0x58, 0x42, 0x43, 0x32, 0x9A, 0x53, 0xD8, 0xEC, 0xBE, 0x35, 0x6F, + 0x05, 0x39, 0xE1, 0xFE, 0x31, 0x20, 0xF0, 0xC1, 0x01, 0x00, 0x00, 0x00, + 0x44, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, + 0x52, 0x54, 0x53, 0x30, 0x18, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, + }; + + EXPECT_EQ(Storage.size(), 68u); + EXPECT_TRUE(memcmp(Buffer, Storage.data(), 68u) == 0); +} diff --git a/llvm/unittests/SandboxIR/TrackerTest.cpp b/llvm/unittests/SandboxIR/TrackerTest.cpp index 4eedab124bfa0..9c18247b6b96d 100644 --- a/llvm/unittests/SandboxIR/TrackerTest.cpp +++ b/llvm/unittests/SandboxIR/TrackerTest.cpp @@ -52,6 +52,9 @@ define void @foo(ptr %ptr) { auto *F = Ctx.createFunction(&LLVMF); auto *BB = &*F->begin(); auto &Tracker = Ctx.getTracker(); + // Check empty(). + EXPECT_TRUE(Ctx.getTracker().empty()); + Tracker.save(); auto It = BB->begin(); auto *Gep0 = &*It++; @@ -65,6 +68,9 @@ define void @foo(ptr %ptr) { EXPECT_EQ(St->getOperand(1), Gep1); EXPECT_EQ(Ld->getOperand(0), Gep1); + // Check empty(). 
+ EXPECT_FALSE(Ctx.getTracker().empty()); + Ctx.getTracker().revert(); EXPECT_NE(St->getOperand(0), Ld); EXPECT_EQ(St->getOperand(1), Gep0); diff --git a/llvm/utils/TableGen/AsmWriterEmitter.cpp b/llvm/utils/TableGen/AsmWriterEmitter.cpp index e7606b9df4626..795185c677e30 100644 --- a/llvm/utils/TableGen/AsmWriterEmitter.cpp +++ b/llvm/utils/TableGen/AsmWriterEmitter.cpp @@ -967,12 +967,11 @@ void AsmWriterEmitter::EmitPrintAliasInstruction(raw_ostream &O) { IAP.addOperand(ROName, MIOpNum, PrintMethodIdx); // There might be an additional predicate on the MCOperand - unsigned Entry = MCOpPredicateMap[Rec]; + unsigned &Entry = MCOpPredicateMap[Rec]; if (!Entry) { if (!Rec->isValueUnset("MCOperandPredicate")) { MCOpPredicates.push_back(Rec); Entry = MCOpPredicates.size(); - MCOpPredicateMap[Rec] = Entry; } else break; // No conditions on this operand at all } diff --git a/llvm/utils/TableGen/CodeGenMapTable.cpp b/llvm/utils/TableGen/CodeGenMapTable.cpp index 8d22c0013dda8..2641e713c0c85 100644 --- a/llvm/utils/TableGen/CodeGenMapTable.cpp +++ b/llvm/utils/TableGen/CodeGenMapTable.cpp @@ -78,6 +78,7 @@ #include "Common/CodeGenInstruction.h" #include "Common/CodeGenTarget.h" #include "TableGenBackends.h" +#include "llvm/ADT/SetVector.h" #include "llvm/ADT/StringExtras.h" #include "llvm/TableGen/Error.h" #include "llvm/TableGen/Record.h" @@ -361,45 +362,38 @@ unsigned MapTableEmitter::emitBinSearchTable(raw_ostream &OS) { StringRef Namespace = Target.getInstNamespace(); ArrayRef ValueCols = InstrMapDesc.getValueCols(); unsigned NumCol = ValueCols.size(); - unsigned TotalNumInstr = NumberedInstructions.size(); unsigned TableSize = 0; - OS << "static const uint16_t " << InstrMapDesc.getName(); + OS << " using namespace " << Namespace << ";\n"; // Number of columns in the table are NumCol+1 because key instructions are // emitted as first column. 
- OS << "Table[][" << NumCol + 1 << "] = {\n"; - for (unsigned I = 0; I < TotalNumInstr; I++) { - const Record *CurInstr = NumberedInstructions[I]->TheDef; + for (const CodeGenInstruction *Inst : NumberedInstructions) { + const Record *CurInstr = Inst->TheDef; ArrayRef ColInstrs = MapTable[CurInstr]; + if (ColInstrs.empty()) + continue; std::string OutStr; - unsigned RelExists = 0; - if (!ColInstrs.empty()) { - for (unsigned J = 0; J < NumCol; J++) { - if (ColInstrs[J] != nullptr) { - RelExists = 1; - OutStr += ", "; - OutStr += Namespace; - OutStr += "::"; - OutStr += ColInstrs[J]->getName(); - } else { - OutStr += ", (uint16_t)-1U"; - } + bool RelExists = false; + for (const Record *ColInstr : ColInstrs) { + if (ColInstr) { + RelExists = true; + OutStr += ", "; + OutStr += ColInstr->getName(); + } else { + OutStr += ", (uint16_t)-1U"; } + } - if (RelExists) { - OS << " { " << Namespace << "::" << CurInstr->getName(); - OS << OutStr << " },\n"; - TableSize++; - } + if (RelExists) { + if (TableSize == 0) + OS << " static constexpr uint16_t Table[][" << NumCol + 1 << "] = {\n"; + OS << " { " << CurInstr->getName() << OutStr << " },\n"; + ++TableSize; } } - if (!TableSize) { - OS << " { " << Namespace << "::" - << "INSTRUCTION_LIST_END, "; - OS << Namespace << "::" - << "INSTRUCTION_LIST_END }"; - } - OS << "}; // End of " << InstrMapDesc.getName() << "Table\n\n"; + + if (TableSize != 0) + OS << " }; // End of Table\n\n"; return TableSize; } @@ -409,15 +403,19 @@ unsigned MapTableEmitter::emitBinSearchTable(raw_ostream &OS) { //===----------------------------------------------------------------------===// void MapTableEmitter::emitBinSearch(raw_ostream &OS, unsigned TableSize) { + if (TableSize == 0) { + OS << " return -1;\n"; + return; + } + OS << " unsigned mid;\n"; OS << " unsigned start = 0;\n"; OS << " unsigned end = " << TableSize << ";\n"; OS << " while (start < end) {\n"; OS << " mid = start + (end - start) / 2;\n"; - OS << " if (Opcode == " << InstrMapDesc.getName() << "Table[mid][0]) {\n"; + OS << " if (Opcode == Table[mid][0]) \n"; OS << " break;\n"; - OS << " }\n"; - OS << " if (Opcode < " << InstrMapDesc.getName() << "Table[mid][0])\n"; + OS << " if (Opcode < Table[mid][0])\n"; OS << " end = mid;\n"; OS << " else\n"; OS << " start = mid + 1;\n"; @@ -431,7 +429,6 @@ void MapTableEmitter::emitBinSearch(raw_ostream &OS, unsigned TableSize) { //===----------------------------------------------------------------------===// void MapTableEmitter::emitMapFuncBody(raw_ostream &OS, unsigned TableSize) { - const ListInit *ColFields = InstrMapDesc.getColFields(); ArrayRef ValueCols = InstrMapDesc.getValueCols(); @@ -439,6 +436,8 @@ void MapTableEmitter::emitMapFuncBody(raw_ostream &OS, unsigned TableSize) { // relation table. If found, return opcode value from the appropriate column // of the table. 
emitBinSearch(OS, TableSize); + if (TableSize == 0) + return; if (ValueCols.size() > 1) { for (unsigned I = 0, E = ValueCols.size(); I < E; I++) { @@ -453,14 +452,12 @@ void MapTableEmitter::emitMapFuncBody(raw_ostream &OS, unsigned TableSize) { OS << " && "; } OS << ")\n"; - OS << " return " << InstrMapDesc.getName(); - OS << "Table[mid][" << I + 1 << "];\n"; + OS << " return Table[mid][" << I + 1 << "];\n"; } OS << " return -1;"; - } else - OS << " return " << InstrMapDesc.getName() << "Table[mid][1];\n"; - - OS << "}\n\n"; + } else { + OS << " return Table[mid][1];\n"; + } } //===----------------------------------------------------------------------===// @@ -468,7 +465,6 @@ void MapTableEmitter::emitMapFuncBody(raw_ostream &OS, unsigned TableSize) { //===----------------------------------------------------------------------===// void MapTableEmitter::emitTablesWithFunc(raw_ostream &OS) { - // Emit function name and the input parameters : mostly opcode value of the // current instruction. However, if a table has multiple columns (more than 2 // since first column is used for the key instructions), then we also need @@ -491,6 +487,8 @@ void MapTableEmitter::emitTablesWithFunc(raw_ostream &OS) { // Emit rest of the function body. emitMapFuncBody(OS, TableSize); + + OS << "}\n\n"; } //===----------------------------------------------------------------------===// @@ -498,7 +496,7 @@ void MapTableEmitter::emitTablesWithFunc(raw_ostream &OS) { //===----------------------------------------------------------------------===// static void emitEnums(raw_ostream &OS, const RecordKeeper &Records) { - std::map> ColFieldValueMap; + std::map> ColFieldValueMap; // Iterate over all InstrMapping records and create a map between column // fields and their possible values across all records. @@ -507,10 +505,9 @@ static void emitEnums(raw_ostream &OS, const RecordKeeper &Records) { const ListInit *ColFields = CurMap->getValueAsListInit("ColFields"); const ListInit *List = CurMap->getValueAsListInit("ValueCols"); std::vector ValueCols; - unsigned ListSize = List->size(); - for (unsigned J = 0; J < ListSize; J++) { - const auto *ListJ = cast(List->getElement(J)); + for (const Init *Elem : *List) { + const auto *ListJ = cast(Elem); if (ListJ->size() != ColFields->size()) PrintFatalError("Record `" + CurMap->getName() + @@ -521,37 +518,26 @@ static void emitEnums(raw_ostream &OS, const RecordKeeper &Records) { } for (unsigned J = 0, EndCf = ColFields->size(); J < EndCf; J++) { - for (unsigned K = 0; K < ListSize; K++) { - std::string ColName = ColFields->getElement(J)->getAsUnquotedString(); - ColFieldValueMap[ColName].push_back((ValueCols[K])->getElement(J)); - } + std::string ColName = ColFields->getElement(J)->getAsUnquotedString(); + auto &MapEntry = ColFieldValueMap[ColName]; + for (const ListInit *List : ValueCols) + MapEntry.insert(List->getElement(J)); } } for (auto &[EnumName, FieldValues] : ColFieldValueMap) { - // Delete duplicate entries from ColFieldValueMap - for (unsigned i = 0; i < FieldValues.size() - 1; i++) { - const Init *CurVal = FieldValues[i]; - for (unsigned j = i + 1; j < FieldValues.size(); j++) { - if (CurVal == FieldValues[j]) { - FieldValues.erase(FieldValues.begin() + j); - --j; - } - } - } - // Emit enumerated values for the column fields. 
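// The SetVector-based ColFieldValueMap above is what lets the old manual
// duplicate-removal loop go away: llvm::SetVector rejects repeated
// insertions while keeping first-insertion order. A minimal sketch (the
// values are placeholders):
//
//   SetVector<const Init *> Vals;
//   Vals.insert(A);
//   Vals.insert(B);
//   Vals.insert(A);   // no-op: A is already present
//   // Vals now holds {A, B}, in the order they were first inserted.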
OS << "enum " << EnumName << " {\n"; ListSeparator LS(",\n"); for (const Init *Field : FieldValues) - OS << LS << "\t" << EnumName << "_" << Field->getAsUnquotedString(); + OS << LS << " " << EnumName << "_" << Field->getAsUnquotedString(); OS << "\n};\n\n"; } } //===----------------------------------------------------------------------===// // Parse 'InstrMapping' records and use the information to form relationship -// between instructions. These relations are emitted as a tables along with the +// between instructions. These relations are emitted as tables along with the // functions to query them. //===----------------------------------------------------------------------===// void llvm::EmitMapTable(const RecordKeeper &Records, raw_ostream &OS) { @@ -565,8 +551,7 @@ void llvm::EmitMapTable(const RecordKeeper &Records, raw_ostream &OS) { OS << "#ifdef GET_INSTRMAP_INFO\n"; OS << "#undef GET_INSTRMAP_INFO\n"; - OS << "namespace llvm {\n\n"; - OS << "namespace " << NameSpace << " {\n\n"; + OS << "namespace llvm::" << NameSpace << " {\n\n"; // Emit coulumn field names and their values as enums. emitEnums(OS, Records); @@ -589,7 +574,6 @@ void llvm::EmitMapTable(const RecordKeeper &Records, raw_ostream &OS) { // Emit map tables and the functions to query them. IMap.emitTablesWithFunc(OS); } - OS << "} // end namespace " << NameSpace << "\n"; - OS << "} // end namespace llvm\n"; + OS << "} // end namespace llvm::" << NameSpace << '\n'; OS << "#endif // GET_INSTRMAP_INFO\n\n"; } diff --git a/llvm/utils/TableGen/DAGISelMatcherOpt.cpp b/llvm/utils/TableGen/DAGISelMatcherOpt.cpp index 42cc5655c3bae..ed062168dbc6e 100644 --- a/llvm/utils/TableGen/DAGISelMatcherOpt.cpp +++ b/llvm/utils/TableGen/DAGISelMatcherOpt.cpp @@ -75,6 +75,164 @@ static void ContractNodes(std::unique_ptr &MatcherPtr, } } + // Turn MoveParent->MoveChild into MoveSibling. + if (auto *MP = dyn_cast(N)) { + if (auto *MC = dyn_cast(MP->getNext())) { + auto *MS = new MoveSiblingMatcher(MC->getChildNo()); + MS->setNext(MC->takeNext()); + MatcherPtr.reset(MS); + return ContractNodes(MatcherPtr, CGP); + } + } + + // Uncontract MoveSibling if it will help form other child operations. + if (auto *MS = dyn_cast(N)) { + if (auto *RM = dyn_cast(MS->getNext())) { + // Turn MoveSibling->Record->MoveParent into MoveParent->RecordChild. + if (auto *MP = dyn_cast(RM->getNext())) { + if (MS->getSiblingNo() < 8) { // Only have RecordChild0...7 + auto *NewMP = new MoveParentMatcher(); + auto *NewRCM = new RecordChildMatcher( + MS->getSiblingNo(), RM->getWhatFor(), RM->getResultNo()); + NewMP->setNext(NewRCM); + NewRCM->setNext(MP->takeNext()); + MatcherPtr.reset(NewMP); + return ContractNodes(MatcherPtr, CGP); + } + } + + // Turn MoveSibling->Record->CheckType->MoveParent into + // MoveParent->RecordChild->CheckChildType. + if (auto *CT = dyn_cast(RM->getNext())) { + if (auto *MP = dyn_cast(CT->getNext())) { + if (MS->getSiblingNo() < 8 && // Only have CheckChildType0...7 + CT->getResNo() == 0) { // CheckChildType checks res #0 + auto *NewMP = new MoveParentMatcher(); + auto *NewRCM = new RecordChildMatcher( + MS->getSiblingNo(), RM->getWhatFor(), RM->getResultNo()); + auto *NewCCT = + new CheckChildTypeMatcher(MS->getSiblingNo(), CT->getType()); + NewMP->setNext(NewRCM); + NewRCM->setNext(NewCCT); + NewCCT->setNext(MP->takeNext()); + MatcherPtr.reset(NewMP); + return ContractNodes(MatcherPtr, CGP); + } + } + } + } + + // Turn MoveSibling->CheckType->MoveParent into MoveParent->CheckChildType. 
+ if (auto *CT = dyn_cast(MS->getNext())) { + if (auto *MP = dyn_cast(CT->getNext())) { + if (MS->getSiblingNo() < 8 && // Only have CheckChildType0...7 + CT->getResNo() == 0) { // CheckChildType checks res #0 + auto *NewMP = new MoveParentMatcher(); + auto *NewCCT = + new CheckChildTypeMatcher(MS->getSiblingNo(), CT->getType()); + NewMP->setNext(NewCCT); + NewCCT->setNext(MP->takeNext()); + MatcherPtr.reset(NewMP); + return ContractNodes(MatcherPtr, CGP); + } + } + } + + // Turn MoveSibling->CheckInteger->MoveParent into + // MoveParent->CheckChildInteger. + if (auto *CI = dyn_cast(MS->getNext())) { + if (auto *MP = dyn_cast(CI->getNext())) { + if (MS->getSiblingNo() < 5) { // Only have CheckChildInteger0...4 + auto *NewMP = new MoveParentMatcher(); + auto *NewCCI = + new CheckChildIntegerMatcher(MS->getSiblingNo(), CI->getValue()); + NewMP->setNext(NewCCI); + NewCCI->setNext(MP->takeNext()); + MatcherPtr.reset(NewMP); + return ContractNodes(MatcherPtr, CGP); + } + } + + // Turn MoveSibling->CheckInteger->CheckType->MoveParent into + // MoveParent->CheckChildInteger->CheckType. + if (auto *CT = dyn_cast(CI->getNext())) { + if (auto *MP = dyn_cast(CT->getNext())) { + if (MS->getSiblingNo() < 5 && // Only have CheckChildInteger0...4 + CT->getResNo() == 0) { // CheckChildType checks res #0 + auto *NewMP = new MoveParentMatcher(); + auto *NewCCI = new CheckChildIntegerMatcher(MS->getSiblingNo(), + CI->getValue()); + auto *NewCCT = + new CheckChildTypeMatcher(MS->getSiblingNo(), CT->getType()); + NewMP->setNext(NewCCI); + NewCCI->setNext(NewCCT); + NewCCT->setNext(MP->takeNext()); + MatcherPtr.reset(NewMP); + return ContractNodes(MatcherPtr, CGP); + } + } + } + } + + // Turn MoveSibling->CheckCondCode->MoveParent into + // MoveParent->CheckChild2CondCode. + if (auto *CCC = dyn_cast(MS->getNext())) { + if (auto *MP = dyn_cast(CCC->getNext())) { + if (MS->getSiblingNo() == 2) { // Only have CheckChild2CondCode + auto *NewMP = new MoveParentMatcher(); + auto *NewCCCC = + new CheckChild2CondCodeMatcher(CCC->getCondCodeName()); + NewMP->setNext(NewCCCC); + NewCCCC->setNext(MP->takeNext()); + MatcherPtr.reset(NewMP); + return ContractNodes(MatcherPtr, CGP); + } + } + } + + // Turn MoveSibling->CheckSame->MoveParent into + // MoveParent->CheckChildSame. + if (auto *CS = dyn_cast(MS->getNext())) { + if (auto *MP = dyn_cast(CS->getNext())) { + if (MS->getSiblingNo() < 4) { // Only have CheckChildSame0...3 + auto *NewMP = new MoveParentMatcher(); + auto *NewCCS = new CheckChildSameMatcher(MS->getSiblingNo(), + CS->getMatchNumber()); + NewMP->setNext(NewCCS); + NewCCS->setNext(MP->takeNext()); + MatcherPtr.reset(NewMP); + return ContractNodes(MatcherPtr, CGP); + } + } + + // Turn MoveSibling->CheckSame->CheckType->MoveParent into + // MoveParent->CheckChildSame->CheckChildType. + if (auto *CT = dyn_cast(CS->getNext())) { + if (auto *MP = dyn_cast(CT->getNext())) { + if (MS->getSiblingNo() < 4 && // Only have CheckChildSame0...3 + CT->getResNo() == 0) { // CheckChildType checks res #0 + auto *NewMP = new MoveParentMatcher(); + auto *NewCCS = new CheckChildSameMatcher(MS->getSiblingNo(), + CS->getMatchNumber()); + auto *NewCCT = + new CheckChildTypeMatcher(MS->getSiblingNo(), CT->getType()); + NewMP->setNext(NewCCS); + NewCCS->setNext(NewCCT); + NewCCT->setNext(MP->takeNext()); + MatcherPtr.reset(NewMP); + return ContractNodes(MatcherPtr, CGP); + } + } + } + } + + // Turn MoveSibling->MoveParent into MoveParent. 
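// Net effect of the MoveSibling rewrites above, sketched on one case (the
// operand index 1 is illustrative):
//
//   before: MoveSibling 1 -> Record -> CheckType i32 -> MoveParent -> ...
//   after:  MoveParent -> RecordChild 1 -> CheckChildType 1, i32 -> ...
//
// i.e. the checks are folded into the dedicated child-checking opcodes,
// which should give a shorter matcher-table encoding for these patterns.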
+ if (isa(MS->getNext())) { + MatcherPtr.reset(MS->takeNext()); + return ContractNodes(MatcherPtr, CGP); + } + } + // Zap movechild -> moveparent. if (MoveChildMatcher *MC = dyn_cast(N)) if (MoveParentMatcher *MP = dyn_cast(MC->getNext())) { @@ -153,30 +311,6 @@ static void ContractNodes(std::unique_ptr &MatcherPtr, } ContractNodes(N->getNextPtr(), CGP); - - // If we have a MoveParent followed by a MoveChild, we convert it to - // MoveSibling. - if (auto *MP = dyn_cast(N)) { - if (auto *MC = dyn_cast(MP->getNext())) { - auto *MS = new MoveSiblingMatcher(MC->getChildNo()); - MS->setNext(MC->takeNext()); - MatcherPtr.reset(MS); - return ContractNodes(MatcherPtr, CGP); - } - if (auto *RC = dyn_cast(MP->getNext())) { - if (auto *MC = dyn_cast(RC->getNext())) { - if (RC->getChildNo() == MC->getChildNo()) { - auto *MS = new MoveSiblingMatcher(MC->getChildNo()); - auto *RM = new RecordMatcher(RC->getWhatFor(), RC->getResultNo()); - // Insert the new node. - RM->setNext(MC->takeNext()); - MS->setNext(RM); - MatcherPtr.reset(MS); - return ContractNodes(MatcherPtr, CGP); - } - } - } - } } /// FindNodeWithKind - Scan a series of matchers looking for a matcher with a @@ -362,13 +496,13 @@ static void FactorScope(std::unique_ptr &MatcherPtr) { // Check to see if all of the leading entries are now opcode checks. If so, // we can convert this Scope to be a OpcodeSwitch instead. bool AllOpcodeChecks = true, AllTypeChecks = true; - for (unsigned i = 0, e = OptionsToMatch.size(); i != e; ++i) { + for (Matcher *Optn : OptionsToMatch) { // Check to see if this breaks a series of CheckOpcodeMatchers. - if (AllOpcodeChecks && !isa(OptionsToMatch[i])) { + if (AllOpcodeChecks && !isa(Optn)) { #if 0 if (i > 3) { errs() << "FAILING OPC #" << i << "\n"; - OptionsToMatch[i]->dump(); + Optn->dump(); } #endif AllOpcodeChecks = false; @@ -377,7 +511,7 @@ static void FactorScope(std::unique_ptr &MatcherPtr) { // Check to see if this breaks a series of CheckTypeMatcher's. if (AllTypeChecks) { CheckTypeMatcher *CTM = cast_or_null( - FindNodeWithKind(OptionsToMatch[i], Matcher::CheckType)); + FindNodeWithKind(Optn, Matcher::CheckType)); if (!CTM || // iPTR checks could alias any other case without us knowing, don't // bother with them. @@ -386,12 +520,11 @@ static void FactorScope(std::unique_ptr &MatcherPtr) { CTM->getResNo() != 0 || // If the CheckType isn't at the start of the list, see if we can move // it there. 
- !CTM->canMoveBefore(OptionsToMatch[i])) { + !CTM->canMoveBefore(Optn)) { #if 0 if (i > 3 && AllTypeChecks) { errs() << "FAILING TYPE #" << i << "\n"; - OptionsToMatch[i]->dump(); - } + Optn->dump(); } #endif AllTypeChecks = false; } @@ -402,8 +535,8 @@ static void FactorScope(std::unique_ptr &MatcherPtr) { if (AllOpcodeChecks) { StringSet<> Opcodes; SmallVector, 8> Cases; - for (unsigned i = 0, e = OptionsToMatch.size(); i != e; ++i) { - CheckOpcodeMatcher *COM = cast(OptionsToMatch[i]); + for (Matcher *Optn : OptionsToMatch) { + CheckOpcodeMatcher *COM = cast(Optn); assert(Opcodes.insert(COM->getOpcode().getEnumName()).second && "Duplicate opcodes not factored?"); Cases.emplace_back(&COM->getOpcode(), COM->takeNext()); @@ -418,12 +551,12 @@ static void FactorScope(std::unique_ptr &MatcherPtr) { if (AllTypeChecks) { DenseMap TypeEntry; SmallVector, 8> Cases; - for (unsigned i = 0, e = OptionsToMatch.size(); i != e; ++i) { - Matcher *M = FindNodeWithKind(OptionsToMatch[i], Matcher::CheckType); + for (Matcher *Optn : OptionsToMatch) { + Matcher *M = FindNodeWithKind(Optn, Matcher::CheckType); assert(M && isa(M) && "Unknown Matcher type"); auto *CTM = cast(M); - Matcher *MatcherWithoutCTM = OptionsToMatch[i]->unlinkNode(CTM); + Matcher *MatcherWithoutCTM = Optn->unlinkNode(CTM); MVT::SimpleValueType CTMTy = CTM->getType(); delete CTM; diff --git a/llvm/utils/TableGen/X86InstrMappingEmitter.cpp b/llvm/utils/TableGen/X86InstrMappingEmitter.cpp index 1ee79aa27fa98..df43f39e0e9be 100644 --- a/llvm/utils/TableGen/X86InstrMappingEmitter.cpp +++ b/llvm/utils/TableGen/X86InstrMappingEmitter.cpp @@ -228,8 +228,9 @@ void X86InstrMappingEmitter::emitCompressEVEXTable( // For each pre-compression instruction look for a match in the // appropriate vector (instructions with the same opcode) using function // object IsMatch. - auto Match = llvm::find_if(CompressedInsts[Opcode], IsMatch(Inst)); - if (Match != CompressedInsts[Opcode].end()) + const auto &Insts = CompressedInsts[Opcode]; + auto Match = llvm::find_if(Insts, IsMatch(Inst)); + if (Match != Insts.end()) NewInst = *Match; } diff --git a/llvm/utils/gn/secondary/clang/test/BUILD.gn b/llvm/utils/gn/secondary/clang/test/BUILD.gn index f333d457a0f99..926407fbea2af 100644 --- a/llvm/utils/gn/secondary/clang/test/BUILD.gn +++ b/llvm/utils/gn/secondary/clang/test/BUILD.gn @@ -67,7 +67,7 @@ write_lit_config("lit_site_cfg") { "ENABLE_SHARED=0", "LLVM_EXTERNAL_LIT=", "LLVM_HOST_TRIPLE=$llvm_current_triple", - "LLVM_INCLUDE_SPIRV_TOOLS_TESTS=", + "LLVM_INCLUDE_SPIRV_TOOLS_TESTS=0", "LLVM_LIT_TOOLS_DIR=", # Intentionally empty, matches cmake build. "LLVM_TOOL_LLVM_DRIVER_BUILD=0", # FIXME: Add actual support for this. 
"LLVM_USE_SANITIZER=", diff --git a/llvm/utils/gn/secondary/llvm/lib/ExecutionEngine/Orc/TargetProcess/BUILD.gn b/llvm/utils/gn/secondary/llvm/lib/ExecutionEngine/Orc/TargetProcess/BUILD.gn index 49bf1516b2781..0104684e07d18 100644 --- a/llvm/utils/gn/secondary/llvm/lib/ExecutionEngine/Orc/TargetProcess/BUILD.gn +++ b/llvm/utils/gn/secondary/llvm/lib/ExecutionEngine/Orc/TargetProcess/BUILD.gn @@ -6,6 +6,7 @@ static_library("TargetProcess") { "//llvm/lib/TargetParser", ] sources = [ + "DefaultHostBootstrapValues.cpp", "ExecutorSharedMemoryMapperService.cpp", "JITLoaderGDB.cpp", "JITLoaderPerf.cpp", diff --git a/llvm/utils/gn/secondary/llvm/lib/MC/BUILD.gn b/llvm/utils/gn/secondary/llvm/lib/MC/BUILD.gn index 274ab154c441b..c76962ae92c1c 100644 --- a/llvm/utils/gn/secondary/llvm/lib/MC/BUILD.gn +++ b/llvm/utils/gn/secondary/llvm/lib/MC/BUILD.gn @@ -14,6 +14,7 @@ static_library("MC") { sources = [ "ConstantPools.cpp", "DXContainerPSVInfo.cpp", + "DXContainerRootSignature.cpp", "ELFObjectWriter.cpp", "GOFFObjectWriter.cpp", "MCAsmBackend.cpp", diff --git a/llvm/utils/gn/secondary/llvm/lib/Transforms/Vectorize/BUILD.gn b/llvm/utils/gn/secondary/llvm/lib/Transforms/Vectorize/BUILD.gn index f59b6446f0dea..433a7f43bb780 100644 --- a/llvm/utils/gn/secondary/llvm/lib/Transforms/Vectorize/BUILD.gn +++ b/llvm/utils/gn/secondary/llvm/lib/Transforms/Vectorize/BUILD.gn @@ -19,6 +19,7 @@ static_library("Vectorize") { "SandboxVectorizer/Legality.cpp", "SandboxVectorizer/Passes/BottomUpVec.cpp", "SandboxVectorizer/Passes/RegionsFromMetadata.cpp", + "SandboxVectorizer/Passes/TransactionAcceptOrRevert.cpp", "SandboxVectorizer/SandboxVectorizer.cpp", "SandboxVectorizer/SandboxVectorizerPassBuilder.cpp", "SandboxVectorizer/Scheduler.cpp", diff --git a/mlir/include/mlir/Bytecode/BytecodeWriter.h b/mlir/include/mlir/Bytecode/BytecodeWriter.h index 0287e004bb993..c6cff0bc81314 100644 --- a/mlir/include/mlir/Bytecode/BytecodeWriter.h +++ b/mlir/include/mlir/Bytecode/BytecodeWriter.h @@ -82,6 +82,7 @@ class BytecodeWriterConfig { /// printers for the fallback resources within the map. BytecodeWriterConfig(FallbackAsmResourceMap &map, StringRef producer = "MLIR" LLVM_VERSION_STRING); + BytecodeWriterConfig(BytecodeWriterConfig &&); ~BytecodeWriterConfig(); /// An internal implementation class that contains the state of the diff --git a/mlir/include/mlir/Dialect/Affine/Analysis/Utils.h b/mlir/include/mlir/Dialect/Affine/Analysis/Utils.h index b1fbf4477428c..7164ade6ea53a 100644 --- a/mlir/include/mlir/Dialect/Affine/Analysis/Utils.h +++ b/mlir/include/mlir/Dialect/Affine/Analysis/Utils.h @@ -610,6 +610,14 @@ FailureOr simplifyConstrainedMinMaxOp(Operation *op, FlatAffineValueConstraints constraints); +/// Find the innermost common `Block` of `a` and `b` in the affine scope +/// that `a` and `b` are part of. Return nullptr if they belong to different +/// affine scopes. Also, return nullptr if they do not have a common `Block` +/// ancestor (for eg., when they are part of the `then` and `else` regions +/// of an op that itself starts an affine scope. 
+mlir::Block *findInnermostCommonBlockInScope(mlir::Operation *a, + mlir::Operation *b); + } // namespace affine } // namespace mlir diff --git a/mlir/include/mlir/Dialect/Arith/IR/ArithOps.td b/mlir/include/mlir/Dialect/Arith/IR/ArithOps.td index ea9b0f6509b80..d50b6aeca15c9 100644 --- a/mlir/include/mlir/Dialect/Arith/IR/ArithOps.td +++ b/mlir/include/mlir/Dialect/Arith/IR/ArithOps.td @@ -144,13 +144,6 @@ class Arith_CompareOp traits = []> : let assemblyFormat = "$predicate `,` $lhs `,` $rhs attr-dict `:` type($lhs)"; } -// Just like `Arith_CompareOp` but also admits 0-D vectors. Introduced -// temporarily to allow gradual transition to 0-D vectors. -class Arith_CompareOpOfAnyRank traits = []> : - Arith_CompareOp { - let results = (outs BoolLikeOfAnyRank:$result); -} - class Arith_IntBinaryOpWithOverflowFlags traits = []> : Arith_BinaryOp, @@ -1426,9 +1419,9 @@ def Arith_BitcastOp : Arith_CastOp<"bitcast", BitcastTypeConstraint, // CmpIOp //===----------------------------------------------------------------------===// -def Arith_CmpIOp - : Arith_CompareOpOfAnyRank<"cmpi", - [DeclareOpInterfaceMethods]> { +def Arith_CmpIOp : Arith_CompareOp<"cmpi", + [DeclareOpInterfaceMethods]> { let summary = "integer comparison operation"; let description = [{ The `cmpi` operation is a generic comparison for integer-like types. Its two @@ -1495,8 +1488,8 @@ def Arith_CmpIOp }]; let arguments = (ins Arith_CmpIPredicateAttr:$predicate, - SignlessIntegerOrIndexLikeOfAnyRank:$lhs, - SignlessIntegerOrIndexLikeOfAnyRank:$rhs); + SignlessIntegerOrIndexLike:$lhs, + SignlessIntegerOrIndexLike:$rhs); let hasFolder = 1; let hasCanonicalizer = 1; diff --git a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td index 3d378751e798f..fe15a524ec3b5 100644 --- a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td +++ b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td @@ -463,7 +463,17 @@ def NVVM_BarrierOp : NVVM_Op<"barrier", [AttrSizedOperandSegments]> { } }]; let hasVerifier = 1; + let assemblyFormat = "(`id` `=` $barrierId^)? (`number_of_threads` `=` $numberOfThreads^)? 
attr-dict"; + + let builders = [ + OpBuilder<(ins), [{ + return build($_builder, $_state, Value{}, Value{}); + }]>, + OpBuilder<(ins "Value":$barrierId), [{ + return build($_builder, $_state, barrierId, Value{}); + }]> + ]; } def NVVM_BarrierArriveOp : NVVM_PTXBuilder_Op<"barrier.arrive"> @@ -2607,6 +2617,30 @@ def Tcgen05GroupKindAttr : let assemblyFormat = "`<` $value `>`"; } +def Tcgen05FenceBefore : I32EnumAttrCase<"BEFORE_THREAD_SYNC", 0, "before">; +def Tcgen05FenceAfter : I32EnumAttrCase<"AFTER_THREAD_SYNC", 1, "after">; +def Tcgen05FenceKind : I32EnumAttr<"Tcgen05FenceKind", "NVVM Tcgen05 fence kind", + [Tcgen05FenceBefore, Tcgen05FenceAfter]> { + let genSpecializedAttr = 0; + let cppNamespace = "::mlir::NVVM"; +} +def Tcgen05FenceKindAttr : + EnumAttr { + let assemblyFormat = "`<` $value `>`"; +} + +def Tcgen05WaitLoad : I32EnumAttrCase<"LOAD", 0, "load">; +def Tcgen05WaitStore : I32EnumAttrCase<"STORE", 1, "store">; +def Tcgen05WaitKind : I32EnumAttr<"Tcgen05WaitKind", "NVVM Tcgen05 wait kind", + [Tcgen05WaitLoad, Tcgen05WaitStore]> { + let genSpecializedAttr = 0; + let cppNamespace = "::mlir::NVVM"; +} +def Tcgen05WaitKindAttr : + EnumAttr { + let assemblyFormat = "`<` $value `>`"; +} + def NVVM_Tcgen05AllocOp : NVVM_Op<"tcgen05.alloc"> { let summary = "Tcgen05 alloc operation"; let description = [{ @@ -2691,6 +2725,91 @@ def NVVM_Tcgen05RelinquishAllocPermitOp : NVVM_Op<"tcgen05.relinquish_alloc_perm }]; } +def NVVM_Tcgen05FenceOp : NVVM_Op<"tcgen05.fence"> { + let summary = "Tcgen05 fence operations"; + let description = [{ + The `tcgen05.fence` orders all prior async tcgen05 operations + with respect to the subsequent tcgen05 and execution ordering operations. + The `tcgen05.fence` orders all subsequent async tcgen05 operations + with respect to the prior tcgen05 and execution ordering operations. + + [For more information refer to the PTX ISA] + (https://docs.nvidia.com/cuda/parallel-thread-execution/#tensorcore-5th-generation-instructions-tcgen05-fence) + }]; + + let arguments = (ins Tcgen05FenceKindAttr:$kind); + let assemblyFormat = "$kind attr-dict"; + + string llvmBuilder = [{ + auto id = ($kind == NVVM::Tcgen05FenceKind::BEFORE_THREAD_SYNC) + ? llvm::Intrinsic::nvvm_tcgen05_fence_before_thread_sync + : llvm::Intrinsic::nvvm_tcgen05_fence_after_thread_sync; + createIntrinsicCall(builder, id); + }]; +} + +def NVVM_Tcgen05WaitOp : NVVM_Op<"tcgen05.wait"> { + let summary = "Tcgen05 wait operations"; + let description = [{ + The `tcgen05.wait` causes the executing thread to block until + all prior `tcgen05.ld` operations issued by the executing thread + have completed. Similarly, the `tcgen05.wait` causes the executing + thread to block until all prior `tcgen05.st` operations issued by the + executing thread have completed. + [For more information refer PTX ISA] + (https://docs.nvidia.com/cuda/parallel-thread-execution/#tcgen05-instructions-tcgen05-wait) + }]; + + let arguments = (ins Tcgen05WaitKindAttr:$kind); + let assemblyFormat = "$kind attr-dict"; + + string llvmBuilder = [{ + auto id = ($kind == NVVM::Tcgen05WaitKind::LOAD) + ? llvm::Intrinsic::nvvm_tcgen05_wait_ld + : llvm::Intrinsic::nvvm_tcgen05_wait_st; + createIntrinsicCall(builder, id); + }]; +} + +def NVVM_Tcgen05CommitOp : NVVM_Op<"tcgen05.commit"> { + let summary = "Tcgen05 commit operations"; + let description = [{ + The `tcgen05.commit` makes the mbarrier object, specified by + the operand `addr`, track the completion of all the prior + async-tcgen05 operations initiated by the executing thread. 
+ The multicast variants allow signaling on the mbarrier objects + of multiple CTAs within the cluster. Operand `multicastMask`, + when present, specifies the destination CTAs in the cluster such + that each bit position in the 16-bit `multicastMask` operand + corresponds to the `nvvm.read.ptx.sreg.ctaid` of the destination CTA. + [For more information refer PTX ISA] + (https://docs.nvidia.com/cuda/parallel-thread-execution/#tcgen-async-sync-operations-commit) + }]; + + let arguments = (ins + AnyTypeOf<[LLVM_AnyPointer, LLVM_PointerShared]>:$addr, + Optional:$multicastMask, + DefaultValuedAttr:$group); + + let assemblyFormat = [{ + $addr (`,` `multicast_mask` `=` $multicastMask^)? + attr-dict `:` type(operands) + }]; + + let extraClassDeclaration = [{ + static llvm::Intrinsic::ID + getIntrinsicIDAndArgs(Operation &op, LLVM::ModuleTranslation &mt, + llvm::SmallVector &args); + }]; + + string llvmBuilder = [{ + llvm::SmallVector args; + auto id = NVVM::Tcgen05CommitOp::getIntrinsicIDAndArgs( + *op, moduleTranslation, args); + createIntrinsicCall(builder, id, args); + }]; +} + //===----------------------------------------------------------------------===// // NVVM target attribute. //===----------------------------------------------------------------------===// diff --git a/mlir/include/mlir/Dialect/Polynomial/IR/PolynomialTypes.td b/mlir/include/mlir/Dialect/Polynomial/IR/PolynomialTypes.td index 89e406183e0b0..cf33503764abb 100644 --- a/mlir/include/mlir/Dialect/Polynomial/IR/PolynomialTypes.td +++ b/mlir/include/mlir/Dialect/Polynomial/IR/PolynomialTypes.td @@ -26,7 +26,8 @@ def Polynomial_PolynomialType : Polynomial_Type<"Polynomial", "polynomial"> { let assemblyFormat = "`<` struct(params) `>`"; } -def PolynomialLike: TypeOrContainer; +def PolynomialLike : TypeOrValueSemanticsContainer< + Polynomial_PolynomialType, "polynomial-like">; #endif // POLYNOMIAL_TYPES diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td index e9922b6fedb12..6b2e4189aea02 100644 --- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td +++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td @@ -4301,238 +4301,239 @@ class SPIRV_OpCode { // Begin opcode section. Generated from SPIR-V spec; DO NOT MODIFY! 
-def SPIRV_OC_OpNop : I32EnumAttrCase<"OpNop", 0>; -def SPIRV_OC_OpUndef : I32EnumAttrCase<"OpUndef", 1>; -def SPIRV_OC_OpSourceContinued : I32EnumAttrCase<"OpSourceContinued", 2>; -def SPIRV_OC_OpSource : I32EnumAttrCase<"OpSource", 3>; -def SPIRV_OC_OpSourceExtension : I32EnumAttrCase<"OpSourceExtension", 4>; -def SPIRV_OC_OpName : I32EnumAttrCase<"OpName", 5>; -def SPIRV_OC_OpMemberName : I32EnumAttrCase<"OpMemberName", 6>; -def SPIRV_OC_OpString : I32EnumAttrCase<"OpString", 7>; -def SPIRV_OC_OpLine : I32EnumAttrCase<"OpLine", 8>; -def SPIRV_OC_OpExtension : I32EnumAttrCase<"OpExtension", 10>; -def SPIRV_OC_OpExtInstImport : I32EnumAttrCase<"OpExtInstImport", 11>; -def SPIRV_OC_OpExtInst : I32EnumAttrCase<"OpExtInst", 12>; -def SPIRV_OC_OpMemoryModel : I32EnumAttrCase<"OpMemoryModel", 14>; -def SPIRV_OC_OpEntryPoint : I32EnumAttrCase<"OpEntryPoint", 15>; -def SPIRV_OC_OpExecutionMode : I32EnumAttrCase<"OpExecutionMode", 16>; -def SPIRV_OC_OpCapability : I32EnumAttrCase<"OpCapability", 17>; -def SPIRV_OC_OpTypeVoid : I32EnumAttrCase<"OpTypeVoid", 19>; -def SPIRV_OC_OpTypeBool : I32EnumAttrCase<"OpTypeBool", 20>; -def SPIRV_OC_OpTypeInt : I32EnumAttrCase<"OpTypeInt", 21>; -def SPIRV_OC_OpTypeFloat : I32EnumAttrCase<"OpTypeFloat", 22>; -def SPIRV_OC_OpTypeVector : I32EnumAttrCase<"OpTypeVector", 23>; -def SPIRV_OC_OpTypeMatrix : I32EnumAttrCase<"OpTypeMatrix", 24>; -def SPIRV_OC_OpTypeImage : I32EnumAttrCase<"OpTypeImage", 25>; -def SPIRV_OC_OpTypeSampledImage : I32EnumAttrCase<"OpTypeSampledImage", 27>; -def SPIRV_OC_OpTypeArray : I32EnumAttrCase<"OpTypeArray", 28>; -def SPIRV_OC_OpTypeRuntimeArray : I32EnumAttrCase<"OpTypeRuntimeArray", 29>; -def SPIRV_OC_OpTypeStruct : I32EnumAttrCase<"OpTypeStruct", 30>; -def SPIRV_OC_OpTypePointer : I32EnumAttrCase<"OpTypePointer", 32>; -def SPIRV_OC_OpTypeFunction : I32EnumAttrCase<"OpTypeFunction", 33>; -def SPIRV_OC_OpTypeForwardPointer : I32EnumAttrCase<"OpTypeForwardPointer", 39>; -def SPIRV_OC_OpConstantTrue : I32EnumAttrCase<"OpConstantTrue", 41>; -def SPIRV_OC_OpConstantFalse : I32EnumAttrCase<"OpConstantFalse", 42>; -def SPIRV_OC_OpConstant : I32EnumAttrCase<"OpConstant", 43>; -def SPIRV_OC_OpConstantComposite : I32EnumAttrCase<"OpConstantComposite", 44>; -def SPIRV_OC_OpConstantNull : I32EnumAttrCase<"OpConstantNull", 46>; -def SPIRV_OC_OpSpecConstantTrue : I32EnumAttrCase<"OpSpecConstantTrue", 48>; -def SPIRV_OC_OpSpecConstantFalse : I32EnumAttrCase<"OpSpecConstantFalse", 49>; -def SPIRV_OC_OpSpecConstant : I32EnumAttrCase<"OpSpecConstant", 50>; -def SPIRV_OC_OpSpecConstantComposite : I32EnumAttrCase<"OpSpecConstantComposite", 51>; -def SPIRV_OC_OpSpecConstantOp : I32EnumAttrCase<"OpSpecConstantOp", 52>; -def SPIRV_OC_OpFunction : I32EnumAttrCase<"OpFunction", 54>; -def SPIRV_OC_OpFunctionParameter : I32EnumAttrCase<"OpFunctionParameter", 55>; -def SPIRV_OC_OpFunctionEnd : I32EnumAttrCase<"OpFunctionEnd", 56>; -def SPIRV_OC_OpFunctionCall : I32EnumAttrCase<"OpFunctionCall", 57>; -def SPIRV_OC_OpVariable : I32EnumAttrCase<"OpVariable", 59>; -def SPIRV_OC_OpLoad : I32EnumAttrCase<"OpLoad", 61>; -def SPIRV_OC_OpStore : I32EnumAttrCase<"OpStore", 62>; -def SPIRV_OC_OpCopyMemory : I32EnumAttrCase<"OpCopyMemory", 63>; -def SPIRV_OC_OpAccessChain : I32EnumAttrCase<"OpAccessChain", 65>; -def SPIRV_OC_OpPtrAccessChain : I32EnumAttrCase<"OpPtrAccessChain", 67>; -def SPIRV_OC_OpInBoundsPtrAccessChain : I32EnumAttrCase<"OpInBoundsPtrAccessChain", 70>; -def SPIRV_OC_OpDecorate : I32EnumAttrCase<"OpDecorate", 71>; -def SPIRV_OC_OpMemberDecorate : 
I32EnumAttrCase<"OpMemberDecorate", 72>; -def SPIRV_OC_OpVectorExtractDynamic : I32EnumAttrCase<"OpVectorExtractDynamic", 77>; -def SPIRV_OC_OpVectorInsertDynamic : I32EnumAttrCase<"OpVectorInsertDynamic", 78>; -def SPIRV_OC_OpVectorShuffle : I32EnumAttrCase<"OpVectorShuffle", 79>; -def SPIRV_OC_OpCompositeConstruct : I32EnumAttrCase<"OpCompositeConstruct", 80>; -def SPIRV_OC_OpCompositeExtract : I32EnumAttrCase<"OpCompositeExtract", 81>; -def SPIRV_OC_OpCompositeInsert : I32EnumAttrCase<"OpCompositeInsert", 82>; -def SPIRV_OC_OpTranspose : I32EnumAttrCase<"OpTranspose", 84>; -def SPIRV_OC_OpImageDrefGather : I32EnumAttrCase<"OpImageDrefGather", 97>; -def SPIRV_OC_OpImageWrite : I32EnumAttrCase<"OpImageWrite", 99>; -def SPIRV_OC_OpImage : I32EnumAttrCase<"OpImage", 100>; -def SPIRV_OC_OpImageQuerySize : I32EnumAttrCase<"OpImageQuerySize", 104>; -def SPIRV_OC_OpConvertFToU : I32EnumAttrCase<"OpConvertFToU", 109>; -def SPIRV_OC_OpConvertFToS : I32EnumAttrCase<"OpConvertFToS", 110>; -def SPIRV_OC_OpConvertSToF : I32EnumAttrCase<"OpConvertSToF", 111>; -def SPIRV_OC_OpConvertUToF : I32EnumAttrCase<"OpConvertUToF", 112>; -def SPIRV_OC_OpUConvert : I32EnumAttrCase<"OpUConvert", 113>; -def SPIRV_OC_OpSConvert : I32EnumAttrCase<"OpSConvert", 114>; -def SPIRV_OC_OpFConvert : I32EnumAttrCase<"OpFConvert", 115>; -def SPIRV_OC_OpConvertPtrToU : I32EnumAttrCase<"OpConvertPtrToU", 117>; -def SPIRV_OC_OpConvertUToPtr : I32EnumAttrCase<"OpConvertUToPtr", 120>; -def SPIRV_OC_OpPtrCastToGeneric : I32EnumAttrCase<"OpPtrCastToGeneric", 121>; -def SPIRV_OC_OpGenericCastToPtr : I32EnumAttrCase<"OpGenericCastToPtr", 122>; -def SPIRV_OC_OpGenericCastToPtrExplicit : I32EnumAttrCase<"OpGenericCastToPtrExplicit", 123>; -def SPIRV_OC_OpBitcast : I32EnumAttrCase<"OpBitcast", 124>; -def SPIRV_OC_OpSNegate : I32EnumAttrCase<"OpSNegate", 126>; -def SPIRV_OC_OpFNegate : I32EnumAttrCase<"OpFNegate", 127>; -def SPIRV_OC_OpIAdd : I32EnumAttrCase<"OpIAdd", 128>; -def SPIRV_OC_OpFAdd : I32EnumAttrCase<"OpFAdd", 129>; -def SPIRV_OC_OpISub : I32EnumAttrCase<"OpISub", 130>; -def SPIRV_OC_OpFSub : I32EnumAttrCase<"OpFSub", 131>; -def SPIRV_OC_OpIMul : I32EnumAttrCase<"OpIMul", 132>; -def SPIRV_OC_OpFMul : I32EnumAttrCase<"OpFMul", 133>; -def SPIRV_OC_OpUDiv : I32EnumAttrCase<"OpUDiv", 134>; -def SPIRV_OC_OpSDiv : I32EnumAttrCase<"OpSDiv", 135>; -def SPIRV_OC_OpFDiv : I32EnumAttrCase<"OpFDiv", 136>; -def SPIRV_OC_OpUMod : I32EnumAttrCase<"OpUMod", 137>; -def SPIRV_OC_OpSRem : I32EnumAttrCase<"OpSRem", 138>; -def SPIRV_OC_OpSMod : I32EnumAttrCase<"OpSMod", 139>; -def SPIRV_OC_OpFRem : I32EnumAttrCase<"OpFRem", 140>; -def SPIRV_OC_OpFMod : I32EnumAttrCase<"OpFMod", 141>; -def SPIRV_OC_OpVectorTimesScalar : I32EnumAttrCase<"OpVectorTimesScalar", 142>; -def SPIRV_OC_OpMatrixTimesScalar : I32EnumAttrCase<"OpMatrixTimesScalar", 143>; -def SPIRV_OC_OpVectorTimesMatrix : I32EnumAttrCase<"OpVectorTimesMatrix", 144>; -def SPIRV_OC_OpMatrixTimesVector : I32EnumAttrCase<"OpMatrixTimesVector", 145>; -def SPIRV_OC_OpMatrixTimesMatrix : I32EnumAttrCase<"OpMatrixTimesMatrix", 146>; -def SPIRV_OC_OpDot : I32EnumAttrCase<"OpDot", 148>; -def SPIRV_OC_OpIAddCarry : I32EnumAttrCase<"OpIAddCarry", 149>; -def SPIRV_OC_OpISubBorrow : I32EnumAttrCase<"OpISubBorrow", 150>; -def SPIRV_OC_OpUMulExtended : I32EnumAttrCase<"OpUMulExtended", 151>; -def SPIRV_OC_OpSMulExtended : I32EnumAttrCase<"OpSMulExtended", 152>; -def SPIRV_OC_OpIsNan : I32EnumAttrCase<"OpIsNan", 156>; -def SPIRV_OC_OpIsInf : I32EnumAttrCase<"OpIsInf", 157>; -def SPIRV_OC_OpOrdered : 
I32EnumAttrCase<"OpOrdered", 162>; -def SPIRV_OC_OpUnordered : I32EnumAttrCase<"OpUnordered", 163>; -def SPIRV_OC_OpLogicalEqual : I32EnumAttrCase<"OpLogicalEqual", 164>; -def SPIRV_OC_OpLogicalNotEqual : I32EnumAttrCase<"OpLogicalNotEqual", 165>; -def SPIRV_OC_OpLogicalOr : I32EnumAttrCase<"OpLogicalOr", 166>; -def SPIRV_OC_OpLogicalAnd : I32EnumAttrCase<"OpLogicalAnd", 167>; -def SPIRV_OC_OpLogicalNot : I32EnumAttrCase<"OpLogicalNot", 168>; -def SPIRV_OC_OpSelect : I32EnumAttrCase<"OpSelect", 169>; -def SPIRV_OC_OpIEqual : I32EnumAttrCase<"OpIEqual", 170>; -def SPIRV_OC_OpINotEqual : I32EnumAttrCase<"OpINotEqual", 171>; -def SPIRV_OC_OpUGreaterThan : I32EnumAttrCase<"OpUGreaterThan", 172>; -def SPIRV_OC_OpSGreaterThan : I32EnumAttrCase<"OpSGreaterThan", 173>; -def SPIRV_OC_OpUGreaterThanEqual : I32EnumAttrCase<"OpUGreaterThanEqual", 174>; -def SPIRV_OC_OpSGreaterThanEqual : I32EnumAttrCase<"OpSGreaterThanEqual", 175>; -def SPIRV_OC_OpULessThan : I32EnumAttrCase<"OpULessThan", 176>; -def SPIRV_OC_OpSLessThan : I32EnumAttrCase<"OpSLessThan", 177>; -def SPIRV_OC_OpULessThanEqual : I32EnumAttrCase<"OpULessThanEqual", 178>; -def SPIRV_OC_OpSLessThanEqual : I32EnumAttrCase<"OpSLessThanEqual", 179>; -def SPIRV_OC_OpFOrdEqual : I32EnumAttrCase<"OpFOrdEqual", 180>; -def SPIRV_OC_OpFUnordEqual : I32EnumAttrCase<"OpFUnordEqual", 181>; -def SPIRV_OC_OpFOrdNotEqual : I32EnumAttrCase<"OpFOrdNotEqual", 182>; -def SPIRV_OC_OpFUnordNotEqual : I32EnumAttrCase<"OpFUnordNotEqual", 183>; -def SPIRV_OC_OpFOrdLessThan : I32EnumAttrCase<"OpFOrdLessThan", 184>; -def SPIRV_OC_OpFUnordLessThan : I32EnumAttrCase<"OpFUnordLessThan", 185>; -def SPIRV_OC_OpFOrdGreaterThan : I32EnumAttrCase<"OpFOrdGreaterThan", 186>; -def SPIRV_OC_OpFUnordGreaterThan : I32EnumAttrCase<"OpFUnordGreaterThan", 187>; -def SPIRV_OC_OpFOrdLessThanEqual : I32EnumAttrCase<"OpFOrdLessThanEqual", 188>; -def SPIRV_OC_OpFUnordLessThanEqual : I32EnumAttrCase<"OpFUnordLessThanEqual", 189>; -def SPIRV_OC_OpFOrdGreaterThanEqual : I32EnumAttrCase<"OpFOrdGreaterThanEqual", 190>; -def SPIRV_OC_OpFUnordGreaterThanEqual : I32EnumAttrCase<"OpFUnordGreaterThanEqual", 191>; -def SPIRV_OC_OpShiftRightLogical : I32EnumAttrCase<"OpShiftRightLogical", 194>; -def SPIRV_OC_OpShiftRightArithmetic : I32EnumAttrCase<"OpShiftRightArithmetic", 195>; -def SPIRV_OC_OpShiftLeftLogical : I32EnumAttrCase<"OpShiftLeftLogical", 196>; -def SPIRV_OC_OpBitwiseOr : I32EnumAttrCase<"OpBitwiseOr", 197>; -def SPIRV_OC_OpBitwiseXor : I32EnumAttrCase<"OpBitwiseXor", 198>; -def SPIRV_OC_OpBitwiseAnd : I32EnumAttrCase<"OpBitwiseAnd", 199>; -def SPIRV_OC_OpNot : I32EnumAttrCase<"OpNot", 200>; -def SPIRV_OC_OpBitFieldInsert : I32EnumAttrCase<"OpBitFieldInsert", 201>; -def SPIRV_OC_OpBitFieldSExtract : I32EnumAttrCase<"OpBitFieldSExtract", 202>; -def SPIRV_OC_OpBitFieldUExtract : I32EnumAttrCase<"OpBitFieldUExtract", 203>; -def SPIRV_OC_OpBitReverse : I32EnumAttrCase<"OpBitReverse", 204>; -def SPIRV_OC_OpBitCount : I32EnumAttrCase<"OpBitCount", 205>; -def SPIRV_OC_OpEmitVertex : I32EnumAttrCase<"OpEmitVertex", 218>; -def SPIRV_OC_OpEndPrimitive : I32EnumAttrCase<"OpEndPrimitive", 219>; -def SPIRV_OC_OpControlBarrier : I32EnumAttrCase<"OpControlBarrier", 224>; -def SPIRV_OC_OpMemoryBarrier : I32EnumAttrCase<"OpMemoryBarrier", 225>; -def SPIRV_OC_OpAtomicExchange : I32EnumAttrCase<"OpAtomicExchange", 229>; -def SPIRV_OC_OpAtomicCompareExchange : I32EnumAttrCase<"OpAtomicCompareExchange", 230>; -def SPIRV_OC_OpAtomicCompareExchangeWeak : I32EnumAttrCase<"OpAtomicCompareExchangeWeak", 231>; 
-def SPIRV_OC_OpAtomicIIncrement : I32EnumAttrCase<"OpAtomicIIncrement", 232>; -def SPIRV_OC_OpAtomicIDecrement : I32EnumAttrCase<"OpAtomicIDecrement", 233>; -def SPIRV_OC_OpAtomicIAdd : I32EnumAttrCase<"OpAtomicIAdd", 234>; -def SPIRV_OC_OpAtomicISub : I32EnumAttrCase<"OpAtomicISub", 235>; -def SPIRV_OC_OpAtomicSMin : I32EnumAttrCase<"OpAtomicSMin", 236>; -def SPIRV_OC_OpAtomicUMin : I32EnumAttrCase<"OpAtomicUMin", 237>; -def SPIRV_OC_OpAtomicSMax : I32EnumAttrCase<"OpAtomicSMax", 238>; -def SPIRV_OC_OpAtomicUMax : I32EnumAttrCase<"OpAtomicUMax", 239>; -def SPIRV_OC_OpAtomicAnd : I32EnumAttrCase<"OpAtomicAnd", 240>; -def SPIRV_OC_OpAtomicOr : I32EnumAttrCase<"OpAtomicOr", 241>; -def SPIRV_OC_OpAtomicXor : I32EnumAttrCase<"OpAtomicXor", 242>; -def SPIRV_OC_OpPhi : I32EnumAttrCase<"OpPhi", 245>; -def SPIRV_OC_OpLoopMerge : I32EnumAttrCase<"OpLoopMerge", 246>; -def SPIRV_OC_OpSelectionMerge : I32EnumAttrCase<"OpSelectionMerge", 247>; -def SPIRV_OC_OpLabel : I32EnumAttrCase<"OpLabel", 248>; -def SPIRV_OC_OpBranch : I32EnumAttrCase<"OpBranch", 249>; -def SPIRV_OC_OpBranchConditional : I32EnumAttrCase<"OpBranchConditional", 250>; -def SPIRV_OC_OpReturn : I32EnumAttrCase<"OpReturn", 253>; -def SPIRV_OC_OpReturnValue : I32EnumAttrCase<"OpReturnValue", 254>; -def SPIRV_OC_OpUnreachable : I32EnumAttrCase<"OpUnreachable", 255>; -def SPIRV_OC_OpGroupBroadcast : I32EnumAttrCase<"OpGroupBroadcast", 263>; -def SPIRV_OC_OpGroupIAdd : I32EnumAttrCase<"OpGroupIAdd", 264>; -def SPIRV_OC_OpGroupFAdd : I32EnumAttrCase<"OpGroupFAdd", 265>; -def SPIRV_OC_OpGroupFMin : I32EnumAttrCase<"OpGroupFMin", 266>; -def SPIRV_OC_OpGroupUMin : I32EnumAttrCase<"OpGroupUMin", 267>; -def SPIRV_OC_OpGroupSMin : I32EnumAttrCase<"OpGroupSMin", 268>; -def SPIRV_OC_OpGroupFMax : I32EnumAttrCase<"OpGroupFMax", 269>; -def SPIRV_OC_OpGroupUMax : I32EnumAttrCase<"OpGroupUMax", 270>; -def SPIRV_OC_OpGroupSMax : I32EnumAttrCase<"OpGroupSMax", 271>; -def SPIRV_OC_OpNoLine : I32EnumAttrCase<"OpNoLine", 317>; -def SPIRV_OC_OpModuleProcessed : I32EnumAttrCase<"OpModuleProcessed", 330>; -def SPIRV_OC_OpGroupNonUniformElect : I32EnumAttrCase<"OpGroupNonUniformElect", 333>; -def SPIRV_OC_OpGroupNonUniformBroadcast : I32EnumAttrCase<"OpGroupNonUniformBroadcast", 337>; -def SPIRV_OC_OpGroupNonUniformBallot : I32EnumAttrCase<"OpGroupNonUniformBallot", 339>; -def SPIRV_OC_OpGroupNonUniformBallotFindLSB : I32EnumAttrCase<"OpGroupNonUniformBallotFindLSB", 343>; -def SPIRV_OC_OpGroupNonUniformBallotFindMSB : I32EnumAttrCase<"OpGroupNonUniformBallotFindMSB", 344>; -def SPIRV_OC_OpGroupNonUniformShuffle : I32EnumAttrCase<"OpGroupNonUniformShuffle", 345>; -def SPIRV_OC_OpGroupNonUniformShuffleXor : I32EnumAttrCase<"OpGroupNonUniformShuffleXor", 346>; -def SPIRV_OC_OpGroupNonUniformShuffleUp : I32EnumAttrCase<"OpGroupNonUniformShuffleUp", 347>; -def SPIRV_OC_OpGroupNonUniformShuffleDown : I32EnumAttrCase<"OpGroupNonUniformShuffleDown", 348>; -def SPIRV_OC_OpGroupNonUniformIAdd : I32EnumAttrCase<"OpGroupNonUniformIAdd", 349>; -def SPIRV_OC_OpGroupNonUniformFAdd : I32EnumAttrCase<"OpGroupNonUniformFAdd", 350>; -def SPIRV_OC_OpGroupNonUniformIMul : I32EnumAttrCase<"OpGroupNonUniformIMul", 351>; -def SPIRV_OC_OpGroupNonUniformFMul : I32EnumAttrCase<"OpGroupNonUniformFMul", 352>; -def SPIRV_OC_OpGroupNonUniformSMin : I32EnumAttrCase<"OpGroupNonUniformSMin", 353>; -def SPIRV_OC_OpGroupNonUniformUMin : I32EnumAttrCase<"OpGroupNonUniformUMin", 354>; -def SPIRV_OC_OpGroupNonUniformFMin : I32EnumAttrCase<"OpGroupNonUniformFMin", 355>; -def 
SPIRV_OC_OpGroupNonUniformSMax : I32EnumAttrCase<"OpGroupNonUniformSMax", 356>; -def SPIRV_OC_OpGroupNonUniformUMax : I32EnumAttrCase<"OpGroupNonUniformUMax", 357>; -def SPIRV_OC_OpGroupNonUniformFMax : I32EnumAttrCase<"OpGroupNonUniformFMax", 358>; -def SPIRV_OC_OpGroupNonUniformBitwiseAnd : I32EnumAttrCase<"OpGroupNonUniformBitwiseAnd", 359>; -def SPIRV_OC_OpGroupNonUniformBitwiseOr : I32EnumAttrCase<"OpGroupNonUniformBitwiseOr", 360>; -def SPIRV_OC_OpGroupNonUniformBitwiseXor : I32EnumAttrCase<"OpGroupNonUniformBitwiseXor", 361>; -def SPIRV_OC_OpGroupNonUniformLogicalAnd : I32EnumAttrCase<"OpGroupNonUniformLogicalAnd", 362>; -def SPIRV_OC_OpGroupNonUniformLogicalOr : I32EnumAttrCase<"OpGroupNonUniformLogicalOr", 363>; -def SPIRV_OC_OpGroupNonUniformLogicalXor : I32EnumAttrCase<"OpGroupNonUniformLogicalXor", 364>; -def SPIRV_OC_OpSubgroupBallotKHR : I32EnumAttrCase<"OpSubgroupBallotKHR", 4421>; -def SPIRV_OC_OpSDot : I32EnumAttrCase<"OpSDot", 4450>; -def SPIRV_OC_OpUDot : I32EnumAttrCase<"OpUDot", 4451>; -def SPIRV_OC_OpSUDot : I32EnumAttrCase<"OpSUDot", 4452>; -def SPIRV_OC_OpSDotAccSat : I32EnumAttrCase<"OpSDotAccSat", 4453>; -def SPIRV_OC_OpUDotAccSat : I32EnumAttrCase<"OpUDotAccSat", 4454>; -def SPIRV_OC_OpSUDotAccSat : I32EnumAttrCase<"OpSUDotAccSat", 4455>; -def SPIRV_OC_OpTypeCooperativeMatrixKHR : I32EnumAttrCase<"OpTypeCooperativeMatrixKHR", 4456>; -def SPIRV_OC_OpCooperativeMatrixLoadKHR : I32EnumAttrCase<"OpCooperativeMatrixLoadKHR", 4457>; -def SPIRV_OC_OpCooperativeMatrixStoreKHR : I32EnumAttrCase<"OpCooperativeMatrixStoreKHR", 4458>; -def SPIRV_OC_OpCooperativeMatrixMulAddKHR : I32EnumAttrCase<"OpCooperativeMatrixMulAddKHR", 4459>; -def SPIRV_OC_OpCooperativeMatrixLengthKHR : I32EnumAttrCase<"OpCooperativeMatrixLengthKHR", 4460>; -def SPIRV_OC_OpSubgroupBlockReadINTEL : I32EnumAttrCase<"OpSubgroupBlockReadINTEL", 5575>; -def SPIRV_OC_OpSubgroupBlockWriteINTEL : I32EnumAttrCase<"OpSubgroupBlockWriteINTEL", 5576>; -def SPIRV_OC_OpAssumeTrueKHR : I32EnumAttrCase<"OpAssumeTrueKHR", 5630>; -def SPIRV_OC_OpAtomicFAddEXT : I32EnumAttrCase<"OpAtomicFAddEXT", 6035>; -def SPIRV_OC_OpConvertFToBF16INTEL : I32EnumAttrCase<"OpConvertFToBF16INTEL", 6116>; -def SPIRV_OC_OpConvertBF16ToFINTEL : I32EnumAttrCase<"OpConvertBF16ToFINTEL", 6117>; -def SPIRV_OC_OpControlBarrierArriveINTEL : I32EnumAttrCase<"OpControlBarrierArriveINTEL", 6142>; -def SPIRV_OC_OpControlBarrierWaitINTEL : I32EnumAttrCase<"OpControlBarrierWaitINTEL", 6143>; -def SPIRV_OC_OpGroupIMulKHR : I32EnumAttrCase<"OpGroupIMulKHR", 6401>; -def SPIRV_OC_OpGroupFMulKHR : I32EnumAttrCase<"OpGroupFMulKHR", 6402>; +def SPIRV_OC_OpNop : I32EnumAttrCase<"OpNop", 0>; +def SPIRV_OC_OpUndef : I32EnumAttrCase<"OpUndef", 1>; +def SPIRV_OC_OpSourceContinued : I32EnumAttrCase<"OpSourceContinued", 2>; +def SPIRV_OC_OpSource : I32EnumAttrCase<"OpSource", 3>; +def SPIRV_OC_OpSourceExtension : I32EnumAttrCase<"OpSourceExtension", 4>; +def SPIRV_OC_OpName : I32EnumAttrCase<"OpName", 5>; +def SPIRV_OC_OpMemberName : I32EnumAttrCase<"OpMemberName", 6>; +def SPIRV_OC_OpString : I32EnumAttrCase<"OpString", 7>; +def SPIRV_OC_OpLine : I32EnumAttrCase<"OpLine", 8>; +def SPIRV_OC_OpExtension : I32EnumAttrCase<"OpExtension", 10>; +def SPIRV_OC_OpExtInstImport : I32EnumAttrCase<"OpExtInstImport", 11>; +def SPIRV_OC_OpExtInst : I32EnumAttrCase<"OpExtInst", 12>; +def SPIRV_OC_OpMemoryModel : I32EnumAttrCase<"OpMemoryModel", 14>; +def SPIRV_OC_OpEntryPoint : I32EnumAttrCase<"OpEntryPoint", 15>; +def SPIRV_OC_OpExecutionMode : 
I32EnumAttrCase<"OpExecutionMode", 16>; +def SPIRV_OC_OpCapability : I32EnumAttrCase<"OpCapability", 17>; +def SPIRV_OC_OpTypeVoid : I32EnumAttrCase<"OpTypeVoid", 19>; +def SPIRV_OC_OpTypeBool : I32EnumAttrCase<"OpTypeBool", 20>; +def SPIRV_OC_OpTypeInt : I32EnumAttrCase<"OpTypeInt", 21>; +def SPIRV_OC_OpTypeFloat : I32EnumAttrCase<"OpTypeFloat", 22>; +def SPIRV_OC_OpTypeVector : I32EnumAttrCase<"OpTypeVector", 23>; +def SPIRV_OC_OpTypeMatrix : I32EnumAttrCase<"OpTypeMatrix", 24>; +def SPIRV_OC_OpTypeImage : I32EnumAttrCase<"OpTypeImage", 25>; +def SPIRV_OC_OpTypeSampledImage : I32EnumAttrCase<"OpTypeSampledImage", 27>; +def SPIRV_OC_OpTypeArray : I32EnumAttrCase<"OpTypeArray", 28>; +def SPIRV_OC_OpTypeRuntimeArray : I32EnumAttrCase<"OpTypeRuntimeArray", 29>; +def SPIRV_OC_OpTypeStruct : I32EnumAttrCase<"OpTypeStruct", 30>; +def SPIRV_OC_OpTypePointer : I32EnumAttrCase<"OpTypePointer", 32>; +def SPIRV_OC_OpTypeFunction : I32EnumAttrCase<"OpTypeFunction", 33>; +def SPIRV_OC_OpTypeForwardPointer : I32EnumAttrCase<"OpTypeForwardPointer", 39>; +def SPIRV_OC_OpConstantTrue : I32EnumAttrCase<"OpConstantTrue", 41>; +def SPIRV_OC_OpConstantFalse : I32EnumAttrCase<"OpConstantFalse", 42>; +def SPIRV_OC_OpConstant : I32EnumAttrCase<"OpConstant", 43>; +def SPIRV_OC_OpConstantComposite : I32EnumAttrCase<"OpConstantComposite", 44>; +def SPIRV_OC_OpConstantNull : I32EnumAttrCase<"OpConstantNull", 46>; +def SPIRV_OC_OpSpecConstantTrue : I32EnumAttrCase<"OpSpecConstantTrue", 48>; +def SPIRV_OC_OpSpecConstantFalse : I32EnumAttrCase<"OpSpecConstantFalse", 49>; +def SPIRV_OC_OpSpecConstant : I32EnumAttrCase<"OpSpecConstant", 50>; +def SPIRV_OC_OpSpecConstantComposite : I32EnumAttrCase<"OpSpecConstantComposite", 51>; +def SPIRV_OC_OpSpecConstantOp : I32EnumAttrCase<"OpSpecConstantOp", 52>; +def SPIRV_OC_OpFunction : I32EnumAttrCase<"OpFunction", 54>; +def SPIRV_OC_OpFunctionParameter : I32EnumAttrCase<"OpFunctionParameter", 55>; +def SPIRV_OC_OpFunctionEnd : I32EnumAttrCase<"OpFunctionEnd", 56>; +def SPIRV_OC_OpFunctionCall : I32EnumAttrCase<"OpFunctionCall", 57>; +def SPIRV_OC_OpVariable : I32EnumAttrCase<"OpVariable", 59>; +def SPIRV_OC_OpLoad : I32EnumAttrCase<"OpLoad", 61>; +def SPIRV_OC_OpStore : I32EnumAttrCase<"OpStore", 62>; +def SPIRV_OC_OpCopyMemory : I32EnumAttrCase<"OpCopyMemory", 63>; +def SPIRV_OC_OpAccessChain : I32EnumAttrCase<"OpAccessChain", 65>; +def SPIRV_OC_OpPtrAccessChain : I32EnumAttrCase<"OpPtrAccessChain", 67>; +def SPIRV_OC_OpInBoundsPtrAccessChain : I32EnumAttrCase<"OpInBoundsPtrAccessChain", 70>; +def SPIRV_OC_OpDecorate : I32EnumAttrCase<"OpDecorate", 71>; +def SPIRV_OC_OpMemberDecorate : I32EnumAttrCase<"OpMemberDecorate", 72>; +def SPIRV_OC_OpVectorExtractDynamic : I32EnumAttrCase<"OpVectorExtractDynamic", 77>; +def SPIRV_OC_OpVectorInsertDynamic : I32EnumAttrCase<"OpVectorInsertDynamic", 78>; +def SPIRV_OC_OpVectorShuffle : I32EnumAttrCase<"OpVectorShuffle", 79>; +def SPIRV_OC_OpCompositeConstruct : I32EnumAttrCase<"OpCompositeConstruct", 80>; +def SPIRV_OC_OpCompositeExtract : I32EnumAttrCase<"OpCompositeExtract", 81>; +def SPIRV_OC_OpCompositeInsert : I32EnumAttrCase<"OpCompositeInsert", 82>; +def SPIRV_OC_OpTranspose : I32EnumAttrCase<"OpTranspose", 84>; +def SPIRV_OC_OpImageDrefGather : I32EnumAttrCase<"OpImageDrefGather", 97>; +def SPIRV_OC_OpImageWrite : I32EnumAttrCase<"OpImageWrite", 99>; +def SPIRV_OC_OpImage : I32EnumAttrCase<"OpImage", 100>; +def SPIRV_OC_OpImageQuerySize : I32EnumAttrCase<"OpImageQuerySize", 104>; +def SPIRV_OC_OpConvertFToU : 
I32EnumAttrCase<"OpConvertFToU", 109>; +def SPIRV_OC_OpConvertFToS : I32EnumAttrCase<"OpConvertFToS", 110>; +def SPIRV_OC_OpConvertSToF : I32EnumAttrCase<"OpConvertSToF", 111>; +def SPIRV_OC_OpConvertUToF : I32EnumAttrCase<"OpConvertUToF", 112>; +def SPIRV_OC_OpUConvert : I32EnumAttrCase<"OpUConvert", 113>; +def SPIRV_OC_OpSConvert : I32EnumAttrCase<"OpSConvert", 114>; +def SPIRV_OC_OpFConvert : I32EnumAttrCase<"OpFConvert", 115>; +def SPIRV_OC_OpConvertPtrToU : I32EnumAttrCase<"OpConvertPtrToU", 117>; +def SPIRV_OC_OpConvertUToPtr : I32EnumAttrCase<"OpConvertUToPtr", 120>; +def SPIRV_OC_OpPtrCastToGeneric : I32EnumAttrCase<"OpPtrCastToGeneric", 121>; +def SPIRV_OC_OpGenericCastToPtr : I32EnumAttrCase<"OpGenericCastToPtr", 122>; +def SPIRV_OC_OpGenericCastToPtrExplicit : I32EnumAttrCase<"OpGenericCastToPtrExplicit", 123>; +def SPIRV_OC_OpBitcast : I32EnumAttrCase<"OpBitcast", 124>; +def SPIRV_OC_OpSNegate : I32EnumAttrCase<"OpSNegate", 126>; +def SPIRV_OC_OpFNegate : I32EnumAttrCase<"OpFNegate", 127>; +def SPIRV_OC_OpIAdd : I32EnumAttrCase<"OpIAdd", 128>; +def SPIRV_OC_OpFAdd : I32EnumAttrCase<"OpFAdd", 129>; +def SPIRV_OC_OpISub : I32EnumAttrCase<"OpISub", 130>; +def SPIRV_OC_OpFSub : I32EnumAttrCase<"OpFSub", 131>; +def SPIRV_OC_OpIMul : I32EnumAttrCase<"OpIMul", 132>; +def SPIRV_OC_OpFMul : I32EnumAttrCase<"OpFMul", 133>; +def SPIRV_OC_OpUDiv : I32EnumAttrCase<"OpUDiv", 134>; +def SPIRV_OC_OpSDiv : I32EnumAttrCase<"OpSDiv", 135>; +def SPIRV_OC_OpFDiv : I32EnumAttrCase<"OpFDiv", 136>; +def SPIRV_OC_OpUMod : I32EnumAttrCase<"OpUMod", 137>; +def SPIRV_OC_OpSRem : I32EnumAttrCase<"OpSRem", 138>; +def SPIRV_OC_OpSMod : I32EnumAttrCase<"OpSMod", 139>; +def SPIRV_OC_OpFRem : I32EnumAttrCase<"OpFRem", 140>; +def SPIRV_OC_OpFMod : I32EnumAttrCase<"OpFMod", 141>; +def SPIRV_OC_OpVectorTimesScalar : I32EnumAttrCase<"OpVectorTimesScalar", 142>; +def SPIRV_OC_OpMatrixTimesScalar : I32EnumAttrCase<"OpMatrixTimesScalar", 143>; +def SPIRV_OC_OpVectorTimesMatrix : I32EnumAttrCase<"OpVectorTimesMatrix", 144>; +def SPIRV_OC_OpMatrixTimesVector : I32EnumAttrCase<"OpMatrixTimesVector", 145>; +def SPIRV_OC_OpMatrixTimesMatrix : I32EnumAttrCase<"OpMatrixTimesMatrix", 146>; +def SPIRV_OC_OpDot : I32EnumAttrCase<"OpDot", 148>; +def SPIRV_OC_OpIAddCarry : I32EnumAttrCase<"OpIAddCarry", 149>; +def SPIRV_OC_OpISubBorrow : I32EnumAttrCase<"OpISubBorrow", 150>; +def SPIRV_OC_OpUMulExtended : I32EnumAttrCase<"OpUMulExtended", 151>; +def SPIRV_OC_OpSMulExtended : I32EnumAttrCase<"OpSMulExtended", 152>; +def SPIRV_OC_OpIsNan : I32EnumAttrCase<"OpIsNan", 156>; +def SPIRV_OC_OpIsInf : I32EnumAttrCase<"OpIsInf", 157>; +def SPIRV_OC_OpOrdered : I32EnumAttrCase<"OpOrdered", 162>; +def SPIRV_OC_OpUnordered : I32EnumAttrCase<"OpUnordered", 163>; +def SPIRV_OC_OpLogicalEqual : I32EnumAttrCase<"OpLogicalEqual", 164>; +def SPIRV_OC_OpLogicalNotEqual : I32EnumAttrCase<"OpLogicalNotEqual", 165>; +def SPIRV_OC_OpLogicalOr : I32EnumAttrCase<"OpLogicalOr", 166>; +def SPIRV_OC_OpLogicalAnd : I32EnumAttrCase<"OpLogicalAnd", 167>; +def SPIRV_OC_OpLogicalNot : I32EnumAttrCase<"OpLogicalNot", 168>; +def SPIRV_OC_OpSelect : I32EnumAttrCase<"OpSelect", 169>; +def SPIRV_OC_OpIEqual : I32EnumAttrCase<"OpIEqual", 170>; +def SPIRV_OC_OpINotEqual : I32EnumAttrCase<"OpINotEqual", 171>; +def SPIRV_OC_OpUGreaterThan : I32EnumAttrCase<"OpUGreaterThan", 172>; +def SPIRV_OC_OpSGreaterThan : I32EnumAttrCase<"OpSGreaterThan", 173>; +def SPIRV_OC_OpUGreaterThanEqual : I32EnumAttrCase<"OpUGreaterThanEqual", 174>; +def SPIRV_OC_OpSGreaterThanEqual : 
I32EnumAttrCase<"OpSGreaterThanEqual", 175>; +def SPIRV_OC_OpULessThan : I32EnumAttrCase<"OpULessThan", 176>; +def SPIRV_OC_OpSLessThan : I32EnumAttrCase<"OpSLessThan", 177>; +def SPIRV_OC_OpULessThanEqual : I32EnumAttrCase<"OpULessThanEqual", 178>; +def SPIRV_OC_OpSLessThanEqual : I32EnumAttrCase<"OpSLessThanEqual", 179>; +def SPIRV_OC_OpFOrdEqual : I32EnumAttrCase<"OpFOrdEqual", 180>; +def SPIRV_OC_OpFUnordEqual : I32EnumAttrCase<"OpFUnordEqual", 181>; +def SPIRV_OC_OpFOrdNotEqual : I32EnumAttrCase<"OpFOrdNotEqual", 182>; +def SPIRV_OC_OpFUnordNotEqual : I32EnumAttrCase<"OpFUnordNotEqual", 183>; +def SPIRV_OC_OpFOrdLessThan : I32EnumAttrCase<"OpFOrdLessThan", 184>; +def SPIRV_OC_OpFUnordLessThan : I32EnumAttrCase<"OpFUnordLessThan", 185>; +def SPIRV_OC_OpFOrdGreaterThan : I32EnumAttrCase<"OpFOrdGreaterThan", 186>; +def SPIRV_OC_OpFUnordGreaterThan : I32EnumAttrCase<"OpFUnordGreaterThan", 187>; +def SPIRV_OC_OpFOrdLessThanEqual : I32EnumAttrCase<"OpFOrdLessThanEqual", 188>; +def SPIRV_OC_OpFUnordLessThanEqual : I32EnumAttrCase<"OpFUnordLessThanEqual", 189>; +def SPIRV_OC_OpFOrdGreaterThanEqual : I32EnumAttrCase<"OpFOrdGreaterThanEqual", 190>; +def SPIRV_OC_OpFUnordGreaterThanEqual : I32EnumAttrCase<"OpFUnordGreaterThanEqual", 191>; +def SPIRV_OC_OpShiftRightLogical : I32EnumAttrCase<"OpShiftRightLogical", 194>; +def SPIRV_OC_OpShiftRightArithmetic : I32EnumAttrCase<"OpShiftRightArithmetic", 195>; +def SPIRV_OC_OpShiftLeftLogical : I32EnumAttrCase<"OpShiftLeftLogical", 196>; +def SPIRV_OC_OpBitwiseOr : I32EnumAttrCase<"OpBitwiseOr", 197>; +def SPIRV_OC_OpBitwiseXor : I32EnumAttrCase<"OpBitwiseXor", 198>; +def SPIRV_OC_OpBitwiseAnd : I32EnumAttrCase<"OpBitwiseAnd", 199>; +def SPIRV_OC_OpNot : I32EnumAttrCase<"OpNot", 200>; +def SPIRV_OC_OpBitFieldInsert : I32EnumAttrCase<"OpBitFieldInsert", 201>; +def SPIRV_OC_OpBitFieldSExtract : I32EnumAttrCase<"OpBitFieldSExtract", 202>; +def SPIRV_OC_OpBitFieldUExtract : I32EnumAttrCase<"OpBitFieldUExtract", 203>; +def SPIRV_OC_OpBitReverse : I32EnumAttrCase<"OpBitReverse", 204>; +def SPIRV_OC_OpBitCount : I32EnumAttrCase<"OpBitCount", 205>; +def SPIRV_OC_OpEmitVertex : I32EnumAttrCase<"OpEmitVertex", 218>; +def SPIRV_OC_OpEndPrimitive : I32EnumAttrCase<"OpEndPrimitive", 219>; +def SPIRV_OC_OpControlBarrier : I32EnumAttrCase<"OpControlBarrier", 224>; +def SPIRV_OC_OpMemoryBarrier : I32EnumAttrCase<"OpMemoryBarrier", 225>; +def SPIRV_OC_OpAtomicExchange : I32EnumAttrCase<"OpAtomicExchange", 229>; +def SPIRV_OC_OpAtomicCompareExchange : I32EnumAttrCase<"OpAtomicCompareExchange", 230>; +def SPIRV_OC_OpAtomicCompareExchangeWeak : I32EnumAttrCase<"OpAtomicCompareExchangeWeak", 231>; +def SPIRV_OC_OpAtomicIIncrement : I32EnumAttrCase<"OpAtomicIIncrement", 232>; +def SPIRV_OC_OpAtomicIDecrement : I32EnumAttrCase<"OpAtomicIDecrement", 233>; +def SPIRV_OC_OpAtomicIAdd : I32EnumAttrCase<"OpAtomicIAdd", 234>; +def SPIRV_OC_OpAtomicISub : I32EnumAttrCase<"OpAtomicISub", 235>; +def SPIRV_OC_OpAtomicSMin : I32EnumAttrCase<"OpAtomicSMin", 236>; +def SPIRV_OC_OpAtomicUMin : I32EnumAttrCase<"OpAtomicUMin", 237>; +def SPIRV_OC_OpAtomicSMax : I32EnumAttrCase<"OpAtomicSMax", 238>; +def SPIRV_OC_OpAtomicUMax : I32EnumAttrCase<"OpAtomicUMax", 239>; +def SPIRV_OC_OpAtomicAnd : I32EnumAttrCase<"OpAtomicAnd", 240>; +def SPIRV_OC_OpAtomicOr : I32EnumAttrCase<"OpAtomicOr", 241>; +def SPIRV_OC_OpAtomicXor : I32EnumAttrCase<"OpAtomicXor", 242>; +def SPIRV_OC_OpPhi : I32EnumAttrCase<"OpPhi", 245>; +def SPIRV_OC_OpLoopMerge : I32EnumAttrCase<"OpLoopMerge", 246>; +def 
SPIRV_OC_OpSelectionMerge : I32EnumAttrCase<"OpSelectionMerge", 247>; +def SPIRV_OC_OpLabel : I32EnumAttrCase<"OpLabel", 248>; +def SPIRV_OC_OpBranch : I32EnumAttrCase<"OpBranch", 249>; +def SPIRV_OC_OpBranchConditional : I32EnumAttrCase<"OpBranchConditional", 250>; +def SPIRV_OC_OpReturn : I32EnumAttrCase<"OpReturn", 253>; +def SPIRV_OC_OpReturnValue : I32EnumAttrCase<"OpReturnValue", 254>; +def SPIRV_OC_OpUnreachable : I32EnumAttrCase<"OpUnreachable", 255>; +def SPIRV_OC_OpGroupBroadcast : I32EnumAttrCase<"OpGroupBroadcast", 263>; +def SPIRV_OC_OpGroupIAdd : I32EnumAttrCase<"OpGroupIAdd", 264>; +def SPIRV_OC_OpGroupFAdd : I32EnumAttrCase<"OpGroupFAdd", 265>; +def SPIRV_OC_OpGroupFMin : I32EnumAttrCase<"OpGroupFMin", 266>; +def SPIRV_OC_OpGroupUMin : I32EnumAttrCase<"OpGroupUMin", 267>; +def SPIRV_OC_OpGroupSMin : I32EnumAttrCase<"OpGroupSMin", 268>; +def SPIRV_OC_OpGroupFMax : I32EnumAttrCase<"OpGroupFMax", 269>; +def SPIRV_OC_OpGroupUMax : I32EnumAttrCase<"OpGroupUMax", 270>; +def SPIRV_OC_OpGroupSMax : I32EnumAttrCase<"OpGroupSMax", 271>; +def SPIRV_OC_OpNoLine : I32EnumAttrCase<"OpNoLine", 317>; +def SPIRV_OC_OpModuleProcessed : I32EnumAttrCase<"OpModuleProcessed", 330>; +def SPIRV_OC_OpGroupNonUniformElect : I32EnumAttrCase<"OpGroupNonUniformElect", 333>; +def SPIRV_OC_OpGroupNonUniformBroadcast : I32EnumAttrCase<"OpGroupNonUniformBroadcast", 337>; +def SPIRV_OC_OpGroupNonUniformBallot : I32EnumAttrCase<"OpGroupNonUniformBallot", 339>; +def SPIRV_OC_OpGroupNonUniformBallotBitCount : I32EnumAttrCase<"OpGroupNonUniformBallotBitCount", 342>; +def SPIRV_OC_OpGroupNonUniformBallotFindLSB : I32EnumAttrCase<"OpGroupNonUniformBallotFindLSB", 343>; +def SPIRV_OC_OpGroupNonUniformBallotFindMSB : I32EnumAttrCase<"OpGroupNonUniformBallotFindMSB", 344>; +def SPIRV_OC_OpGroupNonUniformShuffle : I32EnumAttrCase<"OpGroupNonUniformShuffle", 345>; +def SPIRV_OC_OpGroupNonUniformShuffleXor : I32EnumAttrCase<"OpGroupNonUniformShuffleXor", 346>; +def SPIRV_OC_OpGroupNonUniformShuffleUp : I32EnumAttrCase<"OpGroupNonUniformShuffleUp", 347>; +def SPIRV_OC_OpGroupNonUniformShuffleDown : I32EnumAttrCase<"OpGroupNonUniformShuffleDown", 348>; +def SPIRV_OC_OpGroupNonUniformIAdd : I32EnumAttrCase<"OpGroupNonUniformIAdd", 349>; +def SPIRV_OC_OpGroupNonUniformFAdd : I32EnumAttrCase<"OpGroupNonUniformFAdd", 350>; +def SPIRV_OC_OpGroupNonUniformIMul : I32EnumAttrCase<"OpGroupNonUniformIMul", 351>; +def SPIRV_OC_OpGroupNonUniformFMul : I32EnumAttrCase<"OpGroupNonUniformFMul", 352>; +def SPIRV_OC_OpGroupNonUniformSMin : I32EnumAttrCase<"OpGroupNonUniformSMin", 353>; +def SPIRV_OC_OpGroupNonUniformUMin : I32EnumAttrCase<"OpGroupNonUniformUMin", 354>; +def SPIRV_OC_OpGroupNonUniformFMin : I32EnumAttrCase<"OpGroupNonUniformFMin", 355>; +def SPIRV_OC_OpGroupNonUniformSMax : I32EnumAttrCase<"OpGroupNonUniformSMax", 356>; +def SPIRV_OC_OpGroupNonUniformUMax : I32EnumAttrCase<"OpGroupNonUniformUMax", 357>; +def SPIRV_OC_OpGroupNonUniformFMax : I32EnumAttrCase<"OpGroupNonUniformFMax", 358>; +def SPIRV_OC_OpGroupNonUniformBitwiseAnd : I32EnumAttrCase<"OpGroupNonUniformBitwiseAnd", 359>; +def SPIRV_OC_OpGroupNonUniformBitwiseOr : I32EnumAttrCase<"OpGroupNonUniformBitwiseOr", 360>; +def SPIRV_OC_OpGroupNonUniformBitwiseXor : I32EnumAttrCase<"OpGroupNonUniformBitwiseXor", 361>; +def SPIRV_OC_OpGroupNonUniformLogicalAnd : I32EnumAttrCase<"OpGroupNonUniformLogicalAnd", 362>; +def SPIRV_OC_OpGroupNonUniformLogicalOr : I32EnumAttrCase<"OpGroupNonUniformLogicalOr", 363>; +def SPIRV_OC_OpGroupNonUniformLogicalXor : 
I32EnumAttrCase<"OpGroupNonUniformLogicalXor", 364>; +def SPIRV_OC_OpSubgroupBallotKHR : I32EnumAttrCase<"OpSubgroupBallotKHR", 4421>; +def SPIRV_OC_OpSDot : I32EnumAttrCase<"OpSDot", 4450>; +def SPIRV_OC_OpUDot : I32EnumAttrCase<"OpUDot", 4451>; +def SPIRV_OC_OpSUDot : I32EnumAttrCase<"OpSUDot", 4452>; +def SPIRV_OC_OpSDotAccSat : I32EnumAttrCase<"OpSDotAccSat", 4453>; +def SPIRV_OC_OpUDotAccSat : I32EnumAttrCase<"OpUDotAccSat", 4454>; +def SPIRV_OC_OpSUDotAccSat : I32EnumAttrCase<"OpSUDotAccSat", 4455>; +def SPIRV_OC_OpTypeCooperativeMatrixKHR : I32EnumAttrCase<"OpTypeCooperativeMatrixKHR", 4456>; +def SPIRV_OC_OpCooperativeMatrixLoadKHR : I32EnumAttrCase<"OpCooperativeMatrixLoadKHR", 4457>; +def SPIRV_OC_OpCooperativeMatrixStoreKHR : I32EnumAttrCase<"OpCooperativeMatrixStoreKHR", 4458>; +def SPIRV_OC_OpCooperativeMatrixMulAddKHR : I32EnumAttrCase<"OpCooperativeMatrixMulAddKHR", 4459>; +def SPIRV_OC_OpCooperativeMatrixLengthKHR : I32EnumAttrCase<"OpCooperativeMatrixLengthKHR", 4460>; +def SPIRV_OC_OpSubgroupBlockReadINTEL : I32EnumAttrCase<"OpSubgroupBlockReadINTEL", 5575>; +def SPIRV_OC_OpSubgroupBlockWriteINTEL : I32EnumAttrCase<"OpSubgroupBlockWriteINTEL", 5576>; +def SPIRV_OC_OpAssumeTrueKHR : I32EnumAttrCase<"OpAssumeTrueKHR", 5630>; +def SPIRV_OC_OpAtomicFAddEXT : I32EnumAttrCase<"OpAtomicFAddEXT", 6035>; +def SPIRV_OC_OpConvertFToBF16INTEL : I32EnumAttrCase<"OpConvertFToBF16INTEL", 6116>; +def SPIRV_OC_OpConvertBF16ToFINTEL : I32EnumAttrCase<"OpConvertBF16ToFINTEL", 6117>; +def SPIRV_OC_OpControlBarrierArriveINTEL : I32EnumAttrCase<"OpControlBarrierArriveINTEL", 6142>; +def SPIRV_OC_OpControlBarrierWaitINTEL : I32EnumAttrCase<"OpControlBarrierWaitINTEL", 6143>; +def SPIRV_OC_OpGroupIMulKHR : I32EnumAttrCase<"OpGroupIMulKHR", 6401>; +def SPIRV_OC_OpGroupFMulKHR : I32EnumAttrCase<"OpGroupFMulKHR", 6402>; def SPIRV_OpcodeAttr : SPIRV_I32EnumAttr<"Opcode", "valid SPIR-V instructions", "opcode", [ @@ -4604,6 +4605,7 @@ def SPIRV_OpcodeAttr : SPIRV_OC_OpGroupFMax, SPIRV_OC_OpGroupUMax, SPIRV_OC_OpGroupSMax, SPIRV_OC_OpNoLine, SPIRV_OC_OpModuleProcessed, SPIRV_OC_OpGroupNonUniformElect, SPIRV_OC_OpGroupNonUniformBroadcast, SPIRV_OC_OpGroupNonUniformBallot, + SPIRV_OC_OpGroupNonUniformBallotBitCount, SPIRV_OC_OpGroupNonUniformBallotFindLSB, SPIRV_OC_OpGroupNonUniformBallotFindMSB, SPIRV_OC_OpGroupNonUniformShuffle, SPIRV_OC_OpGroupNonUniformShuffleXor, SPIRV_OC_OpGroupNonUniformShuffleUp, diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVNonUniformOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVNonUniformOps.td index a1b866387e2ec..98e435c18d3d7 100644 --- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVNonUniformOps.td +++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVNonUniformOps.td @@ -14,6 +14,15 @@ #ifndef MLIR_DIALECT_SPIRV_IR_NON_UNIFORM_OPS #define MLIR_DIALECT_SPIRV_IR_NON_UNIFORM_OPS +class SPIRV_AttrIs : PredOpTrait< + operand # " must be " # type # " of value " # value, + CPred<"::llvm::cast<::mlir::spirv::" # type # "Attr>(getProperties()." 
# operand # ").getValue() == ::mlir::spirv::" # type # "::" # value> + >; + +class SPIRV_ExecutionScopeAttrIs : SPIRV_AttrIs; + +// ----- + class SPIRV_GroupNonUniformArithmeticOp traits = []> : SPIRV_Op { @@ -1287,4 +1296,69 @@ def SPIRV_GroupNonUniformLogicalXorOp : // ----- +def SPIRV_GroupNonUniformBallotBitCountOp : SPIRV_Op<"GroupNonUniformBallotBitCount", [ + SPIRV_ExecutionScopeAttrIs<"execution_scope", "Subgroup">, +]> { + let summary = [{ + Result is the number of bits that are set to 1 in Value, considering + only the bits in Value required to represent all bits of the scope + restricted tangle. + }]; + + let description = [{ + Result Type must be a scalar of integer type, whose Signedness operand + is 0. + + Execution is the scope defining the scope restricted tangle affected by + this command. It must be Subgroup. + + The identity I for Operation is 0. + + Value must be a vector of four components of integer type scalar, whose + Width operand is 32 and whose Signedness operand is 0. + + Value is a set of bitfields where the first invocation is represented in + the lowest bit of the first vector component and the last (up to the + size of the scope) is the higher bit number of the last bitmask needed + to represent all bits of the invocations in the scope restricted tangle. + + An invocation will not execute a dynamic instance of this instruction + (X') until all invocations in its scope restricted tangle have executed + all dynamic instances that are program-ordered before X'. + + + + #### Example: + + ```mlir + %count = spirv.GroupNonUniformBallotBitCount %val : vector<4xi32> -> i32 + ``` + }]; + + let availability = [ + MinVersion, + MaxVersion, + Extension<[]>, + Capability<[SPIRV_C_GroupNonUniformBallot]> + ]; + + let arguments = (ins + SPIRV_ScopeAttr:$execution_scope, + SPIRV_GroupOperationAttr:$group_operation, + SPIRV_Vec4>:$value + ); + + let results = (outs + SPIRV_SignlessOrUnsignedInt:$result + ); + + let hasVerifier = 0; + + let assemblyFormat = [{ + $execution_scope $group_operation $value attr-dict `:` type($value) `->` type($result) + }]; +} + +// ----- + #endif // MLIR_DIALECT_SPIRV_IR_NON_UNIFORM_OPS diff --git a/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td b/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td index 98bcbca3b02fa..840558a81493f 100644 --- a/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td +++ b/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td @@ -1625,7 +1625,7 @@ def Tosa_ReshapeOp : Tosa_InferTensorTypeOp<"reshape"> { let arguments = (ins Tosa_Tensor:$input1, - DenseI64ArrayAttr:$new_shape + Tosa_Shape:$shape ); let results = (outs diff --git a/mlir/include/mlir/Dialect/Tosa/Utils/ConversionUtils.h b/mlir/include/mlir/Dialect/Tosa/Utils/ConversionUtils.h index 78a8828855437..4e2f1b9cb19a9 100644 --- a/mlir/include/mlir/Dialect/Tosa/Utils/ConversionUtils.h +++ b/mlir/include/mlir/Dialect/Tosa/Utils/ConversionUtils.h @@ -145,7 +145,7 @@ TosaOp createOpAndInferShape(ImplicitLocOpBuilder &builder, Type resultTy, template TosaOp CreateOpAndInferShape(ImplicitLocOpBuilder &builder, Type resultTy, Args &&...args) { - if (TosaOp::template hasTrait()) { + if (TosaOp::template hasTrait<::mlir::OpTrait::SameOperandsAndResultRank>()) { // op requires same ranks for tensor operands if constexpr (sizeof...(Args) == 2) { auto argX = std::get<0>(std::tie(args...)); @@ -230,8 +230,11 @@ SmallVector applyTOSAPermutation(ArrayRef input, } // Computes shape value using tosa const_shape op. 
+Value getTosaConstShape(ImplicitLocOpBuilder &builder, + llvm::ArrayRef shape); Value getTosaConstShape(PatternRewriter &rewriter, Location loc, llvm::ArrayRef shape); + SmallVector convertFromMlirShape(ArrayRef shape); bool getConstShapeValue(Operation *op, diff --git a/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUTypes.td b/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUTypes.td index d09c5c1870d50..494f11f041b71 100644 --- a/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUTypes.td +++ b/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUTypes.td @@ -179,7 +179,7 @@ def XeGPU_TensorDesc: XeGPUTypeDef<"TensorDesc", "tensor_desc", }]; let hasCustomAssemblyFormat = true; - + let genVerifyDecl = 1; } diff --git a/mlir/include/mlir/IR/CommonTypeConstraints.td b/mlir/include/mlir/IR/CommonTypeConstraints.td index 82e335e30b6fa..a18b32253d857 100644 --- a/mlir/include/mlir/IR/CommonTypeConstraints.td +++ b/mlir/include/mlir/IR/CommonTypeConstraints.td @@ -879,12 +879,6 @@ class NestedTupleOf allowedTypes> : //===----------------------------------------------------------------------===// // Common type constraints //===----------------------------------------------------------------------===// -// Type constraint for types that are "like" some type or set of types T, that is -// they're either a T, a vector of Ts, or a tensor of Ts. -class TypeOrContainer : TypeConstraint.predicate]>, - name>; // Type constraint for types that are "like" some type or set of types T, that is // they're either a T or a mapable container of Ts. @@ -894,36 +888,23 @@ class TypeOrValueSemanticsContainer ValueSemanticsContainerOf<[allowedType]>.predicate]>, name>; -// Temporary constraint to allow gradual transition to supporting 0-D vectors. -// TODO: Remove this when all ops support 0-D vectors. -class TypeOrContainerOfAnyRank : TypeConstraint.predicate, - TensorOf<[allowedType]>.predicate]>, - name>; - - // Type constraint for bool-like types: bools, vectors of bools, tensors of // bools. -def BoolLike : TypeOrContainer; +def BoolLike : TypeOrValueSemanticsContainer; -def BoolLikeOfAnyRank : TypeOrContainerOfAnyRank; - -// Type constraint for signless-integer-like types: signless integers, -// vectors of signless integers or tensors of signless integers. +// Type constraint for signless-integer-like types: signless integers or +// value-semantics containers of signless integers. def SignlessIntegerLike : TypeOrValueSemanticsContainer< AnySignlessInteger, "signless-integer">; // Type constraint for signless-integer-like types: signless integers, indices, -// vectors of signless integers or indices, tensors of signless integers. +// or value-semantics containers of signless integers or indices. def SignlessIntegerOrIndexLike : TypeOrValueSemanticsContainer< AnySignlessIntegerOrIndex, "signless-integer-like">; -def SignlessIntegerOrIndexLikeOfAnyRank : TypeOrContainerOfAnyRank< - AnySignlessIntegerOrIndex, - "signless-integer-like">; - -// Type constraint for float-like types: floats, vectors or tensors thereof. -def FloatLike : TypeOrContainer; +// Type constraint for float-like types: floats or value-semantics containers +// of floats. +def FloatLike : TypeOrValueSemanticsContainer; // Type constraint for signless-integer-or-index-like or float-like types. 
def SignlessIntegerOrFloatLike : TypeConstraint>> diff --git a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp index 67218cee518d5..0246d9019368a 100644 --- a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp +++ b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp @@ -146,11 +146,13 @@ static Value createLinalgBodyCalculationForElementwiseOp( return rewriter.create(loc, resultTypes, args); if (isa(elementTy)) { - auto inputZpAttr = cast(op).getInput1Zp(); - auto outputZpAttr = cast(op).getOutputZp(); + auto inputZpAttr = cast(op).getInput1ZpAttr(); + auto outputZpAttr = cast(op).getOutputZpAttr(); - const int64_t inZp = inputZpAttr ? *inputZpAttr : 0; - const int64_t outZp = outputZpAttr ? *outputZpAttr : 0; + const int64_t inZp = + inputZpAttr ? inputZpAttr.getValue().getSExtValue() : 0; + const int64_t outZp = + outputZpAttr ? outputZpAttr.getValue().getSExtValue() : 0; if (!inZp && !outZp) { auto constant = rewriter.create( @@ -1954,9 +1956,10 @@ struct TileConverter : public OpConversionPattern { nestedBuilder.create(op.getLoc(), *args.begin()); }); + auto shapeValue = getTosaConstShape( + rewriter, loc, mlir::tosa::convertFromMlirShape(resultTy.getShape())); rewriter.replaceOpWithNewOp( - op, resultTy, genericOp.getResult(0), - rewriter.getDenseI64ArrayAttr(resultTy.getShape())); + op, resultTy, genericOp.getResult(0), shapeValue); return success(); } }; diff --git a/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp b/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp index 2a9b4d111bdfa..7f029d56e2582 100644 --- a/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp +++ b/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp @@ -16,6 +16,7 @@ #include "mlir/Dialect/Tensor/IR/Tensor.h" #include "mlir/Dialect/Tensor/Utils/Utils.h" #include "mlir/Dialect/Tosa/IR/TosaOps.h" +#include "mlir/Dialect/Tosa/Utils/ConversionUtils.h" #include "mlir/IR/PatternMatch.h" #include "mlir/Transforms/DialectConversion.h" @@ -235,7 +236,12 @@ class ReshapeConverter : public OpConversionPattern { return rewriter.notifyMatchFailure(reshape.getLoc(), "expected input type to be tensor"); } - auto newShape = reshape.getNewShape(); + + llvm::SmallVector newShape; + if (!tosa::getConstShapeValue(reshape.getShape().getDefiningOp(), + newShape)) { + return failure(); + } // Infer all intermediate types auto inputType = inferReshapeInputType(input, newShape); diff --git a/mlir/lib/Conversion/VectorToSPIRV/VectorToSPIRV.cpp b/mlir/lib/Conversion/VectorToSPIRV/VectorToSPIRV.cpp index 1ecb892a4ea92..bca77ba68fbd1 100644 --- a/mlir/lib/Conversion/VectorToSPIRV/VectorToSPIRV.cpp +++ b/mlir/lib/Conversion/VectorToSPIRV/VectorToSPIRV.cpp @@ -770,10 +770,20 @@ struct VectorLoadOpConverter final spirv::StorageClass storageClass = attr.getValue(); auto vectorType = loadOp.getVectorType(); - auto vectorPtrType = spirv::PointerType::get(vectorType, storageClass); - Value castedAccessChain = - rewriter.create(loc, vectorPtrType, accessChain); - rewriter.replaceOpWithNewOp(loadOp, vectorType, + // Use the converted vector type instead of original (single element vector + // would get converted to scalar). + auto spirvVectorType = typeConverter.convertType(vectorType); + auto vectorPtrType = spirv::PointerType::get(spirvVectorType, storageClass); + + // For single element vectors, we don't need to bitcast the access chain to + // the original vector type. Both is going to be the same, a pointer + // to a scalar. + Value castedAccessChain = (vectorType.getNumElements() == 1) + ? 
accessChain + : rewriter.create( + loc, vectorPtrType, accessChain); + + rewriter.replaceOpWithNewOp(loadOp, spirvVectorType, castedAccessChain); return success(); @@ -806,8 +816,15 @@ struct VectorStoreOpConverter final spirv::StorageClass storageClass = attr.getValue(); auto vectorType = storeOp.getVectorType(); auto vectorPtrType = spirv::PointerType::get(vectorType, storageClass); - Value castedAccessChain = - rewriter.create(loc, vectorPtrType, accessChain); + + // For single element vectors, we don't need to bitcast the access chain to + // the original vector type. Both is going to be the same, a pointer + // to a scalar. + Value castedAccessChain = (vectorType.getNumElements() == 1) + ? accessChain + : rewriter.create( + loc, vectorPtrType, accessChain); + rewriter.replaceOpWithNewOp(storeOp, castedAccessChain, adaptor.getValueToStore()); diff --git a/mlir/lib/Dialect/Affine/Analysis/Utils.cpp b/mlir/lib/Dialect/Affine/Analysis/Utils.cpp index 9c0b5dbf52d29..10de0d04cbea6 100644 --- a/mlir/lib/Dialect/Affine/Analysis/Utils.cpp +++ b/mlir/lib/Dialect/Affine/Analysis/Utils.cpp @@ -12,6 +12,7 @@ //===----------------------------------------------------------------------===// #include "mlir/Dialect/Affine/Analysis/Utils.h" + #include "mlir/Analysis/Presburger/PresburgerRelation.h" #include "mlir/Dialect/Affine/Analysis/AffineAnalysis.h" #include "mlir/Dialect/Affine/Analysis/LoopAnalysis.h" @@ -2297,3 +2298,41 @@ FailureOr mlir::affine::simplifyConstrainedMinMaxOp( affine::canonicalizeMapAndOperands(&newMap, &newOperands); return AffineValueMap(newMap, newOperands); } + +Block *mlir::affine::findInnermostCommonBlockInScope(Operation *a, + Operation *b) { + Region *aScope = mlir::affine::getAffineScope(a); + Region *bScope = mlir::affine::getAffineScope(b); + if (aScope != bScope) + return nullptr; + + // Get the block ancestry of `op` while stopping at the affine scope `aScope` + // and store them in `ancestry`. 
+ auto getBlockAncestry = [&](Operation *op, + SmallVectorImpl &ancestry) { + Operation *curOp = op; + do { + ancestry.push_back(curOp->getBlock()); + if (curOp->getParentRegion() == aScope) + break; + curOp = curOp->getParentOp(); + } while (curOp); + assert(curOp && "can't reach root op without passing through affine scope"); + std::reverse(ancestry.begin(), ancestry.end()); + }; + + SmallVector aAncestors, bAncestors; + getBlockAncestry(a, aAncestors); + getBlockAncestry(b, bAncestors); + assert(!aAncestors.empty() && !bAncestors.empty() && + "at least one Block ancestor expected"); + + Block *innermostCommonBlock = nullptr; + for (unsigned a = 0, b = 0, e = aAncestors.size(), f = bAncestors.size(); + a < e && b < f; ++a, ++b) { + if (aAncestors[a] != bAncestors[b]) + break; + innermostCommonBlock = aAncestors[a]; + } + return innermostCommonBlock; +} diff --git a/mlir/lib/Dialect/Affine/Transforms/LoopFusion.cpp b/mlir/lib/Dialect/Affine/Transforms/LoopFusion.cpp index c22ec213be95c..fe6cf0f434cb7 100644 --- a/mlir/lib/Dialect/Affine/Transforms/LoopFusion.cpp +++ b/mlir/lib/Dialect/Affine/Transforms/LoopFusion.cpp @@ -41,7 +41,7 @@ namespace affine { } // namespace affine } // namespace mlir -#define DEBUG_TYPE "affine-loop-fusion" +#define DEBUG_TYPE "affine-fusion" using namespace mlir; using namespace mlir::affine; @@ -237,29 +237,67 @@ static void sinkSequentialLoops(MemRefDependenceGraph::Node *node) { node->op = newRootForOp; } -// Creates and returns a private (single-user) memref for fused loop rooted -// at 'forOp', with (potentially reduced) memref size based on the -// MemRefRegion written to by 'srcStoreOpInst' at depth 'dstLoopDepth'. -// TODO: consider refactoring the common code from generateDma and -// this one. -static Value createPrivateMemRef(AffineForOp forOp, Operation *srcStoreOpInst, +/// Get the operation that should act as a dominance filter while replacing +/// memref uses with a private memref for which `producerStores` and +/// `sliceInsertionBlock` are provided. This effectively determines in what +/// part of the IR we should be performing the replacement. +static Operation * +getDominanceFilterForPrivateMemRefRepl(Block *sliceInsertionBlock, + ArrayRef producerStores) { + assert(!producerStores.empty() && "expected producer store"); + + // We first find the common block that contains the producer stores and + // the slice computation. The first ancestor among the ancestors of the + // producer stores in that common block is the dominance filter to use for + // replacement. + Block *commonBlock = nullptr; + // Find the common block of all relevant operations. + for (Operation *store : producerStores) { + Operation *otherOp = + !commonBlock ? &*sliceInsertionBlock->begin() : &*commonBlock->begin(); + commonBlock = findInnermostCommonBlockInScope(store, otherOp); + } + assert(commonBlock && + "common block of producer stores and slice should exist"); + + // Find the first ancestor among the ancestors of `producerStores` in + // `commonBlock`. + Operation *firstAncestor = nullptr; + for (Operation *store : producerStores) { + Operation *ancestor = commonBlock->findAncestorOpInBlock(*store); + assert(ancestor && "producer store should be contained in common block"); + firstAncestor = !firstAncestor || ancestor->isBeforeInBlock(firstAncestor) + ? 
ancestor + : firstAncestor; + } + return firstAncestor; +} + +// Creates and returns a private (single-user) memref for fused loop rooted at +// 'forOp', with (potentially reduced) memref size based on the memref region +// written to by `storeOps` at depth 'dstLoopDepth'. 'sliceInsertionBlock' +// specifies the block in which the slice was/will be inserted. +static Value createPrivateMemRef(AffineForOp forOp, + ArrayRef storeOps, unsigned dstLoopDepth, std::optional fastMemorySpace, + Block *sliceInsertionBlock, uint64_t localBufSizeThreshold) { - Operation *forInst = forOp.getOperation(); + assert(!storeOps.empty() && "no source stores supplied"); + Operation *srcStoreOp = storeOps[0]; // Create builder to insert alloc op just before 'forOp'. - OpBuilder b(forInst); + OpBuilder b(forOp); // Builder to create constants at the top level. - OpBuilder top(forInst->getParentRegion()); + OpBuilder top(forOp->getParentRegion()); // Create new memref type based on slice bounds. - auto oldMemRef = cast(srcStoreOpInst).getMemRef(); + auto oldMemRef = cast(srcStoreOp).getMemRef(); auto oldMemRefType = cast(oldMemRef.getType()); unsigned rank = oldMemRefType.getRank(); // Compute MemRefRegion for 'srcStoreOpInst' at depth 'dstLoopDepth'. - MemRefRegion region(srcStoreOpInst->getLoc()); - bool validRegion = succeeded(region.compute(srcStoreOpInst, dstLoopDepth)); + MemRefRegion region(srcStoreOp->getLoc()); + bool validRegion = succeeded(region.compute(srcStoreOp, dstLoopDepth)); (void)validRegion; assert(validRegion && "unexpected memref region failure"); SmallVector newShape; @@ -332,11 +370,12 @@ static Value createPrivateMemRef(AffineForOp forOp, Operation *srcStoreOpInst, AffineMap::get(outerIVs.size() + rank, 0, remapExprs, forOp.getContext()); // Replace all users of 'oldMemRef' with 'newMemRef'. - LogicalResult res = - replaceAllMemRefUsesWith(oldMemRef, newMemRef, {}, indexRemap, - /*extraOperands=*/outerIVs, - /*symbolOperands=*/{}, - /*domOpFilter=*/&*forOp.getBody()->begin()); + Operation *domFilter = + getDominanceFilterForPrivateMemRefRepl(sliceInsertionBlock, storeOps); + LogicalResult res = replaceAllMemRefUsesWith( + oldMemRef, newMemRef, /*extraIndices=*/{}, indexRemap, + /*extraOperands=*/outerIVs, + /*symbolOperands=*/{}, domFilter); assert(succeeded(res) && "replaceAllMemrefUsesWith should always succeed here"); (void)res; @@ -944,6 +983,10 @@ struct GreedyFusion { // Create private memrefs. if (!privateMemrefs.empty()) { + // Note the block into which fusion was performed. This can be used to + // place `alloc`s that create private memrefs. + Block *sliceInsertionBlock = bestSlice.insertPoint->getBlock(); + // Gather stores for all the private-to-be memrefs. DenseMap> privateMemRefToStores; dstAffineForOp.walk([&](AffineWriteOpInterface storeOp) { @@ -962,8 +1005,8 @@ struct GreedyFusion { SmallVector &storesForMemref = memrefToStoresPair.second; Value newMemRef = createPrivateMemRef( - dstAffineForOp, storesForMemref[0], bestDstLoopDepth, - fastMemorySpace, localBufSizeThreshold); + dstAffineForOp, storesForMemref, bestDstLoopDepth, + fastMemorySpace, sliceInsertionBlock, localBufSizeThreshold); // Create new node in dependence graph for 'newMemRef' alloc op. unsigned newMemRefNodeId = mdg->addNode(newMemRef.getDefiningOp()); // Add edge from 'newMemRef' node to dstNode. 
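The private-memref replacement above hinges on `findInnermostCommonBlockInScope`, added in the affine `Analysis/Utils.cpp` change earlier in this patch. A minimal sketch of how a pass could use it to pick a dominance filter for two producer stores is shown below; it is an illustration only, not code from the patch: `pickDomFilter`, `storeA`, and `storeB` are hypothetical names, and the include path assumes the declaration lands in the affine analysis `Utils.h` header alongside the definition shown above.

```cpp
// Sketch only: storeA and storeB are assumed to be store ops gathered from a
// walk over the fused loop nest, both inside the same affine scope.
#include "mlir/Dialect/Affine/Analysis/Utils.h"

using namespace mlir;

static Operation *pickDomFilter(Operation *storeA, Operation *storeB) {
  // Innermost block, within the common affine scope, containing both stores.
  Block *common = affine::findInnermostCommonBlockInScope(storeA, storeB);
  if (!common)
    return nullptr; // Different affine scopes; no single filter exists.

  // Use whichever ancestor of the two stores comes first in that block, so
  // that every use dominated by it is considered for replacement.
  Operation *ancA = common->findAncestorOpInBlock(*storeA);
  Operation *ancB = common->findAncestorOpInBlock(*storeB);
  return ancA->isBeforeInBlock(ancB) ? ancA : ancB;
}
```

This mirrors what `getDominanceFilterForPrivateMemRefRepl` does for an arbitrary number of producer stores plus the slice insertion block.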
diff --git a/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp index 241b25c6caf12..62f0c21338111 100644 --- a/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp +++ b/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp @@ -1284,6 +1284,36 @@ llvm::Intrinsic::ID Tcgen05DeallocOp::getIntrinsicIDAndArgs( return id; } +#define TCGEN05_COMMIT_IMPL(cg, is_shared, mc) \ + is_shared ? llvm::Intrinsic::nvvm_tcgen05_commit##mc##_shared##_##cg \ + : llvm::Intrinsic::nvvm_tcgen05_commit##mc##_##cg + +#define GET_TCGEN05_COMMIT_ID(cta_group, is_shared, has_mc) \ + has_mc ? TCGEN05_COMMIT_IMPL(cta_group, is_shared, _mc) \ + : TCGEN05_COMMIT_IMPL(cta_group, is_shared, ) + +llvm::Intrinsic::ID +Tcgen05CommitOp::getIntrinsicIDAndArgs(Operation &op, + LLVM::ModuleTranslation &mt, + llvm::SmallVector &args) { + auto curOp = cast(op); + unsigned AS = llvm::cast(curOp.getAddr().getType()) + .getAddressSpace(); + bool isShared = AS == NVVMMemorySpace::kSharedMemorySpace; + bool hasMulticast = curOp.getMulticastMask() ? true : false; + bool is2CTAMode = curOp.getGroup() == Tcgen05GroupKind::CTA_2; + + auto id = is2CTAMode ? GET_TCGEN05_COMMIT_ID(cg2, isShared, hasMulticast) + : GET_TCGEN05_COMMIT_ID(cg1, isShared, hasMulticast); + + // Fill the Intrinsic Args + args.push_back(mt.lookupValue(curOp.getAddr())); + if (hasMulticast) + args.push_back(mt.lookupValue(curOp.getMulticastMask())); + + return id; +} + /// Infer the result ranges for the NVVM SpecialRangeableRegisterOp that might /// have ConstantRangeAttr. static void nvvmInferResultRanges(Operation *op, Value result, diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp index a43fa86166e83..f2c23c49a78e8 100644 --- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp @@ -2716,15 +2716,12 @@ vectorizeAsInsertSliceOp(RewriterBase &rewriter, tensor::InsertSliceOp sliceOp, } auto vecType = VectorType::get(vecShape, sourceType.getElementType()); - // 3. Generate TransferReadOp. - SmallVector readIndices( - vecType.getRank(), - rewriter.create(sliceOp.getLoc(), 0)); - Operation *read = rewriter.create( - sliceOp.getLoc(), vecType, source, readIndices, padValue, - ArrayRef{readInBounds}); + // 3. Generate TransferReadOp + TransferWriteOp + ReifiedRankedShapedTypeDims reifiedSrcSizes; + Value maskOp; - // If vector sizes are user provided, make sure to mask xfer_read. + // If vector sizes are user provided, make sure to mask. First, generate the + // mask. if (!inputVectorSizes.empty()) { auto *srcDefOp = source.getDefiningOp(); if (!srcDefOp) { @@ -2732,40 +2729,43 @@ vectorizeAsInsertSliceOp(RewriterBase &rewriter, tensor::InsertSliceOp sliceOp, return failure(); } - ReifiedRankedShapedTypeDims reifiedSrcSizes; LogicalResult status = cast(srcDefOp).reifyResultShapes( rewriter, reifiedSrcSizes); if (status.failed()) { - LDBG("Unable to reify result shapes of " << sliceOp); + LDBG("Unable to reify result shapes of " << srcDefOp); return failure(); } // Create the mask - SmallVector readMaskShape( - sliceOp.getSource().getType().getShape()); auto readMaskType = VectorType::get(inputVectorSizes, rewriter.getI1Type()); - Value maskOp = rewriter.create( + maskOp = rewriter.create( sliceOp.getLoc(), readMaskType, reifiedSrcSizes[0]); - - // Mask the xfer_read Op - read = mlir::vector::maskOperation(rewriter, read, maskOp); } - // 4. Generate TransferWriteOp. 
- if (!inputVectorSizes.empty() && - ShapedType::isDynamicShape(resultType.getShape())) { - LDBG("TODO: Masking of xfer_write when vectorising " << sliceOp); - return failure(); + SmallVector readIndices( + vecType.getRank(), + rewriter.create(sliceOp.getLoc(), 0)); + Operation *read = rewriter.create( + sliceOp.getLoc(), vecType, source, readIndices, padValue, + ArrayRef{readInBounds}); + + if (maskOp) { + read = mlir::vector::maskOperation(rewriter, read, maskOp); } auto writeIndices = getValueOrCreateConstantIndexOp( rewriter, sliceOp.getLoc(), sliceOp.getMixedOffsets()); - // 5. Finalize Operation *write = rewriter.create( sliceOp.getLoc(), read->getResult(0), sliceOp.getDest(), writeIndices, ArrayRef{writeInBounds}); + + if (maskOp) { + write = mlir::vector::maskOperation(rewriter, write, maskOp); + } + + // 4. Finalize newResults.push_back(write->getResult(0)); return success(); diff --git a/mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp b/mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp index 8e22c879753a3..a9a65ac271b3c 100644 --- a/mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp +++ b/mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp @@ -180,7 +180,7 @@ struct TransposeIsReshape : public OpRewritePattern { rewriter.replaceOpWithNewOp( op, op.getType(), op.getInput1(), - rewriter.getDenseI64ArrayAttr(newShape)); + getTosaConstShape(rewriter, op.getLoc(), newShape)); return success(); } }; @@ -948,8 +948,12 @@ OpFoldResult ReshapeOp::fold(FoldAdaptor adaptor) { if (!getInput1().hasOneUse()) return {}; + llvm::SmallVector shapeVec; + if (!tosa::getConstShapeValue(getShape().getDefiningOp(), shapeVec)) + return {}; + return operand.reshape( - llvm::cast(operand.getType()).clone(getNewShape())); + llvm::cast(operand.getType()).clone(shapeVec)); } return {}; diff --git a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp index 031c279ff09e2..955021abdd67b 100644 --- a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp +++ b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp @@ -1335,8 +1335,16 @@ LogicalResult tosa::ReshapeOp::inferReturnTypeComponents( SmallVectorImpl &inferredReturnShapes) { ShapeAdaptor inputShape(adaptor.getInput1().getType()); Type inputType = getElementTypeOrSelf(adaptor.getInput1().getType()); - llvm::SmallVector newShapeValue = - convertToMlirShape(adaptor.getNewShape()); + llvm::SmallVector newShapeValue; + if (!tosa::getConstShapeValue(adaptor.getShape().getDefiningOp(), + newShapeValue)) { + auto rank = cast(adaptor.getShape().getType()).getRank(); + SmallVector fallback(rank, ShapedType::kDynamic); + inferredReturnShapes.push_back(ShapedTypeComponents(fallback, inputType)); + return success(); + } else { + newShapeValue = convertToMlirShape(newShapeValue); + } // We cannot infer from the total number of elements so we must take the // shape attribute as exact. 
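Throughout this patch, callers that previously passed a `new_shape` attribute to `tosa.reshape` now materialize a `tosa.const_shape` value and pass it as the op's `shape` operand. A minimal builder-side sketch of that pattern, matching the converted passes elsewhere in the diff, is given below; `buildReshape` is a hypothetical helper, and the rewriter, location, input value, and result type are assumed to come from an enclosing rewrite pattern.

```cpp
// Sketch only: error handling and the surrounding pattern boilerplate are
// omitted; resultTy is assumed to be a ranked tensor type.
#include "mlir/Dialect/Tosa/IR/TosaOps.h"
#include "mlir/Dialect/Tosa/Utils/ConversionUtils.h"

using namespace mlir;

static Value buildReshape(PatternRewriter &rewriter, Location loc, Value input,
                          RankedTensorType resultTy) {
  // Materialize the target shape as a tosa.const_shape value; dynamic dims
  // become -1 via convertFromMlirShape, per the TOSA convention.
  Value shapeValue = tosa::getTosaConstShape(
      rewriter, loc, tosa::convertFromMlirShape(resultTy.getShape()));
  // The reshape now consumes the shape operand instead of a new_shape attr.
  return rewriter.create<tosa::ReshapeOp>(loc, resultTy, input, shapeValue);
}
```

On the consumer side, passes recover the static shape with `tosa::getConstShapeValue` on the operand's defining op, as the `inferReturnTypeComponents` hunk above and the `verify` hunk below both do, and bail out (or fall back to dynamic dimensions) when the shape is not constant.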
@@ -1372,13 +1380,19 @@ llvm::LogicalResult tosa::ReshapeOp::verify() { TensorType inputType = getInput1().getType(); RankedTensorType outputType = getType(); - if ((int64_t)getNewShape().size() != outputType.getRank()) + SmallVector shapeValues; + if (!tosa::getConstShapeValue(getShape().getDefiningOp(), shapeValues)) { + // skip following checks if shape is not constant + return mlir::success(); + } + + if ((int64_t)shapeValues.size() != outputType.getRank()) return emitOpError() << "new shape does not match result rank"; for (auto [newShapeDim, outputShapeDim] : - zip(getNewShape(), outputType.getShape())) { - if (newShapeDim != -1 && outputShapeDim != ShapedType::kDynamic && - newShapeDim != outputShapeDim) + zip(shapeValues, outputType.getShape())) { + if (newShapeDim != -1 && newShapeDim != ShapedType::kDynamic && + outputShapeDim != ShapedType::kDynamic && newShapeDim != outputShapeDim) return emitOpError() << "new shape is inconsistent with result shape"; if (newShapeDim != ShapedType::kDynamic && newShapeDim < -1) @@ -1397,10 +1411,10 @@ llvm::LogicalResult tosa::ReshapeOp::verify() { } int64_t newShapeElementsNum = std::accumulate( - getNewShape().begin(), getNewShape().end(), 1LL, + shapeValues.begin(), shapeValues.end(), 1LL, [](int64_t acc, int64_t dim) { return (dim > 0) ? acc * dim : acc; }); bool isStaticNewShape = - llvm::all_of(getNewShape(), [](int64_t s) { return s > 0; }); + llvm::all_of(shapeValues, [](int64_t s) { return s > 0; }); if ((isStaticNewShape && inputElementsNum != newShapeElementsNum) || (!isStaticNewShape && newShapeElementsNum > inputElementsNum)) { return emitOpError() << "cannot reshape " << inputElementsNum @@ -1408,7 +1422,7 @@ llvm::LogicalResult tosa::ReshapeOp::verify() { } } - int missingDims = llvm::count(getNewShape(), -1); + int missingDims = llvm::count(shapeValues, -1); if (missingDims > 1) return emitOpError() << "expected at most one target dimension to be -1"; diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp index 4eba89b59bbd7..617a59bc87c9f 100644 --- a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp +++ b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp @@ -20,12 +20,6 @@ using namespace mlir::tosa; namespace { -SmallVector convertFromMlirShape(ArrayRef shape) { - return to_vector(llvm::map_range(shape, [](int64_t dim) { - return ShapedType::isDynamic(dim) ? -1 : dim; - })); -} - struct Conv2DIsFullyConnected : public OpRewritePattern { explicit Conv2DIsFullyConnected(MLIRContext *context) : OpRewritePattern(context) {} @@ -98,12 +92,13 @@ struct Conv2DIsFullyConnected : public OpRewritePattern { llvm::SmallVector revisedInputShape{combined, inputShape[3]}; auto revisedInputShapeType = RankedTensorType::get(revisedInputShape, inputType.getElementType()); - auto reshapedInput = rewriter - .create( - op.getLoc(), revisedInputShapeType, input, - rewriter.getDenseI64ArrayAttr( - convertFromMlirShape(revisedInputShape))) - .getResult(); + auto revisedInputShapeValue = getTosaConstShape( + rewriter, op.getLoc(), convertFromMlirShape(revisedInputShape)); + auto reshapedInput = + rewriter + .create(op.getLoc(), revisedInputShapeType, input, + revisedInputShapeValue) + .getResult(); // Reshape kernel to [OC,KH,KW,IC] -> [OC, IC]. 
llvm::SmallVector revisedWeightShape{weightShape[0], @@ -111,12 +106,13 @@ struct Conv2DIsFullyConnected : public OpRewritePattern { auto revisedWeightShapeType = RankedTensorType::get( revisedWeightShape, dyn_cast(weight.getType()).getElementType()); - auto reshapedWeight = rewriter - .create( - op.getLoc(), revisedWeightShapeType, weight, - rewriter.getDenseI64ArrayAttr( - convertFromMlirShape(revisedWeightShape))) - .getResult(); + auto revisedWeightShapeValue = getTosaConstShape( + rewriter, op.getLoc(), convertFromMlirShape(revisedWeightShape)); + auto reshapedWeight = + rewriter + .create(op.getLoc(), revisedWeightShapeType, + weight, revisedWeightShapeValue) + .getResult(); // Perform a fully connected network over the reshaped input and weight. llvm::SmallVector fullyConnectedShape{combined, weightShape[0]}; @@ -149,9 +145,10 @@ struct Conv2DIsFullyConnected : public OpRewritePattern { // Reshape output to [N, IH, IW, OC]. llvm::SmallVector outputShape{inputShape[0], inputShape[1], inputShape[2], weightShape[0]}; + auto outputShapeValue = getTosaConstShape( + rewriter, op.getLoc(), convertFromMlirShape(outputShape)); rewriter.replaceOpWithNewOp( - op, resultType, fullyConnectedValue, - rewriter.getDenseI64ArrayAttr(convertFromMlirShape(outputShape))); + op, resultType, fullyConnectedValue, outputShapeValue); return success(); } }; diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeDepthwise.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeDepthwise.cpp index ee857f1998a54..b26397d0e3ed7 100644 --- a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeDepthwise.cpp +++ b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeDepthwise.cpp @@ -55,10 +55,11 @@ struct DepthwiseConv2DIsMul : public OpRewritePattern { inputType = RankedTensorType::get( revisedInputShape, dyn_cast(input.getType()).getElementType()); + auto revisedInputShapeValue = + getTosaConstShape(rewriter, op.getLoc(), revisedInputShape); input = rewriter - .create( - op.getLoc(), inputType, input, - rewriter.getDenseI64ArrayAttr(revisedInputShape)) + .create(op.getLoc(), inputType, input, + revisedInputShapeValue) .getResult(); Type inputETy = inputType.getElementType(); @@ -153,9 +154,10 @@ struct DepthwiseConv2DIsMul : public OpRewritePattern { auto outputShapeType = RankedTensorType::get( outputShape, dyn_cast(input.getType()).getElementType()); + auto outputShapeValue = + getTosaConstShape(rewriter, op->getLoc(), outputShape); Value outputValue = rewriter.create( - op.getLoc(), outputShapeType, mulValue, - rewriter.getDenseI64ArrayAttr(outputShape)); + op.getLoc(), outputShapeType, mulValue, outputShapeValue); Value bias = op.getBias(); if (EqualizeRanks(rewriter, op.getLoc(), outputValue, bias).failed()) { diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp index b5b3e9d76c47e..26baddcf1dd15 100644 --- a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp +++ b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp @@ -159,9 +159,11 @@ class TransposeConvStridedConverter outputChannels, weightHeight / stride[0], stride[0], weightWidth / stride[1], stride[1], inputChannels}; + + ImplicitLocOpBuilder builder(op->getLoc(), rewriter); weight = CreateOpAndInferShape( - rewriter, loc, UnrankedTensorType::get(weightETy), weight, - rewriter.getDenseI64ArrayAttr(weightReshapeDims0)); + builder, UnrankedTensorType::get(weightETy), weight, + getTosaConstShape(rewriter, loc, weightReshapeDims0)); // Transpose the 
factored-out stride to the output channels. Value transposeWeightVal = rewriter.create( @@ -173,12 +175,13 @@ class TransposeConvStridedConverter transposeWeightVal); // Collapse the strides and output channels into a single dimension. - llvm::SmallVector weightReshapeDims1 = { + llvm::SmallVector weightReshapeDims1 = { outputChannels * stride[0] * stride[1], weightHeight / stride[0], weightWidth / stride[1], inputChannels}; + weight = CreateOpAndInferShape( rewriter, loc, UnrankedTensorType::get(weightETy), weight, - rewriter.getDenseI64ArrayAttr(weightReshapeDims1)); + getTosaConstShape(rewriter, loc, weightReshapeDims1)); ShapedType restridedWeightTy = cast(weight.getType()); weight = CreateOpAndInferShape( @@ -257,9 +260,13 @@ class TransposeConvStridedConverter // Factor striding out of the convolution result. llvm::SmallVector convReshapeDims0 = { batch, convHeight, convWidth, stride[0], stride[1], outputChannels}; + + auto convReshapeDims0Value = + getTosaConstShape(rewriter, loc, convReshapeDims0); + conv2d = CreateOpAndInferShape( rewriter, loc, UnrankedTensorType::get(resultETy), conv2d, - rewriter.getDenseI64ArrayAttr(convReshapeDims0)); + convReshapeDims0Value); // Transpose the factored-out stride to the output channels. Value transposeConvVal = rewriter.create( @@ -273,9 +280,13 @@ class TransposeConvStridedConverter // Fuse striding behavior back into width / height. llvm::SmallVector convReshapeDims1 = { batch, convHeight * stride[0], convWidth * stride[1], outputChannels}; + + auto convReshapeDims1Value = + getTosaConstShape(rewriter, loc, convReshapeDims1); + conv2d = CreateOpAndInferShape( rewriter, loc, UnrankedTensorType::get(resultETy), conv2d, - rewriter.getDenseI64ArrayAttr(convReshapeDims1)); + convReshapeDims1Value); // Determine the amount to slice / pad from the result start. int64_t resultSliceTop = std::max(0, -pad[0]); diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaReduceTransposes.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaReduceTransposes.cpp index 520f283a3ba88..281f0529a5c08 100644 --- a/mlir/lib/Dialect/Tosa/Transforms/TosaReduceTransposes.cpp +++ b/mlir/lib/Dialect/Tosa/Transforms/TosaReduceTransposes.cpp @@ -402,13 +402,20 @@ std::optional TosaReduceTransposes::buildMappedToValue( return std::nullopt; // Do not insert a TransposeOp, instead we fold the reshape and its attribute. 
+ llvm::SmallVector newShape; + if (!tosa::getConstShapeValue(reshapeOp.getShape().getDefiningOp(), + newShape)) { + // this mean shape is not constant + return std::nullopt; + } + ImplicitLocOpBuilder builder(reshapeOp.getLoc(), rewriter); auto foldedReshape = rewriter.create( reshapeOp.getLoc(), RankedTensorType::get(applyTOSAPermutation(shape, hoistedPerms), reshapeOutputType.getElementType()), reshapeOp.getInput1(), - rewriter.getDenseI64ArrayAttr( - applyTOSAPermutation(reshapeOp.getNewShape(), hoistedPerms))); + getTosaConstShape(builder, applyTOSAPermutation(llvm::ArrayRef(newShape), + hoistedPerms))); return foldedReshape->getResult(0); } diff --git a/mlir/lib/Dialect/Tosa/Utils/ConversionUtils.cpp b/mlir/lib/Dialect/Tosa/Utils/ConversionUtils.cpp index 62b0bc1857e39..8ab12d038849f 100644 --- a/mlir/lib/Dialect/Tosa/Utils/ConversionUtils.cpp +++ b/mlir/lib/Dialect/Tosa/Utils/ConversionUtils.cpp @@ -145,10 +145,10 @@ LogicalResult mlir::tosa::EqualizeRanks(ImplicitLocOpBuilder &builder, llvm::cast(lowerTensorValue.getType()); auto reshapeOutputType = RankedTensorType::get( ArrayRef(reshapeOutputShape), reshapeInputType.getElementType()); + auto reshapeOutputShapeValue = getTosaConstShape(builder, reshapeOutputShape); auto reshapeLower = builder.create( - reshapeOutputType, lowerTensorValue, - builder.getDenseI64ArrayAttr(reshapeOutputShape)); + reshapeOutputType, lowerTensorValue, reshapeOutputShapeValue); if (input1Rank > input2Rank) { input1 = higherTensorValue; @@ -161,15 +161,20 @@ LogicalResult mlir::tosa::EqualizeRanks(ImplicitLocOpBuilder &builder, return success(); } -Value mlir::tosa::getTosaConstShape(PatternRewriter &rewriter, Location loc, +Value mlir::tosa::getTosaConstShape(ImplicitLocOpBuilder &builder, llvm::ArrayRef shape) { - auto attr = rewriter.getIndexTensorAttr(shape); - auto type = mlir::tosa::shapeType::get(rewriter.getContext(), shape.size()); - mlir::Operation *mlir_op = - rewriter.create(loc, type, attr); + auto attr = builder.getIndexTensorAttr(convertFromMlirShape(shape)); + auto type = mlir::tosa::shapeType::get(builder.getContext(), shape.size()); + mlir::Operation *mlir_op = builder.create(type, attr); return mlir_op->getResult(0); } +Value mlir::tosa::getTosaConstShape(PatternRewriter &rewriter, Location loc, + llvm::ArrayRef shape) { + ImplicitLocOpBuilder builder(loc, rewriter); + return getTosaConstShape(builder, shape); +} + SmallVector mlir::tosa::convertFromMlirShape(ArrayRef shape) { return to_vector(llvm::map_range(shape, [](int64_t dim) { return ShapedType::isDynamic(dim) ? -1 : dim; diff --git a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp index 30ff2df7c38fc..b4a5461f4405d 100644 --- a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp +++ b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp @@ -1991,15 +1991,23 @@ static Value foldScalarExtractFromFromElements(ExtractOp extractOp) { /// Fold an insert or extract operation into an poison value when a poison index /// is found at any dimension of the static position. -static ub::PoisonAttr -foldPoisonIndexInsertExtractOp(MLIRContext *context, - ArrayRef staticPos, int64_t poisonVal) { +static Attribute foldPoisonIndexInsertExtractOp(MLIRContext *context, + ArrayRef staticPos, + int64_t poisonVal) { if (!llvm::is_contained(staticPos, poisonVal)) - return ub::PoisonAttr(); + return {}; return ub::PoisonAttr::get(context); } +/// Fold a vector extract from is a poison source. 
+static Attribute foldPoisonSrcExtractOp(Attribute srcAttr) { + if (llvm::isa_and_nonnull(srcAttr)) + return srcAttr; + + return {}; +} + OpFoldResult ExtractOp::fold(FoldAdaptor adaptor) { // Fold "vector.extract %v[] : vector<2x2xf32> from vector<2x2xf32>" to %v. // Note: Do not fold "vector.extract %v[] : f32 from vector" (type @@ -2009,6 +2017,8 @@ OpFoldResult ExtractOp::fold(FoldAdaptor adaptor) { if (auto res = foldPoisonIndexInsertExtractOp( getContext(), adaptor.getStaticPosition(), kPoisonIndex)) return res; + if (auto res = foldPoisonSrcExtractOp(adaptor.getVector())) + return res; if (succeeded(foldExtractOpFromExtractChain(*this))) return getResult(); if (auto res = ExtractFromInsertTransposeChainState(*this).fold()) diff --git a/mlir/lib/Dialect/Vector/Transforms/LowerVectorBitCast.cpp b/mlir/lib/Dialect/Vector/Transforms/LowerVectorBitCast.cpp index d8c4939dc742a..89930a6bd35fa 100644 --- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorBitCast.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorBitCast.cpp @@ -11,6 +11,7 @@ // //===----------------------------------------------------------------------===// +#include "mlir/Dialect/UB/IR/UBOps.h" #include "mlir/Dialect/Vector/IR/VectorOps.h" #include "mlir/Dialect/Vector/Transforms/LoweringPatterns.h" #include "mlir/Dialect/Vector/Utils/VectorUtils.h" @@ -32,7 +33,7 @@ namespace { /// /// Would be unrolled to: /// -/// %result = arith.constant dense<0> : vector<1x2x3x8xi32> +/// %result = ub.poison : vector<1x2x3x8xi32> /// %0 = vector.extract %a[0, 0, 0] ─┐ /// : vector<4xi64> from vector<1x2x3x4xi64> | /// %1 = vector.bitcast %0 | - Repeated 6x for @@ -63,8 +64,7 @@ class UnrollBitCastOp final : public OpRewritePattern { VectorType::get(shape, resultType.getElementType(), scalableDims); Location loc = op.getLoc(); - Value result = rewriter.create( - loc, resultType, rewriter.getZeroAttr(resultType)); + Value result = rewriter.create(loc, resultType); for (auto position : *unrollIterator) { Value extract = rewriter.create(loc, op.getSource(), position); diff --git a/mlir/lib/Dialect/Vector/Transforms/LowerVectorBroadcast.cpp b/mlir/lib/Dialect/Vector/Transforms/LowerVectorBroadcast.cpp index 6c36bbaee8523..fec3c6c52e5e4 100644 --- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorBroadcast.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorBroadcast.cpp @@ -11,27 +11,16 @@ // //===----------------------------------------------------------------------===// -#include "mlir/Dialect/Affine/IR/AffineOps.h" -#include "mlir/Dialect/Arith/IR/Arith.h" -#include "mlir/Dialect/Arith/Utils/Utils.h" -#include "mlir/Dialect/Linalg/IR/Linalg.h" #include "mlir/Dialect/MemRef/IR/MemRef.h" -#include "mlir/Dialect/SCF/IR/SCF.h" -#include "mlir/Dialect/Tensor/IR/Tensor.h" -#include "mlir/Dialect/Utils/IndexingUtils.h" -#include "mlir/Dialect/Utils/StructuredOpsUtils.h" +#include "mlir/Dialect/UB/IR/UBOps.h" #include "mlir/Dialect/Vector/IR/VectorOps.h" #include "mlir/Dialect/Vector/Transforms/LoweringPatterns.h" #include "mlir/Dialect/Vector/Transforms/VectorRewritePatterns.h" #include "mlir/Dialect/Vector/Utils/VectorUtils.h" -#include "mlir/IR/BuiltinAttributeInterfaces.h" #include "mlir/IR/BuiltinTypes.h" -#include "mlir/IR/ImplicitLocOpBuilder.h" #include "mlir/IR/Location.h" -#include "mlir/IR/Matchers.h" #include "mlir/IR/PatternMatch.h" #include "mlir/IR/TypeUtilities.h" -#include "mlir/Interfaces/VectorInterfaces.h" #define DEBUG_TYPE "vector-broadcast-lowering" @@ -86,8 +75,7 @@ class BroadcastOpLowering : public OpRewritePattern 
{ VectorType resType = VectorType::Builder(dstType).dropDim(0); Value bcst = rewriter.create(loc, resType, op.getSource()); - Value result = rewriter.create( - loc, dstType, rewriter.getZeroAttr(dstType)); + Value result = rewriter.create(loc, dstType); for (int64_t d = 0, dim = dstType.getDimSize(0); d < dim; ++d) result = rewriter.create(loc, bcst, result, d); rewriter.replaceOp(op, result); @@ -127,8 +115,7 @@ class BroadcastOpLowering : public OpRewritePattern { VectorType resType = VectorType::get(dstType.getShape().drop_front(), eltType, dstType.getScalableDims().drop_front()); - Value result = rewriter.create( - loc, dstType, rewriter.getZeroAttr(dstType)); + Value result = rewriter.create(loc, dstType); if (m == 0) { // Stetch at start. Value ext = rewriter.create(loc, op.getSource(), 0); diff --git a/mlir/lib/Dialect/Vector/Transforms/LowerVectorShapeCast.cpp b/mlir/lib/Dialect/Vector/Transforms/LowerVectorShapeCast.cpp index 239dc9aa1de6f..9c1e5fcee91de 100644 --- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorShapeCast.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorShapeCast.cpp @@ -11,8 +11,8 @@ // //===----------------------------------------------------------------------===// -#include "mlir/Dialect/Arith/IR/Arith.h" #include "mlir/Dialect/MemRef/IR/MemRef.h" +#include "mlir/Dialect/UB//IR/UBOps.h" #include "mlir/Dialect/Vector/IR/VectorOps.h" #include "mlir/Dialect/Vector/Transforms/LoweringPatterns.h" #include "mlir/Dialect/Vector/Transforms/VectorRewritePatterns.h" @@ -73,8 +73,7 @@ class ShapeCastOpNDDownCastRewritePattern SmallVector srcIdx(srcRank - 1, 0); SmallVector resIdx(resRank, 0); int64_t extractSize = sourceVectorType.getShape().back(); - Value result = rewriter.create( - loc, resultVectorType, rewriter.getZeroAttr(resultVectorType)); + Value result = rewriter.create(loc, resultVectorType); // Compute the indices of each 1-D vector element of the source extraction // and destination slice insertion and generate such instructions. @@ -129,8 +128,7 @@ class ShapeCastOpNDUpCastRewritePattern SmallVector srcIdx(srcRank, 0); SmallVector resIdx(resRank - 1, 0); int64_t extractSize = resultVectorType.getShape().back(); - Value result = rewriter.create( - loc, resultVectorType, rewriter.getZeroAttr(resultVectorType)); + Value result = rewriter.create(loc, resultVectorType); for (int64_t i = 0; i < numElts; ++i) { if (i != 0) { incIdx(srcIdx, sourceVectorType, /*step=*/extractSize); @@ -184,8 +182,7 @@ class ShapeCastOpRewritePattern : public OpRewritePattern { // within the source and result shape. 
SmallVector srcIdx(srcRank, 0); SmallVector resIdx(resRank, 0); - Value result = rewriter.create( - loc, resultVectorType, rewriter.getZeroAttr(resultVectorType)); + Value result = rewriter.create(loc, resultVectorType); for (int64_t i = 0; i < numElts; i++) { if (i != 0) { incIdx(srcIdx, sourceVectorType); @@ -291,9 +288,7 @@ class ScalableShapeCastOpRewritePattern auto extractionVectorType = VectorType::get( {minExtractionSize}, sourceVectorType.getElementType(), {true}); - Value result = rewriter.create( - loc, resultVectorType, rewriter.getZeroAttr(resultVectorType)); - + Value result = rewriter.create(loc, resultVectorType); SmallVector srcIdx(srcRank, 0); SmallVector resIdx(resRank, 0); diff --git a/mlir/lib/Dialect/Vector/Transforms/LowerVectorTranspose.cpp b/mlir/lib/Dialect/Vector/Transforms/LowerVectorTranspose.cpp index 3c92b222e6bc8..fb4dee33bc5f5 100644 --- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorTranspose.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorTranspose.cpp @@ -11,26 +11,19 @@ // //===----------------------------------------------------------------------===// -#include "mlir/Dialect/Affine/IR/AffineOps.h" #include "mlir/Dialect/Arith/IR/Arith.h" -#include "mlir/Dialect/Arith/Utils/Utils.h" -#include "mlir/Dialect/Linalg/IR/Linalg.h" #include "mlir/Dialect/MemRef/IR/MemRef.h" -#include "mlir/Dialect/SCF/IR/SCF.h" -#include "mlir/Dialect/Tensor/IR/Tensor.h" +#include "mlir/Dialect/UB/IR/UBOps.h" #include "mlir/Dialect/Utils/IndexingUtils.h" #include "mlir/Dialect/Utils/StructuredOpsUtils.h" #include "mlir/Dialect/Vector/IR/VectorOps.h" #include "mlir/Dialect/Vector/Transforms/LoweringPatterns.h" #include "mlir/Dialect/Vector/Utils/VectorUtils.h" -#include "mlir/IR/BuiltinAttributeInterfaces.h" #include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/ImplicitLocOpBuilder.h" #include "mlir/IR/Location.h" -#include "mlir/IR/Matchers.h" #include "mlir/IR/PatternMatch.h" #include "mlir/IR/TypeUtilities.h" -#include "mlir/Interfaces/VectorInterfaces.h" #define DEBUG_TYPE "lower-vector-transpose" @@ -209,7 +202,7 @@ static Value transposeToShuffle16x16(OpBuilder &builder, Value source, int m, ImplicitLocOpBuilder b(source.getLoc(), builder); SmallVector vs; for (int64_t i = 0; i < m; ++i) - vs.push_back(b.create(source, i)); + vs.push_back(b.createOrFold(source, i)); // Interleave 32-bit lanes using // 8x _mm512_unpacklo_epi32 @@ -291,8 +284,7 @@ static Value transposeToShuffle16x16(OpBuilder &builder, Value source, int m, auto reshInputType = VectorType::get( {m, n}, cast(source.getType()).getElementType()); - Value res = - b.create(reshInputType, b.getZeroAttr(reshInputType)); + Value res = b.create(reshInputType); for (int64_t i = 0; i < m; ++i) res = b.create(vs[i], res, i); return res; @@ -368,8 +360,7 @@ class TransposeOpLowering : public OpRewritePattern { // of the leftmost transposed dimensions. We traverse every transpose // element using a linearized index that we delinearize to generate the // appropriate indices for the extract/insert operations. 
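// Note: as a hedged illustration of the loop below, lowering
//
//   %t = vector.transpose %v, [1, 0] : vector<2x3xf32> to vector<3x2xf32>
//
// walks linear indices 0..5, delinearizes each into an extract position, and
// permutes that position for the insert, starting from a poison accumulator:
//
//   %init = ub.poison : vector<3x2xf32>
//   %e0 = vector.extract %v[0, 0] : f32 from vector<2x3xf32>
//   %t0 = vector.insert %e0, %init [0, 0] : f32 into vector<3x2xf32>
//   %e1 = vector.extract %v[0, 1] : f32 from vector<2x3xf32>
//   %t1 = vector.insert %e1, %t0 [1, 0] : f32 into vector<3x2xf32>
//   ... (four more element copies follow)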
- Value result = rewriter.create( - loc, resType, rewriter.getZeroAttr(resType)); + Value result = rewriter.create(loc, resType); int64_t numTransposedElements = ShapedType::getNumElements(prunedInShape); for (int64_t linearIdx = 0; linearIdx < numTransposedElements; @@ -378,9 +369,9 @@ class TransposeOpLowering : public OpRewritePattern { SmallVector insertIdxs(extractIdxs); applyPermutationToVector(insertIdxs, prunedTransp); Value extractOp = - rewriter.create(loc, input, extractIdxs); - result = - rewriter.create(loc, extractOp, result, insertIdxs); + rewriter.createOrFold(loc, input, extractIdxs); + result = rewriter.createOrFold(loc, extractOp, result, + insertIdxs); } rewriter.replaceOp(op, result); diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorUnroll.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorUnroll.cpp index 800c1d9fb1dbf..c1e3850f05c5e 100644 --- a/mlir/lib/Dialect/Vector/Transforms/VectorUnroll.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/VectorUnroll.cpp @@ -172,7 +172,7 @@ struct UnrollTransferReadPattern readOp.getPermutationMapAttr(), readOp.getPadding(), readOp.getMask(), readOp.getInBoundsAttr()); - result = rewriter.create( + result = rewriter.createOrFold( loc, slicedRead, result, elementOffsets, strides); } rewriter.replaceOp(readOp, result); @@ -213,7 +213,7 @@ struct UnrollTransferWritePattern Value resultTensor; for (SmallVector elementOffsets : StaticTileOffsetRange(originalSize, *targetShape, loopOrder)) { - Value slicedVector = rewriter.create( + Value slicedVector = rewriter.createOrFold( loc, writeOp.getVector(), elementOffsets, *targetShape, strides); SmallVector indices = sliceTransferIndices(elementOffsets, originalIndices, @@ -289,8 +289,9 @@ struct UnrollContractionPattern SmallVector operandShape = applyPermutationMap( permutationMap, ArrayRef(*targetShape)); SmallVector operandStrides(operandOffets.size(), 1); - slicesOperands[index] = rewriter.create( - loc, operand, operandOffets, operandShape, operandStrides); + slicesOperands[index] = + rewriter.createOrFold( + loc, operand, operandOffets, operandShape, operandStrides); }; // Extract the new lhs operand. 
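// Note: a small sketch of why the slicing above uses createOrFold; when a
// requested slice spans the whole source vector, extract_strided_slice folds
// to its operand and no op is materialized (shapes are illustrative):
//
//   %s = vector.extract_strided_slice %v
//          {offsets = [0, 0], sizes = [4, 4], strides = [1, 1]}
//          : vector<4x4xf32> to vector<4x4xf32>
//   // folds away; %s is simply %v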
@@ -333,7 +334,7 @@ struct UnrollContractionPattern loc, dstVecType, rewriter.getZeroAttr(dstVecType)); for (const auto &it : accCache) { SmallVector dstStrides(it.first.size(), 1); - result = rewriter.create( + result = rewriter.createOrFold( loc, it.second, result, it.first, dstStrides); } rewriter.replaceOp(contractOp, result); @@ -371,8 +372,10 @@ struct UnrollMultiReductionPattern StaticTileOffsetRange(originalSize, *targetShape)) { SmallVector operands; SmallVector operandStrides(offsets.size(), 1); - Value slicedOperand = rewriter.create( - loc, reductionOp.getSource(), offsets, *targetShape, operandStrides); + Value slicedOperand = + rewriter.createOrFold( + loc, reductionOp.getSource(), offsets, *targetShape, + operandStrides); operands.push_back(slicedOperand); SmallVector dstShape; SmallVector destOffset; @@ -390,7 +393,7 @@ struct UnrollMultiReductionPattern if (accIt != accCache.end()) acc = accIt->second; else - acc = rewriter.create( + acc = rewriter.createOrFold( loc, reductionOp.getAcc(), destOffset, dstShape, accStrides); operands.push_back(acc); auto targetType = VectorType::get( @@ -406,7 +409,7 @@ struct UnrollMultiReductionPattern rewriter.getZeroAttr(reductionOp.getDestType())); for (const auto &it : accCache) { SmallVector dstStrides(it.first.size(), 1); - result = rewriter.create( + result = rewriter.createOrFold( loc, it.second, result, it.first, dstStrides); } rewriter.replaceOp(reductionOp, result); @@ -453,12 +456,12 @@ struct UnrollElementwisePattern : public RewritePattern { continue; } extractOperands.push_back( - rewriter.create( + rewriter.createOrFold( loc, operand.get(), offsets, *targetShape, strides)); } Operation *newOp = cloneOpWithOperandsAndTypes( rewriter, loc, op, extractOperands, newVecType); - result = rewriter.create( + result = rewriter.createOrFold( loc, newOp->getResult(0), result, offsets, strides); } rewriter.replaceOp(op, result); @@ -490,8 +493,9 @@ struct UnrollReductionPattern : public OpRewritePattern { for (SmallVector offsets : StaticTileOffsetRange(originalSize, *targetShape)) { SmallVector strides(offsets.size(), 1); - Value slicedOperand = rewriter.create( - loc, reductionOp.getVector(), offsets, *targetShape, strides); + Value slicedOperand = + rewriter.createOrFold( + loc, reductionOp.getVector(), offsets, *targetShape, strides); Operation *newOp = cloneOpWithOperandsAndTypes( rewriter, loc, reductionOp, slicedOperand, reductionOp.getType()); Value result = newOp->getResult(0); @@ -548,12 +552,13 @@ struct UnrollTransposePattern : public OpRewritePattern { permutedOffsets[indices.value()] = elementOffsets[indices.index()]; permutedShape[indices.value()] = (*targetShape)[indices.index()]; } - Value slicedOperand = rewriter.create( - loc, transposeOp.getVector(), permutedOffsets, permutedShape, - strides); - Value transposedSlice = - rewriter.create(loc, slicedOperand, permutation); - result = rewriter.create( + Value slicedOperand = + rewriter.createOrFold( + loc, transposeOp.getVector(), permutedOffsets, permutedShape, + strides); + Value transposedSlice = rewriter.createOrFold( + loc, slicedOperand, permutation); + result = rewriter.createOrFold( loc, transposedSlice, result, elementOffsets, strides); } rewriter.replaceOp(transposeOp, result); @@ -596,17 +601,19 @@ struct UnrollGatherPattern : public OpRewritePattern { // To get the unrolled gather, extract the same slice based on the // decomposed shape from each of the index, mask, and pass-through // vectors. 
- Value indexSubVec = rewriter.create( + Value indexSubVec = rewriter.createOrFold( loc, gatherOp.getIndexVec(), elementOffsets, *targetShape, strides); - Value maskSubVec = rewriter.create( + Value maskSubVec = rewriter.createOrFold( loc, gatherOp.getMask(), elementOffsets, *targetShape, strides); - Value passThruSubVec = rewriter.create( - loc, gatherOp.getPassThru(), elementOffsets, *targetShape, strides); + Value passThruSubVec = + rewriter.createOrFold( + loc, gatherOp.getPassThru(), elementOffsets, *targetShape, + strides); auto slicedGather = rewriter.create( loc, targetType, gatherOp.getBase(), gatherOp.getIndices(), indexSubVec, maskSubVec, passThruSubVec); - result = rewriter.create( + result = rewriter.createOrFold( loc, slicedGather, result, elementOffsets, strides); } rewriter.replaceOp(gatherOp, result); diff --git a/mlir/lib/Dialect/XeGPU/IR/XeGPUDialect.cpp b/mlir/lib/Dialect/XeGPU/IR/XeGPUDialect.cpp index eb01b15de75c6..becc32d122697 100644 --- a/mlir/lib/Dialect/XeGPU/IR/XeGPUDialect.cpp +++ b/mlir/lib/Dialect/XeGPU/IR/XeGPUDialect.cpp @@ -175,9 +175,10 @@ mlir::Type TensorDescType::parse(::mlir::AsmParser &parser) { if (parser.parseGreater()) return {}; - return TensorDescType::get(parser.getContext(), shape, elementType, - encoding.value_or(mlir::Attribute()), - sg_map.value_or(mlir::Attribute())); + return TensorDescType::getChecked( + [&]() { return parser.emitError(parser.getNameLoc()); }, + parser.getContext(), shape, elementType, + encoding.value_or(mlir::Attribute()), sg_map.value_or(mlir::Attribute())); } void TensorDescType::print(::mlir::AsmPrinter &printer) const { @@ -223,6 +224,81 @@ TensorDescType TensorDescType::get(llvm::ArrayRef shape, return Base::get(context, shape, elementType, attr, sg_map); } +LogicalResult TensorDescType::verify( + llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, + llvm::ArrayRef shape, mlir::Type elementType, + mlir::Attribute encoding, mlir::Attribute sg_map) { + size_t rank = shape.size(); + if (rank != 1 && rank != 2) + return emitError() << "expected 1D or 2D tensor"; + + auto scatterAttr = mlir::dyn_cast_if_present(encoding); + if (scatterAttr) { + // Expected tensor ranks for scattered data: + // - 1D tensor for fully non-contiguous elements (chunk size == 1) + // - 2D tensor for scattered blocks (chunk size > 1) + IntegerAttr chunkAttr = scatterAttr.getChunkSize(); + unsigned chunkSize = chunkAttr ? chunkAttr.getInt() : 1; + if (rank == 1 && chunkSize != 1) + return emitError() << "expected non-contiguous elements for 1D tensor"; + if (rank == 2 && chunkSize < 2) + return emitError() << "expected chunk blocks for 2D tensor"; + } + + if (auto blockAttr = + mlir::dyn_cast_if_present(encoding)) { + MemorySpaceAttr memorySpaceAttr = blockAttr.getMemorySpace(); + if (rank == 2 && memorySpaceAttr && + memorySpaceAttr.getValue() == MemorySpace::SLM) + return emitError() << "SLM is not supported for 2D block tensor"; + } + + if (auto sgMapAttr = llvm::dyn_cast_if_present(sg_map)) { + ArrayRef wiLayout = sgMapAttr.getWiLayout(); + ArrayRef wiData = sgMapAttr.getWiData(); + + if (rank == 1) { + if (wiLayout[0] != 1 || wiData[0] != 1) + return emitError() + << "outer layout distribution and data mapping must be 1 " + "for 1D tensor"; + } + + if (scatterAttr) { + // Validate subgroup mapping rules for scattered tensors. + // A work-item's slice of the tensor with shape [sg_size] or + // [sg_size, chunk_size] will be [1] or [1, chunks_size] respectively, + // the mapping should reflect that. 
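// Note: a hedged, numeric reading of the checks below. For a scattered
// 16x8xf32 descriptor with chunk_size = 8, a subgroup mapping of
//
//   wi_layout = [16, 1], wi_data = [1, 8]
//
// verifies: wi_data[0] == 1 (rows stay contiguous per work item),
// wi_data[1] == chunk_size, and each dimension divides evenly by
// wi_layout[i] * wi_data[i] (16 % 16 == 0, 8 % 8 == 0), leaving every work
// item a [1, 8] slice. The concrete sizes here are illustrative only.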
+ if (wiData[0] != 1) + return emitError() + << "cannot map over non-contiguous scattered row elements"; + + IntegerAttr chunkAttr = scatterAttr.getChunkSize(); + unsigned chunkSize = chunkAttr ? chunkAttr.getInt() : 1; + if (wiData[1] != chunkSize) + return emitError() << "work item data mapping must match the number of " + "contiguous elements"; + } + + // For 1D tensor, pad the shape with an outer unit dimension to allow common + // validation logic. + SmallVector tensorShape(shape.begin(), shape.end()); + if (rank == 1) + tensorShape = {1, tensorShape.back()}; + + size_t dims = tensorShape.size(); + for (size_t i = 0; i < dims; ++i) { + uint32_t numElemPerWi = wiLayout[i] * wiData[i]; + if (tensorShape[i] < numElemPerWi || tensorShape[i] % numElemPerWi != 0) + return emitError() << "cannot distribute " << tensorShape[i] << " over " + << wiLayout[i] << " work items with " << wiData[i] + << " elements each"; + } + } + + return success(); +} + } // namespace xegpu } // namespace mlir diff --git a/mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp b/mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp index cd883baa986b8..e06d99ac20bb7 100644 --- a/mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp +++ b/mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp @@ -81,24 +81,28 @@ static bool isWriteHintOrNone(const CachePolicyAttr &attr) { // each dimension. static bool isArgShapesValid(ArrayRef descShape, ArrayRef valShape, SGMapAttr sgMap) { - if (descShape == valShape) { - if (!sgMap) - return true; - - // this can be relaxed if necessary by supporting non-2d shapes distribution - // until the constraints are defined this lives here instead of the tensor - // descriptor type. - return valShape.size() == sgMap.getWiLayout().size(); - } + // Equal shapes with no distribution - no further verification needed. + if (descShape == valShape && !sgMap) + return true; + // Unknown distribution - cannot perform operation on partial shape. if (!sgMap) return false; - if (valShape.size() != descShape.size()) + // Invalid rank or mixed rank usage. + size_t descRank = descShape.size(); + if (descRank > 2 || valShape.size() != descRank) return false; + // For 1D, SG map is guaranteed to be unit size in the outer dimension. + // Only take the distribution over the innermost dimension for validation. + ArrayRef wiLayout = sgMap.getWiLayout(); + SmallVector mapLayout(wiLayout.begin(), wiLayout.end()); + if (descRank == 1) + mapLayout = {wiLayout.back()}; + for (const auto &[factor, dim, expected] : - llvm::zip_equal(sgMap.getWiLayout(), valShape, descShape)) { + llvm::zip_equal(mapLayout, valShape, descShape)) { if (factor * dim != expected) return false; } @@ -227,10 +231,6 @@ LogicalResult CreateNdDescOp::verify() { if (getType().isScattered()) return emitOpError("Expects a non-scattered TensorDesc.\n"); - if (getType().getRank() == 2 && - tdescMemorySpace == static_cast(MemorySpace::SLM)) - return emitOpError("SLM is not supported for 2D Block TensorDesc.\n"); - return success(); } @@ -454,22 +454,7 @@ LogicalResult CreateDescOp::verify() { if (shape != tdescShape) return emitOpError("Incorrect TensorDesc shape. ") << "Expected is " << makeString(shape) << "\n"; - if (auto sgMap = tdescTy.getSGMapAttr()) { - // A work-item's slice of the TensorDesc with shape [sg_size] or - // [sg_size, chunk_size] will be [1] or [1, chunks_size] respectively, - // the mapping should reflect that. 
- if (sgMap.getWiData()[0] > 1) - return emitOpError("TensorDesc's SG map only supports multiple elements " - "contiguous along rows."); - if (chunkSize != static_cast(sgMap.getWiData()[1])) - return emitOpError( - "TensorDesc's chunkSize must match WI's data mapping."); - if (int rank = tdescTy.getRank(); - (sgMap.getWiLayout()[2 - rank] != tdescShape[0])) - return emitOpError("Detected a conflict between SG map's work-item " - "layout and TensorDesc shape. Check the index of " - "`subgroup_size` in WI layout map."); - } + return success(); } diff --git a/mlir/lib/Transforms/ViewOpGraph.cpp b/mlir/lib/Transforms/ViewOpGraph.cpp index fa0af7665ba4c..75ee3ed74db5e 100644 --- a/mlir/lib/Transforms/ViewOpGraph.cpp +++ b/mlir/lib/Transforms/ViewOpGraph.cpp @@ -14,6 +14,7 @@ #include "mlir/IR/Operation.h" #include "mlir/Pass/Pass.h" #include "mlir/Support/IndentedOstream.h" +#include "llvm/ADT/STLExtras.h" #include "llvm/Support/Format.h" #include "llvm/Support/GraphWriter.h" #include @@ -29,7 +30,7 @@ using namespace mlir; static const StringRef kLineStyleControlFlow = "dashed"; static const StringRef kLineStyleDataFlow = "solid"; -static const StringRef kShapeNode = "ellipse"; +static const StringRef kShapeNode = "Mrecord"; static const StringRef kShapeNone = "plain"; /// Return the size limits for eliding large attributes. @@ -49,16 +50,25 @@ static std::string strFromOs(function_ref func) { return buf; } -/// Escape special characters such as '\n' and quotation marks. -static std::string escapeString(std::string str) { - return strFromOs([&](raw_ostream &os) { os.write_escaped(str); }); -} - /// Put quotation marks around a given string. static std::string quoteString(const std::string &str) { return "\"" + str + "\""; } +/// For Graphviz record nodes: +/// " Braces, vertical bars and angle brackets must be escaped with a backslash +/// character if you wish them to appear as a literal character " +std::string escapeLabelString(const std::string &str) { + std::string buf; + llvm::raw_string_ostream os(buf); + for (char c : str) { + if (llvm::is_contained({'{', '|', '<', '}', '>', '\n', '"'}, c)) + os << '\\'; + os << c; + } + return buf; +} + using AttributeMap = std::map; namespace { @@ -79,6 +89,12 @@ struct Node { std::optional clusterId; }; +struct DataFlowEdge { + Value value; + Node node; + std::string port; +}; + /// This pass generates a Graphviz dataflow visualization of an MLIR operation. /// Note: See https://www.graphviz.org/doc/info/lang.html for more information /// about the Graphviz DOT language. @@ -107,7 +123,7 @@ class PrintOpPass : public impl::ViewOpGraphBase { private: /// Generate a color mapping that will color every operation with the same /// name the same way. It'll interpolate the hue in the HSV color-space, - /// attempting to keep the contrast suitable for black text. + /// using muted colors that provide good contrast for black text. template void initColorMapping(T &irEntity) { backgroundColors.clear(); @@ -120,8 +136,10 @@ class PrintOpPass : public impl::ViewOpGraphBase { }); for (auto indexedOps : llvm::enumerate(ops)) { double hue = ((double)indexedOps.index()) / ops.size(); + // Use lower saturation (0.3) and higher value (0.95) for better + // readability backgroundColors[indexedOps.value()->getName()].second = - std::to_string(hue) + " 1.0 1.0"; + std::to_string(hue) + " 0.3 0.95"; } } @@ -129,8 +147,8 @@ class PrintOpPass : public impl::ViewOpGraphBase { /// emitted. 
void emitAllEdgeStmts() { if (printDataFlowEdges) { - for (const auto &[value, node, label] : dataFlowEdges) { - emitEdgeStmt(valueToNode[value], node, label, kLineStyleDataFlow); + for (const auto &e : dataFlowEdges) { + emitEdgeStmt(valueToNode[e.value], e.node, e.port, kLineStyleDataFlow); } } @@ -147,8 +165,7 @@ class PrintOpPass : public impl::ViewOpGraphBase { os.indent(); // Emit invisible anchor node from/to which arrows can be drawn. Node anchorNode = emitNodeStmt(" ", kShapeNone); - os << attrStmt("label", quoteString(escapeString(std::move(label)))) - << ";\n"; + os << attrStmt("label", quoteString(label)) << ";\n"; builder(); os.unindent(); os << "}\n"; @@ -176,7 +193,8 @@ class PrintOpPass : public impl::ViewOpGraphBase { // Always emit splat attributes. if (isa(attr)) { - attr.print(os); + os << escapeLabelString( + strFromOs([&](raw_ostream &os) { attr.print(os); })); return; } @@ -184,8 +202,8 @@ class PrintOpPass : public impl::ViewOpGraphBase { auto elements = dyn_cast(attr); if (elements && elements.getNumElements() > largeAttrLimit) { os << std::string(elements.getShapedType().getRank(), '[') << "..." - << std::string(elements.getShapedType().getRank(), ']') << " : " - << elements.getType(); + << std::string(elements.getShapedType().getRank(), ']') << " : "; + emitMlirType(os, elements.getType()); return; } @@ -199,19 +217,27 @@ class PrintOpPass : public impl::ViewOpGraphBase { std::string buf; llvm::raw_string_ostream ss(buf); attr.print(ss); - os << truncateString(buf); + os << escapeLabelString(truncateString(buf)); + } + + // Print a truncated and escaped MLIR type to `os`. + void emitMlirType(raw_ostream &os, Type type) { + std::string buf; + llvm::raw_string_ostream ss(buf); + type.print(ss); + os << escapeLabelString(truncateString(buf)); + } + + // Print a truncated and escaped MLIR operand to `os`. + void emitMlirOperand(raw_ostream &os, Value operand) { + operand.printAsOperand(os, OpPrintingFlags()); } /// Append an edge to the list of edges. /// Note: Edges are written to the output stream via `emitAllEdgeStmts`. - void emitEdgeStmt(Node n1, Node n2, std::string label, StringRef style) { + void emitEdgeStmt(Node n1, Node n2, std::string port, StringRef style) { AttributeMap attrs; attrs["style"] = style.str(); - // Do not label edges that start/end at a cluster boundary. Such edges are - // clipped at the boundary, but labels are not. This can lead to labels - // floating around without any edge next to them. - if (!n1.clusterId && !n2.clusterId) - attrs["label"] = quoteString(escapeString(std::move(label))); // Use `ltail` and `lhead` to draw edges between clusters. 
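// Note: with the record-shaped nodes, data-flow edges are anchored to the
// per-value ports of those records; for a value %0 flowing into an operand,
// the emitted edge looks roughly like
//
//   v1:res_0:s -> v2:arg_0:n [style = solid];
//
// i.e. it leaves the south side of the producer's result slot and enters the
// north side of the consumer's operand slot (port names follow
// getValuePortName, which rewrites '%' and '#' to '_').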
if (n1.clusterId) attrs["ltail"] = "cluster_" + std::to_string(*n1.clusterId); @@ -219,7 +245,15 @@ class PrintOpPass : public impl::ViewOpGraphBase { attrs["lhead"] = "cluster_" + std::to_string(*n2.clusterId); edges.push_back(strFromOs([&](raw_ostream &os) { - os << llvm::format("v%i -> v%i ", n1.id, n2.id); + os << "v" << n1.id; + if (!port.empty() && !n1.clusterId) + // Attach edge to south compass point of the result + os << ":res" << port << ":s"; + os << " -> "; + os << "v" << n2.id; + if (!port.empty() && !n2.clusterId) + // Attach edge to north compass point of the operand + os << ":arg" << port << ":n"; emitAttrList(os, attrs); })); } @@ -240,11 +274,11 @@ class PrintOpPass : public impl::ViewOpGraphBase { StringRef background = "") { int nodeId = ++counter; AttributeMap attrs; - attrs["label"] = quoteString(escapeString(std::move(label))); + attrs["label"] = quoteString(label); attrs["shape"] = shape.str(); if (!background.empty()) { attrs["style"] = "filled"; - attrs["fillcolor"] = ("\"" + background + "\"").str(); + attrs["fillcolor"] = quoteString(background.str()); } os << llvm::format("v%i ", nodeId); emitAttrList(os, attrs); @@ -252,8 +286,18 @@ class PrintOpPass : public impl::ViewOpGraphBase { return Node(nodeId); } - /// Generate a label for an operation. - std::string getLabel(Operation *op) { + std::string getValuePortName(Value operand) { + // Print value as an operand and omit the leading '%' character. + auto str = strFromOs([&](raw_ostream &os) { + operand.printAsOperand(os, OpPrintingFlags()); + }); + // Replace % and # with _ + std::replace(str.begin(), str.end(), '%', '_'); + std::replace(str.begin(), str.end(), '#', '_'); + return str; + } + + std::string getClusterLabel(Operation *op) { return strFromOs([&](raw_ostream &os) { // Print operation name and type. os << op->getName(); @@ -267,18 +311,73 @@ class PrintOpPass : public impl::ViewOpGraphBase { // Print attributes. if (printAttrs) { - os << "\n"; + os << "\\l"; + for (const NamedAttribute &attr : op->getAttrs()) { + os << escapeLabelString(attr.getName().getValue().str()) << ": "; + emitMlirAttr(os, attr.getValue()); + os << "\\l"; + } + } + }); + } + + /// Generate a label for an operation. + std::string getRecordLabel(Operation *op) { + return strFromOs([&](raw_ostream &os) { + os << "{"; + + // Print operation inputs. + if (op->getNumOperands() > 0) { + os << "{"; + auto operandToPort = [&](Value operand) { + os << " "; + emitMlirOperand(os, operand); + }; + interleave(op->getOperands(), os, operandToPort, "|"); + os << "}|"; + } + // Print operation name and type. + os << op->getName() << "\\l"; + + // Print attributes. + if (printAttrs && !op->getAttrs().empty()) { + // Extra line break to separate attributes from the operation name. + os << "\\l"; for (const NamedAttribute &attr : op->getAttrs()) { - os << '\n' << attr.getName().getValue() << ": "; + os << attr.getName().getValue() << ": "; emitMlirAttr(os, attr.getValue()); + os << "\\l"; } } + + if (op->getNumResults() > 0) { + os << "|{"; + auto resultToPort = [&](Value result) { + os << " "; + emitMlirOperand(os, result); + if (printResultTypes) { + os << " "; + emitMlirType(os, result.getType()); + } + }; + interleave(op->getResults(), os, resultToPort, "|"); + os << "}"; + } + + os << "}"; }); } /// Generate a label for a block argument. 
std::string getLabel(BlockArgument arg) { - return "arg" + std::to_string(arg.getArgNumber()); + return strFromOs([&](raw_ostream &os) { + os << " "; + arg.printAsOperand(os, OpPrintingFlags()); + if (printResultTypes) { + os << " "; + emitMlirType(os, arg.getType()); + } + }); } /// Process a block. Emit a cluster and one node per block argument and @@ -287,14 +386,12 @@ class PrintOpPass : public impl::ViewOpGraphBase { emitClusterStmt([&]() { for (BlockArgument &blockArg : block.getArguments()) valueToNode[blockArg] = emitNodeStmt(getLabel(blockArg)); - // Emit a node for each operation. std::optional prevNode; for (Operation &op : block) { Node nextNode = processOperation(&op); if (printControlFlowEdges && prevNode) - emitEdgeStmt(*prevNode, nextNode, /*label=*/"", - kLineStyleControlFlow); + emitEdgeStmt(*prevNode, nextNode, /*port=*/"", kLineStyleControlFlow); prevNode = nextNode; } }); @@ -311,18 +408,19 @@ class PrintOpPass : public impl::ViewOpGraphBase { for (Region ®ion : op->getRegions()) processRegion(region); }, - getLabel(op)); + getClusterLabel(op)); } else { - node = emitNodeStmt(getLabel(op), kShapeNode, + node = emitNodeStmt(getRecordLabel(op), kShapeNode, backgroundColors[op->getName()].second); } // Insert data flow edges originating from each operand. if (printDataFlowEdges) { unsigned numOperands = op->getNumOperands(); - for (unsigned i = 0; i < numOperands; i++) - dataFlowEdges.push_back({op->getOperand(i), node, - numOperands == 1 ? "" : std::to_string(i)}); + for (unsigned i = 0; i < numOperands; i++) { + auto operand = op->getOperand(i); + dataFlowEdges.push_back({operand, node, getValuePortName(operand)}); + } } for (Value result : op->getResults()) @@ -352,7 +450,7 @@ class PrintOpPass : public impl::ViewOpGraphBase { /// Mapping of SSA values to Graphviz nodes/clusters. DenseMap valueToNode; /// Output for data flow edges is delayed until the end to handle cycles - std::vector> dataFlowEdges; + std::vector dataFlowEdges; /// Counter for generating unique node/subgraph identifiers. 
int counter = 0; diff --git a/mlir/test/Conversion/ConvertToSPIRV/vector-unroll.mlir b/mlir/test/Conversion/ConvertToSPIRV/vector-unroll.mlir index 043f9422d8790..d68ba44ee8840 100644 --- a/mlir/test/Conversion/ConvertToSPIRV/vector-unroll.mlir +++ b/mlir/test/Conversion/ConvertToSPIRV/vector-unroll.mlir @@ -83,17 +83,17 @@ func.func @vaddi_reduction(%arg0 : vector<8xi32>, %arg1 : vector<8xi32>) -> (i32 // CHECK-LABEL: @transpose // CHECK-SAME: (%[[ARG0:.+]]: vector<3xi32>, %[[ARG1:.+]]: vector<3xi32>) func.func @transpose(%arg0 : vector<2x3xi32>) -> (vector<3x2xi32>) { - // CHECK: %[[CST:.*]] = arith.constant dense<0> : vector<2xi32> + // CHECK: %[[UB:.*]] = ub.poison : vector<2xi32> // CHECK: %[[EXTRACT0:.*]] = vector.extract %[[ARG0]][0] : i32 from vector<3xi32> - // CHECK: %[[INSERT0:.*]]= vector.insert %[[EXTRACT0]], %[[CST]] [0] : i32 into vector<2xi32> + // CHECK: %[[INSERT0:.*]]= vector.insert %[[EXTRACT0]], %[[UB]] [0] : i32 into vector<2xi32> // CHECK: %[[EXTRACT1:.*]] = vector.extract %[[ARG1]][0] : i32 from vector<3xi32> // CHECK: %[[INSERT1:.*]] = vector.insert %[[EXTRACT1]], %[[INSERT0]][1] : i32 into vector<2xi32> // CHECK: %[[EXTRACT2:.*]] = vector.extract %[[ARG0]][1] : i32 from vector<3xi32> - // CHECK: %[[INSERT2:.*]] = vector.insert %[[EXTRACT2]], %[[CST]] [0] : i32 into vector<2xi32> + // CHECK: %[[INSERT2:.*]] = vector.insert %[[EXTRACT2]], %[[UB]] [0] : i32 into vector<2xi32> // CHECK: %[[EXTRACT3:.*]] = vector.extract %[[ARG1]][1] : i32 from vector<3xi32> // CHECK: %[[INSERT3:.*]] = vector.insert %[[EXTRACT3]], %[[INSERT2]] [1] : i32 into vector<2xi32> // CHECK: %[[EXTRACT4:.*]] = vector.extract %[[ARG0]][2] : i32 from vector<3xi32> - // CHECK: %[[INSERT4:.*]] = vector.insert %[[EXTRACT4]], %[[CST]] [0] : i32 into vector<2xi32> + // CHECK: %[[INSERT4:.*]] = vector.insert %[[EXTRACT4]], %[[UB]] [0] : i32 into vector<2xi32> // CHECK: %[[EXTRACT5:.*]] = vector.extract %[[ARG1]][2] : i32 from vector<3xi32> // CHECK: %[[INSERT5:.*]] = vector.insert %[[EXTRACT5]], %[[INSERT4]] [1] : i32 into vector<2xi32> // CHECK: return %[[INSERT1]], %[[INSERT3]], %[[INSERT5]] : vector<2xi32>, vector<2xi32>, vector<2xi32> diff --git a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-invalid.mlir b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-invalid.mlir index 75b48f2b06d89..460e207d62de6 100644 --- a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-invalid.mlir +++ b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-invalid.mlir @@ -24,7 +24,8 @@ func.func @unranked_add(%arg0 : tensor<10x10xf32> , %arg1 : tensor<10x10xf32>, % %reduce = tosa.reduce_max %arg0 {axis = 1 : i32} : (tensor<10x10xf32>) -> tensor<10x1xf32> %1 = tosa.add %reduce, %arg1 : (tensor<10x1xf32>, tensor<10x10xf32>) -> tensor<10x10xf32> %0 = tosa.add %1, %arg2 : (tensor<10x10xf32>, tensor<*xf32>) -> tensor<*xf32> - %2 = tosa.reshape %0 {new_shape = array} : (tensor<*xf32>) -> tensor<10x10xf32> + %s = tosa.const_shape {value = dense<[10, 10]> : tensor<2xindex>} : () -> !tosa.shape<2> + %2 = tosa.reshape %0, %s : (tensor<*xf32>, !tosa.shape<2>) -> tensor<10x10xf32> return %2 : tensor<10x10xf32> } diff --git a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir index 6e8501aaaf2af..d8ba28a3ce887 100644 --- a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir +++ b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir @@ -911,12 +911,25 @@ func.func @test_negate_quantized(%arg0: tensor<1xi8>) -> () { // CHECK: linalg.yield [[TRUNC]] %2 = tosa.negate %arg0 
{input1_zp = 32640 : i32, output_zp = 0 : i32} : (tensor<1xi8>) -> tensor<1xi8> + // CHECK: linalg.generic + // CHECK: ^bb0(%[[BBARG0:.+]]: i8, + // CHECK: [[C_128:%.+]] = arith.constant -128 + // CHECK: [[EXT:%.+]] = arith.extsi %[[BBARG0]] : i8 to i16 + // CHECK: [[SUB:%.+]] = arith.subi [[C_128]], [[EXT]] + // CHECK: [[MIN:%.+]] = arith.constant -128 + // CHECK: [[MAX:%.+]] = arith.constant 127 + // CHECK: [[LBOUND:%.+]] = arith.maxsi [[MIN]], [[SUB]] + // CHECK: [[UBOUND:%.+]] = arith.minsi [[MAX]], [[LBOUND]] + // CHECK: [[TRUNC:%.+]] = arith.trunci [[UBOUND]] + // CHECK: linalg.yield [[TRUNC]] + %3 = tosa.negate %arg0 {input1_zp = -128 : i32, output_zp = 0 : i32} : (tensor<1xi8>) -> tensor<1xi8> + // CHECK: linalg.generic // CHECK: ^bb0(%[[BBARG0:.+]]: i8, // CHECK: [[ZERO:%.+]] = arith.constant 0 // CHECK: [[SUB:%.+]] = arith.subi [[ZERO]], // CHECK: linalg.yield [[SUB]] - %3 = tosa.negate %arg0 {quantization_info = #tosa.unary_quant} : (tensor<1xi8>) -> tensor<1xi8> + %4 = tosa.negate %arg0 {quantization_info = #tosa.unary_quant} : (tensor<1xi8>) -> tensor<1xi8> return } @@ -1387,7 +1400,8 @@ func.func @tile(%arg0 : tensor<2x3xi8>) -> () { // CHECK: [[GENERIC:%.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP1]]], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%[[ARG0]] : tensor<2x3xi8>) outs([[INIT]] : tensor<2x2x1x3xi8>) // CHECK: ^bb0(%[[ARG1:[0-9a-zA-Z_]+]]: i8 // CHECK: linalg.yield %[[ARG1]] : i8 - // CHECK: tosa.reshape [[GENERIC]] {new_shape = array} + // CHECK: [[CONST3:%.+]] = tosa.const_shape {value = dense<[4, 3]> : tensor<2xindex>} : () -> !tosa.shape<2> + // CHECK: tosa.reshape [[GENERIC]], [[CONST3]] %cst21 = tosa.const_shape { value = dense<[2, 1]> : tensor<2xindex> } : () -> !tosa.shape<2> %0 = tosa.tile %arg0, %cst21: (tensor<2x3xi8>, !tosa.shape<2>) -> tensor<4x3xi8> @@ -1395,7 +1409,8 @@ func.func @tile(%arg0 : tensor<2x3xi8>) -> () { // CHECK: [[GENERIC:%.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP1]]], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%[[ARG0]] : tensor<2x3xi8>) outs([[INIT]] : tensor<1x2x2x3xi8>) // CHECK: ^bb0(%[[ARG1:[0-9a-zA-Z_]+]]: i8 // CHECK: linalg.yield %[[ARG1]] : i8 - // CHECK: tosa.reshape [[GENERIC]] {new_shape = array} + // CHECK: [[CONST8:%.+]] = tosa.const_shape {value = dense<[2, 6]> : tensor<2xindex>} : () -> !tosa.shape<2> + // tosa.reshape [[GENERIC]], [[CONST8]] %cst12 = tosa.const_shape { value = dense<[1, 2]> : tensor<2xindex> } : () -> !tosa.shape<2> %1 = tosa.tile %arg0, %cst12: (tensor<2x3xi8>, !tosa.shape<2>) -> tensor<2x6xi8> @@ -1403,8 +1418,9 @@ func.func @tile(%arg0 : tensor<2x3xi8>) -> () { // CHECK: [[GENERIC:%.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP1]]], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%[[ARG0]] : tensor<2x3xi8>) outs([[INIT]] : tensor<5x2x7x3xi8>) // CHECK: ^bb0(%[[ARG1:[0-9a-zA-Z_]+]]: i8 // CHECK: linalg.yield %[[ARG1]] : i8 - // CHECK: tosa.reshape [[GENERIC]] {new_shape = array} %cst57 = tosa.const_shape { value = dense<[5, 7]> : tensor<2xindex> } : () -> !tosa.shape<2> + // CHECK: [[CONST13:%.+]] = tosa.const_shape {value = dense<[10, 21]> : tensor<2xindex>} : () -> !tosa.shape<2> + // CHECK: tosa.reshape [[GENERIC]], [[CONST13]] %2 = tosa.tile %arg0, %cst57: (tensor<2x3xi8>, !tosa.shape<2>) -> tensor<10x21xi8> return @@ -1424,7 +1440,8 @@ func.func @tile_dyn_input(%arg0 : tensor) -> () { // CHECK: %[[GENERIC:.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP1]]], 
iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%[[ARG0]] : tensor) outs(%[[INIT]] : tensor<2x?x1x3xi8>) // CHECK: ^bb0(%[[ARG1:.+]]: i8, // CHECK: linalg.yield %[[ARG1]] : i8 - // CHECK: tosa.reshape %[[GENERIC]] {new_shape = array} + // CHECK: %[[CONST3:.+]] = tosa.const_shape {value = dense<[-1, 3]> : tensor<2xindex>} : () -> !tosa.shape<2> + // CHECK: tosa.reshape %[[GENERIC]], %[[CONST3]] %cst21 = tosa.const_shape { value = dense<[2, 1]> : tensor<2xindex> } : () -> !tosa.shape<2> %0 = tosa.tile %arg0, %cst21: (tensor, !tosa.shape<2>) -> tensor @@ -1445,7 +1462,8 @@ func.func @tile_dyn_multiples(%arg0 : tensor<2x3xi8>) -> () { // CHECK: %[[GENERIC:.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP1]]], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%[[ARG0]] : tensor<2x3xi8>) outs(%[[INIT]] : tensor<2x2x?x3xi8>) // CHECK: ^bb0(%[[ARG1:.+]]: i8, // CHECK: linalg.yield %[[ARG1]] : i8 - // CHECK: tosa.reshape %[[GENERIC]] {new_shape = array} + // CHECK: %[[CONST2:.+]] = tosa.const_shape {value = dense<[2, -1]> : tensor<2xindex>} : () -> !tosa.shape<2> + // CHECK: tosa.reshape %[[GENERIC]], %[[CONST2]] %cst = tosa.const_shape { value = dense<[2, -1]> : tensor<2xindex> } : () -> !tosa.shape<2> %0 = tosa.tile %arg0, %cst: (tensor<2x3xi8>, !tosa.shape<2>) -> tensor<2x?xi8> diff --git a/mlir/test/Conversion/TosaToTensor/tosa-to-tensor.mlir b/mlir/test/Conversion/TosaToTensor/tosa-to-tensor.mlir index e83e898644bc0..c2eaba4c563d0 100644 --- a/mlir/test/Conversion/TosaToTensor/tosa-to-tensor.mlir +++ b/mlir/test/Conversion/TosaToTensor/tosa-to-tensor.mlir @@ -6,7 +6,8 @@ // CHECK-SAME: %[[ARG_0:[a-zA-Z0-9_]+]]: tensor // CHECK: return %[[ARG_0]] : tensor func.func @test_reshape_0d_same_s2s_explicit(%arg0: tensor) -> tensor { - %0 = "tosa.reshape"(%arg0) {new_shape = array} : (tensor) -> tensor + %s = tosa.const_shape { value = dense<> : tensor<0xindex> } : () -> !tosa.shape<0> + %0 = "tosa.reshape"(%arg0, %s) : (tensor, !tosa.shape<0>) -> tensor return %0 : tensor } @@ -18,7 +19,8 @@ func.func @test_reshape_0d_same_s2s_explicit(%arg0: tensor) -> tensor // CHECK: %[[VAL_1:.*]] = tensor.cast %[[VAL_0]] : tensor<1xf32> to tensor // CHECK: return %[[VAL_1]] : tensor func.func @test_reshape_0d_up_s2d_auto(%arg0: tensor) -> tensor { - %0 = "tosa.reshape"(%arg0) {new_shape = array} : (tensor) -> tensor + %s = tosa.const_shape { value = dense<-1> : tensor<1xindex> } : () -> !tosa.shape<1> + %0 = "tosa.reshape"(%arg0, %s) : (tensor, !tosa.shape<1>) -> tensor return %0 : tensor } @@ -30,7 +32,8 @@ func.func @test_reshape_0d_up_s2d_auto(%arg0: tensor) -> tensor { // CHECK: %[[VAL_1:.*]] = tensor.cast %[[VAL_0]] : tensor<1xf32> to tensor // CHECK: return %[[VAL_1]] : tensor func.func @test_reshape_0d_up_s2d_explicit(%arg0: tensor) -> tensor { - %0 = "tosa.reshape"(%arg0) {new_shape = array} : (tensor) -> tensor + %s = tosa.const_shape { value = dense<1> : tensor<1xindex> } : () -> !tosa.shape<1> + %0 = "tosa.reshape"(%arg0, %s) : (tensor, !tosa.shape<1>) -> tensor return %0 : tensor } @@ -41,7 +44,8 @@ func.func @test_reshape_0d_up_s2d_explicit(%arg0: tensor) -> tensor // CHECK: %[[VAL_0:.*]] = tensor.expand_shape %[[ARG_0]] [] output_shape [1] : tensor into tensor<1xf32> // CHECK: return %[[VAL_0]] : tensor<1xf32> func.func @test_reshape_0d_up_s2s_auto(%arg0: tensor) -> tensor<1xf32> { - %0 = "tosa.reshape"(%arg0) {new_shape = array} : (tensor) -> tensor<1xf32> + %s = tosa.const_shape { value = dense<-1> : tensor<1xindex> } : () -> !tosa.shape<1> + %0 
= "tosa.reshape"(%arg0, %s) : (tensor, !tosa.shape<1>) -> tensor<1xf32> return %0 : tensor<1xf32> } @@ -52,7 +56,8 @@ func.func @test_reshape_0d_up_s2s_auto(%arg0: tensor) -> tensor<1xf32> { // CHECK: %[[VAL_0:.*]] = tensor.expand_shape %[[ARG_0]] [] output_shape [1] : tensor into tensor<1xf32> // CHECK: return %[[VAL_0]] : tensor<1xf32> func.func @test_reshape_0d_up_s2s_explicit(%arg0: tensor) -> tensor<1xf32> { - %0 = "tosa.reshape"(%arg0) {new_shape = array} : (tensor) -> tensor<1xf32> + %s = tosa.const_shape { value = dense<1> : tensor<1xindex> } : () -> !tosa.shape<1> + %0 = "tosa.reshape"(%arg0, %s) : (tensor, !tosa.shape<1>) -> tensor<1xf32> return %0 : tensor<1xf32> } @@ -64,7 +69,8 @@ func.func @test_reshape_0d_up_s2s_explicit(%arg0: tensor) -> tensor<1xf32> // CHECK: %[[VAL_1:.*]] = tensor.collapse_shape %[[VAL_0]] [] : tensor<1xf32> into tensor // CHECK: return %[[VAL_1]] : tensor func.func @test_reshape_1d_down_d2s_explicit(%arg0: tensor) -> tensor { - %0 = "tosa.reshape"(%arg0) {new_shape = array} : (tensor) -> tensor + %s = tosa.const_shape { value = dense<> : tensor<0xindex> } : () -> !tosa.shape<0> + %0 = "tosa.reshape"(%arg0, %s) : (tensor, !tosa.shape<0>) -> tensor return %0 : tensor } @@ -75,7 +81,8 @@ func.func @test_reshape_1d_down_d2s_explicit(%arg0: tensor) -> tensor into tensor // CHECK: return %[[VAL_0]] : tensor func.func @test_reshape_1d_down_s2s_explicit(%arg0: tensor<1xf32>) -> tensor { - %0 = "tosa.reshape"(%arg0) {new_shape = array} : (tensor<1xf32>) -> tensor + %s = tosa.const_shape { value = dense<> : tensor<0xindex> } : () -> !tosa.shape<0> + %0 = "tosa.reshape"(%arg0, %s) : (tensor<1xf32>, !tosa.shape<0>) -> tensor return %0 : tensor } @@ -90,7 +97,8 @@ func.func @test_reshape_1d_down_s2s_explicit(%arg0: tensor<1xf32>) -> tensor into tensor<2x?xf32> // CHECK: return %[[EXPANDED]] : tensor<2x?xf32> func.func @test_reshape_1d_up_d2d_auto(%arg0: tensor) -> tensor<2x?xf32> { - %0 = "tosa.reshape"(%arg0) {new_shape = array} : (tensor) -> tensor<2x?xf32> + %s = tosa.const_shape { value = dense<[2, -1]> : tensor<2xindex> } : () -> !tosa.shape<2> + %0 = "tosa.reshape"(%arg0, %s) : (tensor, !tosa.shape<2>) -> tensor<2x?xf32> return %0 : tensor<2x?xf32> } @@ -101,7 +109,8 @@ func.func @test_reshape_1d_up_d2d_auto(%arg0: tensor) -> tensor<2x?xf32> // CHECK: %[[VAL_0:.*]] = tensor.expand_shape %[[ARG_0]] {{\[\[}}0, 1]] output_shape [2, 3] : tensor<6xf32> into tensor<2x3xf32> // CHECK: return %[[VAL_0]] : tensor<2x3xf32> func.func @test_reshape_1d_up_s2s_explicit(%arg0: tensor<6xf32>) -> tensor<2x3xf32> { - %0 = "tosa.reshape"(%arg0) {new_shape = array} : (tensor<6xf32>) -> tensor<2x3xf32> + %s = tosa.const_shape { value = dense<[2, 3]> : tensor<2xindex> } : () -> !tosa.shape<2> + %0 = "tosa.reshape"(%arg0, %s) : (tensor<6xf32>, !tosa.shape<2>) -> tensor<2x3xf32> return %0 : tensor<2x3xf32> } @@ -112,7 +121,8 @@ func.func @test_reshape_1d_up_s2s_explicit(%arg0: tensor<6xf32>) -> tensor<2x3xf // CHECK: %[[VAL_0:.*]] = tensor.collapse_shape %[[ARG_0]] {{\[\[}}0, 1]] : tensor<2x?xf32> into tensor // CHECK: return %[[VAL_0]] : tensor func.func @test_reshape_2d_down_d2d_auto(%arg0: tensor<2x?xf32>) -> tensor { - %0 = "tosa.reshape"(%arg0) {new_shape = array} : (tensor<2x?xf32>) -> tensor + %s = tosa.const_shape { value = dense<-1> : tensor<1xindex> } : () -> !tosa.shape<1> + %0 = "tosa.reshape"(%arg0, %s) : (tensor<2x?xf32>, !tosa.shape<1>) -> tensor return %0 : tensor } @@ -123,7 +133,8 @@ func.func @test_reshape_2d_down_d2d_auto(%arg0: tensor<2x?xf32>) -> tensor into 
tensor<6xf32> // CHECK: return %[[VAL_0]] : tensor<6xf32> func.func @test_reshape_2d_down_s2s_explicit(%arg0: tensor<2x3xf32>) -> tensor<6xf32> { - %0 = "tosa.reshape"(%arg0) {new_shape = array} : (tensor<2x3xf32>) -> tensor<6xf32> + %s = tosa.const_shape { value = dense<6> : tensor<1xindex> } : () -> !tosa.shape<1> + %0 = "tosa.reshape"(%arg0, %s) : (tensor<2x3xf32>, !tosa.shape<1>) -> tensor<6xf32> return %0 : tensor<6xf32> } @@ -139,7 +150,8 @@ func.func @test_reshape_2d_down_s2s_explicit(%arg0: tensor<2x3xf32>) -> tensor<6 // CHECK: %[[EXPANDED:.*]] = tensor.expand_shape %[[VAL_0]] {{\[\[}}0, 1]] output_shape [2, %[[DIV]]] : tensor into tensor<2x?xf32> // CHECK: return %[[EXPANDED]] : tensor<2x?xf32> func.func @test_reshape_2d_same_d2d_auto(%arg0: tensor) -> tensor<2x?xf32> { - %0 = "tosa.reshape"(%arg0) {new_shape = array} : (tensor) -> tensor<2x?xf32> + %s = tosa.const_shape { value = dense<[2, -1]> : tensor<2xindex> } : () -> !tosa.shape<2> + %0 = "tosa.reshape"(%arg0, %s) : (tensor, !tosa.shape<2>) -> tensor<2x?xf32> return %0 : tensor<2x?xf32> } @@ -152,10 +164,12 @@ func.func @test_reshape_2d_same_d2d_auto(%arg0: tensor) -> tensor<2x?xf // CHECK: %[[VAL_2:.*]] = tensor.cast %[[VAL_1]] : tensor<4x2xf32> to tensor // CHECK: return %[[VAL_2]] : tensor func.func @test_reshape_2d_same_s2d_auto(%arg0: tensor<2x4xf32>) -> tensor { - %0 = "tosa.reshape"(%arg0) {new_shape = array} : (tensor<2x4xf32>) -> tensor + %s = tosa.const_shape { value = dense<[-1, 2]> : tensor<2xindex> } : () -> !tosa.shape<2> + %0 = "tosa.reshape"(%arg0, %s) : (tensor<2x4xf32>, !tosa.shape<2>) -> tensor return %0 : tensor } + // ----- // CHECK-LABEL: test_reshape_2d_same_s2d_explicit @@ -165,7 +179,8 @@ func.func @test_reshape_2d_same_s2d_auto(%arg0: tensor<2x4xf32>) -> tensor to tensor // CHECK: return %[[VAL_2]] : tensor func.func @test_reshape_2d_same_s2d_explicit(%arg0: tensor<2x4xf32>) -> tensor { - %0 = "tosa.reshape"(%arg0) {new_shape = array} : (tensor<2x4xf32>) -> tensor + %s = tosa.const_shape { value = dense<[4, 2]> : tensor<2xindex> } : () -> !tosa.shape<2> + %0 = "tosa.reshape"(%arg0, %s) : (tensor<2x4xf32>, !tosa.shape<2>) -> tensor return %0 : tensor } @@ -177,7 +192,8 @@ func.func @test_reshape_2d_same_s2d_explicit(%arg0: tensor<2x4xf32>) -> tensor into tensor<2x3xf32> // CHECK: return %[[VAL_1]] : tensor<2x3xf32> func.func @test_reshape_2d_same_s2s_explicit(%arg0: tensor<3x2xf32>) -> tensor<2x3xf32> { - %0 = "tosa.reshape"(%arg0) {new_shape = array} : (tensor<3x2xf32>) -> tensor<2x3xf32> + %s = tosa.const_shape { value = dense<[2, 3]> : tensor<2xindex> } : () -> !tosa.shape<2> + %0 = "tosa.reshape"(%arg0, %s) : (tensor<3x2xf32>, !tosa.shape<2>) -> tensor<2x3xf32> return %0 : tensor<2x3xf32> } @@ -194,7 +210,8 @@ func.func @test_reshape_2d_same_s2s_explicit(%arg0: tensor<3x2xf32>) -> tensor<2 // CHECK: %[[VAL_2:.*]] = tensor.cast %[[VAL_1]] : tensor<0x3x?xf32> to tensor // CHECK: return %[[VAL_2]] : tensor func.func @test_reshape_3d_same_d2d_auto_empty(%arg0: tensor<3x2x?xf32>) -> tensor { - %0 = "tosa.reshape"(%arg0) {new_shape = array} : (tensor<3x2x?xf32>) -> tensor + %s = tosa.const_shape { value = dense<[0, 3, -1]> : tensor<3xindex> } : () -> !tosa.shape<3> + %0 = "tosa.reshape"(%arg0, %s) : (tensor<3x2x?xf32>, !tosa.shape<3>) -> tensor return %0 : tensor } @@ -211,7 +228,8 @@ func.func @test_reshape_3d_same_d2d_auto_empty(%arg0: tensor<3x2x?xf32>) -> tens // CHECK: %[[VAL_2:.*]] = tensor.cast %[[VAL_1]] : tensor<2x?x4xf32> to tensor // CHECK: return %[[VAL_2]] : tensor func.func 
@test_reshape_3d_same_d2d_auto(%arg0: tensor<2x?x?xf32>) -> tensor { - %0 = "tosa.reshape"(%arg0) {new_shape = array} : (tensor<2x?x?xf32>) -> tensor + %s = tosa.const_shape { value = dense<[2, -1, 4]> : tensor<3xindex> } : () -> !tosa.shape<3> + %0 = "tosa.reshape"(%arg0, %s) : (tensor<2x?x?xf32>, !tosa.shape<3>) -> tensor return %0 : tensor } @@ -227,7 +245,8 @@ func.func @test_reshape_3d_same_d2d_auto(%arg0: tensor<2x?x?xf32>) -> tensor into tensor<2x3x?xf32> // CHECK: return %[[VAL_1]] : tensor<2x3x?xf32> func.func @test_reshape_3d_same_d2d_auto_identity(%arg0: tensor) -> tensor<2x3x?xf32> { - %0 = "tosa.reshape"(%arg0) {new_shape = array} : (tensor) -> tensor<2x3x?xf32> + %s = tosa.const_shape { value = dense<[2, 3, -1]> : tensor<3xindex> } : () -> !tosa.shape<3> + %0 = "tosa.reshape"(%arg0, %s) : (tensor, !tosa.shape<3>) -> tensor<2x3x?xf32> return %0 : tensor<2x3x?xf32> } @@ -244,7 +263,8 @@ func.func @test_reshape_3d_same_d2d_auto_identity(%arg0: tensor) -> t // CHECK: %[[VAL_2:.*]] = tensor.cast %[[EXPANDED]] : tensor to tensor // CHECK: return %[[VAL_2]] : tensor func.func @test_reshape_3d_same_d2d_explicit_empty(%arg0: tensor<3x2x?xf32>) -> tensor { - %0 = "tosa.reshape"(%arg0) {new_shape = array} : (tensor<3x2x?xf32>) -> tensor + %s = tosa.const_shape { value = dense<[0, 3, 2]> : tensor<3xindex> } : () -> !tosa.shape<3> + %0 = "tosa.reshape"(%arg0, %s) : (tensor<3x2x?xf32>, !tosa.shape<3>) -> tensor return %0 : tensor } @@ -261,7 +281,8 @@ func.func @test_reshape_3d_same_d2d_explicit_empty(%arg0: tensor<3x2x?xf32>) -> // CHECK: %[[VAL_2:.*]] = tensor.cast %[[EXPANDED]] : tensor to tensor // CHECK: return %[[VAL_2]] : tensor func.func @test_reshape_3d_same_d2d_explicit(%arg0: tensor) -> tensor { - %0 = "tosa.reshape"(%arg0) {new_shape = array} : (tensor) -> tensor + %s = tosa.const_shape { value = dense<[2, 3, 4]> : tensor<3xindex> } : () -> !tosa.shape<3> + %0 = "tosa.reshape"(%arg0, %s) : (tensor, !tosa.shape<3>) -> tensor return %0 : tensor } @@ -272,7 +293,8 @@ func.func @test_reshape_3d_same_d2d_explicit(%arg0: tensor) -> tensor // CHECK: %[[VAL_0:.*]] = tensor.cast %[[ARG_0]] : tensor to tensor<2x3x?xf32> // CHECK: return %[[VAL_0]] : tensor<2x3x?xf32> func.func @test_reshape_3d_same_d2d_explicit_identity(%arg0: tensor) -> tensor<2x3x?xf32> { - %0 = "tosa.reshape"(%arg0) {new_shape = array} : (tensor) -> tensor<2x3x?xf32> + %s = tosa.const_shape { value = dense<[2, 3, 4]> : tensor<3xindex> } : () -> !tosa.shape<3> + %0 = "tosa.reshape"(%arg0, %s) : (tensor, !tosa.shape<3>) -> tensor<2x3x?xf32> return %0 : tensor<2x3x?xf32> } @@ -289,7 +311,8 @@ func.func @test_reshape_3d_same_d2d_explicit_identity(%arg0: tensor) // CHECK: %[[VAL_2:.*]] = tensor.cast %[[EXPANDED]] : tensor<2x?x4xf32> to tensor<2x3x4xf32> // CHECK: return %[[VAL_2]] : tensor<2x3x4xf32> func.func @test_reshape_3d_same_d2s_auto(%arg0: tensor) -> tensor<2x3x4xf32> { - %0 = "tosa.reshape"(%arg0) {new_shape = array} : (tensor) -> tensor<2x3x4xf32> + %s = tosa.const_shape { value = dense<[2, -1, 4]> : tensor<3xindex> } : () -> !tosa.shape<3> + %0 = "tosa.reshape"(%arg0, %s) : (tensor, !tosa.shape<3>) -> tensor<2x3x4xf32> return %0 : tensor<2x3x4xf32> } @@ -306,7 +329,8 @@ func.func @test_reshape_3d_same_d2s_auto(%arg0: tensor) -> tensor<2x3 // CHECK: %[[VAL_2:.*]] = tensor.cast %[[EXPANDED]] : tensor to tensor<2x3x4xf32> // CHECK: return %[[VAL_2]] : tensor<2x3x4xf32> func.func @test_reshape_3d_same_d2s_explicit(%arg0: tensor) -> tensor<2x3x4xf32> { - %0 = "tosa.reshape"(%arg0) {new_shape = array} : (tensor) -> 
tensor<2x3x4xf32> + %s = tosa.const_shape { value = dense<[2, 3, 4]> : tensor<3xindex> } : () -> !tosa.shape<3> + %0 = "tosa.reshape"(%arg0, %s) : (tensor, !tosa.shape<3>) -> tensor<2x3x4xf32> return %0 : tensor<2x3x4xf32> } @@ -316,7 +340,8 @@ func.func @test_reshape_3d_same_d2s_explicit(%arg0: tensor) -> tensor // CHECK-SAME: %[[ARG_0:[a-zA-Z0-9_]+]]: tensor<2x3x4xf32> // CHECK: return %[[ARG_0]] : tensor<2x3x4xf32> func.func @test_reshape_3d_same_s2s_explicit_identity(%arg0: tensor<2x3x4xf32>) -> tensor<2x3x4xf32> { - %0 = "tosa.reshape"(%arg0) {new_shape = array} : (tensor<2x3x4xf32>) -> tensor<2x3x4xf32> + %s = tosa.const_shape { value = dense<[2, 3, 4]> : tensor<3xindex> } : () -> !tosa.shape<3> + %0 = "tosa.reshape"(%arg0, %s) : (tensor<2x3x4xf32>, !tosa.shape<3>) -> tensor<2x3x4xf32> return %0 : tensor<2x3x4xf32> } @@ -333,7 +358,8 @@ func.func @test_reshape_3d_same_s2s_explicit_identity(%arg0: tensor<2x3x4xf32>) // CHECK: %[[CAST:.*]] = tensor.cast %[[EXPANDED]] : tensor to tensor<1x3x2x1xf32> // CHECK: return %[[CAST]] : tensor<1x3x2x1xf32> func.func @test_reshape_3d_up_d2s_explicit(%input: tensor) -> tensor<1x3x2x1xf32> { - %0 = tosa.reshape %input {new_shape = array} : (tensor) -> tensor<1x3x2x1xf32> + %s = tosa.const_shape { value = dense<[1, 3, 2, 1]> : tensor<4xindex> } : () -> !tosa.shape<4> + %0 = tosa.reshape %input, %s : (tensor, !tosa.shape<4>) -> tensor<1x3x2x1xf32> return %0 : tensor<1x3x2x1xf32> } @@ -345,7 +371,8 @@ func.func @test_reshape_3d_up_d2s_explicit(%input: tensor) -> tensor< // CHECK: %[[VAL_1:.*]] = tensor.collapse_shape %[[VAL_0]] [] : tensor<1x1x1x1xf32> into tensor // CHECK: return %[[VAL_1]] : tensor func.func @test_reshape_4d_down_d2s_explicit(%arg0: tensor) -> tensor { - %0 = "tosa.reshape"(%arg0) {new_shape = array} : (tensor) -> tensor + %s = tosa.const_shape { value = dense<> : tensor<0xindex> } : () -> !tosa.shape<0> + %0 = "tosa.reshape"(%arg0, %s) : (tensor, !tosa.shape<0>) -> tensor return %0 : tensor } @@ -361,7 +388,8 @@ func.func @test_reshape_4d_down_d2s_explicit(%arg0: tensor) -> tens // CHECK: %[[EXPANDED:.*]] = tensor.expand_shape %[[COLLAPSED]] {{\[\[}}0, 1, 2]] output_shape [%[[VAL_0]], 2, 3] : tensor into tensor // CHECK: return %[[EXPANDED]] : tensor func.func @test_reshape_5d_down_d2d_auto(%arg0: tensor) -> tensor { - %0 = "tosa.reshape"(%arg0) {new_shape = array} : (tensor) -> tensor + %s = tosa.const_shape { value = dense<[-1, 2, 3]> : tensor<3xindex> } : () -> !tosa.shape<3> + %0 = "tosa.reshape"(%arg0, %s) : (tensor, !tosa.shape<3>) -> tensor return %0 : tensor } @@ -377,7 +405,8 @@ func.func @test_reshape_5d_down_d2d_auto(%arg0: tensor) -> tensor // CHECK: %[[EXPANDED:.*]] = tensor.expand_shape %[[COLLAPSED]] {{\[\[}}0, 1, 2]] output_shape [%[[VAL_0]], 5, 77] : tensor into tensor // CHECK: return %[[EXPANDED]] : tensor func.func @test_reshape_6d_down_d2d_auto(%arg0: tensor<1x2x?x5x7x11xf32>) -> tensor { - %0 = "tosa.reshape"(%arg0) {new_shape = array} : (tensor<1x2x?x5x7x11xf32>) -> tensor + %s = tosa.const_shape { value = dense<[-1, 5, 77]> : tensor<3xindex> } : () -> !tosa.shape<3> + %0 = "tosa.reshape"(%arg0, %s) : (tensor<1x2x?x5x7x11xf32>, !tosa.shape<3>) -> tensor return %0 : tensor } @@ -388,7 +417,8 @@ func.func @test_reshape_6d_down_d2d_auto(%arg0: tensor<1x2x?x5x7x11xf32>) -> ten // CHECK: %[[VAL_0:.*]] = tensor.collapse_shape %[[ARG_0]] {{\[\[}}0, 1, 2], [3], [4, 5]] : tensor<1x2x3x5x7x11xf32> into tensor<6x5x77xf32> // CHECK: return %[[VAL_0]] : tensor<6x5x77xf32> func.func @test_reshape_6d_down_s2s_auto(%arg0: 
tensor<1x2x3x5x7x11xf32>) -> tensor<6x5x77xf32> { - %0 = "tosa.reshape"(%arg0) {new_shape = array} : (tensor<1x2x3x5x7x11xf32>) -> tensor<6x5x77xf32> + %s = tosa.const_shape { value = dense<[6, 5, -1]> : tensor<3xindex> } : () -> !tosa.shape<3> + %0 = "tosa.reshape"(%arg0, %s) : (tensor<1x2x3x5x7x11xf32>, !tosa.shape<3>) -> tensor<6x5x77xf32> return %0 : tensor<6x5x77xf32> } @@ -400,10 +430,13 @@ func.func @test_reshape_6d_down_s2s_auto(%arg0: tensor<1x2x3x5x7x11xf32>) -> ten // // See https://github.com/llvm/llvm-project/pull/91521 for a full description. +// ----- + // CHECK-LABEL: reshape_bug_fix // CHECK: tensor.expand_shape func.func @reshape_bug_fix(%arg0: tensor) -> tensor<1x1x1x?xf32> { - %0 = tosa.reshape %arg0 {new_shape = array} : (tensor) -> tensor<1x1x1x?xf32> + %1 = "tosa.const_shape"() {value = dense<[1, 1, 1, -1]> : tensor<4xindex>} : () -> !tosa.shape<4> + %0 = "tosa.reshape"(%arg0, %1) : (tensor, !tosa.shape<4>) -> tensor<1x1x1x?xf32> return %0 : tensor<1x1x1x?xf32> } @@ -414,21 +447,22 @@ func.func @reshape_bug_fix(%arg0: tensor) -> tensor<1x1x1x?xf32> { // CHECK: %[[VAL_0:.*]] = tensor.collapse_shape %[[ARG_0]] {{\[\[}}0, 1, 2], [3], [4, 5]] : tensor<1x2x3x5x7x11xf32> into tensor<6x5x77xf32> // CHECK: return %[[VAL_0]] : tensor<6x5x77xf32> func.func @test_reshape_6d_down_s2s_explicit(%arg0: tensor<1x2x3x5x7x11xf32>) -> tensor<6x5x77xf32> { - %0 = "tosa.reshape"(%arg0) {new_shape = array} : (tensor<1x2x3x5x7x11xf32>) -> tensor<6x5x77xf32> + %s = tosa.const_shape { value = dense<[6, 5, 77]> : tensor<3xindex> } : () -> !tosa.shape<3> + %0 = "tosa.reshape"(%arg0, %s) : (tensor<1x2x3x5x7x11xf32>, !tosa.shape<3>) -> tensor<6x5x77xf32> return %0 : tensor<6x5x77xf32> } // ----- // CHECK-LABEL: @test_reshape_samerank_unsigned -// CHECK-SAME: (%[[ARG0:.*]]: tensor<3x2xui8>) +// CHECK-SAME: (%[[VAL_0:.*]]: tensor<3x2xui8>) func.func @test_reshape_samerank_unsigned(%arg0: tensor<3x2xui8>) -> tensor<2x3xui8> { - // CHECK-NEXT: %[[CAST1:.*]] = builtin.unrealized_conversion_cast %[[ARG0]] : tensor<3x2xui8> to tensor<3x2xi8> - // CHECK-NEXT: %[[RESHAPE1:.*]] = tensor.collapse_shape %[[CAST1]] {{\[}}[0, 1]] : tensor<3x2xi8> into tensor<6xi8> - // CHECK-NEXT: %[[RESHAPE2:.*]] = tensor.expand_shape %[[RESHAPE1]] {{\[}}[0, 1]] output_shape {{\[}}2, 3] : tensor<6xi8> into tensor<2x3xi8> - // CHECK-NEXT: %[[CAST2:.*]] = builtin.unrealized_conversion_cast %[[RESHAPE2]] : tensor<2x3xi8> to tensor<2x3xui8 - %0 = "tosa.reshape"(%arg0) {new_shape = array} : (tensor<3x2xui8>) -> tensor<2x3xui8> - // CHECK-NEXT: return %[[CAST2]] + // CHECK: %[[CAST1:.*]] = builtin.unrealized_conversion_cast %arg0 : tensor<3x2xui8> to tensor<3x2xi8> + // CHECK: %[[RESHAPE1:.*]] = tensor.collapse_shape %[[CAST1]] {{\[}}[0, 1]] : tensor<3x2xi8> into tensor<6xi8> + // CHECK: %[[RESHAPE2:.*]] = tensor.expand_shape %[[RESHAPE1]] {{\[}}[0, 1]] output_shape {{\[}}2, 3] : tensor<6xi8> into tensor<2x3xi8> + // CHECK: %[[CAST2:.*]] = builtin.unrealized_conversion_cast %[[RESHAPE2]] : tensor<2x3xi8> to tensor<2x3xui8 + %s = tosa.const_shape { value = dense<[2, 3]> : tensor<2xindex> } : () -> !tosa.shape<2> + %0 = "tosa.reshape"(%arg0, %s): (tensor<3x2xui8>, !tosa.shape<2>) -> tensor<2x3xui8> return %0 : tensor<2x3xui8> } diff --git a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm-interface.mlir b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm-interface.mlir index 5252bb25ecab5..d319b9043b4b8 100644 --- a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm-interface.mlir +++ 
b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm-interface.mlir @@ -1,5 +1,17 @@ -// Most of the vector lowering is tested in vector-to-llvm.mlir, this file only for the interface smoke test // RUN: mlir-opt --convert-to-llvm="filter-dialects=vector" --split-input-file %s | FileCheck %s +// RUN: mlir-opt %s -convert-vector-to-llvm -split-input-file | FileCheck %s + +//===========================================================================// +// Basic tests for Vector-to-LLVM conversion +// +// These examples are meant to be convertible to LLVM with: +// * `populateVectorToLLVMConversionPatterns`, +// i.e. no other patterns should be required. +//===========================================================================// + +//===----------------------------------------------------------------------===// +// vector.bitcast +//===----------------------------------------------------------------------===// func.func @bitcast_f32_to_i32_vector_0d(%arg0: vector) -> vector { %0 = vector.bitcast %arg0 : vector to vector @@ -12,3 +24,2391 @@ func.func @bitcast_f32_to_i32_vector_0d(%arg0: vector) -> vector { // CHECK: %[[VEC_I32_1D:.*]] = llvm.bitcast %[[VEC_F32_1D]] : vector<1xf32> to vector<1xi32> // CHECK: %[[VEC_I32_0D:.*]] = builtin.unrealized_conversion_cast %[[VEC_I32_1D]] : vector<1xi32> to vector // CHECK: return %[[VEC_I32_0D]] : vector + +// ----- + +func.func @bitcast_f32_to_i32_vector(%arg0: vector<16xf32>) -> vector<16xi32> { + %0 = vector.bitcast %arg0 : vector<16xf32> to vector<16xi32> + return %0 : vector<16xi32> +} + + +// CHECK-LABEL: @bitcast_f32_to_i32_vector +// CHECK-SAME: %[[ARG_0:.*]]: vector<16xf32> +// CHECK: llvm.bitcast %[[ARG_0]] : vector<16xf32> to vector<16xi32> + +// ----- + +func.func @bitcast_f32_to_i32_vector_scalable(%arg0: vector<[16]xf32>) -> vector<[16]xi32> { + %0 = vector.bitcast %arg0 : vector<[16]xf32> to vector<[16]xi32> + return %0 : vector<[16]xi32> +} + +// CHECK-LABEL: @bitcast_f32_to_i32_vector_scalable +// CHECK-SAME: %[[ARG_0:.*]]: vector<[16]xf32> +// CHECK: llvm.bitcast %[[ARG_0]] : vector<[16]xf32> to vector<[16]xi32> + +// ----- + +func.func @bitcast_i8_to_f32_vector(%arg0: vector<64xi8>) -> vector<16xf32> { + %0 = vector.bitcast %arg0 : vector<64xi8> to vector<16xf32> + return %0 : vector<16xf32> +} + +// CHECK-LABEL: @bitcast_i8_to_f32_vector +// CHECK-SAME: %[[ARG_0:.*]]: vector<64xi8> +// CHECK: llvm.bitcast %[[ARG_0]] : vector<64xi8> to vector<16xf32> + +// ----- + +func.func @bitcast_i8_to_f32_vector_scalable(%arg0: vector<[64]xi8>) -> vector<[16]xf32> { + %0 = vector.bitcast %arg0 : vector<[64]xi8> to vector<[16]xf32> + return %0 : vector<[16]xf32> +} + +// CHECK-LABEL: @bitcast_i8_to_f32_vector_scalable +// CHECK-SAME: %[[ARG_0:.*]]: vector<[64]xi8> +// CHECK: llvm.bitcast %[[ARG_0]] : vector<[64]xi8> to vector<[16]xf32> + +// ----- + +func.func @bitcast_index_to_i8_vector(%arg0: vector<16xindex>) -> vector<128xi8> { + %0 = vector.bitcast %arg0 : vector<16xindex> to vector<128xi8> + return %0 : vector<128xi8> +} + +// CHECK-LABEL: @bitcast_index_to_i8_vector +// CHECK-SAME: %[[ARG_0:.*]]: vector<16xindex> +// CHECK: %[[T0:.*]] = builtin.unrealized_conversion_cast %[[ARG_0]] : vector<16xindex> to vector<16xi64> +// CHECK: llvm.bitcast %[[T0]] : vector<16xi64> to vector<128xi8> + +// ----- + +func.func @bitcast_index_to_i8_vector_scalable(%arg0: vector<[16]xindex>) -> vector<[128]xi8> { + %0 = vector.bitcast %arg0 : vector<[16]xindex> to vector<[128]xi8> + return %0 : vector<[128]xi8> +} + +// CHECK-LABEL: 
@bitcast_index_to_i8_vector_scalable +// CHECK-SAME: %[[ARG_0:.*]]: vector<[16]xindex> +// CHECK: %[[T0:.*]] = builtin.unrealized_conversion_cast %[[ARG_0]] : vector<[16]xindex> to vector<[16]xi64> +// CHECK: llvm.bitcast %[[T0]] : vector<[16]xi64> to vector<[128]xi8> + +// ----- + +//===----------------------------------------------------------------------===// +// vector.broadcast +//===----------------------------------------------------------------------===// + +func.func @broadcast_vec0d_from_vec0d(%arg0: vector<f32>) -> vector<f32> { + %0 = vector.broadcast %arg0 : vector<f32> to vector<f32> + return %0 : vector<f32> +} +// CHECK-LABEL: @broadcast_vec0d_from_vec0d( +// CHECK-SAME: %[[A:.*]]: vector<f32>) +// CHECK: return %[[A]] : vector<f32> + +// ----- + +func.func @broadcast_vec1d_from_vec1d(%arg0: vector<2xf32>) -> vector<2xf32> { + %0 = vector.broadcast %arg0 : vector<2xf32> to vector<2xf32> + return %0 : vector<2xf32> +} +// CHECK-LABEL: @broadcast_vec1d_from_vec1d( +// CHECK-SAME: %[[A:.*]]: vector<2xf32>) +// CHECK: return %[[A]] : vector<2xf32> + +// ----- + +func.func @broadcast_vec1d_from_vec1d_scalable(%arg0: vector<[2]xf32>) -> vector<[2]xf32> { + %0 = vector.broadcast %arg0 : vector<[2]xf32> to vector<[2]xf32> + return %0 : vector<[2]xf32> +} +// CHECK-LABEL: @broadcast_vec1d_from_vec1d_scalable( +// CHECK-SAME: %[[A:.*]]: vector<[2]xf32>) +// CHECK: return %[[A]] : vector<[2]xf32> + +// ----- + +//===----------------------------------------------------------------------===// +// vector.shuffle +//===----------------------------------------------------------------------===// + +func.func @shuffle_0D_direct(%arg0: vector<f32>) -> vector<3xf32> { + %1 = vector.shuffle %arg0, %arg0 [0, 1, 0] : vector<f32>, vector<f32> + return %1 : vector<3xf32> +} +// CHECK-LABEL: @shuffle_0D_direct( +// CHECK-SAME: %[[A:.*]]: vector<f32> +// CHECK: %[[c:.*]] = builtin.unrealized_conversion_cast %[[A]] : vector<f32> to vector<1xf32> +// CHECK: %[[s:.*]] = llvm.shufflevector %[[c]], %[[c]] [0, 1, 0] : vector<1xf32> +// CHECK: return %[[s]] : vector<3xf32> + +// ----- + +func.func @shuffle_1D_direct(%arg0: vector<2xf32>, %arg1: vector<2xf32>) -> vector<2xf32> { + %1 = vector.shuffle %arg0, %arg1 [0, 1] : vector<2xf32>, vector<2xf32> + return %1 : vector<2xf32> +} +// CHECK-LABEL: @shuffle_1D_direct( +// CHECK-SAME: %[[A:.*]]: vector<2xf32>, +// CHECK-SAME: %[[B:.*]]: vector<2xf32>) +// CHECK: return %[[A:.*]]: vector<2xf32> + +// ----- + +func.func @shuffle_1D_index_direct(%arg0: vector<2xindex>, %arg1: vector<2xindex>) -> vector<2xindex> { + %1 = vector.shuffle %arg0, %arg1 [0, 1] : vector<2xindex>, vector<2xindex> + return %1 : vector<2xindex> +} +// CHECK-LABEL: @shuffle_1D_index_direct( +// CHECK-SAME: %[[A:.*]]: vector<2xindex>, +// CHECK-SAME: %[[B:.*]]: vector<2xindex>) +// CHECK: return %[[A:.*]]: vector<2xindex> + +// ----- + +func.func @shuffle_poison_mask(%arg0: vector<2xf32>, %arg1: vector<2xf32>) -> vector<4xf32> { + %1 = vector.shuffle %arg0, %arg1 [0, -1, 3, -1] : vector<2xf32>, vector<2xf32> + return %1 : vector<4xf32> +} +// CHECK-LABEL: @shuffle_poison_mask( +// CHECK-SAME: %[[A:.*]]: vector<2xf32>, %[[B:.*]]: vector<2xf32>) +// CHECK: %[[s:.*]] = llvm.shufflevector %[[A]], %[[B]] [0, -1, 3, -1] : vector<2xf32> + +// ----- + +func.func @shuffle_1D(%arg0: vector<2xf32>, %arg1: vector<3xf32>) -> vector<5xf32> { + %1 = vector.shuffle %arg0, %arg1 [4, 3, 2, 1, 0] : vector<2xf32>, vector<3xf32> + return %1 : vector<5xf32> +} +// CHECK-LABEL: @shuffle_1D( +// CHECK-SAME: %[[A:.*]]: vector<2xf32>, +// CHECK-SAME: %[[B:.*]]: vector<3xf32>)
+// CHECK: %[[U0:.*]] = llvm.mlir.poison : vector<5xf32> +// CHECK: %[[C2:.*]] = llvm.mlir.constant(2 : index) : i64 +// CHECK: %[[E1:.*]] = llvm.extractelement %[[B]][%[[C2]] : i64] : vector<3xf32> +// CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : index) : i64 +// CHECK: %[[I1:.*]] = llvm.insertelement %[[E1]], %[[U0]][%[[C0]] : i64] : vector<5xf32> +// CHECK: %[[C1:.*]] = llvm.mlir.constant(1 : index) : i64 +// CHECK: %[[E2:.*]] = llvm.extractelement %[[B]][%[[C1]] : i64] : vector<3xf32> +// CHECK: %[[C1:.*]] = llvm.mlir.constant(1 : index) : i64 +// CHECK: %[[I2:.*]] = llvm.insertelement %[[E2]], %[[I1]][%[[C1]] : i64] : vector<5xf32> +// CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : index) : i64 +// CHECK: %[[E3:.*]] = llvm.extractelement %[[B]][%[[C0]] : i64] : vector<3xf32> +// CHECK: %[[C2:.*]] = llvm.mlir.constant(2 : index) : i64 +// CHECK: %[[I3:.*]] = llvm.insertelement %[[E3]], %[[I2]][%[[C2]] : i64] : vector<5xf32> +// CHECK: %[[C1:.*]] = llvm.mlir.constant(1 : index) : i64 +// CHECK: %[[E4:.*]] = llvm.extractelement %[[A]][%[[C1]] : i64] : vector<2xf32> +// CHECK: %[[C3:.*]] = llvm.mlir.constant(3 : index) : i64 +// CHECK: %[[I4:.*]] = llvm.insertelement %[[E4]], %[[I3]][%[[C3]] : i64] : vector<5xf32> +// CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : index) : i64 +// CHECK: %[[E5:.*]] = llvm.extractelement %[[A]][%[[C0]] : i64] : vector<2xf32> +// CHECK: %[[C4:.*]] = llvm.mlir.constant(4 : index) : i64 +// CHECK: %[[I5:.*]] = llvm.insertelement %[[E5]], %[[I4]][%[[C4]] : i64] : vector<5xf32> +// CHECK: return %[[I5]] : vector<5xf32> + +// ----- + +func.func @shuffle_2D(%a: vector<1x4xf32>, %b: vector<2x4xf32>) -> vector<3x4xf32> { + %1 = vector.shuffle %a, %b[1, 0, 2] : vector<1x4xf32>, vector<2x4xf32> + return %1 : vector<3x4xf32> +} +// CHECK-LABEL: @shuffle_2D( +// CHECK-SAME: %[[A:.*]]: vector<1x4xf32>, +// CHECK-SAME: %[[B:.*]]: vector<2x4xf32>) +// CHECK-DAG: %[[VAL_0:.*]] = builtin.unrealized_conversion_cast %[[A]] : vector<1x4xf32> to !llvm.array<1 x vector<4xf32>> +// CHECK-DAG: %[[VAL_1:.*]] = builtin.unrealized_conversion_cast %[[B]] : vector<2x4xf32> to !llvm.array<2 x vector<4xf32>> +// CHECK: %[[U0:.*]] = llvm.mlir.poison : !llvm.array<3 x vector<4xf32>> +// CHECK: %[[E1:.*]] = llvm.extractvalue %[[VAL_1]][0] : !llvm.array<2 x vector<4xf32>> +// CHECK: %[[I1:.*]] = llvm.insertvalue %[[E1]], %[[U0]][0] : !llvm.array<3 x vector<4xf32>> +// CHECK: %[[E2:.*]] = llvm.extractvalue %[[VAL_0]][0] : !llvm.array<1 x vector<4xf32>> +// CHECK: %[[I2:.*]] = llvm.insertvalue %[[E2]], %[[I1]][1] : !llvm.array<3 x vector<4xf32>> +// CHECK: %[[E3:.*]] = llvm.extractvalue %[[VAL_1]][1] : !llvm.array<2 x vector<4xf32>> +// CHECK: %[[I3:.*]] = llvm.insertvalue %[[E3]], %[[I2]][2] : !llvm.array<3 x vector<4xf32>> +// CHECK: %[[VAL_3:.*]] = builtin.unrealized_conversion_cast %[[I3]] : !llvm.array<3 x vector<4xf32>> to vector<3x4xf32> +// CHECK: return %[[VAL_3]] : vector<3x4xf32> + +// ----- + +//===----------------------------------------------------------------------===// +// vector.extractelement +//===----------------------------------------------------------------------===// + +func.func @extractelement_from_vec_0d_f32(%arg0: vector) -> f32 { + %1 = vector.extractelement %arg0[] : vector + return %1 : f32 +} +// CHECK-LABEL: @extractelement_from_vec_0d_f32 +// CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : index) : i64 +// CHECK: llvm.extractelement %{{.*}}[%[[C0]] : {{.*}}] : vector<1xf32> + +// ----- + +func.func @extractelement_from_vec_1d_f32_idx_as_i32(%arg0: vector<16xf32>) -> f32 { + 
%0 = arith.constant 15 : i32 + %1 = vector.extractelement %arg0[%0 : i32]: vector<16xf32> + return %1 : f32 +} +// CHECK-LABEL: @extractelement_from_vec_1d_f32_idx_as_i32( +// CHECK-SAME: %[[A:.*]]: vector<16xf32>) +// CHECK: %[[C:.*]] = arith.constant 15 : i32 +// CHECK: %[[X:.*]] = llvm.extractelement %[[A]][%[[C]] : i32] : vector<16xf32> +// CHECK: return %[[X]] : f32 + +// ----- + +func.func @extractelement_from_vec_1d_f32_idx_as_i32_scalable(%arg0: vector<[16]xf32>) -> f32 { + %0 = arith.constant 15 : i32 + %1 = vector.extractelement %arg0[%0 : i32]: vector<[16]xf32> + return %1 : f32 +} +// CHECK-LABEL: @extractelement_from_vec_1d_f32_idx_as_i32_scalable( +// CHECK-SAME: %[[A:.*]]: vector<[16]xf32>) +// CHECK: %[[C:.*]] = arith.constant 15 : i32 +// CHECK: %[[X:.*]] = llvm.extractelement %[[A]][%[[C]] : i32] : vector<[16]xf32> +// CHECK: return %[[X]] : f32 + +// ----- +func.func @extractelement_from_vec_1d_f32_idx_as_index(%arg0: vector<16xf32>) -> f32 { + %0 = arith.constant 15 : index + %1 = vector.extractelement %arg0[%0 : index]: vector<16xf32> + return %1 : f32 +} +// CHECK-LABEL: @extractelement_from_vec_1d_f32_idx_as_index( +// CHECK-SAME: %[[A:.*]]: vector<16xf32>) +// CHECK: %[[C:.*]] = arith.constant 15 : index +// CHECK: %[[I:.*]] = builtin.unrealized_conversion_cast %[[C]] : index to i64 +// CHECK: %[[X:.*]] = llvm.extractelement %[[A]][%[[I]] : i64] : vector<16xf32> +// CHECK: return %[[X]] : f32 + +// ----- + +func.func @extractelement_from_vec_1d_f32_idx_as_index_scalable(%arg0: vector<[16]xf32>) -> f32 { + %0 = arith.constant 15 : index + %1 = vector.extractelement %arg0[%0 : index]: vector<[16]xf32> + return %1 : f32 +} +// CHECK-LABEL: @extractelement_from_vec_1d_f32_idx_as_index_scalable( +// CHECK-SAME: %[[A:.*]]: vector<[16]xf32>) +// CHECK: %[[C:.*]] = arith.constant 15 : index +// CHECK: %[[I:.*]] = builtin.unrealized_conversion_cast %[[C]] : index to i64 +// CHECK: %[[X:.*]] = llvm.extractelement %[[A]][%[[I]] : i64] : vector<[16]xf32> +// CHECK: return %[[X]] : f32 + +// ----- + +//===----------------------------------------------------------------------===// +// vector.extract +//===----------------------------------------------------------------------===// + +func.func @extract_scalar_from_vec_1d_f32(%arg0: vector<16xf32>) -> f32 { + %0 = vector.extract %arg0[15]: f32 from vector<16xf32> + return %0 : f32 +} +// CHECK-LABEL: @extract_scalar_from_vec_1d_f32 +// CHECK: llvm.mlir.constant(15 : i64) : i64 +// CHECK: llvm.extractelement {{.*}}[{{.*}} : i64] : vector<16xf32> +// CHECK: return {{.*}} : f32 + + +// ----- + +func.func @extract_scalar_from_vec_1d_f32_scalable(%arg0: vector<[16]xf32>) -> f32 { + %0 = vector.extract %arg0[15]: f32 from vector<[16]xf32> + return %0 : f32 +} +// CHECK-LABEL: @extract_scalar_from_vec_1d_f32_scalable +// CHECK: llvm.mlir.constant(15 : i64) : i64 +// CHECK: llvm.extractelement {{.*}}[{{.*}} : i64] : vector<[16]xf32> +// CHECK: return {{.*}} : f32 + +// ----- + +func.func @extract_vec_1e_from_vec_1d_f32(%arg0: vector<16xf32>) -> vector<1xf32> { + %0 = vector.extract %arg0[15]: vector<1xf32> from vector<16xf32> + return %0 : vector<1xf32> +} +// CHECK-LABEL: @extract_vec_1e_from_vec_1d_f32( +// CHECK-SAME: %[[A:.*]]: vector<16xf32>) +// CHECK: %[[T0:.*]] = llvm.mlir.constant(15 : i64) : i64 +// CHECK: %[[T1:.*]] = llvm.extractelement %[[A]][%[[T0]] : i64] : vector<16xf32> +// CHECK: %[[T2:.*]] = builtin.unrealized_conversion_cast %[[T1]] : f32 to vector<1xf32> +// CHECK: return %[[T2]] : vector<1xf32> + +// ----- + 
+func.func @extract_vec_1e_from_vec_1d_f32_scalable(%arg0: vector<[16]xf32>) -> vector<1xf32> { + %0 = vector.extract %arg0[15]: vector<1xf32> from vector<[16]xf32> + return %0 : vector<1xf32> +} +// CHECK-LABEL: @extract_vec_1e_from_vec_1d_f32_scalable( +// CHECK-SAME: %[[A:.*]]: vector<[16]xf32>) +// CHECK: %[[T0:.*]] = llvm.mlir.constant(15 : i64) : i64 +// CHECK: %[[T1:.*]] = llvm.extractelement %[[A]][%[[T0]] : i64] : vector<[16]xf32> +// CHECK: %[[T2:.*]] = builtin.unrealized_conversion_cast %[[T1]] : f32 to vector<1xf32> +// CHECK: return %[[T2]] : vector<1xf32> + +// ----- + +func.func @extract_scalar_from_vec_1d_index(%arg0: vector<16xindex>) -> index { + %0 = vector.extract %arg0[15]: index from vector<16xindex> + return %0 : index +} +// CHECK-LABEL: @extract_scalar_from_vec_1d_index( +// CHECK-SAME: %[[A:.*]]: vector<16xindex>) +// CHECK: %[[T0:.*]] = builtin.unrealized_conversion_cast %[[A]] : vector<16xindex> to vector<16xi64> +// CHECK: %[[T1:.*]] = llvm.mlir.constant(15 : i64) : i64 +// CHECK: %[[T2:.*]] = llvm.extractelement %[[T0]][%[[T1]] : i64] : vector<16xi64> +// CHECK: %[[T3:.*]] = builtin.unrealized_conversion_cast %[[T2]] : i64 to index +// CHECK: return %[[T3]] : index + +// ----- + +func.func @extract_scalar_from_vec_1d_index_scalable(%arg0: vector<[16]xindex>) -> index { + %0 = vector.extract %arg0[15]: index from vector<[16]xindex> + return %0 : index +} +// CHECK-LABEL: @extract_scalar_from_vec_1d_index_scalable( +// CHECK-SAME: %[[A:.*]]: vector<[16]xindex>) +// CHECK: %[[T0:.*]] = builtin.unrealized_conversion_cast %[[A]] : vector<[16]xindex> to vector<[16]xi64> +// CHECK: %[[T1:.*]] = llvm.mlir.constant(15 : i64) : i64 +// CHECK: %[[T2:.*]] = llvm.extractelement %[[T0]][%[[T1]] : i64] : vector<[16]xi64> +// CHECK: %[[T3:.*]] = builtin.unrealized_conversion_cast %[[T2]] : i64 to index +// CHECK: return %[[T3]] : index + +// ----- + +func.func @extract_vec_2d_from_vec_3d_f32(%arg0: vector<4x3x16xf32>) -> vector<3x16xf32> { + %0 = vector.extract %arg0[0]: vector<3x16xf32> from vector<4x3x16xf32> + return %0 : vector<3x16xf32> +} +// CHECK-LABEL: @extract_vec_2d_from_vec_3d_f32 +// CHECK: llvm.extractvalue {{.*}}[0] : !llvm.array<4 x array<3 x vector<16xf32>>> +// CHECK: return {{.*}} : vector<3x16xf32> + + +// ----- + +func.func @extract_vec_2d_from_vec_3d_f32_scalable(%arg0: vector<4x3x[16]xf32>) -> vector<3x[16]xf32> { + %0 = vector.extract %arg0[0]: vector<3x[16]xf32> from vector<4x3x[16]xf32> + return %0 : vector<3x[16]xf32> +} +// CHECK-LABEL: @extract_vec_2d_from_vec_3d_f32_scalable +// CHECK: llvm.extractvalue {{.*}}[0] : !llvm.array<4 x array<3 x vector<[16]xf32>>> +// CHECK: return {{.*}} : vector<3x[16]xf32> + +// ----- + +func.func @extract_vec_1d_from_vec_3d_f32(%arg0: vector<4x3x16xf32>) -> vector<16xf32> { + %0 = vector.extract %arg0[0, 0]: vector<16xf32> from vector<4x3x16xf32> + return %0 : vector<16xf32> +} +// CHECK-LABEL: @extract_vec_1d_from_vec_3d_f32 +// CHECK: llvm.extractvalue {{.*}}[0, 0] : !llvm.array<4 x array<3 x vector<16xf32>>> +// CHECK: return {{.*}} : vector<16xf32> + +// ----- + +func.func @extract_vec_1d_from_vec_3d_f32_scalable(%arg0: vector<4x3x[16]xf32>) -> vector<[16]xf32> { + %0 = vector.extract %arg0[0, 0]: vector<[16]xf32> from vector<4x3x[16]xf32> + return %0 : vector<[16]xf32> +} +// CHECK-LABEL: @extract_vec_1d_from_vec_3d_f32_scalable +// CHECK: llvm.extractvalue {{.*}}[0, 0] : !llvm.array<4 x array<3 x vector<[16]xf32>>> +// CHECK: return {{.*}} : vector<[16]xf32> + +// ----- + +func.func 
@extract_scalar_from_vec_3d_f32(%arg0: vector<4x3x16xf32>) -> f32 { + %0 = vector.extract %arg0[0, 0, 0]: f32 from vector<4x3x16xf32> + return %0 : f32 +} +// CHECK-LABEL: @extract_scalar_from_vec_3d_f32 +// CHECK: llvm.extractvalue {{.*}}[0, 0] : !llvm.array<4 x array<3 x vector<16xf32>>> +// CHECK: llvm.mlir.constant(0 : i64) : i64 +// CHECK: llvm.extractelement {{.*}}[{{.*}} : i64] : vector<16xf32> +// CHECK: return {{.*}} : f32 + +// ----- + +func.func @extract_scalar_from_vec_3d_f32_scalable(%arg0: vector<4x3x[16]xf32>) -> f32 { + %0 = vector.extract %arg0[0, 0, 0]: f32 from vector<4x3x[16]xf32> + return %0 : f32 +} +// CHECK-LABEL: @extract_scalar_from_vec_3d_f32_scalable +// CHECK: llvm.extractvalue {{.*}}[0, 0] : !llvm.array<4 x array<3 x vector<[16]xf32>>> +// CHECK: llvm.mlir.constant(0 : i64) : i64 +// CHECK: llvm.extractelement {{.*}}[{{.*}} : i64] : vector<[16]xf32> +// CHECK: return {{.*}} : f32 + +// ----- + +func.func @extract_scalar_from_vec_1d_f32_dynamic_idx(%arg0: vector<16xf32>, %arg1: index) -> f32 { + %0 = vector.extract %arg0[%arg1]: f32 from vector<16xf32> + return %0 : f32 +} +// CHECK-LABEL: @extract_scalar_from_vec_1d_f32_dynamic_idx +// CHECK-SAME: %[[VEC:.+]]: vector<16xf32>, %[[INDEX:.+]]: index +// CHECK: %[[UC:.+]] = builtin.unrealized_conversion_cast %[[INDEX]] : index to i64 +// CHECK: llvm.extractelement %[[VEC]][%[[UC]] : i64] : vector<16xf32> + +// ----- + +func.func @extract_scalar_from_vec_1d_f32_dynamic_idx_scalable(%arg0: vector<[16]xf32>, %arg1: index) -> f32 { + %0 = vector.extract %arg0[%arg1]: f32 from vector<[16]xf32> + return %0 : f32 +} +// CHECK-LABEL: @extract_scalar_from_vec_1d_f32_dynamic_idx_scalable +// CHECK-SAME: %[[VEC:.+]]: vector<[16]xf32>, %[[INDEX:.+]]: index +// CHECK: %[[UC:.+]] = builtin.unrealized_conversion_cast %[[INDEX]] : index to i64 +// CHECK: llvm.extractelement %[[VEC]][%[[UC]] : i64] : vector<[16]xf32> + +// ----- + +func.func @extract_scalar_from_vec_2d_f32_inner_dynamic_idx(%arg0: vector<1x16xf32>, %arg1: index) -> f32 { + %0 = vector.extract %arg0[0, %arg1]: f32 from vector<1x16xf32> + return %0 : f32 +} + +// Lowering supports extracting from multi-dim vectors with dynamic indices +// provided that only the trailing index is dynamic. + +// CHECK-LABEL: @extract_scalar_from_vec_2d_f32_inner_dynamic_idx( +// CHECK: llvm.extractvalue +// CHECK: llvm.extractelement + +func.func @extract_scalar_from_vec_2d_f32_inner_dynamic_idx_scalable(%arg0: vector<1x[16]xf32>, %arg1: index) -> f32 { + %0 = vector.extract %arg0[0, %arg1]: f32 from vector<1x[16]xf32> + return %0 : f32 +} + +// Lowering supports extracting from multi-dim vectors with dynamic indices +// provided that only the trailing index is dynamic. + +// CHECK-LABEL: @extract_scalar_from_vec_2d_f32_inner_dynamic_idx_scalable( +// CHECK: llvm.extractvalue +// CHECK: llvm.extractelement + +// ----- + +func.func @extract_scalar_from_vec_2d_f32_outer_dynamic_idx(%arg0: vector<1x16xf32>, %arg1: index) -> f32 { + %0 = vector.extract %arg0[%arg1, 0]: f32 from vector<1x16xf32> + return %0 : f32 +} + +// Lowering supports extracting from multi-dim vectors with dynamic indices +// provided that only the trailing index is dynamic. 
+ +// CHECK-LABEL: @extract_scalar_from_vec_2d_f32_outer_dynamic_idx( +// CHECK: vector.extract + +func.func @extract_scalar_from_vec_2d_f32_outer_dynamic_idx_scalable(%arg0: vector<1x[16]xf32>, %arg1: index) -> f32 { + %0 = vector.extract %arg0[%arg1, 0]: f32 from vector<1x[16]xf32> + return %0 : f32 +} + +// Lowering does not support extracting from multi-dim vectors with non trailing +// dynamic index, but it shouldn't crash. + +// CHECK-LABEL: @extract_scalar_from_vec_2d_f32_outer_dynamic_idx_scalable( +// CHECK: vector.extract + +// ----- + +func.func @extract_scalar_from_vec_0d_index(%arg0: vector) -> index { + %0 = vector.extract %arg0[]: index from vector + return %0 : index +} +// CHECK-LABEL: @extract_scalar_from_vec_0d_index( +// CHECK-SAME: %[[A:.*]]: vector) +// CHECK: %[[T0:.*]] = builtin.unrealized_conversion_cast %[[A]] : vector to vector<1xi64> +// CHECK: %[[T1:.*]] = llvm.mlir.constant(0 : i64) : i64 +// CHECK: %[[T2:.*]] = llvm.extractelement %[[T0]][%[[T1]] : i64] : vector<1xi64> +// CHECK: %[[T3:.*]] = builtin.unrealized_conversion_cast %[[T2]] : i64 to index +// CHECK: return %[[T3]] : index + +// ----- + +//===----------------------------------------------------------------------===// +// vector.insertelement +//===----------------------------------------------------------------------===// + +func.func @insertelement_into_vec_0d_f32(%arg0: f32, %arg1: vector) -> vector { + %1 = vector.insertelement %arg0, %arg1[] : vector + return %1 : vector +} +// CHECK-LABEL: @insertelement_into_vec_0d_f32 +// CHECK-SAME: %[[A:.*]]: f32, +// CHECK: %[[B:.*]] = builtin.unrealized_conversion_cast %{{.*}} : +// CHECK: vector to vector<1xf32> +// CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : index) : i64 +// CHECK: %[[X:.*]] = llvm.insertelement %[[A]], %[[B]][%[[C0]] : {{.*}}] : vector<1xf32> + +// ----- + +func.func @insertelement_into_vec_1d_f32_idx_as_i32(%arg0: f32, %arg1: vector<4xf32>) -> vector<4xf32> { + %0 = arith.constant 3 : i32 + %1 = vector.insertelement %arg0, %arg1[%0 : i32] : vector<4xf32> + return %1 : vector<4xf32> +} +// CHECK-LABEL: @insertelement_into_vec_1d_f32_idx_as_i32( +// CHECK-SAME: %[[A:.*]]: f32, +// CHECK-SAME: %[[B:.*]]: vector<4xf32>) +// CHECK: %[[C:.*]] = arith.constant 3 : i32 +// CHECK: %[[X:.*]] = llvm.insertelement %[[A]], %[[B]][%[[C]] : i32] : vector<4xf32> +// CHECK: return %[[X]] : vector<4xf32> + +// ----- + +func.func @insertelement_into_vec_1d_f32_idx_as_i32_scalable(%arg0: f32, %arg1: vector<[4]xf32>) -> vector<[4]xf32> { + %0 = arith.constant 3 : i32 + %1 = vector.insertelement %arg0, %arg1[%0 : i32] : vector<[4]xf32> + return %1 : vector<[4]xf32> +} +// CHECK-LABEL: @insertelement_into_vec_1d_f32_idx_as_i32_scalable( +// CHECK-SAME: %[[A:.*]]: f32, +// CHECK-SAME: %[[B:.*]]: vector<[4]xf32>) +// CHECK: %[[C:.*]] = arith.constant 3 : i32 +// CHECK: %[[X:.*]] = llvm.insertelement %[[A]], %[[B]][%[[C]] : i32] : vector<[4]xf32> +// CHECK: return %[[X]] : vector<[4]xf32> + +// ----- + +func.func @insertelement_into_vec_1d_f32_scalable_idx_as_index(%arg0: f32, %arg1: vector<4xf32>) -> vector<4xf32> { + %0 = arith.constant 3 : index + %1 = vector.insertelement %arg0, %arg1[%0 : index] : vector<4xf32> + return %1 : vector<4xf32> +} +// CHECK-LABEL: @insertelement_into_vec_1d_f32_scalable_idx_as_index( +// CHECK-SAME: %[[A:.*]]: f32, +// CHECK-SAME: %[[B:.*]]: vector<4xf32>) +// CHECK: %[[C:.*]] = arith.constant 3 : index +// CHECK: %[[I:.*]] = builtin.unrealized_conversion_cast %[[C]] : index to i64 +// CHECK: %[[X:.*]] = llvm.insertelement 
%[[A]], %[[B]][%[[I]] : i64] : vector<4xf32> +// CHECK: return %[[X]] : vector<4xf32> + +// ----- + +func.func @insertelement_into_vec_1d_f32_scalable_idx_as_index_scalable(%arg0: f32, %arg1: vector<[4]xf32>) -> vector<[4]xf32> { + %0 = arith.constant 3 : index + %1 = vector.insertelement %arg0, %arg1[%0 : index] : vector<[4]xf32> + return %1 : vector<[4]xf32> +} +// CHECK-LABEL: @insertelement_into_vec_1d_f32_scalable_idx_as_index_scalable( +// CHECK-SAME: %[[A:.*]]: f32, +// CHECK-SAME: %[[B:.*]]: vector<[4]xf32>) +// CHECK: %[[C:.*]] = arith.constant 3 : index +// CHECK: %[[I:.*]] = builtin.unrealized_conversion_cast %[[C]] : index to i64 +// CHECK: %[[X:.*]] = llvm.insertelement %[[A]], %[[B]][%[[I]] : i64] : vector<[4]xf32> +// CHECK: return %[[X]] : vector<[4]xf32> + +// ----- + +//===----------------------------------------------------------------------===// +// vector.insert +//===----------------------------------------------------------------------===// + +func.func @insert_scalar_into_vec_1d_f32(%arg0: f32, %arg1: vector<4xf32>) -> vector<4xf32> { + %0 = vector.insert %arg0, %arg1[3] : f32 into vector<4xf32> + return %0 : vector<4xf32> +} +// CHECK-LABEL: @insert_scalar_into_vec_1d_f32 +// CHECK: llvm.mlir.constant(3 : i64) : i64 +// CHECK: llvm.insertelement {{.*}}, {{.*}}[{{.*}} : i64] : vector<4xf32> +// CHECK: return {{.*}} : vector<4xf32> + +// ----- + +func.func @insert_scalar_into_vec_1d_f32_scalable(%arg0: f32, %arg1: vector<[4]xf32>) -> vector<[4]xf32> { + %0 = vector.insert %arg0, %arg1[3] : f32 into vector<[4]xf32> + return %0 : vector<[4]xf32> +} +// CHECK-LABEL: @insert_scalar_into_vec_1d_f32_scalable +// CHECK: llvm.mlir.constant(3 : i64) : i64 +// CHECK: llvm.insertelement {{.*}}, {{.*}}[{{.*}} : i64] : vector<[4]xf32> +// CHECK: return {{.*}} : vector<[4]xf32> + +// ----- + +func.func @insert_scalar_into_vec_1d_index(%arg0: index, %arg1: vector<4xindex>) -> vector<4xindex> { + %0 = vector.insert %arg0, %arg1[3] : index into vector<4xindex> + return %0 : vector<4xindex> +} +// CHECK-LABEL: @insert_scalar_into_vec_1d_index( +// CHECK-SAME: %[[A:.*]]: index, +// CHECK-SAME: %[[B:.*]]: vector<4xindex>) +// CHECK-DAG: %[[T0:.*]] = builtin.unrealized_conversion_cast %[[A]] : index to i64 +// CHECK-DAG: %[[T1:.*]] = builtin.unrealized_conversion_cast %[[B]] : vector<4xindex> to vector<4xi64> +// CHECK: %[[T3:.*]] = llvm.mlir.constant(3 : i64) : i64 +// CHECK: %[[T4:.*]] = llvm.insertelement %[[T0]], %[[T1]][%[[T3]] : i64] : vector<4xi64> +// CHECK: %[[T5:.*]] = builtin.unrealized_conversion_cast %[[T4]] : vector<4xi64> to vector<4xindex> +// CHECK: return %[[T5]] : vector<4xindex> + +// ----- + +func.func @insert_scalar_into_vec_1d_index_scalable(%arg0: index, %arg1: vector<[4]xindex>) -> vector<[4]xindex> { + %0 = vector.insert %arg0, %arg1[3] : index into vector<[4]xindex> + return %0 : vector<[4]xindex> +} +// CHECK-LABEL: @insert_scalar_into_vec_1d_index_scalable( +// CHECK-SAME: %[[A:.*]]: index, +// CHECK-SAME: %[[B:.*]]: vector<[4]xindex>) +// CHECK-DAG: %[[T0:.*]] = builtin.unrealized_conversion_cast %[[A]] : index to i64 +// CHECK-DAG: %[[T1:.*]] = builtin.unrealized_conversion_cast %[[B]] : vector<[4]xindex> to vector<[4]xi64> +// CHECK: %[[T3:.*]] = llvm.mlir.constant(3 : i64) : i64 +// CHECK: %[[T4:.*]] = llvm.insertelement %[[T0]], %[[T1]][%[[T3]] : i64] : vector<[4]xi64> +// CHECK: %[[T5:.*]] = builtin.unrealized_conversion_cast %[[T4]] : vector<[4]xi64> to vector<[4]xindex> +// CHECK: return %[[T5]] : vector<[4]xindex> + +// ----- + +func.func 
@insert_vec_2d_into_vec_3d_f32(%arg0: vector<8x16xf32>, %arg1: vector<4x8x16xf32>) -> vector<4x8x16xf32> { + %0 = vector.insert %arg0, %arg1[3] : vector<8x16xf32> into vector<4x8x16xf32> + return %0 : vector<4x8x16xf32> +} +// CHECK-LABEL: @insert_vec_2d_into_vec_3d_f32 +// CHECK: llvm.insertvalue {{.*}}, {{.*}}[3] : !llvm.array<4 x array<8 x vector<16xf32>>> +// CHECK: return {{.*}} : vector<4x8x16xf32> + +// ----- + +func.func @insert_vec_2d_into_vec_3d_f32_scalable(%arg0: vector<8x[16]xf32>, %arg1: vector<4x8x[16]xf32>) -> vector<4x8x[16]xf32> { + %0 = vector.insert %arg0, %arg1[3] : vector<8x[16]xf32> into vector<4x8x[16]xf32> + return %0 : vector<4x8x[16]xf32> +} +// CHECK-LABEL: @insert_vec_2d_into_vec_3d_f32_scalable +// CHECK: llvm.insertvalue {{.*}}, {{.*}}[3] : !llvm.array<4 x array<8 x vector<[16]xf32>>> +// CHECK: return {{.*}} : vector<4x8x[16]xf32> + +// ----- + +func.func @insert_vec_1d_into_vec_3d_f32(%arg0: vector<16xf32>, %arg1: vector<4x8x16xf32>) -> vector<4x8x16xf32> { + %0 = vector.insert %arg0, %arg1[3, 7] : vector<16xf32> into vector<4x8x16xf32> + return %0 : vector<4x8x16xf32> +} +// CHECK-LABEL: @insert_vec_1d_into_vec_3d_f32 +// CHECK: llvm.insertvalue {{.*}}, {{.*}}[3, 7] : !llvm.array<4 x array<8 x vector<16xf32>>> +// CHECK: return {{.*}} : vector<4x8x16xf32> + +// ----- + +func.func @insert_vec_1d_into_vec_3d_f32_scalable(%arg0: vector<[16]xf32>, %arg1: vector<4x8x[16]xf32>) -> vector<4x8x[16]xf32> { + %0 = vector.insert %arg0, %arg1[3, 7] : vector<[16]xf32> into vector<4x8x[16]xf32> + return %0 : vector<4x8x[16]xf32> +} +// CHECK-LABEL: @insert_vec_1d_into_vec_3d_f32_scalable +// CHECK: llvm.insertvalue {{.*}}, {{.*}}[3, 7] : !llvm.array<4 x array<8 x vector<[16]xf32>>> +// CHECK: return {{.*}} : vector<4x8x[16]xf32> + +// ----- + +func.func @insert_scalar_into_vec_3d_f32(%arg0: f32, %arg1: vector<4x8x16xf32>) -> vector<4x8x16xf32> { + %0 = vector.insert %arg0, %arg1[3, 7, 15] : f32 into vector<4x8x16xf32> + return %0 : vector<4x8x16xf32> +} +// CHECK-LABEL: @insert_scalar_into_vec_3d_f32 +// CHECK: llvm.extractvalue {{.*}}[3, 7] : !llvm.array<4 x array<8 x vector<16xf32>>> +// CHECK: llvm.mlir.constant(15 : i64) : i64 +// CHECK: llvm.insertelement {{.*}}, {{.*}}[{{.*}} : i64] : vector<16xf32> +// CHECK: llvm.insertvalue {{.*}}, {{.*}}[3, 7] : !llvm.array<4 x array<8 x vector<16xf32>>> +// CHECK: return {{.*}} : vector<4x8x16xf32> + +// ----- + +func.func @insert_scalar_into_vec_3d_f32_scalable(%arg0: f32, %arg1: vector<4x8x[16]xf32>) -> vector<4x8x[16]xf32> { + %0 = vector.insert %arg0, %arg1[3, 7, 15] : f32 into vector<4x8x[16]xf32> + return %0 : vector<4x8x[16]xf32> +} +// CHECK-LABEL: @insert_scalar_into_vec_3d_f32_scalable +// CHECK: llvm.extractvalue {{.*}}[3, 7] : !llvm.array<4 x array<8 x vector<[16]xf32>>> +// CHECK: llvm.mlir.constant(15 : i64) : i64 +// CHECK: llvm.insertelement {{.*}}, {{.*}}[{{.*}} : i64] : vector<[16]xf32> +// CHECK: llvm.insertvalue {{.*}}, {{.*}}[3, 7] : !llvm.array<4 x array<8 x vector<[16]xf32>>> +// CHECK: return {{.*}} : vector<4x8x[16]xf32> + +// ----- + +func.func @insert_scalar_into_vec_1d_f32_dynamic_idx(%arg0: vector<16xf32>, %arg1: f32, %arg2: index) + -> vector<16xf32> { + %0 = vector.insert %arg1, %arg0[%arg2]: f32 into vector<16xf32> + return %0 : vector<16xf32> +} + +// CHECK-LABEL: @insert_scalar_into_vec_1d_f32_dynamic_idx +// CHECK-SAME: %[[DST:.+]]: vector<16xf32>, %[[SRC:.+]]: f32, %[[INDEX:.+]]: index +// CHECK: %[[UC:.+]] = builtin.unrealized_conversion_cast %[[INDEX]] : index to i64 +// CHECK: 
llvm.insertelement %[[SRC]], %[[DST]][%[[UC]] : i64] : vector<16xf32> + +// ----- + +func.func @insert_scalar_into_vec_1d_f32_dynamic_idx_scalable(%arg0: vector<[16]xf32>, %arg1: f32, %arg2: index) +  -> vector<[16]xf32> { + %0 = vector.insert %arg1, %arg0[%arg2]: f32 into vector<[16]xf32> + return %0 : vector<[16]xf32> +} + +// CHECK-LABEL: @insert_scalar_into_vec_1d_f32_dynamic_idx_scalable +// CHECK-SAME: %[[DST:.+]]: vector<[16]xf32>, %[[SRC:.+]]: f32, %[[INDEX:.+]]: index +// CHECK: %[[UC:.+]] = builtin.unrealized_conversion_cast %[[INDEX]] : index to i64 +// CHECK: llvm.insertelement %[[SRC]], %[[DST]][%[[UC]] : i64] : vector<[16]xf32> + +// ----- + +func.func @insert_scalar_into_vec_2d_f32_dynamic_idx(%arg0: vector<1x16xf32>, %arg1: f32, %idx: index) +  -> vector<1x16xf32> { + %0 = vector.insert %arg1, %arg0[0, %idx]: f32 into vector<1x16xf32> + return %0 : vector<1x16xf32> +} + +// Multi-dim vectors are not supported but this test shouldn't crash. + +// CHECK-LABEL: @insert_scalar_into_vec_2d_f32_dynamic_idx( +// CHECK: vector.insert + +// ----- + +func.func @insert_scalar_into_vec_2d_f32_dynamic_idx_scalable(%arg0: vector<1x[16]xf32>, %arg1: f32, %idx: index) +  -> vector<1x[16]xf32> { + %0 = vector.insert %arg1, %arg0[0, %idx]: f32 into vector<1x[16]xf32> + return %0 : vector<1x[16]xf32> +} + +// Multi-dim vectors are not supported but this test shouldn't crash. + +// CHECK-LABEL: @insert_scalar_into_vec_2d_f32_dynamic_idx_scalable( +// CHECK: vector.insert + +// ----- + +//===----------------------------------------------------------------------===// +// vector.type_cast +// +// TODO: Add tests for vector.type_cast that would cover scalable vectors +//===----------------------------------------------------------------------===// + +func.func @type_cast_f32(%arg0: memref<8x8x8xf32>) -> memref<vector<8x8x8xf32>> { + %0 = vector.type_cast %arg0: memref<8x8x8xf32> to memref<vector<8x8x8xf32>> + return %0 : memref<vector<8x8x8xf32>> +} +// CHECK-LABEL: @type_cast_f32 +// CHECK: llvm.mlir.poison : !llvm.struct<(ptr, ptr, i64)> +// CHECK: %[[allocated:.*]] = llvm.extractvalue {{.*}}[0] : !llvm.struct<(ptr, ptr, i64, array<3 x i64>, array<3 x i64>)> +// CHECK: llvm.insertvalue %[[allocated]], {{.*}}[0] : !llvm.struct<(ptr, ptr, i64)> +// CHECK: %[[aligned:.*]] = llvm.extractvalue {{.*}}[1] : !llvm.struct<(ptr, ptr, i64, array<3 x i64>, array<3 x i64>)> +// CHECK: llvm.insertvalue %[[aligned]], {{.*}}[1] : !llvm.struct<(ptr, ptr, i64)> +// CHECK: llvm.mlir.constant(0 : index +// CHECK: llvm.insertvalue {{.*}}[2] : !llvm.struct<(ptr, ptr, i64)> + +// NOTE: No test for scalable vectors - the input memref is fixed size. + +// ----- + +func.func @type_cast_index(%arg0: memref<8x8x8xindex>) -> memref<vector<8x8x8xindex>> { + %0 = vector.type_cast %arg0: memref<8x8x8xindex> to memref<vector<8x8x8xindex>> + return %0 : memref<vector<8x8x8xindex>> +} +// CHECK-LABEL: @type_cast_index( +// CHECK-SAME: %[[A:.*]]: memref<8x8x8xindex>) +// CHECK: %{{.*}} = builtin.unrealized_conversion_cast %[[A]] : memref<8x8x8xindex> to !llvm.struct<(ptr, ptr, i64, array<3 x i64>, array<3 x i64>)> + +// CHECK: %{{.*}} = builtin.unrealized_conversion_cast %{{.*}} : !llvm.struct<(ptr, ptr, i64)> to memref<vector<8x8x8xindex>> + +// NOTE: No test for scalable vectors - the input memref is fixed size.
+ +// ----- + +func.func @type_cast_non_zero_addrspace(%arg0: memref<8x8x8xf32, 3>) -> memref, 3> { + %0 = vector.type_cast %arg0: memref<8x8x8xf32, 3> to memref, 3> + return %0 : memref, 3> +} +// CHECK-LABEL: @type_cast_non_zero_addrspace +// CHECK: llvm.mlir.poison : !llvm.struct<(ptr<3>, ptr<3>, i64)> +// CHECK: %[[allocated:.*]] = llvm.extractvalue {{.*}}[0] : !llvm.struct<(ptr<3>, ptr<3>, i64, array<3 x i64>, array<3 x i64>)> +// CHECK: llvm.insertvalue %[[allocated]], {{.*}}[0] : !llvm.struct<(ptr<3>, ptr<3>, i64)> +// CHECK: %[[aligned:.*]] = llvm.extractvalue {{.*}}[1] : !llvm.struct<(ptr<3>, ptr<3>, i64, array<3 x i64>, array<3 x i64>)> +// CHECK: llvm.insertvalue %[[aligned]], {{.*}}[1] : !llvm.struct<(ptr<3>, ptr<3>, i64)> +// CHECK: llvm.mlir.constant(0 : index +// CHECK: llvm.insertvalue {{.*}}[2] : !llvm.struct<(ptr<3>, ptr<3>, i64)> + +// NOTE: No test for scalable vectors - the input memref is fixed size. + +// ----- + +//===----------------------------------------------------------------------===// +// vector.print +//===----------------------------------------------------------------------===// + +func.func @print_scalar_i64(%arg0: i64) { + vector.print %arg0 : i64 + return +} +// CHECK-LABEL: @print_scalar_i64( +// CHECK-SAME: %[[A:.*]]: i64) +// CHECK: llvm.call @printI64(%[[A]]) : (i64) -> () +// CHECK: llvm.call @printNewline() : () -> () + +// ----- + +func.func @print_scalar_ui64(%arg0: ui64) { + vector.print %arg0 : ui64 + return +} +// CHECK-LABEL: @print_scalar_ui64( +// CHECK-SAME: %[[A:.*]]: ui64) +// CHECK: %[[C:.*]] = builtin.unrealized_conversion_cast %[[A]] : ui64 to i64 +// CHECK: llvm.call @printU64(%[[C]]) : (i64) -> () +// CHECK: llvm.call @printNewline() : () -> () + +// ----- + +func.func @print_scalar_index(%arg0: index) { + vector.print %arg0 : index + return +} +// CHECK-LABEL: @print_scalar_index( +// CHECK-SAME: %[[A:.*]]: index) +// CHECK: %[[C:.*]] = builtin.unrealized_conversion_cast %[[A]] : index to i64 +// CHECK: llvm.call @printU64(%[[C]]) : (i64) -> () +// CHECK: llvm.call @printNewline() : () -> () + +// ----- + +func.func @print_scalar_f32(%arg0: f32) { + vector.print %arg0 : f32 + return +} +// CHECK-LABEL: @print_scalar_f32( +// CHECK-SAME: %[[A:.*]]: f32) +// CHECK: llvm.call @printF32(%[[A]]) : (f32) -> () +// CHECK: llvm.call @printNewline() : () -> () + +// ----- + +func.func @print_scalar_f64(%arg0: f64) { + vector.print %arg0 : f64 + return +} +// CHECK-LABEL: @print_scalar_f64( +// CHECK-SAME: %[[A:.*]]: f64) +// CHECK: llvm.call @printF64(%[[A]]) : (f64) -> () +// CHECK: llvm.call @printNewline() : () -> () + +// ----- + +// CHECK-LABEL: module { +// CHECK: llvm.func @printString(!llvm.ptr) +// CHECK: llvm.mlir.global private constant @[[GLOBAL_STR:.*]]({{.*}}) +// CHECK: @print_string +// CHECK-NEXT: %[[GLOBAL_ADDR:.*]] = llvm.mlir.addressof @[[GLOBAL_STR]] : !llvm.ptr +// CHECK-NEXT: %[[STR_PTR:.*]] = llvm.getelementptr %[[GLOBAL_ADDR]][0] : (!llvm.ptr) -> !llvm.ptr +// CHECK-NEXT: llvm.call @printString(%[[STR_PTR]]) : (!llvm.ptr) -> () +func.func @print_string() { + vector.print str "Hello, World!" 
+ return +} + +// ----- + +//===----------------------------------------------------------------------===// +// vector.reduction +//===----------------------------------------------------------------------===// + +func.func @reduce_0d_f32(%arg0: vector<f32>) -> f32 { + %0 = vector.reduction <add>, %arg0 : vector<f32> into f32 + return %0 : f32 +} +// CHECK-LABEL: @reduce_0d_f32( +// CHECK-SAME: %[[A:.*]]: vector<f32>) +// CHECK: %[[CA:.*]] = builtin.unrealized_conversion_cast %[[A]] : vector<f32> to vector<1xf32> +// CHECK: %[[C:.*]] = llvm.mlir.constant(0.000000e+00 : f32) : f32 +// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.fadd"(%[[C]], %[[CA]]) +// CHECK-SAME: <{fastmathFlags = #llvm.fastmath<none>}> : (f32, vector<1xf32>) -> f32 +// CHECK: return %[[V]] : f32 + +// ----- + +func.func @reduce_f16(%arg0: vector<16xf16>) -> f16 { + %0 = vector.reduction <add>, %arg0 : vector<16xf16> into f16 + return %0 : f16 +} +// CHECK-LABEL: @reduce_f16( +// CHECK-SAME: %[[A:.*]]: vector<16xf16>) +// CHECK: %[[C:.*]] = llvm.mlir.constant(0.000000e+00 : f16) : f16 +// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.fadd"(%[[C]], %[[A]]) +// CHECK-SAME: <{fastmathFlags = #llvm.fastmath<none>}> : (f16, vector<16xf16>) -> f16 +// CHECK: return %[[V]] : f16 + +// ----- + +func.func @reduce_f16_scalable(%arg0: vector<[16]xf16>) -> f16 { + %0 = vector.reduction <add>, %arg0 : vector<[16]xf16> into f16 + return %0 : f16 +} +// CHECK-LABEL: @reduce_f16_scalable( +// CHECK-SAME: %[[A:.*]]: vector<[16]xf16>) +// CHECK: %[[C:.*]] = llvm.mlir.constant(0.000000e+00 : f16) : f16 +// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.fadd"(%[[C]], %[[A]]) +// CHECK-SAME: <{fastmathFlags = #llvm.fastmath<none>}> : (f16, vector<[16]xf16>) -> f16 +// CHECK: return %[[V]] : f16 + +// ----- + +func.func @reduce_f32(%arg0: vector<16xf32>) -> f32 { + %0 = vector.reduction <add>, %arg0 : vector<16xf32> into f32 + return %0 : f32 +} +// CHECK-LABEL: @reduce_f32( +// CHECK-SAME: %[[A:.*]]: vector<16xf32>) +// CHECK: %[[C:.*]] = llvm.mlir.constant(0.000000e+00 : f32) : f32 +// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.fadd"(%[[C]], %[[A]]) +// CHECK-SAME: <{fastmathFlags = #llvm.fastmath<none>}> : (f32, vector<16xf32>) -> f32 +// CHECK: return %[[V]] : f32 + +// ----- + +func.func @reduce_f32_scalable(%arg0: vector<[16]xf32>) -> f32 { + %0 = vector.reduction <add>, %arg0 : vector<[16]xf32> into f32 + return %0 : f32 +} +// CHECK-LABEL: @reduce_f32_scalable( +// CHECK-SAME: %[[A:.*]]: vector<[16]xf32>) +// CHECK: %[[C:.*]] = llvm.mlir.constant(0.000000e+00 : f32) : f32 +// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.fadd"(%[[C]], %[[A]]) +// CHECK-SAME: <{fastmathFlags = #llvm.fastmath<none>}> : (f32, vector<[16]xf32>) -> f32 +// CHECK: return %[[V]] : f32 + +// ----- + +func.func @reduce_f64(%arg0: vector<16xf64>) -> f64 { + %0 = vector.reduction <add>, %arg0 : vector<16xf64> into f64 + return %0 : f64 +} +// CHECK-LABEL: @reduce_f64( +// CHECK-SAME: %[[A:.*]]: vector<16xf64>) +// CHECK: %[[C:.*]] = llvm.mlir.constant(0.000000e+00 : f64) : f64 +// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.fadd"(%[[C]], %[[A]]) +// CHECK-SAME: <{fastmathFlags = #llvm.fastmath<none>}> : (f64, vector<16xf64>) -> f64 +// CHECK: return %[[V]] : f64 + +// ----- + +func.func @reduce_f64_scalable(%arg0: vector<[16]xf64>) -> f64 { + %0 = vector.reduction <add>, %arg0 : vector<[16]xf64> into f64 + return %0 : f64 +} +// CHECK-LABEL: @reduce_f64_scalable( +// CHECK-SAME: %[[A:.*]]: vector<[16]xf64>) +// CHECK: %[[C:.*]] = llvm.mlir.constant(0.000000e+00 : f64) : f64 +// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.fadd"(%[[C]], %[[A]]) +//
CHECK-SAME: <{fastmathFlags = #llvm.fastmath<none>}> : (f64, vector<[16]xf64>) -> f64 +// CHECK: return %[[V]] : f64 + +// ----- + +func.func @reduce_i8(%arg0: vector<16xi8>) -> i8 { + %0 = vector.reduction <add>, %arg0 : vector<16xi8> into i8 + return %0 : i8 +} +// CHECK-LABEL: @reduce_i8( +// CHECK-SAME: %[[A:.*]]: vector<16xi8>) +// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.add"(%[[A]]) +// CHECK: return %[[V]] : i8 + +// ----- + +func.func @reduce_i8_scalable(%arg0: vector<[16]xi8>) -> i8 { + %0 = vector.reduction <add>, %arg0 : vector<[16]xi8> into i8 + return %0 : i8 +} +// CHECK-LABEL: @reduce_i8_scalable( +// CHECK-SAME: %[[A:.*]]: vector<[16]xi8>) +// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.add"(%[[A]]) +// CHECK: return %[[V]] : i8 + +// ----- + +func.func @reduce_i32(%arg0: vector<16xi32>) -> i32 { + %0 = vector.reduction <add>, %arg0 : vector<16xi32> into i32 + return %0 : i32 +} +// CHECK-LABEL: @reduce_i32( +// CHECK-SAME: %[[A:.*]]: vector<16xi32>) +// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.add"(%[[A]]) +// CHECK: return %[[V]] : i32 + +// ----- + +func.func @reduce_i32_scalable(%arg0: vector<[16]xi32>) -> i32 { + %0 = vector.reduction <add>, %arg0 : vector<[16]xi32> into i32 + return %0 : i32 +} +// CHECK-LABEL: @reduce_i32_scalable( +// CHECK-SAME: %[[A:.*]]: vector<[16]xi32>) +// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.add"(%[[A]]) +// CHECK: return %[[V]] : i32 + +// ----- + +func.func @reduce_acc_i32(%arg0: vector<16xi32>, %arg1 : i32) -> i32 { + %0 = vector.reduction <add>, %arg0, %arg1 : vector<16xi32> into i32 + return %0 : i32 +} +// CHECK-LABEL: @reduce_acc_i32( +// CHECK-SAME: %[[A:.*]]: vector<16xi32>, %[[ACC:.*]]: i32) +// CHECK: %[[R:.*]] = "llvm.intr.vector.reduce.add"(%[[A]]) +// CHECK: %[[V:.*]] = llvm.add %[[ACC]], %[[R]] +// CHECK: return %[[V]] : i32 + +// ----- + +func.func @reduce_acc_i32_scalable(%arg0: vector<[16]xi32>, %arg1 : i32) -> i32 { + %0 = vector.reduction <add>, %arg0, %arg1 : vector<[16]xi32> into i32 + return %0 : i32 +} +// CHECK-LABEL: @reduce_acc_i32_scalable( +// CHECK-SAME: %[[A:.*]]: vector<[16]xi32>, %[[ACC:.*]]: i32) +// CHECK: %[[R:.*]] = "llvm.intr.vector.reduce.add"(%[[A]]) +// CHECK: %[[V:.*]] = llvm.add %[[ACC]], %[[R]] +// CHECK: return %[[V]] : i32 + +// ----- + +func.func @reduce_mul_i32(%arg0: vector<16xi32>) -> i32 { + %0 = vector.reduction <mul>, %arg0 : vector<16xi32> into i32 + return %0 : i32 +} +// CHECK-LABEL: @reduce_mul_i32( +// CHECK-SAME: %[[A:.*]]: vector<16xi32>) +// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.mul"(%[[A]]) +// CHECK: return %[[V]] : i32 + +// ----- + +func.func @reduce_mul_i32_scalable(%arg0: vector<[16]xi32>) -> i32 { + %0 = vector.reduction <mul>, %arg0 : vector<[16]xi32> into i32 + return %0 : i32 +} +// CHECK-LABEL: @reduce_mul_i32_scalable( +// CHECK-SAME: %[[A:.*]]: vector<[16]xi32>) +// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.mul"(%[[A]]) +// CHECK: return %[[V]] : i32 + +// ----- + +func.func @reduce_mul_acc_i32(%arg0: vector<16xi32>, %arg1 : i32) -> i32 { + %0 = vector.reduction <mul>, %arg0, %arg1 : vector<16xi32> into i32 + return %0 : i32 +} +// CHECK-LABEL: @reduce_mul_acc_i32( +// CHECK-SAME: %[[A:.*]]: vector<16xi32>, %[[ACC:.*]]: i32) +// CHECK: %[[R:.*]] = "llvm.intr.vector.reduce.mul"(%[[A]]) +// CHECK: %[[V:.*]] = llvm.mul %[[ACC]], %[[R]] +// CHECK: return %[[V]] : i32 + +// ----- + +func.func @reduce_mul_acc_i32_scalable(%arg0: vector<[16]xi32>, %arg1 : i32) -> i32 { + %0 = vector.reduction <mul>, %arg0, %arg1 : vector<[16]xi32> into i32 + return %0 : i32 +} +// CHECK-LABEL: @reduce_mul_acc_i32_scalable( +//
CHECK-SAME: %[[A:.*]]: vector<[16]xi32>, %[[ACC:.*]]: i32) +// CHECK: %[[R:.*]] = "llvm.intr.vector.reduce.mul"(%[[A]]) +// CHECK: %[[V:.*]] = llvm.mul %[[ACC]], %[[R]] +// CHECK: return %[[V]] : i32 + +// ----- + +func.func @reduce_fmaximum_f32(%arg0: vector<16xf32>, %arg1: f32) -> f32 { + %0 = vector.reduction , %arg0, %arg1 : vector<16xf32> into f32 + return %0 : f32 +} +// CHECK-LABEL: @reduce_fmaximum_f32( +// CHECK-SAME: %[[A:.*]]: vector<16xf32>, %[[B:.*]]: f32) +// CHECK: %[[V:.*]] = llvm.intr.vector.reduce.fmaximum(%[[A]]) : (vector<16xf32>) -> f32 +// CHECK: %[[R:.*]] = llvm.intr.maximum(%[[V]], %[[B]]) : (f32, f32) -> f32 +// CHECK: return %[[R]] : f32 + +// ----- + +func.func @reduce_fmaximum_f32_scalable(%arg0: vector<[16]xf32>, %arg1: f32) -> f32 { + %0 = vector.reduction , %arg0, %arg1 : vector<[16]xf32> into f32 + return %0 : f32 +} +// CHECK-LABEL: @reduce_fmaximum_f32_scalable( +// CHECK-SAME: %[[A:.*]]: vector<[16]xf32>, %[[B:.*]]: f32) +// CHECK: %[[V:.*]] = llvm.intr.vector.reduce.fmaximum(%[[A]]) : (vector<[16]xf32>) -> f32 +// CHECK: %[[R:.*]] = llvm.intr.maximum(%[[V]], %[[B]]) : (f32, f32) -> f32 +// CHECK: return %[[R]] : f32 + +// ----- + +func.func @reduce_fminimum_f32(%arg0: vector<16xf32>, %arg1: f32) -> f32 { + %0 = vector.reduction , %arg0, %arg1 : vector<16xf32> into f32 + return %0 : f32 +} +// CHECK-LABEL: @reduce_fminimum_f32( +// CHECK-SAME: %[[A:.*]]: vector<16xf32>, %[[B:.*]]: f32) +// CHECK: %[[V:.*]] = llvm.intr.vector.reduce.fminimum(%[[A]]) : (vector<16xf32>) -> f32 +// CHECK: %[[R:.*]] = llvm.intr.minimum(%[[V]], %[[B]]) : (f32, f32) -> f32 +// CHECK: return %[[R]] : f32 + +// ----- + +func.func @reduce_fminimum_f32_scalable(%arg0: vector<[16]xf32>, %arg1: f32) -> f32 { + %0 = vector.reduction , %arg0, %arg1 : vector<[16]xf32> into f32 + return %0 : f32 +} +// CHECK-LABEL: @reduce_fminimum_f32_scalable( +// CHECK-SAME: %[[A:.*]]: vector<[16]xf32>, %[[B:.*]]: f32) +// CHECK: %[[V:.*]] = llvm.intr.vector.reduce.fminimum(%[[A]]) : (vector<[16]xf32>) -> f32 +// CHECK: %[[R:.*]] = llvm.intr.minimum(%[[V]], %[[B]]) : (f32, f32) -> f32 +// CHECK: return %[[R]] : f32 + +// ----- + +func.func @reduce_fmax_f32(%arg0: vector<16xf32>, %arg1: f32) -> f32 { + %0 = vector.reduction , %arg0, %arg1 : vector<16xf32> into f32 + return %0 : f32 +} +// CHECK-LABEL: @reduce_fmax_f32( +// CHECK-SAME: %[[A:.*]]: vector<16xf32>, %[[B:.*]]: f32) +// CHECK: %[[V:.*]] = llvm.intr.vector.reduce.fmax(%[[A]]) : (vector<16xf32>) -> f32 +// CHECK: %[[R:.*]] = llvm.intr.maxnum(%[[V]], %[[B]]) : (f32, f32) -> f32 +// CHECK: return %[[R]] : f32 + +// ----- + +func.func @reduce_fmax_f32_scalable(%arg0: vector<[16]xf32>, %arg1: f32) -> f32 { + %0 = vector.reduction , %arg0, %arg1 : vector<[16]xf32> into f32 + return %0 : f32 +} +// CHECK-LABEL: @reduce_fmax_f32_scalable( +// CHECK-SAME: %[[A:.*]]: vector<[16]xf32>, %[[B:.*]]: f32) +// CHECK: %[[V:.*]] = llvm.intr.vector.reduce.fmax(%[[A]]) : (vector<[16]xf32>) -> f32 +// CHECK: %[[R:.*]] = llvm.intr.maxnum(%[[V]], %[[B]]) : (f32, f32) -> f32 +// CHECK: return %[[R]] : f32 + +// ----- + +func.func @reduce_fmin_f32(%arg0: vector<16xf32>, %arg1: f32) -> f32 { + %0 = vector.reduction , %arg0, %arg1 : vector<16xf32> into f32 + return %0 : f32 +} +// CHECK-LABEL: @reduce_fmin_f32( +// CHECK-SAME: %[[A:.*]]: vector<16xf32>, %[[B:.*]]: f32) +// CHECK: %[[V:.*]] = llvm.intr.vector.reduce.fmin(%[[A]]) : (vector<16xf32>) -> f32 +// CHECK: %[[R:.*]] = llvm.intr.minnum(%[[V]], %[[B]]) : (f32, f32) -> f32 +// CHECK: return %[[R]] : f32 + +// ----- 
+ +func.func @reduce_fmin_f32_scalable(%arg0: vector<[16]xf32>, %arg1: f32) -> f32 { + %0 = vector.reduction , %arg0, %arg1 : vector<[16]xf32> into f32 + return %0 : f32 +} +// CHECK-LABEL: @reduce_fmin_f32_scalable( +// CHECK-SAME: %[[A:.*]]: vector<[16]xf32>, %[[B:.*]]: f32) +// CHECK: %[[V:.*]] = llvm.intr.vector.reduce.fmin(%[[A]]) : (vector<[16]xf32>) -> f32 +// CHECK: %[[R:.*]] = llvm.intr.minnum(%[[V]], %[[B]]) : (f32, f32) -> f32 +// CHECK: return %[[R]] : f32 + +// ----- + +func.func @reduce_minui_i32(%arg0: vector<16xi32>) -> i32 { + %0 = vector.reduction , %arg0 : vector<16xi32> into i32 + return %0 : i32 +} +// CHECK-LABEL: @reduce_minui_i32( +// CHECK-SAME: %[[A:.*]]: vector<16xi32>) +// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.umin"(%[[A]]) +// CHECK: return %[[V]] : i32 + +// ----- + +func.func @reduce_minui_i32_scalable(%arg0: vector<[16]xi32>) -> i32 { + %0 = vector.reduction , %arg0 : vector<[16]xi32> into i32 + return %0 : i32 +} +// CHECK-LABEL: @reduce_minui_i32_scalable( +// CHECK-SAME: %[[A:.*]]: vector<[16]xi32>) +// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.umin"(%[[A]]) +// CHECK: return %[[V]] : i32 + +// ----- + +func.func @reduce_minui_acc_i32(%arg0: vector<16xi32>, %arg1 : i32) -> i32 { + %0 = vector.reduction , %arg0, %arg1 : vector<16xi32> into i32 + return %0 : i32 +} +// CHECK-LABEL: @reduce_minui_acc_i32( +// CHECK-SAME: %[[A:.*]]: vector<16xi32>, %[[ACC:.*]]: i32) +// CHECK: %[[R:.*]] = "llvm.intr.vector.reduce.umin"(%[[A]]) +// CHECK: %[[S:.*]] = llvm.icmp "ule" %[[ACC]], %[[R]] +// CHECK: %[[V:.*]] = llvm.select %[[S]], %[[ACC]], %[[R]] +// CHECK: return %[[V]] : i32 + +// ----- + +func.func @reduce_minui_acc_i32_scalable(%arg0: vector<[16]xi32>, %arg1 : i32) -> i32 { + %0 = vector.reduction , %arg0, %arg1 : vector<[16]xi32> into i32 + return %0 : i32 +} +// CHECK-LABEL: @reduce_minui_acc_i32_scalable( +// CHECK-SAME: %[[A:.*]]: vector<[16]xi32>, %[[ACC:.*]]: i32) +// CHECK: %[[R:.*]] = "llvm.intr.vector.reduce.umin"(%[[A]]) +// CHECK: %[[S:.*]] = llvm.icmp "ule" %[[ACC]], %[[R]] +// CHECK: %[[V:.*]] = llvm.select %[[S]], %[[ACC]], %[[R]] +// CHECK: return %[[V]] : i32 + +// ----- + +func.func @reduce_maxui_i32(%arg0: vector<16xi32>) -> i32 { + %0 = vector.reduction , %arg0 : vector<16xi32> into i32 + return %0 : i32 +} +// CHECK-LABEL: @reduce_maxui_i32( +// CHECK-SAME: %[[A:.*]]: vector<16xi32>) +// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.umax"(%[[A]]) +// CHECK: return %[[V]] : i32 + +// ----- + +func.func @reduce_maxui_i32_scalable(%arg0: vector<[16]xi32>) -> i32 { + %0 = vector.reduction , %arg0 : vector<[16]xi32> into i32 + return %0 : i32 +} +// CHECK-LABEL: @reduce_maxui_i32_scalable( +// CHECK-SAME: %[[A:.*]]: vector<[16]xi32>) +// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.umax"(%[[A]]) +// CHECK: return %[[V]] : i32 + +// ----- + +func.func @reduce_maxui_acc_i32(%arg0: vector<16xi32>, %arg1 : i32) -> i32 { + %0 = vector.reduction , %arg0, %arg1 : vector<16xi32> into i32 + return %0 : i32 +} +// CHECK-LABEL: @reduce_maxui_acc_i32( +// CHECK-SAME: %[[A:.*]]: vector<16xi32>, %[[ACC:.*]]: i32) +// CHECK: %[[R:.*]] = "llvm.intr.vector.reduce.umax"(%[[A]]) +// CHECK: %[[S:.*]] = llvm.icmp "uge" %[[ACC]], %[[R]] +// CHECK: %[[V:.*]] = llvm.select %[[S]], %[[ACC]], %[[R]] +// CHECK: return %[[V]] : i32 + +// ----- + +func.func @reduce_maxui_acc_i32_scalable(%arg0: vector<[16]xi32>, %arg1 : i32) -> i32 { + %0 = vector.reduction , %arg0, %arg1 : vector<[16]xi32> into i32 + return %0 : i32 +} +// CHECK-LABEL: @reduce_maxui_acc_i32_scalable( 
+// CHECK-SAME: %[[A:.*]]: vector<[16]xi32>, %[[ACC:.*]]: i32) +// CHECK: %[[R:.*]] = "llvm.intr.vector.reduce.umax"(%[[A]]) +// CHECK: %[[S:.*]] = llvm.icmp "uge" %[[ACC]], %[[R]] +// CHECK: %[[V:.*]] = llvm.select %[[S]], %[[ACC]], %[[R]] +// CHECK: return %[[V]] : i32 + +// ----- + +func.func @reduce_minsi_i32(%arg0: vector<16xi32>) -> i32 { + %0 = vector.reduction , %arg0 : vector<16xi32> into i32 + return %0 : i32 +} +// CHECK-LABEL: @reduce_minsi_i32( +// CHECK-SAME: %[[A:.*]]: vector<16xi32>) +// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.smin"(%[[A]]) +// CHECK: return %[[V]] : i32 + +// ----- + +func.func @reduce_minsi_i32_scalable(%arg0: vector<[16]xi32>) -> i32 { + %0 = vector.reduction , %arg0 : vector<[16]xi32> into i32 + return %0 : i32 +} +// CHECK-LABEL: @reduce_minsi_i32_scalable( +// CHECK-SAME: %[[A:.*]]: vector<[16]xi32>) +// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.smin"(%[[A]]) +// CHECK: return %[[V]] : i32 + +// ----- + +func.func @reduce_minsi_acc_i32(%arg0: vector<16xi32>, %arg1 : i32) -> i32 { + %0 = vector.reduction , %arg0, %arg1 : vector<16xi32> into i32 + return %0 : i32 +} +// CHECK-LABEL: @reduce_minsi_acc_i32( +// CHECK-SAME: %[[A:.*]]: vector<16xi32>, %[[ACC:.*]]: i32) +// CHECK: %[[R:.*]] = "llvm.intr.vector.reduce.smin"(%[[A]]) +// CHECK: %[[S:.*]] = llvm.icmp "sle" %[[ACC]], %[[R]] +// CHECK: %[[V:.*]] = llvm.select %[[S]], %[[ACC]], %[[R]] +// CHECK: return %[[V]] : i32 + +// ----- + +func.func @reduce_minsi_acc_i32_scalable(%arg0: vector<[16]xi32>, %arg1 : i32) -> i32 { + %0 = vector.reduction , %arg0, %arg1 : vector<[16]xi32> into i32 + return %0 : i32 +} +// CHECK-LABEL: @reduce_minsi_acc_i32_scalable( +// CHECK-SAME: %[[A:.*]]: vector<[16]xi32>, %[[ACC:.*]]: i32) +// CHECK: %[[R:.*]] = "llvm.intr.vector.reduce.smin"(%[[A]]) +// CHECK: %[[S:.*]] = llvm.icmp "sle" %[[ACC]], %[[R]] +// CHECK: %[[V:.*]] = llvm.select %[[S]], %[[ACC]], %[[R]] +// CHECK: return %[[V]] : i32 + +// ----- + +func.func @reduce_maxsi_i32(%arg0: vector<16xi32>) -> i32 { + %0 = vector.reduction , %arg0 : vector<16xi32> into i32 + return %0 : i32 +} +// CHECK-LABEL: @reduce_maxsi_i32( +// CHECK-SAME: %[[A:.*]]: vector<16xi32>) +// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.smax"(%[[A]]) +// CHECK: return %[[V]] : i32 + +// ----- + +func.func @reduce_maxsi_i32_scalable(%arg0: vector<[16]xi32>) -> i32 { + %0 = vector.reduction , %arg0 : vector<[16]xi32> into i32 + return %0 : i32 +} +// CHECK-LABEL: @reduce_maxsi_i32_scalable( +// CHECK-SAME: %[[A:.*]]: vector<[16]xi32>) +// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.smax"(%[[A]]) +// CHECK: return %[[V]] : i32 + +// ----- + +func.func @reduce_maxsi_acc_i32(%arg0: vector<16xi32>, %arg1 : i32) -> i32 { + %0 = vector.reduction , %arg0, %arg1 : vector<16xi32> into i32 + return %0 : i32 +} +// CHECK-LABEL: @reduce_maxsi_acc_i32( +// CHECK-SAME: %[[A:.*]]: vector<16xi32>, %[[ACC:.*]]: i32) +// CHECK: %[[R:.*]] = "llvm.intr.vector.reduce.smax"(%[[A]]) +// CHECK: %[[S:.*]] = llvm.icmp "sge" %[[ACC]], %[[R]] +// CHECK: %[[V:.*]] = llvm.select %[[S]], %[[ACC]], %[[R]] +// CHECK: return %[[V]] : i32 + +// ----- + +func.func @reduce_maxsi_acc_i32_scalable(%arg0: vector<[16]xi32>, %arg1 : i32) -> i32 { + %0 = vector.reduction , %arg0, %arg1 : vector<[16]xi32> into i32 + return %0 : i32 +} +// CHECK-LABEL: @reduce_maxsi_acc_i32_scalable( +// CHECK-SAME: %[[A:.*]]: vector<[16]xi32>, %[[ACC:.*]]: i32) +// CHECK: %[[R:.*]] = "llvm.intr.vector.reduce.smax"(%[[A]]) +// CHECK: %[[S:.*]] = llvm.icmp "sge" %[[ACC]], %[[R]] +// CHECK: %[[V:.*]] 
= llvm.select %[[S]], %[[ACC]], %[[R]] +// CHECK: return %[[V]] : i32 + +// ----- + +func.func @reduce_and_i32(%arg0: vector<16xi32>) -> i32 { + %0 = vector.reduction , %arg0 : vector<16xi32> into i32 + return %0 : i32 +} +// CHECK-LABEL: @reduce_and_i32( +// CHECK-SAME: %[[A:.*]]: vector<16xi32>) +// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.and"(%[[A]]) +// CHECK: return %[[V]] : i32 + +// ----- + +func.func @reduce_and_i32_scalable(%arg0: vector<[16]xi32>) -> i32 { + %0 = vector.reduction , %arg0 : vector<[16]xi32> into i32 + return %0 : i32 +} +// CHECK-LABEL: @reduce_and_i32_scalable( +// CHECK-SAME: %[[A:.*]]: vector<[16]xi32>) +// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.and"(%[[A]]) +// CHECK: return %[[V]] : i32 + +// ----- + +func.func @reduce_and_acc_i32(%arg0: vector<16xi32>, %arg1 : i32) -> i32 { + %0 = vector.reduction , %arg0, %arg1 : vector<16xi32> into i32 + return %0 : i32 +} +// CHECK-LABEL: @reduce_and_acc_i32( +// CHECK-SAME: %[[A:.*]]: vector<16xi32>, %[[ACC:.*]]: i32) +// CHECK: %[[R:.*]] = "llvm.intr.vector.reduce.and"(%[[A]]) +// CHECK: %[[V:.*]] = llvm.and %[[ACC]], %[[R]] +// CHECK: return %[[V]] : i32 + +// ----- + +func.func @reduce_and_acc_i32_scalable(%arg0: vector<[16]xi32>, %arg1 : i32) -> i32 { + %0 = vector.reduction , %arg0, %arg1 : vector<[16]xi32> into i32 + return %0 : i32 +} +// CHECK-LABEL: @reduce_and_acc_i32_scalable( +// CHECK-SAME: %[[A:.*]]: vector<[16]xi32>, %[[ACC:.*]]: i32) +// CHECK: %[[R:.*]] = "llvm.intr.vector.reduce.and"(%[[A]]) +// CHECK: %[[V:.*]] = llvm.and %[[ACC]], %[[R]] +// CHECK: return %[[V]] : i32 + +// ----- + +func.func @reduce_or_i32(%arg0: vector<16xi32>) -> i32 { + %0 = vector.reduction , %arg0 : vector<16xi32> into i32 + return %0 : i32 +} +// CHECK-LABEL: @reduce_or_i32( +// CHECK-SAME: %[[A:.*]]: vector<16xi32>) +// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.or"(%[[A]]) +// CHECK: return %[[V]] : i32 + +// ----- + +func.func @reduce_or_i32_scalable(%arg0: vector<[16]xi32>) -> i32 { + %0 = vector.reduction , %arg0 : vector<[16]xi32> into i32 + return %0 : i32 +} +// CHECK-LABEL: @reduce_or_i32_scalable( +// CHECK-SAME: %[[A:.*]]: vector<[16]xi32>) +// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.or"(%[[A]]) +// CHECK: return %[[V]] : i32 + +// ----- + +func.func @reduce_or_acc_i32(%arg0: vector<16xi32>, %arg1 : i32) -> i32 { + %0 = vector.reduction , %arg0, %arg1 : vector<16xi32> into i32 + return %0 : i32 +} +// CHECK-LABEL: @reduce_or_acc_i32( +// CHECK-SAME: %[[A:.*]]: vector<16xi32>, %[[ACC:.*]]: i32) +// CHECK: %[[R:.*]] = "llvm.intr.vector.reduce.or"(%[[A]]) +// CHECK: %[[V:.*]] = llvm.or %[[ACC]], %[[R]] +// CHECK: return %[[V]] : i32 + +// ----- + +func.func @reduce_or_acc_i32_scalable(%arg0: vector<[16]xi32>, %arg1 : i32) -> i32 { + %0 = vector.reduction , %arg0, %arg1 : vector<[16]xi32> into i32 + return %0 : i32 +} +// CHECK-LABEL: @reduce_or_acc_i32_scalable( +// CHECK-SAME: %[[A:.*]]: vector<[16]xi32>, %[[ACC:.*]]: i32) +// CHECK: %[[R:.*]] = "llvm.intr.vector.reduce.or"(%[[A]]) +// CHECK: %[[V:.*]] = llvm.or %[[ACC]], %[[R]] +// CHECK: return %[[V]] : i32 + +// ----- + +func.func @reduce_xor_i32(%arg0: vector<16xi32>) -> i32 { + %0 = vector.reduction , %arg0 : vector<16xi32> into i32 + return %0 : i32 +} +// CHECK-LABEL: @reduce_xor_i32( +// CHECK-SAME: %[[A:.*]]: vector<16xi32>) +// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.xor"(%[[A]]) +// CHECK: return %[[V]] : i32 + +// ----- + +func.func @reduce_xor_i32_scalable(%arg0: vector<[16]xi32>) -> i32 { + %0 = vector.reduction , %arg0 : 
vector<[16]xi32> into i32 + return %0 : i32 +} +// CHECK-LABEL: @reduce_xor_i32_scalable( +// CHECK-SAME: %[[A:.*]]: vector<[16]xi32>) +// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.xor"(%[[A]]) +// CHECK: return %[[V]] : i32 + +// ----- + +func.func @reduce_xor_acc_i32(%arg0: vector<16xi32>, %arg1 : i32) -> i32 { + %0 = vector.reduction , %arg0, %arg1 : vector<16xi32> into i32 + return %0 : i32 +} +// CHECK-LABEL: @reduce_xor_acc_i32( +// CHECK-SAME: %[[A:.*]]: vector<16xi32>, %[[ACC:.*]]: i32) +// CHECK: %[[R:.*]] = "llvm.intr.vector.reduce.xor"(%[[A]]) +// CHECK: %[[V:.*]] = llvm.xor %[[ACC]], %[[R]] +// CHECK: return %[[V]] : i32 + +// ----- + +func.func @reduce_xor_acc_i32_scalable(%arg0: vector<[16]xi32>, %arg1 : i32) -> i32 { + %0 = vector.reduction , %arg0, %arg1 : vector<[16]xi32> into i32 + return %0 : i32 +} +// CHECK-LABEL: @reduce_xor_acc_i32_scalable( +// CHECK-SAME: %[[A:.*]]: vector<[16]xi32>, %[[ACC:.*]]: i32) +// CHECK: %[[R:.*]] = "llvm.intr.vector.reduce.xor"(%[[A]]) +// CHECK: %[[V:.*]] = llvm.xor %[[ACC]], %[[R]] +// CHECK: return %[[V]] : i32 + +// ----- + +func.func @reduce_i64(%arg0: vector<16xi64>) -> i64 { + %0 = vector.reduction , %arg0 : vector<16xi64> into i64 + return %0 : i64 +} +// CHECK-LABEL: @reduce_i64( +// CHECK-SAME: %[[A:.*]]: vector<16xi64>) +// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.add"(%[[A]]) +// CHECK: return %[[V]] : i64 + +// ----- + +func.func @reduce_i64_scalable(%arg0: vector<[16]xi64>) -> i64 { + %0 = vector.reduction , %arg0 : vector<[16]xi64> into i64 + return %0 : i64 +} +// CHECK-LABEL: @reduce_i64_scalable( +// CHECK-SAME: %[[A:.*]]: vector<[16]xi64>) +// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.add"(%[[A]]) +// CHECK: return %[[V]] : i64 + +// ----- + +func.func @reduce_index(%arg0: vector<16xindex>) -> index { + %0 = vector.reduction , %arg0 : vector<16xindex> into index + return %0 : index +} +// CHECK-LABEL: @reduce_index( +// CHECK-SAME: %[[A:.*]]: vector<16xindex>) +// CHECK: %[[T0:.*]] = builtin.unrealized_conversion_cast %[[A]] : vector<16xindex> to vector<16xi64> +// CHECK: %[[T1:.*]] = "llvm.intr.vector.reduce.add"(%[[T0]]) +// CHECK: %[[T2:.*]] = builtin.unrealized_conversion_cast %[[T1]] : i64 to index +// CHECK: return %[[T2]] : index + +// ----- + +func.func @reduce_index_scalable(%arg0: vector<[16]xindex>) -> index { + %0 = vector.reduction , %arg0 : vector<[16]xindex> into index + return %0 : index +} +// CHECK-LABEL: @reduce_index_scalable( +// CHECK-SAME: %[[A:.*]]: vector<[16]xindex>) +// CHECK: %[[T0:.*]] = builtin.unrealized_conversion_cast %[[A]] : vector<[16]xindex> to vector<[16]xi64> +// CHECK: %[[T1:.*]] = "llvm.intr.vector.reduce.add"(%[[T0]]) +// CHECK: %[[T2:.*]] = builtin.unrealized_conversion_cast %[[T1]] : i64 to index +// CHECK: return %[[T2]] : index + +// ----- + +//===----------------------------------------------------------------------===// +// vector.transpose +//===----------------------------------------------------------------------===// + +func.func @transpose_0d(%arg0: vector) -> vector { + %0 = vector.transpose %arg0, [] : vector to vector + return %0 : vector +} + +// CHECK-LABEL: func @transpose_0d +// CHECK-SAME: %[[A:.*]]: vector +// CHECK: return %[[A]] : vector + +// ----- + +//===----------------------------------------------------------------------===// +// vector.load +//===----------------------------------------------------------------------===// + +func.func @load(%memref : memref<200x100xf32>, %i : index, %j : index) -> vector<8xf32> { + %0 = vector.load %memref[%i, 
%j] : memref<200x100xf32>, vector<8xf32> + return %0 : vector<8xf32> +} + +// CHECK-LABEL: func @load +// CHECK: %[[C100:.*]] = llvm.mlir.constant(100 : index) : i64 +// CHECK: %[[MUL:.*]] = llvm.mul %{{.*}}, %[[C100]] : i64 +// CHECK: %[[ADD:.*]] = llvm.add %[[MUL]], %{{.*}} : i64 +// CHECK: %[[GEP:.*]] = llvm.getelementptr %{{.*}}[%[[ADD]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32 +// CHECK: llvm.load %[[GEP]] {alignment = 4 : i64} : !llvm.ptr -> vector<8xf32> + +// ----- + +func.func @load_scalable(%memref : memref<200x100xf32>, %i : index, %j : index) -> vector<[8]xf32> { + %0 = vector.load %memref[%i, %j] : memref<200x100xf32>, vector<[8]xf32> + return %0 : vector<[8]xf32> +} + +// CHECK-LABEL: func @load_scalable +// CHECK: %[[C100:.*]] = llvm.mlir.constant(100 : index) : i64 +// CHECK: %[[MUL:.*]] = llvm.mul %{{.*}}, %[[C100]] : i64 +// CHECK: %[[ADD:.*]] = llvm.add %[[MUL]], %{{.*}} : i64 +// CHECK: %[[GEP:.*]] = llvm.getelementptr %{{.*}}[%[[ADD]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32 +// CHECK: llvm.load %[[GEP]] {alignment = 4 : i64} : !llvm.ptr -> vector<[8]xf32> + +// ----- + +func.func @load_nontemporal(%memref : memref<200x100xf32>, %i : index, %j : index) -> vector<8xf32> { + %0 = vector.load %memref[%i, %j] {nontemporal = true} : memref<200x100xf32>, vector<8xf32> + return %0 : vector<8xf32> +} + +// CHECK-LABEL: func @load_nontemporal +// CHECK: %[[C100:.*]] = llvm.mlir.constant(100 : index) : i64 +// CHECK: %[[MUL:.*]] = llvm.mul %{{.*}}, %[[C100]] : i64 +// CHECK: %[[ADD:.*]] = llvm.add %[[MUL]], %{{.*}} : i64 +// CHECK: %[[GEP:.*]] = llvm.getelementptr %{{.*}}[%[[ADD]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32 +// CHECK: llvm.load %[[GEP]] {alignment = 4 : i64, nontemporal} : !llvm.ptr -> vector<8xf32> + +// ----- + +func.func @load_nontemporal_scalable(%memref : memref<200x100xf32>, %i : index, %j : index) -> vector<[8]xf32> { + %0 = vector.load %memref[%i, %j] {nontemporal = true} : memref<200x100xf32>, vector<[8]xf32> + return %0 : vector<[8]xf32> +} + +// CHECK-LABEL: func @load_nontemporal_scalable +// CHECK: %[[C100:.*]] = llvm.mlir.constant(100 : index) : i64 +// CHECK: %[[MUL:.*]] = llvm.mul %{{.*}}, %[[C100]] : i64 +// CHECK: %[[ADD:.*]] = llvm.add %[[MUL]], %{{.*}} : i64 +// CHECK: %[[GEP:.*]] = llvm.getelementptr %{{.*}}[%[[ADD]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32 +// CHECK: llvm.load %[[GEP]] {alignment = 4 : i64, nontemporal} : !llvm.ptr -> vector<[8]xf32> + +// ----- + +func.func @load_index(%memref : memref<200x100xindex>, %i : index, %j : index) -> vector<8xindex> { + %0 = vector.load %memref[%i, %j] : memref<200x100xindex>, vector<8xindex> + return %0 : vector<8xindex> +} +// CHECK-LABEL: func @load_index +// CHECK: %[[T0:.*]] = llvm.load %{{.*}} {alignment = 8 : i64} : !llvm.ptr -> vector<8xi64> +// CHECK: %[[T1:.*]] = builtin.unrealized_conversion_cast %[[T0]] : vector<8xi64> to vector<8xindex> +// CHECK: return %[[T1]] : vector<8xindex> + +// ----- + +func.func @load_index_scalable(%memref : memref<200x100xindex>, %i : index, %j : index) -> vector<[8]xindex> { + %0 = vector.load %memref[%i, %j] : memref<200x100xindex>, vector<[8]xindex> + return %0 : vector<[8]xindex> +} +// CHECK-LABEL: func @load_index_scalable +// CHECK: %[[T0:.*]] = llvm.load %{{.*}} {alignment = 8 : i64} : !llvm.ptr -> vector<[8]xi64> +// CHECK: %[[T1:.*]] = builtin.unrealized_conversion_cast %[[T0]] : vector<[8]xi64> to vector<[8]xindex> +// CHECK: return %[[T1]] : vector<[8]xindex> + +// ----- + +func.func @load_0d(%memref : memref<200x100xf32>, %i : index, %j : index) -> vector { 
+ %0 = vector.load %memref[%i, %j] : memref<200x100xf32>, vector + return %0 : vector +} + +// CHECK-LABEL: func @load_0d +// CHECK: %[[J:.*]] = builtin.unrealized_conversion_cast %{{.*}} : index to i64 +// CHECK: %[[I:.*]] = builtin.unrealized_conversion_cast %{{.*}} : index to i64 +// CHECK: %[[CAST_MEMREF:.*]] = builtin.unrealized_conversion_cast %{{.*}} : memref<200x100xf32> to !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> +// CHECK: %[[REF:.*]] = llvm.extractvalue %[[CAST_MEMREF]][1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> +// CHECK: %[[C100:.*]] = llvm.mlir.constant(100 : index) : i64 +// CHECK: %[[MUL:.*]] = llvm.mul %[[I]], %[[C100]] : i64 +// CHECK: %[[ADD:.*]] = llvm.add %[[MUL]], %[[J]] : i64 +// CHECK: %[[ADDR:.*]] = llvm.getelementptr %[[REF]][%[[ADD]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32 +// CHECK: %[[LOAD:.*]] = llvm.load %[[ADDR]] {alignment = 4 : i64} : !llvm.ptr -> vector<1xf32> +// CHECK: %[[RES:.*]] = builtin.unrealized_conversion_cast %[[LOAD]] : vector<1xf32> to vector +// CHECK: return %[[RES]] : vector + +// ----- + +//===----------------------------------------------------------------------===// +// vector.store +//===----------------------------------------------------------------------===// + +func.func @store(%memref : memref<200x100xf32>, %i : index, %j : index) { + %val = arith.constant dense<11.0> : vector<4xf32> + vector.store %val, %memref[%i, %j] : memref<200x100xf32>, vector<4xf32> + return +} + +// CHECK-LABEL: func @store +// CHECK: %[[C100:.*]] = llvm.mlir.constant(100 : index) : i64 +// CHECK: %[[MUL:.*]] = llvm.mul %{{.*}}, %[[C100]] : i64 +// CHECK: %[[ADD:.*]] = llvm.add %[[MUL]], %{{.*}} : i64 +// CHECK: %[[GEP:.*]] = llvm.getelementptr %{{.*}}[%[[ADD]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32 +// CHECK: llvm.store %{{.*}}, %[[GEP]] {alignment = 4 : i64} : vector<4xf32>, !llvm.ptr + +// ----- + +func.func @store_scalable(%memref : memref<200x100xf32>, %i : index, %j : index) { + %val = arith.constant dense<11.0> : vector<[4]xf32> + vector.store %val, %memref[%i, %j] : memref<200x100xf32>, vector<[4]xf32> + return +} + +// CHECK-LABEL: func @store_scalable +// CHECK: %[[C100:.*]] = llvm.mlir.constant(100 : index) : i64 +// CHECK: %[[MUL:.*]] = llvm.mul %{{.*}}, %[[C100]] : i64 +// CHECK: %[[ADD:.*]] = llvm.add %[[MUL]], %{{.*}} : i64 +// CHECK: %[[GEP:.*]] = llvm.getelementptr %{{.*}}[%[[ADD]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32 +// CHECK: llvm.store %{{.*}}, %[[GEP]] {alignment = 4 : i64} : vector<[4]xf32>, !llvm.ptr + +// ----- + +func.func @store_nontemporal(%memref : memref<200x100xf32>, %i : index, %j : index) { + %val = arith.constant dense<11.0> : vector<4xf32> + vector.store %val, %memref[%i, %j] {nontemporal = true} : memref<200x100xf32>, vector<4xf32> + return +} + +// CHECK-LABEL: func @store_nontemporal +// CHECK: %[[C100:.*]] = llvm.mlir.constant(100 : index) : i64 +// CHECK: %[[MUL:.*]] = llvm.mul %{{.*}}, %[[C100]] : i64 +// CHECK: %[[ADD:.*]] = llvm.add %[[MUL]], %{{.*}} : i64 +// CHECK: %[[GEP:.*]] = llvm.getelementptr %{{.*}}[%[[ADD]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32 +// CHECK: llvm.store %{{.*}}, %[[GEP]] {alignment = 4 : i64, nontemporal} : vector<4xf32>, !llvm.ptr + +// ----- + +func.func @store_nontemporal_scalable(%memref : memref<200x100xf32>, %i : index, %j : index) { + %val = arith.constant dense<11.0> : vector<[4]xf32> + vector.store %val, %memref[%i, %j] {nontemporal = true} : memref<200x100xf32>, vector<[4]xf32> + return +} + +// CHECK-LABEL: func @store_nontemporal_scalable 
+// CHECK: %[[C100:.*]] = llvm.mlir.constant(100 : index) : i64 +// CHECK: %[[MUL:.*]] = llvm.mul %{{.*}}, %[[C100]] : i64 +// CHECK: %[[ADD:.*]] = llvm.add %[[MUL]], %{{.*}} : i64 +// CHECK: %[[GEP:.*]] = llvm.getelementptr %{{.*}}[%[[ADD]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32 +// CHECK: llvm.store %{{.*}}, %[[GEP]] {alignment = 4 : i64, nontemporal} : vector<[4]xf32>, !llvm.ptr + +// ----- + +func.func @store_index(%memref : memref<200x100xindex>, %i : index, %j : index) { + %val = arith.constant dense<11> : vector<4xindex> + vector.store %val, %memref[%i, %j] : memref<200x100xindex>, vector<4xindex> + return +} +// CHECK-LABEL: func @store_index +// CHECK: llvm.store %{{.*}}, %{{.*}} {alignment = 8 : i64} : vector<4xi64>, !llvm.ptr + +// ----- + +func.func @store_index_scalable(%memref : memref<200x100xindex>, %i : index, %j : index) { + %val = arith.constant dense<11> : vector<[4]xindex> + vector.store %val, %memref[%i, %j] : memref<200x100xindex>, vector<[4]xindex> + return +} +// CHECK-LABEL: func @store_index_scalable +// CHECK: llvm.store %{{.*}}, %{{.*}} {alignment = 8 : i64} : vector<[4]xi64>, !llvm.ptr + +// ----- + +func.func @store_0d(%memref : memref<200x100xf32>, %i : index, %j : index) { + %val = arith.constant dense<11.0> : vector<f32> + vector.store %val, %memref[%i, %j] : memref<200x100xf32>, vector<f32> + return +} + +// CHECK-LABEL: func @store_0d +// CHECK: %[[J:.*]] = builtin.unrealized_conversion_cast %{{.*}} : index to i64 +// CHECK: %[[I:.*]] = builtin.unrealized_conversion_cast %{{.*}} : index to i64 +// CHECK: %[[CAST_MEMREF:.*]] = builtin.unrealized_conversion_cast %{{.*}} : memref<200x100xf32> to !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> +// CHECK: %[[CST:.*]] = arith.constant dense<1.100000e+01> : vector<f32> +// CHECK: %[[VAL:.*]] = builtin.unrealized_conversion_cast %[[CST]] : vector<f32> to vector<1xf32> +// CHECK: %[[REF:.*]] = llvm.extractvalue %[[CAST_MEMREF]][1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> +// CHECK: %[[C100:.*]] = llvm.mlir.constant(100 : index) : i64 +// CHECK: %[[MUL:.*]] = llvm.mul %[[I]], %[[C100]] : i64 +// CHECK: %[[ADD:.*]] = llvm.add %[[MUL]], %[[J]] : i64 +// CHECK: %[[ADDR:.*]] = llvm.getelementptr %[[REF]][%[[ADD]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32 +// CHECK: llvm.store %[[VAL]], %[[ADDR]] {alignment = 4 : i64} : vector<1xf32>, !llvm.ptr +// CHECK: return + +// ----- + +//===----------------------------------------------------------------------===// +// vector.maskedload +//===----------------------------------------------------------------------===// + +func.func @masked_load(%arg0: memref<?xf32>, %arg1: vector<16xi1>, %arg2: vector<16xf32>) -> vector<16xf32> { + %c0 = arith.constant 0: index + %0 = vector.maskedload %arg0[%c0], %arg1, %arg2 : memref<?xf32>, vector<16xi1>, vector<16xf32> into vector<16xf32> + return %0 : vector<16xf32> +} + +// CHECK-LABEL: func @masked_load +// CHECK: %[[CO:.*]] = arith.constant 0 : index +// CHECK: %[[C:.*]] = builtin.unrealized_conversion_cast %[[CO]] : index to i64 +// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%[[C]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32 +// CHECK: %[[L:.*]] = llvm.intr.masked.load %[[P]], %{{.*}}, %{{.*}} {alignment = 4 : i32} : (!llvm.ptr, vector<16xi1>, vector<16xf32>) -> vector<16xf32> +// CHECK: return %[[L]] : vector<16xf32> + +// ----- + +func.func @masked_load_scalable(%arg0: memref<?xf32>, %arg1: vector<[16]xi1>, %arg2: vector<[16]xf32>) -> vector<[16]xf32> { + %c0 = arith.constant 0: index + %0 = vector.maskedload %arg0[%c0], %arg1, %arg2 : memref<?xf32>,
vector<[16]xi1>, vector<[16]xf32> into vector<[16]xf32> + return %0 : vector<[16]xf32> +} + +// CHECK-LABEL: func @masked_load_scalable +// CHECK: %[[CO:.*]] = arith.constant 0 : index +// CHECK: %[[C:.*]] = builtin.unrealized_conversion_cast %[[CO]] : index to i64 +// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%[[C]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32 +// CHECK: %[[L:.*]] = llvm.intr.masked.load %[[P]], %{{.*}}, %{{.*}} {alignment = 4 : i32} : (!llvm.ptr, vector<[16]xi1>, vector<[16]xf32>) -> vector<[16]xf32> +// CHECK: return %[[L]] : vector<[16]xf32> + +// ----- + +func.func @masked_load_index(%arg0: memref<?xindex>, %arg1: vector<16xi1>, %arg2: vector<16xindex>) -> vector<16xindex> { + %c0 = arith.constant 0: index + %0 = vector.maskedload %arg0[%c0], %arg1, %arg2 : memref<?xindex>, vector<16xi1>, vector<16xindex> into vector<16xindex> + return %0 : vector<16xindex> +} +// CHECK-LABEL: func @masked_load_index +// CHECK: %{{.*}} = llvm.intr.masked.load %{{.*}}, %{{.*}}, %{{.*}} {alignment = 8 : i32} : (!llvm.ptr, vector<16xi1>, vector<16xi64>) -> vector<16xi64> + +// ----- + +func.func @masked_load_index_scalable(%arg0: memref<?xindex>, %arg1: vector<[16]xi1>, %arg2: vector<[16]xindex>) -> vector<[16]xindex> { + %c0 = arith.constant 0: index + %0 = vector.maskedload %arg0[%c0], %arg1, %arg2 : memref<?xindex>, vector<[16]xi1>, vector<[16]xindex> into vector<[16]xindex> + return %0 : vector<[16]xindex> +} +// CHECK-LABEL: func @masked_load_index_scalable +// CHECK: %{{.*}} = llvm.intr.masked.load %{{.*}}, %{{.*}}, %{{.*}} {alignment = 8 : i32} : (!llvm.ptr, vector<[16]xi1>, vector<[16]xi64>) -> vector<[16]xi64> + +// ----- + +//===----------------------------------------------------------------------===// +// vector.maskedstore +//===----------------------------------------------------------------------===// + +func.func @masked_store(%arg0: memref<?xf32>, %arg1: vector<16xi1>, %arg2: vector<16xf32>) { + %c0 = arith.constant 0: index + vector.maskedstore %arg0[%c0], %arg1, %arg2 : memref<?xf32>, vector<16xi1>, vector<16xf32> + return +} + +// CHECK-LABEL: func @masked_store +// CHECK: %[[CO:.*]] = arith.constant 0 : index +// CHECK: %[[C:.*]] = builtin.unrealized_conversion_cast %[[CO]] : index to i64 +// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%[[C]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32 +// CHECK: llvm.intr.masked.store %{{.*}}, %[[P]], %{{.*}} {alignment = 4 : i32} : vector<16xf32>, vector<16xi1> into !llvm.ptr + +// ----- + +func.func @masked_store_scalable(%arg0: memref<?xf32>, %arg1: vector<[16]xi1>, %arg2: vector<[16]xf32>) { + %c0 = arith.constant 0: index + vector.maskedstore %arg0[%c0], %arg1, %arg2 : memref<?xf32>, vector<[16]xi1>, vector<[16]xf32> + return +} + +// CHECK-LABEL: func @masked_store_scalable +// CHECK: %[[CO:.*]] = arith.constant 0 : index +// CHECK: %[[C:.*]] = builtin.unrealized_conversion_cast %[[CO]] : index to i64 +// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%[[C]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32 +// CHECK: llvm.intr.masked.store %{{.*}}, %[[P]], %{{.*}} {alignment = 4 : i32} : vector<[16]xf32>, vector<[16]xi1> into !llvm.ptr + +// ----- + +func.func @masked_store_index(%arg0: memref<?xindex>, %arg1: vector<16xi1>, %arg2: vector<16xindex>) { + %c0 = arith.constant 0: index + vector.maskedstore %arg0[%c0], %arg1, %arg2 : memref<?xindex>, vector<16xi1>, vector<16xindex> + return +} +// CHECK-LABEL: func @masked_store_index +// CHECK: llvm.intr.masked.store %{{.*}}, %{{.*}}, %{{.*}} {alignment = 8 : i32} : vector<16xi64>, vector<16xi1> into !llvm.ptr + +// ----- + +func.func @masked_store_index_scalable(%arg0: memref<?xindex>,
%arg1: vector<[16]xi1>, %arg2: vector<[16]xindex>) { + %c0 = arith.constant 0: index + vector.maskedstore %arg0[%c0], %arg1, %arg2 : memref, vector<[16]xi1>, vector<[16]xindex> + return +} +// CHECK-LABEL: func @masked_store_index_scalable +// CHECK: llvm.intr.masked.store %{{.*}}, %{{.*}}, %{{.*}} {alignment = 8 : i32} : vector<[16]xi64>, vector<[16]xi1> into !llvm.ptr + +// ----- + +//===----------------------------------------------------------------------===// +// vector.gather +//===----------------------------------------------------------------------===// + +func.func @gather(%arg0: memref, %arg1: vector<3xi32>, %arg2: vector<3xi1>, %arg3: vector<3xf32>) -> vector<3xf32> { + %0 = arith.constant 0: index + %1 = vector.gather %arg0[%0][%arg1], %arg2, %arg3 : memref, vector<3xi32>, vector<3xi1>, vector<3xf32> into vector<3xf32> + return %1 : vector<3xf32> +} + +// CHECK-LABEL: func @gather +// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%{{.*}}] : (!llvm.ptr, vector<3xi32>) -> !llvm.vec<3 x ptr>, f32 +// CHECK: %[[G:.*]] = llvm.intr.masked.gather %[[P]], %{{.*}}, %{{.*}} {alignment = 4 : i32} : (!llvm.vec<3 x ptr>, vector<3xi1>, vector<3xf32>) -> vector<3xf32> +// CHECK: return %[[G]] : vector<3xf32> + +// ----- + +func.func @gather_scalable(%arg0: memref, %arg1: vector<[3]xi32>, %arg2: vector<[3]xi1>, %arg3: vector<[3]xf32>) -> vector<[3]xf32> { + %0 = arith.constant 0: index + %1 = vector.gather %arg0[%0][%arg1], %arg2, %arg3 : memref, vector<[3]xi32>, vector<[3]xi1>, vector<[3]xf32> into vector<[3]xf32> + return %1 : vector<[3]xf32> +} + +// CHECK-LABEL: func @gather_scalable +// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%{{.*}}] : (!llvm.ptr, vector<[3]xi32>) -> !llvm.vec, f32 +// CHECK: %[[G:.*]] = llvm.intr.masked.gather %[[P]], %{{.*}}, %{{.*}} {alignment = 4 : i32} : (!llvm.vec, vector<[3]xi1>, vector<[3]xf32>) -> vector<[3]xf32> +// CHECK: return %[[G]] : vector<[3]xf32> + +// ----- + +func.func @gather_global_memory(%arg0: memref, %arg1: vector<3xi32>, %arg2: vector<3xi1>, %arg3: vector<3xf32>) -> vector<3xf32> { + %0 = arith.constant 0: index + %1 = vector.gather %arg0[%0][%arg1], %arg2, %arg3 : memref, vector<3xi32>, vector<3xi1>, vector<3xf32> into vector<3xf32> + return %1 : vector<3xf32> +} + +// CHECK-LABEL: func @gather_global_memory +// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%{{.*}}] : (!llvm.ptr<1>, vector<3xi32>) -> !llvm.vec<3 x ptr<1>>, f32 +// CHECK: %[[G:.*]] = llvm.intr.masked.gather %[[P]], %{{.*}}, %{{.*}} {alignment = 4 : i32} : (!llvm.vec<3 x ptr<1>>, vector<3xi1>, vector<3xf32>) -> vector<3xf32> +// CHECK: return %[[G]] : vector<3xf32> + +// ----- + +func.func @gather_global_memory_scalable(%arg0: memref, %arg1: vector<[3]xi32>, %arg2: vector<[3]xi1>, %arg3: vector<[3]xf32>) -> vector<[3]xf32> { + %0 = arith.constant 0: index + %1 = vector.gather %arg0[%0][%arg1], %arg2, %arg3 : memref, vector<[3]xi32>, vector<[3]xi1>, vector<[3]xf32> into vector<[3]xf32> + return %1 : vector<[3]xf32> +} + +// CHECK-LABEL: func @gather_global_memory_scalable +// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%{{.*}}] : (!llvm.ptr<1>, vector<[3]xi32>) -> !llvm.vec>, f32 +// CHECK: %[[G:.*]] = llvm.intr.masked.gather %[[P]], %{{.*}}, %{{.*}} {alignment = 4 : i32} : (!llvm.vec>, vector<[3]xi1>, vector<[3]xf32>) -> vector<[3]xf32> +// CHECK: return %[[G]] : vector<[3]xf32> + +// ----- + + +func.func @gather_index(%arg0: memref, %arg1: vector<3xindex>, %arg2: vector<3xi1>, %arg3: vector<3xindex>) -> vector<3xindex> { + %0 = arith.constant 0: index + %1 = 
vector.gather %arg0[%0][%arg1], %arg2, %arg3 : memref, vector<3xindex>, vector<3xi1>, vector<3xindex> into vector<3xindex> + return %1 : vector<3xindex> +} + +// CHECK-LABEL: func @gather_index +// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%{{.*}}] : (!llvm.ptr, vector<3xi64>) -> !llvm.vec<3 x ptr>, i64 +// CHECK: %[[G:.*]] = llvm.intr.masked.gather %{{.*}}, %{{.*}}, %{{.*}} {alignment = 8 : i32} : (!llvm.vec<3 x ptr>, vector<3xi1>, vector<3xi64>) -> vector<3xi64> +// CHECK: %{{.*}} = builtin.unrealized_conversion_cast %[[G]] : vector<3xi64> to vector<3xindex> + +// ----- + +func.func @gather_index_scalable(%arg0: memref, %arg1: vector<[3]xindex>, %arg2: vector<[3]xi1>, %arg3: vector<[3]xindex>) -> vector<[3]xindex> { + %0 = arith.constant 0: index + %1 = vector.gather %arg0[%0][%arg1], %arg2, %arg3 : memref, vector<[3]xindex>, vector<[3]xi1>, vector<[3]xindex> into vector<[3]xindex> + return %1 : vector<[3]xindex> +} + +// CHECK-LABEL: func @gather_index_scalable +// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%{{.*}}] : (!llvm.ptr, vector<[3]xi64>) -> !llvm.vec, i64 +// CHECK: %[[G:.*]] = llvm.intr.masked.gather %{{.*}}, %{{.*}}, %{{.*}} {alignment = 8 : i32} : (!llvm.vec, vector<[3]xi1>, vector<[3]xi64>) -> vector<[3]xi64> +// CHECK: %{{.*}} = builtin.unrealized_conversion_cast %[[G]] : vector<[3]xi64> to vector<[3]xindex> + +// ----- + +func.func @gather_2d_from_1d(%arg0: memref, %arg1: vector<2x3xi32>, %arg2: vector<2x3xi1>, %arg3: vector<2x3xf32>) -> vector<2x3xf32> { + %0 = arith.constant 0: index + %1 = vector.gather %arg0[%0][%arg1], %arg2, %arg3 : memref, vector<2x3xi32>, vector<2x3xi1>, vector<2x3xf32> into vector<2x3xf32> + return %1 : vector<2x3xf32> +} + +// CHECK-LABEL: func @gather_2d_from_1d +// CHECK: %[[B:.*]] = llvm.getelementptr %{{.*}} : (!llvm.ptr, i64) -> !llvm.ptr, f32 +// CHECK: %[[I0:.*]] = llvm.extractvalue %{{.*}}[0] : !llvm.array<2 x vector<3xi32>> +// CHECK: %[[M0:.*]] = llvm.extractvalue %{{.*}}[0] : !llvm.array<2 x vector<3xi1>> +// CHECK: %[[S0:.*]] = llvm.extractvalue %{{.*}}[0] : !llvm.array<2 x vector<3xf32>> +// CHECK: %[[P0:.*]] = llvm.getelementptr %[[B]][%[[I0]]] : (!llvm.ptr, vector<3xi32>) -> !llvm.vec<3 x ptr>, f32 +// CHECK: %[[G0:.*]] = llvm.intr.masked.gather %[[P0]], %[[M0]], %[[S0]] {alignment = 4 : i32} : (!llvm.vec<3 x ptr>, vector<3xi1>, vector<3xf32>) -> vector<3xf32> +// CHECK: %{{.*}} = llvm.insertvalue %[[G0]], %{{.*}}[0] : !llvm.array<2 x vector<3xf32>> +// CHECK: %[[I1:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.array<2 x vector<3xi32>> +// CHECK: %[[M1:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.array<2 x vector<3xi1>> +// CHECK: %[[S1:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.array<2 x vector<3xf32>> +// CHECK: %[[P1:.*]] = llvm.getelementptr %[[B]][%[[I1]]] : (!llvm.ptr, vector<3xi32>) -> !llvm.vec<3 x ptr>, f32 +// CHECK: %[[G1:.*]] = llvm.intr.masked.gather %[[P1]], %[[M1]], %[[S1]] {alignment = 4 : i32} : (!llvm.vec<3 x ptr>, vector<3xi1>, vector<3xf32>) -> vector<3xf32> +// CHECK: %{{.*}} = llvm.insertvalue %[[G1]], %{{.*}}[1] : !llvm.array<2 x vector<3xf32>> + +// ----- + +func.func @gather_2d_from_1d_scalable(%arg0: memref, %arg1: vector<2x[3]xi32>, %arg2: vector<2x[3]xi1>, %arg3: vector<2x[3]xf32>) -> vector<2x[3]xf32> { + %0 = arith.constant 0: index + %1 = vector.gather %arg0[%0][%arg1], %arg2, %arg3 : memref, vector<2x[3]xi32>, vector<2x[3]xi1>, vector<2x[3]xf32> into vector<2x[3]xf32> + return %1 : vector<2x[3]xf32> +} + +// CHECK-LABEL: func @gather_2d_from_1d_scalable +// CHECK: %[[B:.*]] = llvm.getelementptr 
%{{.*}} : (!llvm.ptr, i64) -> !llvm.ptr, f32 +// CHECK: %[[I0:.*]] = llvm.extractvalue %{{.*}}[0] : !llvm.array<2 x vector<[3]xi32>> +// CHECK: %[[M0:.*]] = llvm.extractvalue %{{.*}}[0] : !llvm.array<2 x vector<[3]xi1>> +// CHECK: %[[S0:.*]] = llvm.extractvalue %{{.*}}[0] : !llvm.array<2 x vector<[3]xf32>> +// CHECK: %[[P0:.*]] = llvm.getelementptr %[[B]][%[[I0]]] : (!llvm.ptr, vector<[3]xi32>) -> !llvm.vec, f32 +// CHECK: %[[G0:.*]] = llvm.intr.masked.gather %[[P0]], %[[M0]], %[[S0]] {alignment = 4 : i32} : (!llvm.vec, vector<[3]xi1>, vector<[3]xf32>) -> vector<[3]xf32> +// CHECK: %{{.*}} = llvm.insertvalue %[[G0]], %{{.*}}[0] : !llvm.array<2 x vector<[3]xf32>> +// CHECK: %[[I1:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.array<2 x vector<[3]xi32>> +// CHECK: %[[M1:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.array<2 x vector<[3]xi1>> +// CHECK: %[[S1:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.array<2 x vector<[3]xf32>> +// CHECK: %[[P1:.*]] = llvm.getelementptr %[[B]][%[[I1]]] : (!llvm.ptr, vector<[3]xi32>) -> !llvm.vec, f32 +// CHECK: %[[G1:.*]] = llvm.intr.masked.gather %[[P1]], %[[M1]], %[[S1]] {alignment = 4 : i32} : (!llvm.vec, vector<[3]xi1>, vector<[3]xf32>) -> vector<[3]xf32> +// CHECK: %{{.*}} = llvm.insertvalue %[[G1]], %{{.*}}[1] : !llvm.array<2 x vector<[3]xf32>> + +// ----- + + +func.func @gather_1d_from_2d(%arg0: memref<4x4xf32>, %arg1: vector<4xi32>, %arg2: vector<4xi1>, %arg3: vector<4xf32>) -> vector<4xf32> { + %0 = arith.constant 3 : index + %1 = vector.gather %arg0[%0, %0][%arg1], %arg2, %arg3 : memref<4x4xf32>, vector<4xi32>, vector<4xi1>, vector<4xf32> into vector<4xf32> + return %1 : vector<4xf32> +} + +// CHECK-LABEL: func @gather_1d_from_2d +// CHECK: %[[B:.*]] = llvm.getelementptr %{{.*}}[%{{.*}}] : (!llvm.ptr, i64) -> !llvm.ptr, f32 +// CHECK: %[[P:.*]] = llvm.getelementptr %[[B]][%{{.*}}] : (!llvm.ptr, vector<4xi32>) -> !llvm.vec<4 x ptr>, f32 +// CHECK: %[[G:.*]] = llvm.intr.masked.gather %[[P]], %{{.*}}, %{{.*}} {alignment = 4 : i32} : (!llvm.vec<4 x ptr>, vector<4xi1>, vector<4xf32>) -> vector<4xf32> +// CHECK: return %[[G]] : vector<4xf32> + +// ----- + +func.func @gather_1d_from_2d_scalable(%arg0: memref<4x?xf32>, %arg1: vector<[4]xi32>, %arg2: vector<[4]xi1>, %arg3: vector<[4]xf32>) -> vector<[4]xf32> { + %0 = arith.constant 3 : index + %1 = vector.gather %arg0[%0, %0][%arg1], %arg2, %arg3 : memref<4x?xf32>, vector<[4]xi32>, vector<[4]xi1>, vector<[4]xf32> into vector<[4]xf32> + return %1 : vector<[4]xf32> +} + +// CHECK-LABEL: func @gather_1d_from_2d_scalable +// CHECK: %[[B:.*]] = llvm.getelementptr %{{.*}}[%{{.*}}] : (!llvm.ptr, i64) -> !llvm.ptr, f32 +// CHECK: %[[P:.*]] = llvm.getelementptr %[[B]][%{{.*}}] : (!llvm.ptr, vector<[4]xi32>) -> !llvm.vec, f32 +// CHECK: %[[G:.*]] = llvm.intr.masked.gather %[[P]], %{{.*}}, %{{.*}} {alignment = 4 : i32} : (!llvm.vec, vector<[4]xi1>, vector<[4]xf32>) -> vector<[4]xf32> +// CHECK: return %[[G]] : vector<[4]xf32> + +// ----- + +//===----------------------------------------------------------------------===// +// vector.scatter +//===----------------------------------------------------------------------===// + +func.func @scatter(%arg0: memref, %arg1: vector<3xi32>, %arg2: vector<3xi1>, %arg3: vector<3xf32>) { + %0 = arith.constant 0: index + vector.scatter %arg0[%0][%arg1], %arg2, %arg3 : memref, vector<3xi32>, vector<3xi1>, vector<3xf32> + return +} + +// CHECK-LABEL: func @scatter +// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%{{.*}}] : (!llvm.ptr, vector<3xi32>) -> !llvm.vec<3 x ptr>, f32 +// CHECK: 
llvm.intr.masked.scatter %{{.*}}, %[[P]], %{{.*}} {alignment = 4 : i32} : vector<3xf32>, vector<3xi1> into !llvm.vec<3 x ptr> + +// ----- + +func.func @scatter_scalable(%arg0: memref, %arg1: vector<[3]xi32>, %arg2: vector<[3]xi1>, %arg3: vector<[3]xf32>) { + %0 = arith.constant 0: index + vector.scatter %arg0[%0][%arg1], %arg2, %arg3 : memref, vector<[3]xi32>, vector<[3]xi1>, vector<[3]xf32> + return +} + +// CHECK-LABEL: func @scatter_scalable +// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%{{.*}}] : (!llvm.ptr, vector<[3]xi32>) -> !llvm.vec, f32 +// CHECK: llvm.intr.masked.scatter %{{.*}}, %[[P]], %{{.*}} {alignment = 4 : i32} : vector<[3]xf32>, vector<[3]xi1> into !llvm.vec + +// ----- + +func.func @scatter_index(%arg0: memref, %arg1: vector<3xindex>, %arg2: vector<3xi1>, %arg3: vector<3xindex>) { + %0 = arith.constant 0: index + vector.scatter %arg0[%0][%arg1], %arg2, %arg3 : memref, vector<3xindex>, vector<3xi1>, vector<3xindex> + return +} + +// CHECK-LABEL: func @scatter_index +// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%{{.*}}] : (!llvm.ptr, vector<3xi64>) -> !llvm.vec<3 x ptr>, i64 +// CHECK: llvm.intr.masked.scatter %{{.*}}, %[[P]], %{{.*}} {alignment = 8 : i32} : vector<3xi64>, vector<3xi1> into !llvm.vec<3 x ptr> + +// ----- + +func.func @scatter_index_scalable(%arg0: memref, %arg1: vector<[3]xindex>, %arg2: vector<[3]xi1>, %arg3: vector<[3]xindex>) { + %0 = arith.constant 0: index + vector.scatter %arg0[%0][%arg1], %arg2, %arg3 : memref, vector<[3]xindex>, vector<[3]xi1>, vector<[3]xindex> + return +} + +// CHECK-LABEL: func @scatter_index_scalable +// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%{{.*}}] : (!llvm.ptr, vector<[3]xi64>) -> !llvm.vec, i64 +// CHECK: llvm.intr.masked.scatter %{{.*}}, %[[P]], %{{.*}} {alignment = 8 : i32} : vector<[3]xi64>, vector<[3]xi1> into !llvm.vec + +// ----- + +func.func @scatter_1d_into_2d(%arg0: memref<4x4xf32>, %arg1: vector<4xi32>, %arg2: vector<4xi1>, %arg3: vector<4xf32>) { + %0 = arith.constant 3 : index + vector.scatter %arg0[%0, %0][%arg1], %arg2, %arg3 : memref<4x4xf32>, vector<4xi32>, vector<4xi1>, vector<4xf32> + return +} + +// CHECK-LABEL: func @scatter_1d_into_2d +// CHECK: %[[B:.*]] = llvm.getelementptr %{{.*}}[%{{.*}}] : (!llvm.ptr, i64) -> !llvm.ptr, f32 +// CHECK: %[[P:.*]] = llvm.getelementptr %[[B]][%{{.*}}] : (!llvm.ptr, vector<4xi32>) -> !llvm.vec<4 x ptr>, f32 +// CHECK: llvm.intr.masked.scatter %{{.*}}, %[[P]], %{{.*}} {alignment = 4 : i32} : vector<4xf32>, vector<4xi1> into !llvm.vec<4 x ptr> + +// ----- + +func.func @scatter_1d_into_2d_scalable(%arg0: memref<4x?xf32>, %arg1: vector<[4]xi32>, %arg2: vector<[4]xi1>, %arg3: vector<[4]xf32>) { + %0 = arith.constant 3 : index + vector.scatter %arg0[%0, %0][%arg1], %arg2, %arg3 : memref<4x?xf32>, vector<[4]xi32>, vector<[4]xi1>, vector<[4]xf32> + return +} + +// CHECK-LABEL: func @scatter_1d_into_2d_scalable +// CHECK: %[[B:.*]] = llvm.getelementptr %{{.*}}[%{{.*}}] : (!llvm.ptr, i64) -> !llvm.ptr, f32 +// CHECK: %[[P:.*]] = llvm.getelementptr %[[B]][%{{.*}}] : (!llvm.ptr, vector<[4]xi32>) -> !llvm.vec, f32 +// CHECK: llvm.intr.masked.scatter %{{.*}}, %[[P]], %{{.*}} {alignment = 4 : i32} : vector<[4]xf32>, vector<[4]xi1> into !llvm.vec + +// ----- + +//===----------------------------------------------------------------------===// +// vector.expandload +//===----------------------------------------------------------------------===// + +func.func @expand_load_op(%arg0: memref, %arg1: vector<11xi1>, %arg2: vector<11xf32>) -> vector<11xf32> { + %c0 = 
arith.constant 0: index + %0 = vector.expandload %arg0[%c0], %arg1, %arg2 : memref, vector<11xi1>, vector<11xf32> into vector<11xf32> + return %0 : vector<11xf32> +} + +// CHECK-LABEL: func @expand_load_op +// CHECK: %[[CO:.*]] = arith.constant 0 : index +// CHECK: %[[C:.*]] = builtin.unrealized_conversion_cast %[[CO]] : index to i64 +// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%[[C]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32 +// CHECK: %[[E:.*]] = "llvm.intr.masked.expandload"(%[[P]], %{{.*}}, %{{.*}}) : (!llvm.ptr, vector<11xi1>, vector<11xf32>) -> vector<11xf32> +// CHECK: return %[[E]] : vector<11xf32> + +// ----- + +func.func @expand_load_op_index(%arg0: memref, %arg1: vector<11xi1>, %arg2: vector<11xindex>) -> vector<11xindex> { + %c0 = arith.constant 0: index + %0 = vector.expandload %arg0[%c0], %arg1, %arg2 : memref, vector<11xi1>, vector<11xindex> into vector<11xindex> + return %0 : vector<11xindex> +} +// CHECK-LABEL: func @expand_load_op_index +// CHECK: %{{.*}} = "llvm.intr.masked.expandload"(%{{.*}}, %{{.*}}, %{{.*}}) : (!llvm.ptr, vector<11xi1>, vector<11xi64>) -> vector<11xi64> + +// ----- + +//===----------------------------------------------------------------------===// +// vector.compressstore +//===----------------------------------------------------------------------===// + +func.func @compress_store_op(%arg0: memref, %arg1: vector<11xi1>, %arg2: vector<11xf32>) { + %c0 = arith.constant 0: index + vector.compressstore %arg0[%c0], %arg1, %arg2 : memref, vector<11xi1>, vector<11xf32> + return +} + +// CHECK-LABEL: func @compress_store_op +// CHECK: %[[CO:.*]] = arith.constant 0 : index +// CHECK: %[[C:.*]] = builtin.unrealized_conversion_cast %[[CO]] : index to i64 +// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%[[C]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32 +// CHECK: "llvm.intr.masked.compressstore"(%{{.*}}, %[[P]], %{{.*}}) : (vector<11xf32>, !llvm.ptr, vector<11xi1>) -> () + +// ----- + +func.func @compress_store_op_index(%arg0: memref, %arg1: vector<11xi1>, %arg2: vector<11xindex>) { + %c0 = arith.constant 0: index + vector.compressstore %arg0[%c0], %arg1, %arg2 : memref, vector<11xi1>, vector<11xindex> + return +} +// CHECK-LABEL: func @compress_store_op_index +// CHECK: "llvm.intr.masked.compressstore"(%{{.*}}, %{{.*}}, %{{.*}}) : (vector<11xi64>, !llvm.ptr, vector<11xi1>) -> () + +// ----- + +//===----------------------------------------------------------------------===// +// vector.splat +//===----------------------------------------------------------------------===// + +// CHECK-LABEL: @splat_0d +// CHECK-SAME: %[[ELT:.*]]: f32 +func.func @splat_0d(%elt: f32) -> vector { + %v = vector.splat %elt : vector + return %v : vector +} +// CHECK-NEXT: %[[UNDEF:[0-9]+]] = llvm.mlir.poison : vector<1xf32> +// CHECK-NEXT: %[[ZERO:[0-9]+]] = llvm.mlir.constant(0 : i32) : i32 +// CHECK-NEXT: %[[V:[0-9]+]] = llvm.insertelement %[[ELT]], %[[UNDEF]][%[[ZERO]] : i32] : vector<1xf32> +// CHECK-NEXT: %[[VCAST:[0-9]+]] = builtin.unrealized_conversion_cast %[[V]] : vector<1xf32> to vector +// CHECK-NEXT: return %[[VCAST]] : vector + +// ----- + +// CHECK-LABEL: @splat +// CHECK-SAME: %[[VEC:[0-9a-zA-Z]+]]: vector<4xf32> +// CHECK-SAME: %[[ELT:[0-9a-zA-Z]+]]: f32 +func.func @splat(%vec: vector<4xf32>, %elt: f32) -> vector<4xf32> { + %vb = vector.splat %elt : vector<4xf32> + %r = arith.mulf %vec, %vb : vector<4xf32> + return %r : vector<4xf32> +} +// CHECK-NEXT: %[[UNDEF:[0-9]+]] = llvm.mlir.poison : vector<4xf32> +// CHECK-NEXT: %[[ZERO:[0-9]+]] = llvm.mlir.constant(0 : i32) : i32 
+// CHECK-NEXT: %[[V:[0-9]+]] = llvm.insertelement %[[ELT]], %[[UNDEF]][%[[ZERO]] : i32] : vector<4xf32> +// CHECK-NEXT: %[[SPLAT:[0-9]+]] = llvm.shufflevector %[[V]], %[[UNDEF]] [0, 0, 0, 0] +// CHECK-NEXT: %[[SCALE:[0-9]+]] = arith.mulf %[[VEC]], %[[SPLAT]] : vector<4xf32> +// CHECK-NEXT: return %[[SCALE]] : vector<4xf32> + +// ----- + +// CHECK-LABEL: @splat_scalable +// CHECK-SAME: %[[VEC:[0-9a-zA-Z]+]]: vector<[4]xf32> +// CHECK-SAME: %[[ELT:[0-9a-zA-Z]+]]: f32 +func.func @splat_scalable(%vec: vector<[4]xf32>, %elt: f32) -> vector<[4]xf32> { + %vb = vector.splat %elt : vector<[4]xf32> + %r = arith.mulf %vec, %vb : vector<[4]xf32> + return %r : vector<[4]xf32> +} +// CHECK-NEXT: %[[UNDEF:[0-9]+]] = llvm.mlir.poison : vector<[4]xf32> +// CHECK-NEXT: %[[ZERO:[0-9]+]] = llvm.mlir.constant(0 : i32) : i32 +// CHECK-NEXT: %[[V:[0-9]+]] = llvm.insertelement %[[ELT]], %[[UNDEF]][%[[ZERO]] : i32] : vector<[4]xf32> +// CHECK-NEXT: %[[SPLAT:[0-9]+]] = llvm.shufflevector %[[V]], %[[UNDEF]] [0, 0, 0, 0] +// CHECK-NEXT: %[[SCALE:[0-9]+]] = arith.mulf %[[VEC]], %[[SPLAT]] : vector<[4]xf32> +// CHECK-NEXT: return %[[SCALE]] : vector<[4]xf32> + +// ----- + +//===----------------------------------------------------------------------===// +// vector.scalable_insert +//===----------------------------------------------------------------------===// + +// CHECK-LABEL: @scalable_insert +// CHECK-SAME: %[[SUB:.*]]: vector<4xf32>, %[[SV:.*]]: vector<[4]xf32> +func.func @scalable_insert(%sub: vector<4xf32>, %dsv: vector<[4]xf32>) -> vector<[4]xf32> { + // CHECK-NEXT: %[[TMP:.*]] = llvm.intr.vector.insert %[[SUB]], %[[SV]][0] : vector<4xf32> into vector<[4]xf32> + %0 = vector.scalable.insert %sub, %dsv[0] : vector<4xf32> into vector<[4]xf32> + // CHECK-NEXT: llvm.intr.vector.insert %[[SUB]], %[[TMP]][4] : vector<4xf32> into vector<[4]xf32> + %1 = vector.scalable.insert %sub, %0[4] : vector<4xf32> into vector<[4]xf32> + return %1 : vector<[4]xf32> +} + +// ----- + +//===----------------------------------------------------------------------===// +// vector.scalable_extract +//===----------------------------------------------------------------------===// + +// CHECK-LABEL: @scalable_extract +// CHECK-SAME: %[[VEC:.*]]: vector<[4]xf32> +func.func @scalable_extract(%vec: vector<[4]xf32>) -> vector<8xf32> { + // CHECK-NEXT: %{{.*}} = llvm.intr.vector.extract %[[VEC]][0] : vector<8xf32> from vector<[4]xf32> + %0 = vector.scalable.extract %vec[0] : vector<8xf32> from vector<[4]xf32> + return %0 : vector<8xf32> +} + +// ----- + +//===----------------------------------------------------------------------===// +// vector.interleave +//===----------------------------------------------------------------------===// + +// CHECK-LABEL: @interleave_0d +// CHECK-SAME: %[[LHS:.*]]: vector, %[[RHS:.*]]: vector) +func.func @interleave_0d(%a: vector, %b: vector) -> vector<2xi8> { + // CHECK-DAG: %[[LHS_RANK1:.*]] = builtin.unrealized_conversion_cast %[[LHS]] : vector to vector<1xi8> + // CHECK-DAG: %[[RHS_RANK1:.*]] = builtin.unrealized_conversion_cast %[[RHS]] : vector to vector<1xi8> + // CHECK: %[[ZIP:.*]] = llvm.shufflevector %[[LHS_RANK1]], %[[RHS_RANK1]] [0, 1] : vector<1xi8> + // CHECK: return %[[ZIP]] + %0 = vector.interleave %a, %b : vector -> vector<2xi8> + return %0 : vector<2xi8> +} + +// ----- + +// CHECK-LABEL: @interleave_1d +// CHECK-SAME: %[[LHS:.*]]: vector<8xf32>, %[[RHS:.*]]: vector<8xf32>) +func.func @interleave_1d(%a: vector<8xf32>, %b: vector<8xf32>) -> vector<16xf32> { + // CHECK: %[[ZIP:.*]] = 
llvm.shufflevector %[[LHS]], %[[RHS]] [0, 8, 1, 9, 2, 10, 3, 11, 4, 12, 5, 13, 6, 14, 7, 15] : vector<8xf32> + // CHECK: return %[[ZIP]] + %0 = vector.interleave %a, %b : vector<8xf32> -> vector<16xf32> + return %0 : vector<16xf32> +} + +// ----- + +// CHECK-LABEL: @interleave_1d_scalable +// CHECK-SAME: %[[LHS:.*]]: vector<[4]xi32>, %[[RHS:.*]]: vector<[4]xi32>) +func.func @interleave_1d_scalable(%a: vector<[4]xi32>, %b: vector<[4]xi32>) -> vector<[8]xi32> { + // CHECK: %[[ZIP:.*]] = "llvm.intr.vector.interleave2"(%[[LHS]], %[[RHS]]) : (vector<[4]xi32>, vector<[4]xi32>) -> vector<[8]xi32> + // CHECK: return %[[ZIP]] + %0 = vector.interleave %a, %b : vector<[4]xi32> -> vector<[8]xi32> + return %0 : vector<[8]xi32> +} + +// ----- + +//===----------------------------------------------------------------------===// +// vector.deinterleave +//===----------------------------------------------------------------------===// + +// CHECK-LABEL: @deinterleave_1d +// CHECK-SAME: (%[[ARG:.*]]: vector<4xi32>) -> (vector<2xi32>, vector<2xi32>) +func.func @deinterleave_1d(%arg: vector<4xi32>) -> (vector<2xi32>, vector<2xi32>) { + // CHECK: %[[POISON:.*]] = llvm.mlir.poison : vector<4xi32> + // CHECK: llvm.shufflevector %[[ARG]], %[[POISON]] [0, 2] : vector<4xi32> + // CHECK: llvm.shufflevector %[[ARG]], %[[POISON]] [1, 3] : vector<4xi32> + %0, %1 = vector.deinterleave %arg : vector<4xi32> -> vector<2xi32> + return %0, %1 : vector<2xi32>, vector<2xi32> +} + +// ----- + +// CHECK-LABEL: @deinterleave_1d_scalable +// CHECK-SAME: %[[ARG:.*]]: vector<[4]xi32>) -> (vector<[2]xi32>, vector<[2]xi32>) +func.func @deinterleave_1d_scalable(%arg: vector<[4]xi32>) -> (vector<[2]xi32>, vector<[2]xi32>) { + // CHECK: %[[RES:.*]] = "llvm.intr.vector.deinterleave2"(%[[ARG]]) : (vector<[4]xi32>) -> !llvm.struct<(vector<[2]xi32>, vector<[2]xi32>)> + // CHECK: llvm.extractvalue %[[RES]][0] : !llvm.struct<(vector<[2]xi32>, vector<[2]xi32>)> + // CHECK: llvm.extractvalue %[[RES]][1] : !llvm.struct<(vector<[2]xi32>, vector<[2]xi32>)> + %0, %1 = vector.deinterleave %arg : vector<[4]xi32> -> vector<[2]xi32> + return %0, %1 : vector<[2]xi32>, vector<[2]xi32> +} + +// ----- + +//===----------------------------------------------------------------------===// +// vector.from_elements +//===----------------------------------------------------------------------===// + +// CHECK-LABEL: func.func @from_elements_1d( +// CHECK-SAME: %[[ARG_0:.*]]: f32, %[[ARG_1:.*]]: f32) +// CHECK: %[[UNDEF:.*]] = llvm.mlir.poison : vector<3xf32> +// CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : i64) : i64 +// CHECK: %[[INSERT0:.*]] = llvm.insertelement %[[ARG_0]], %[[UNDEF]][%[[C0]] : i64] : vector<3xf32> +// CHECK: %[[C1:.*]] = llvm.mlir.constant(1 : i64) : i64 +// CHECK: %[[INSERT1:.*]] = llvm.insertelement %[[ARG_1]], %[[INSERT0]][%[[C1]] : i64] : vector<3xf32> +// CHECK: %[[C2:.*]] = llvm.mlir.constant(2 : i64) : i64 +// CHECK: %[[INSERT2:.*]] = llvm.insertelement %[[ARG_0]], %[[INSERT1]][%[[C2]] : i64] : vector<3xf32> +// CHECK: return %[[INSERT2]] +func.func @from_elements_1d(%arg0: f32, %arg1: f32) -> vector<3xf32> { + %0 = vector.from_elements %arg0, %arg1, %arg0 : vector<3xf32> + return %0 : vector<3xf32> +} + +// ----- + +// CHECK-LABEL: func.func @from_elements_0d( +// CHECK-SAME: %[[ARG_0:.*]]: f32) +// CHECK: %[[UNDEF:.*]] = llvm.mlir.poison : vector<1xf32> +// CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : i64) : i64 +// CHECK: %[[INSERT0:.*]] = llvm.insertelement %[[ARG_0]], %[[UNDEF]][%[[C0]] : i64] : vector<1xf32> +// CHECK: %[[CAST:.*]] = 
builtin.unrealized_conversion_cast %[[INSERT0]] : vector<1xf32> to vector<f32> +// CHECK: return %[[CAST]] +func.func @from_elements_0d(%arg0: f32) -> vector<f32> { + %0 = vector.from_elements %arg0 : vector<f32> + return %0 : vector<f32> +} + +// ----- + +//===----------------------------------------------------------------------===// +// vector.step +//===----------------------------------------------------------------------===// + +// CHECK-LABEL: @step_scalable +// CHECK: %[[STEPVECTOR:.*]] = llvm.intr.stepvector : vector<[4]xi64> +// CHECK: %[[CAST:.*]] = builtin.unrealized_conversion_cast %[[STEPVECTOR]] : vector<[4]xi64> to vector<[4]xindex> +// CHECK: return %[[CAST]] : vector<[4]xindex> +func.func @step_scalable() -> vector<[4]xindex> { + %0 = vector.step : vector<[4]xindex> + return %0 : vector<[4]xindex> +} diff --git a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir index a5ca2b3764ffd..36b37a137ac1e 100644 --- a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir +++ b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir @@ -1,92 +1,21 @@ // RUN: mlir-opt %s -convert-vector-to-llvm -split-input-file | FileCheck %s +//===========================================================================// +// Complex tests for Vector-to-LLVM conversion +// +// These examples, in order to convert to LLVM, require +// * `populateVectorToLLVMConversionPatterns`. +// as well as various other patterns/conversion that are part of +// `ConvertVectorToLLVMPass`. +// +// Please, in the first instance, always try adding tests in +// vector-to-llvm-interface.mlir instead. +//===========================================================================// + //===----------------------------------------------------------------------===// +// vector.bitcast +//===----------------------------------------------------------------------===// -func.func @bitcast_f32_to_i32_vector_0d(%arg0: vector<f32>) -> vector<i32> { - %0 = vector.bitcast %arg0 : vector<f32> to vector<i32> - return %0 : vector<i32> -} - -// CHECK-LABEL: @bitcast_f32_to_i32_vector_0d -// CHECK-SAME: %[[ARG_0:.*]]: vector<f32> -// CHECK: %[[VEC_F32_1D:.*]] = builtin.unrealized_conversion_cast %[[ARG_0]] : vector<f32> to vector<1xf32> -// CHECK: %[[VEC_I32_1D:.*]] = llvm.bitcast %[[VEC_F32_1D]] : vector<1xf32> to vector<1xi32> -// CHECK: %[[VEC_I32_0D:.*]] = builtin.unrealized_conversion_cast %[[VEC_I32_1D]] : vector<1xi32> to vector<i32> -// CHECK: return %[[VEC_I32_0D]] : vector<i32> - -// ----- - -func.func @bitcast_f32_to_i32_vector(%arg0: vector<16xf32>) -> vector<16xi32> { - %0 = vector.bitcast %arg0 : vector<16xf32> to vector<16xi32> - return %0 : vector<16xi32> -} - - -// CHECK-LABEL: @bitcast_f32_to_i32_vector -// CHECK-SAME: %[[ARG_0:.*]]: vector<16xf32> -// CHECK: llvm.bitcast %[[ARG_0]] : vector<16xf32> to vector<16xi32> - -// ----- - -func.func @bitcast_f32_to_i32_vector_scalable(%arg0: vector<[16]xf32>) -> vector<[16]xi32> { - %0 = vector.bitcast %arg0 : vector<[16]xf32> to vector<[16]xi32> - return %0 : vector<[16]xi32> -} - -// CHECK-LABEL: @bitcast_f32_to_i32_vector_scalable -// CHECK-SAME: %[[ARG_0:.*]]: vector<[16]xf32> -// CHECK: llvm.bitcast %[[ARG_0]] : vector<[16]xf32> to vector<[16]xi32> - -// ----- - -func.func @bitcast_i8_to_f32_vector(%arg0: vector<64xi8>) -> vector<16xf32> { - %0 = vector.bitcast %arg0 : vector<64xi8> to vector<16xf32> - return %0 : vector<16xf32> -} - -// CHECK-LABEL: @bitcast_i8_to_f32_vector -// CHECK-SAME: %[[ARG_0:.*]]: vector<64xi8> -// CHECK: llvm.bitcast %[[ARG_0]] : vector<64xi8> to
vector<16xf32> - -// ----- - -func.func @bitcast_i8_to_f32_vector_scalable(%arg0: vector<[64]xi8>) -> vector<[16]xf32> { - %0 = vector.bitcast %arg0 : vector<[64]xi8> to vector<[16]xf32> - return %0 : vector<[16]xf32> -} - -// CHECK-LABEL: @bitcast_i8_to_f32_vector_scalable -// CHECK-SAME: %[[ARG_0:.*]]: vector<[64]xi8> -// CHECK: llvm.bitcast %[[ARG_0]] : vector<[64]xi8> to vector<[16]xf32> - -// ----- - -func.func @bitcast_index_to_i8_vector(%arg0: vector<16xindex>) -> vector<128xi8> { - %0 = vector.bitcast %arg0 : vector<16xindex> to vector<128xi8> - return %0 : vector<128xi8> -} - -// CHECK-LABEL: @bitcast_index_to_i8_vector -// CHECK-SAME: %[[ARG_0:.*]]: vector<16xindex> -// CHECK: %[[T0:.*]] = builtin.unrealized_conversion_cast %[[ARG_0]] : vector<16xindex> to vector<16xi64> -// CHECK: llvm.bitcast %[[T0]] : vector<16xi64> to vector<128xi8> - -// ----- - -func.func @bitcast_index_to_i8_vector_scalable(%arg0: vector<[16]xindex>) -> vector<[128]xi8> { - %0 = vector.bitcast %arg0 : vector<[16]xindex> to vector<[128]xi8> - return %0 : vector<[128]xi8> -} - -// CHECK-LABEL: @bitcast_index_to_i8_vector_scalable -// CHECK-SAME: %[[ARG_0:.*]]: vector<[16]xindex> -// CHECK: %[[T0:.*]] = builtin.unrealized_conversion_cast %[[ARG_0]] : vector<[16]xindex> to vector<[16]xi64> -// CHECK: llvm.bitcast %[[T0]] : vector<[16]xi64> to vector<[128]xi8> - -// ----- - // CHECK-LABEL: func.func @bitcast_2d( // CHECK-SAME: %[[ARG_0:.*]]: vector<2x4xi32>) -> vector<2x2xi64> { // CHECK: %[[T0:.*]] = builtin.unrealized_conversion_cast %[[ARG_0]] : vector<2x4xi32> to !llvm.array<2 x vector<4xi32>> @@ -135,16 +64,6 @@ func.func @broadcast_vec0d_from_f32(%arg0: f32) -> vector { // ----- -func.func @broadcast_vec0d_from_vec0d(%arg0: vector) -> vector { - %0 = vector.broadcast %arg0 : vector to vector - return %0 : vector -} -// CHECK-LABEL: @broadcast_vec0d_from_vec0d( -// CHECK-SAME: %[[A:.*]]: vector) -// CHECK: return %[[A]] : vector - -// ----- - func.func @broadcast_vec1d_from_f32(%arg0: f32) -> vector<2xf32> { %0 = vector.broadcast %arg0 : f32 to vector<2xf32> return %0 : vector<2xf32> @@ -259,26 +178,6 @@ func.func @broadcast_vec3d_from_scalar_scalable(%arg0: f32) -> vector<2x3x[4]xf3 // ----- -func.func @broadcast_vec1d_from_vec1d(%arg0: vector<2xf32>) -> vector<2xf32> { - %0 = vector.broadcast %arg0 : vector<2xf32> to vector<2xf32> - return %0 : vector<2xf32> -} -// CHECK-LABEL: @broadcast_vec1d_from_vec1d( -// CHECK-SAME: %[[A:.*]]: vector<2xf32>) -// CHECK: return %[[A]] : vector<2xf32> - -// ----- - -func.func @broadcast_vec1d_from_vec1d_scalable(%arg0: vector<[2]xf32>) -> vector<[2]xf32> { - %0 = vector.broadcast %arg0 : vector<[2]xf32> to vector<[2]xf32> - return %0 : vector<[2]xf32> -} -// CHECK-LABEL: @broadcast_vec1d_from_vec1d_scalable( -// CHECK-SAME: %[[A:.*]]: vector<[2]xf32>) -// CHECK: return %[[A]] : vector<[2]xf32> - -// ----- - func.func @broadcast_vec2d_from_vec0d(%arg0: vector) -> vector<3x2xf32> { %0 = vector.broadcast %arg0 : vector to vector<3x2xf32> return %0 : vector<3x2xf32> @@ -286,7 +185,7 @@ func.func @broadcast_vec2d_from_vec0d(%arg0: vector) -> vector<3x2xf32> { // CHECK-LABEL: @broadcast_vec2d_from_vec0d( // CHECK-SAME: %[[A:.*]]: vector) // CHECK: %[[T0:.*]] = builtin.unrealized_conversion_cast %[[A]] : vector to vector<1xf32> -// CHECK: %[[T1:.*]] = arith.constant dense<0.000000e+00> : vector<3x2xf32> +// CHECK: %[[T1:.*]] = ub.poison : vector<3x2xf32> // CHECK: %[[T2:.*]] = builtin.unrealized_conversion_cast %[[T1]] : vector<3x2xf32> to !llvm.array<3 x vector<2xf32>> // 
CHECK: %[[T4:.*]] = llvm.mlir.constant(0 : index) : i64 // CHECK: %[[T5:.*]] = llvm.extractelement %[[T0]][%[[T4]] : i64] : vector<1xf32> @@ -306,7 +205,7 @@ func.func @broadcast_vec2d_from_vec1d(%arg0: vector<2xf32>) -> vector<3x2xf32> { } // CHECK-LABEL: @broadcast_vec2d_from_vec1d( // CHECK-SAME: %[[A:.*]]: vector<2xf32>) -// CHECK: %[[T0:.*]] = arith.constant dense<0.000000e+00> : vector<3x2xf32> +// CHECK: %[[T0:.*]] = ub.poison : vector<3x2xf32> // CHECK: %[[T1:.*]] = builtin.unrealized_conversion_cast %[[T0]] : vector<3x2xf32> to !llvm.array<3 x vector<2xf32>> // CHECK: %[[T2:.*]] = llvm.insertvalue %[[A]], %[[T1]][0] : !llvm.array<3 x vector<2xf32>> // CHECK: %[[T3:.*]] = llvm.insertvalue %[[A]], %[[T2]][1] : !llvm.array<3 x vector<2xf32>> @@ -322,7 +221,7 @@ func.func @broadcast_vec2d_from_vec1d_scalable(%arg0: vector<[2]xf32>) -> vector } // CHECK-LABEL: @broadcast_vec2d_from_vec1d_scalable( // CHECK-SAME: %[[A:.*]]: vector<[2]xf32>) -// CHECK: %[[T0:.*]] = arith.constant dense<0.000000e+00> : vector<3x[2]xf32> +// CHECK: %[[T0:.*]] = ub.poison : vector<3x[2]xf32> // CHECK: %[[T1:.*]] = builtin.unrealized_conversion_cast %[[T0]] : vector<3x[2]xf32> to !llvm.array<3 x vector<[2]xf32>> // CHECK: %[[T2:.*]] = llvm.insertvalue %[[A]], %[[T1]][0] : !llvm.array<3 x vector<[2]xf32>> // CHECK: %[[T3:.*]] = llvm.insertvalue %[[A]], %[[T2]][1] : !llvm.array<3 x vector<[2]xf32>> @@ -339,7 +238,7 @@ func.func @broadcast_vec2d_from_index_vec1d(%arg0: vector<2xindex>) -> vector<3x // CHECK-LABEL: @broadcast_vec2d_from_index_vec1d( // CHECK-SAME: %[[A:.*]]: vector<2xindex>) // CHECK: %[[T1:.*]] = builtin.unrealized_conversion_cast %[[A]] : vector<2xindex> to vector<2xi64> -// CHECK: %[[T0:.*]] = arith.constant dense<0> : vector<3x2xindex> +// CHECK: %[[T0:.*]] = ub.poison : vector<3x2xindex> // CHECK: %[[T2:.*]] = builtin.unrealized_conversion_cast %[[T0]] : vector<3x2xindex> to !llvm.array<3 x vector<2xi64>> // CHECK: %[[T3:.*]] = llvm.insertvalue %[[T1]], %[[T2]][0] : !llvm.array<3 x vector<2xi64>> @@ -355,7 +254,7 @@ func.func @broadcast_vec2d_from_index_vec1d_scalable(%arg0: vector<[2]xindex>) - // CHECK-LABEL: @broadcast_vec2d_from_index_vec1d_scalable( // CHECK-SAME: %[[A:.*]]: vector<[2]xindex>) // CHECK: %[[T1:.*]] = builtin.unrealized_conversion_cast %[[A]] : vector<[2]xindex> to vector<[2]xi64> -// CHECK: %[[T0:.*]] = arith.constant dense<0> : vector<3x[2]xindex> +// CHECK: %[[T0:.*]] = ub.poison : vector<3x[2]xindex> // CHECK: %[[T2:.*]] = builtin.unrealized_conversion_cast %[[T0]] : vector<3x[2]xindex> to !llvm.array<3 x vector<[2]xi64>> // CHECK: %[[T3:.*]] = llvm.insertvalue %[[T1]], %[[T2]][0] : !llvm.array<3 x vector<[2]xi64>> @@ -370,9 +269,9 @@ func.func @broadcast_vec3d_from_vec1d(%arg0: vector<2xf32>) -> vector<4x3x2xf32> } // CHECK-LABEL: @broadcast_vec3d_from_vec1d( // CHECK-SAME: %[[A:.*]]: vector<2xf32>) -// CHECK-DAG: %[[T0:.*]] = arith.constant dense<0.000000e+00> : vector<3x2xf32> +// CHECK-DAG: %[[T0:.*]] = ub.poison : vector<3x2xf32> // CHECK-DAG: %[[T2:.*]] = builtin.unrealized_conversion_cast %[[T0]] : vector<3x2xf32> to !llvm.array<3 x vector<2xf32>> -// CHECK-DAG: %[[T1:.*]] = arith.constant dense<0.000000e+00> : vector<4x3x2xf32> +// CHECK-DAG: %[[T1:.*]] = ub.poison : vector<4x3x2xf32> // CHECK-DAG: %[[T6:.*]] = builtin.unrealized_conversion_cast %[[T1]] : vector<4x3x2xf32> to !llvm.array<4 x array<3 x vector<2xf32>>> // CHECK: %[[T3:.*]] = llvm.insertvalue %[[A]], %[[T2]][0] : !llvm.array<3 x vector<2xf32>> @@ -395,9 +294,9 @@ func.func 
@broadcast_vec3d_from_vec1d_scalable(%arg0: vector<[2]xf32>) -> vector } // CHECK-LABEL: @broadcast_vec3d_from_vec1d_scalable( // CHECK-SAME: %[[A:.*]]: vector<[2]xf32>) -// CHECK-DAG: %[[T0:.*]] = arith.constant dense<0.000000e+00> : vector<3x[2]xf32> +// CHECK-DAG: %[[T0:.*]] = ub.poison : vector<3x[2]xf32> // CHECK-DAG: %[[T2:.*]] = builtin.unrealized_conversion_cast %[[T0]] : vector<3x[2]xf32> to !llvm.array<3 x vector<[2]xf32>> -// CHECK-DAG: %[[T1:.*]] = arith.constant dense<0.000000e+00> : vector<4x3x[2]xf32> +// CHECK-DAG: %[[T1:.*]] = ub.poison : vector<4x3x[2]xf32> // CHECK-DAG: %[[T6:.*]] = builtin.unrealized_conversion_cast %[[T1]] : vector<4x3x[2]xf32> to !llvm.array<4 x array<3 x vector<[2]xf32>>> // CHECK: %[[T3:.*]] = llvm.insertvalue %[[A]], %[[T2]][0] : !llvm.array<3 x vector<[2]xf32>> @@ -421,7 +320,7 @@ func.func @broadcast_vec3d_from_vec2d(%arg0: vector<3x2xf32>) -> vector<4x3x2xf3 // CHECK-LABEL: @broadcast_vec3d_from_vec2d( // CHECK-SAME: %[[A:.*]]: vector<3x2xf32>) // CHECK: %[[T1:.*]] = builtin.unrealized_conversion_cast %[[A]] : vector<3x2xf32> to !llvm.array<3 x vector<2xf32>> -// CHECK: %[[T0:.*]] = arith.constant dense<0.000000e+00> : vector<4x3x2xf32> +// CHECK: %[[T0:.*]] = ub.poison : vector<4x3x2xf32> // CHECK: %[[T2:.*]] = builtin.unrealized_conversion_cast %[[T0]] : vector<4x3x2xf32> to !llvm.array<4 x array<3 x vector<2xf32>>> // CHECK: %[[T3:.*]] = llvm.insertvalue %[[T1]], %[[T2]][0] : !llvm.array<4 x array<3 x vector<2xf32>>> // CHECK: %[[T5:.*]] = llvm.insertvalue %[[T1]], %[[T3]][1] : !llvm.array<4 x array<3 x vector<2xf32>>> @@ -439,7 +338,7 @@ func.func @broadcast_vec3d_from_vec2d_scalable(%arg0: vector<3x[2]xf32>) -> vect // CHECK-LABEL: @broadcast_vec3d_from_vec2d_scalable( // CHECK-SAME: %[[A:.*]]: vector<3x[2]xf32>) // CHECK: %[[T1:.*]] = builtin.unrealized_conversion_cast %[[A]] : vector<3x[2]xf32> to !llvm.array<3 x vector<[2]xf32>> -// CHECK: %[[T0:.*]] = arith.constant dense<0.000000e+00> : vector<4x3x[2]xf32> +// CHECK: %[[T0:.*]] = ub.poison : vector<4x3x[2]xf32> // CHECK: %[[T2:.*]] = builtin.unrealized_conversion_cast %[[T0]] : vector<4x3x[2]xf32> to !llvm.array<4 x array<3 x vector<[2]xf32>>> // CHECK: %[[T3:.*]] = llvm.insertvalue %[[T1]], %[[T2]][0] : !llvm.array<4 x array<3 x vector<[2]xf32>>> // CHECK: %[[T5:.*]] = llvm.insertvalue %[[T1]], %[[T3]][1] : !llvm.array<4 x array<3 x vector<[2]xf32>>> @@ -486,7 +385,7 @@ func.func @broadcast_stretch_at_start(%arg0: vector<1x4xf32>) -> vector<3x4xf32> // CHECK-LABEL: @broadcast_stretch_at_start( // CHECK-SAME: %[[A:.*]]: vector<1x4xf32>) // CHECK: %[[T2:.*]] = builtin.unrealized_conversion_cast %[[A]] : vector<1x4xf32> to !llvm.array<1 x vector<4xf32>> -// CHECK: %[[T1:.*]] = arith.constant dense<0.000000e+00> : vector<3x4xf32> +// CHECK: %[[T1:.*]] = ub.poison : vector<3x4xf32> // CHECK: %[[T4:.*]] = builtin.unrealized_conversion_cast %[[T1]] : vector<3x4xf32> to !llvm.array<3 x vector<4xf32>> // CHECK: %[[T3:.*]] = llvm.extractvalue %[[T2]][0] : !llvm.array<1 x vector<4xf32>> // CHECK: %[[T5:.*]] = llvm.insertvalue %[[T3]], %[[T4]][0] : !llvm.array<3 x vector<4xf32>> @@ -504,7 +403,7 @@ func.func @broadcast_stretch_at_start_scalable(%arg0: vector<1x[4]xf32>) -> vect // CHECK-LABEL: @broadcast_stretch_at_start_scalable( // CHECK-SAME: %[[A:.*]]: vector<1x[4]xf32>) // CHECK: %[[T2:.*]] = builtin.unrealized_conversion_cast %[[A]] : vector<1x[4]xf32> to !llvm.array<1 x vector<[4]xf32>> -// CHECK: %[[T1:.*]] = arith.constant dense<0.000000e+00> : vector<3x[4]xf32> +// CHECK: %[[T1:.*]] = 
ub.poison : vector<3x[4]xf32> // CHECK: %[[T4:.*]] = builtin.unrealized_conversion_cast %[[T1]] : vector<3x[4]xf32> to !llvm.array<3 x vector<[4]xf32>> // CHECK: %[[T3:.*]] = llvm.extractvalue %[[T2]][0] : !llvm.array<1 x vector<[4]xf32>> // CHECK: %[[T5:.*]] = llvm.insertvalue %[[T3]], %[[T4]][0] : !llvm.array<3 x vector<[4]xf32>> @@ -522,7 +421,7 @@ func.func @broadcast_stretch_at_end(%arg0: vector<4x1xf32>) -> vector<4x3xf32> { // CHECK-LABEL: @broadcast_stretch_at_end( // CHECK-SAME: %[[A:.*]]: vector<4x1xf32>) // CHECK: %[[T2:.*]] = builtin.unrealized_conversion_cast %[[A]] : vector<4x1xf32> to !llvm.array<4 x vector<1xf32>> -// CHECK: %[[T1:.*]] = arith.constant dense<0.000000e+00> : vector<4x3xf32> +// CHECK: %[[T1:.*]] = ub.poison : vector<4x3xf32> // CHECK: %[[T7:.*]] = builtin.unrealized_conversion_cast %[[T1]] : vector<4x3xf32> to !llvm.array<4 x vector<3xf32>> // CHECK: %[[T3:.*]] = llvm.extractvalue %[[T2]][0] : !llvm.array<4 x vector<1xf32>> // CHECK: %[[T4:.*]] = llvm.mlir.constant(0 : i64) : i64 @@ -570,9 +469,9 @@ func.func @broadcast_stretch_in_middle(%arg0: vector<4x1x2xf32>) -> vector<4x3x2 // CHECK-LABEL: @broadcast_stretch_in_middle( // CHECK-SAME: %[[A:.*]]: vector<4x1x2xf32>) -> vector<4x3x2xf32> { // CHECK: %[[T3:.*]] = builtin.unrealized_conversion_cast %[[A]] : vector<4x1x2xf32> to !llvm.array<4 x array<1 x vector<2xf32>>> -// CHECK: %[[T1:.*]] = arith.constant dense<0.000000e+00> : vector<4x3x2xf32> +// CHECK: %[[T1:.*]] = ub.poison : vector<4x3x2xf32> // CHECK: %[[T9:.*]] = builtin.unrealized_conversion_cast %[[T1]] : vector<4x3x2xf32> to !llvm.array<4 x array<3 x vector<2xf32>>> -// CHECK: %[[T2:.*]] = arith.constant dense<0.000000e+00> : vector<3x2xf32> +// CHECK: %[[T2:.*]] = ub.poison : vector<3x2xf32> // CHECK: %[[T5:.*]] = builtin.unrealized_conversion_cast %[[T2]] : vector<3x2xf32> to !llvm.array<3 x vector<2xf32>> // CHECK: %[[T4:.*]] = llvm.extractvalue %[[T3]][0, 0] : !llvm.array<4 x array<1 x vector<2xf32>>> // CHECK: %[[T6:.*]] = llvm.insertvalue %[[T4]], %[[T5]][0] : !llvm.array<3 x vector<2xf32>> @@ -606,9 +505,9 @@ func.func @broadcast_stretch_in_middle_scalable_v1(%arg0: vector<4x1x[2]xf32>) - // CHECK-LABEL: @broadcast_stretch_in_middle_scalable_v1( // CHECK-SAME: %[[A:.*]]: vector<4x1x[2]xf32>) -> vector<4x3x[2]xf32> { // CHECK: %[[T3:.*]] = builtin.unrealized_conversion_cast %[[A]] : vector<4x1x[2]xf32> to !llvm.array<4 x array<1 x vector<[2]xf32>>> -// CHECK: %[[T1:.*]] = arith.constant dense<0.000000e+00> : vector<4x3x[2]xf32> +// CHECK: %[[T1:.*]] = ub.poison : vector<4x3x[2]xf32> // CHECK: %[[T9:.*]] = builtin.unrealized_conversion_cast %[[T1]] : vector<4x3x[2]xf32> to !llvm.array<4 x array<3 x vector<[2]xf32>>> -// CHECK: %[[T2:.*]] = arith.constant dense<0.000000e+00> : vector<3x[2]xf32> +// CHECK: %[[T2:.*]] = ub.poison : vector<3x[2]xf32> // CHECK: %[[T5:.*]] = builtin.unrealized_conversion_cast %[[T2]] : vector<3x[2]xf32> to !llvm.array<3 x vector<[2]xf32>> // CHECK: %[[T4:.*]] = llvm.extractvalue %[[T3]][0, 0] : !llvm.array<4 x array<1 x vector<[2]xf32>>> // CHECK: %[[T6:.*]] = llvm.insertvalue %[[T4]], %[[T5]][0] : !llvm.array<3 x vector<[2]xf32>> @@ -1068,1023 +967,255 @@ func.func @masked_int_or_outerprod_scalable(%arg0: vector<[2]xi32>, %arg1: i32, // ----- //===----------------------------------------------------------------------===// -// vector.shuffle +// vector.extract //===----------------------------------------------------------------------===// -func.func @shuffle_0D_direct(%arg0: vector) -> vector<3xf32> { - %1 = 
vector.shuffle %arg0, %arg0 [0, 1, 0] : vector, vector - return %1 : vector<3xf32> +// FIXME: Segfaults for --convert-to-llvm="filter-dialects=vector" +func.func @extract_scalar_from_vec_1d_f32_poison_idx(%arg0: vector<16xf32>) -> f32 { + %0 = vector.extract %arg0[-1]: f32 from vector<16xf32> + return %0 : f32 } -// CHECK-LABEL: @shuffle_0D_direct( -// CHECK-SAME: %[[A:.*]]: vector -// CHECK: %[[c:.*]] = builtin.unrealized_conversion_cast %[[A]] : vector to vector<1xf32> -// CHECK: %[[s:.*]] = llvm.shufflevector %[[c]], %[[c]] [0, 1, 0] : vector<1xf32> -// CHECK: return %[[s]] : vector<3xf32> +// CHECK-LABEL: @extract_scalar_from_vec_1d_f32_poison_idx +// CHECK: %[[UB:.*]] = ub.poison : f32 +// CHECK: return %[[UB]] : f32 // ----- -func.func @shuffle_1D_direct(%arg0: vector<2xf32>, %arg1: vector<2xf32>) -> vector<2xf32> { - %1 = vector.shuffle %arg0, %arg1 [0, 1] : vector<2xf32>, vector<2xf32> - return %1 : vector<2xf32> +// FIXME: Segfaults for --convert-to-llvm="filter-dialects=vector" +func.func @extract_vec_2d_from_vec_3d_f32_poison_idx(%arg0: vector<4x3x16xf32>) -> vector<3x16xf32> { + %0 = vector.extract %arg0[-1]: vector<3x16xf32> from vector<4x3x16xf32> + return %0 : vector<3x16xf32> } -// CHECK-LABEL: @shuffle_1D_direct( -// CHECK-SAME: %[[A:.*]]: vector<2xf32>, -// CHECK-SAME: %[[B:.*]]: vector<2xf32>) -// CHECK: return %[[A:.*]]: vector<2xf32> +// CHECK-LABEL: @extract_vec_2d_from_vec_3d_f32_poison_idx +// CHECK: %[[UB:.*]] = ub.poison : vector<3x16xf32> +// CHECK: return %[[UB]] : vector<3x16xf32> // ----- -func.func @shuffle_1D_index_direct(%arg0: vector<2xindex>, %arg1: vector<2xindex>) -> vector<2xindex> { - %1 = vector.shuffle %arg0, %arg1 [0, 1] : vector<2xindex>, vector<2xindex> - return %1 : vector<2xindex> +//===----------------------------------------------------------------------===// +// vector.print +//===----------------------------------------------------------------------===// + +func.func @print_scalar_i1(%arg0: i1) { + vector.print %arg0 : i1 + return } -// CHECK-LABEL: @shuffle_1D_index_direct( -// CHECK-SAME: %[[A:.*]]: vector<2xindex>, -// CHECK-SAME: %[[B:.*]]: vector<2xindex>) -// CHECK: return %[[A:.*]]: vector<2xindex> +// +// Type "boolean" always uses zero extension. 
+// +// CHECK-LABEL: @print_scalar_i1( +// CHECK-SAME: %[[A:.*]]: i1) +// CHECK: %[[S:.*]] = arith.extui %[[A]] : i1 to i64 +// CHECK: llvm.call @printI64(%[[S]]) : (i64) -> () +// CHECK: llvm.call @printNewline() : () -> () // ----- -func.func @shuffle_poison_mask(%arg0: vector<2xf32>, %arg1: vector<2xf32>) -> vector<4xf32> { - %1 = vector.shuffle %arg0, %arg1 [0, -1, 3, -1] : vector<2xf32>, vector<2xf32> - return %1 : vector<4xf32> +func.func @print_scalar_i4(%arg0: i4) { + vector.print %arg0 : i4 + return } -// CHECK-LABEL: @shuffle_poison_mask( -// CHECK-SAME: %[[A:.*]]: vector<2xf32>, %[[B:.*]]: vector<2xf32>) -// CHECK: %[[s:.*]] = llvm.shufflevector %[[A]], %[[B]] [0, -1, 3, -1] : vector<2xf32> +// CHECK-LABEL: @print_scalar_i4( +// CHECK-SAME: %[[A:.*]]: i4) +// CHECK: %[[S:.*]] = arith.extsi %[[A]] : i4 to i64 +// CHECK: llvm.call @printI64(%[[S]]) : (i64) -> () +// CHECK: llvm.call @printNewline() : () -> () // ----- -func.func @shuffle_1D(%arg0: vector<2xf32>, %arg1: vector<3xf32>) -> vector<5xf32> { - %1 = vector.shuffle %arg0, %arg1 [4, 3, 2, 1, 0] : vector<2xf32>, vector<3xf32> - return %1 : vector<5xf32> +func.func @print_scalar_si4(%arg0: si4) { + vector.print %arg0 : si4 + return } -// CHECK-LABEL: @shuffle_1D( -// CHECK-SAME: %[[A:.*]]: vector<2xf32>, -// CHECK-SAME: %[[B:.*]]: vector<3xf32>) -// CHECK: %[[U0:.*]] = llvm.mlir.poison : vector<5xf32> -// CHECK: %[[C2:.*]] = llvm.mlir.constant(2 : index) : i64 -// CHECK: %[[E1:.*]] = llvm.extractelement %[[B]][%[[C2]] : i64] : vector<3xf32> -// CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : index) : i64 -// CHECK: %[[I1:.*]] = llvm.insertelement %[[E1]], %[[U0]][%[[C0]] : i64] : vector<5xf32> -// CHECK: %[[C1:.*]] = llvm.mlir.constant(1 : index) : i64 -// CHECK: %[[E2:.*]] = llvm.extractelement %[[B]][%[[C1]] : i64] : vector<3xf32> -// CHECK: %[[C1:.*]] = llvm.mlir.constant(1 : index) : i64 -// CHECK: %[[I2:.*]] = llvm.insertelement %[[E2]], %[[I1]][%[[C1]] : i64] : vector<5xf32> -// CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : index) : i64 -// CHECK: %[[E3:.*]] = llvm.extractelement %[[B]][%[[C0]] : i64] : vector<3xf32> -// CHECK: %[[C2:.*]] = llvm.mlir.constant(2 : index) : i64 -// CHECK: %[[I3:.*]] = llvm.insertelement %[[E3]], %[[I2]][%[[C2]] : i64] : vector<5xf32> -// CHECK: %[[C1:.*]] = llvm.mlir.constant(1 : index) : i64 -// CHECK: %[[E4:.*]] = llvm.extractelement %[[A]][%[[C1]] : i64] : vector<2xf32> -// CHECK: %[[C3:.*]] = llvm.mlir.constant(3 : index) : i64 -// CHECK: %[[I4:.*]] = llvm.insertelement %[[E4]], %[[I3]][%[[C3]] : i64] : vector<5xf32> -// CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : index) : i64 -// CHECK: %[[E5:.*]] = llvm.extractelement %[[A]][%[[C0]] : i64] : vector<2xf32> -// CHECK: %[[C4:.*]] = llvm.mlir.constant(4 : index) : i64 -// CHECK: %[[I5:.*]] = llvm.insertelement %[[E5]], %[[I4]][%[[C4]] : i64] : vector<5xf32> -// CHECK: return %[[I5]] : vector<5xf32> +// CHECK-LABEL: @print_scalar_si4( +// CHECK-SAME: %[[A:.*]]: si4) +// CHECK: %[[C:.*]] = builtin.unrealized_conversion_cast %[[A]] : si4 to i4 +// CHECK: %[[S:.*]] = arith.extsi %[[C]] : i4 to i64 +// CHECK: llvm.call @printI64(%[[S]]) : (i64) -> () +// CHECK: llvm.call @printNewline() : () -> () // ----- -func.func @shuffle_2D(%a: vector<1x4xf32>, %b: vector<2x4xf32>) -> vector<3x4xf32> { - %1 = vector.shuffle %a, %b[1, 0, 2] : vector<1x4xf32>, vector<2x4xf32> - return %1 : vector<3x4xf32> +func.func @print_scalar_ui4(%arg0: ui4) { + vector.print %arg0 : ui4 + return } -// CHECK-LABEL: @shuffle_2D( -// CHECK-SAME: %[[A:.*]]: vector<1x4xf32>, -// 
CHECK-SAME: %[[B:.*]]: vector<2x4xf32>) -// CHECK-DAG: %[[VAL_0:.*]] = builtin.unrealized_conversion_cast %[[A]] : vector<1x4xf32> to !llvm.array<1 x vector<4xf32>> -// CHECK-DAG: %[[VAL_1:.*]] = builtin.unrealized_conversion_cast %[[B]] : vector<2x4xf32> to !llvm.array<2 x vector<4xf32>> -// CHECK: %[[U0:.*]] = llvm.mlir.poison : !llvm.array<3 x vector<4xf32>> -// CHECK: %[[E1:.*]] = llvm.extractvalue %[[VAL_1]][0] : !llvm.array<2 x vector<4xf32>> -// CHECK: %[[I1:.*]] = llvm.insertvalue %[[E1]], %[[U0]][0] : !llvm.array<3 x vector<4xf32>> -// CHECK: %[[E2:.*]] = llvm.extractvalue %[[VAL_0]][0] : !llvm.array<1 x vector<4xf32>> -// CHECK: %[[I2:.*]] = llvm.insertvalue %[[E2]], %[[I1]][1] : !llvm.array<3 x vector<4xf32>> -// CHECK: %[[E3:.*]] = llvm.extractvalue %[[VAL_1]][1] : !llvm.array<2 x vector<4xf32>> -// CHECK: %[[I3:.*]] = llvm.insertvalue %[[E3]], %[[I2]][2] : !llvm.array<3 x vector<4xf32>> -// CHECK: %[[VAL_3:.*]] = builtin.unrealized_conversion_cast %[[I3]] : !llvm.array<3 x vector<4xf32>> to vector<3x4xf32> -// CHECK: return %[[VAL_3]] : vector<3x4xf32> +// CHECK-LABEL: @print_scalar_ui4( +// CHECK-SAME: %[[A:.*]]: ui4) +// CHECK: %[[C:.*]] = builtin.unrealized_conversion_cast %[[A]] : ui4 to i4 +// CHECK: %[[S:.*]] = arith.extui %[[C]] : i4 to i64 +// CHECK: llvm.call @printU64(%[[S]]) : (i64) -> () +// CHECK: llvm.call @printNewline() : () -> () // ----- -//===----------------------------------------------------------------------===// -// vector.extractelement -//===----------------------------------------------------------------------===// - -func.func @extractelement_from_vec_0d_f32(%arg0: vector<f32>) -> f32 { - %1 = vector.extractelement %arg0[] : vector<f32> - return %1 : f32 +func.func @print_scalar_i32(%arg0: i32) { + vector.print %arg0 : i32 + return } -// CHECK-LABEL: @extractelement_from_vec_0d_f32 -// CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : index) : i64 -// CHECK: llvm.extractelement %{{.*}}[%[[C0]] : {{.*}}] : vector<1xf32> +// CHECK-LABEL: @print_scalar_i32( +// CHECK-SAME: %[[A:.*]]: i32) +// CHECK: %[[S:.*]] = arith.extsi %[[A]] : i32 to i64 +// CHECK: llvm.call @printI64(%[[S]]) : (i64) -> () +// CHECK: llvm.call @printNewline() : () -> () // ----- -func.func @extractelement_from_vec_1d_f32_idx_as_i32(%arg0: vector<16xf32>) -> f32 { - %0 = arith.constant 15 : i32 - %1 = vector.extractelement %arg0[%0 : i32]: vector<16xf32> - return %1 : f32 +func.func @print_scalar_ui32(%arg0: ui32) { + vector.print %arg0 : ui32 + return } -// CHECK-LABEL: @extractelement_from_vec_1d_f32_idx_as_i32( -// CHECK-SAME: %[[A:.*]]: vector<16xf32>) -// CHECK: %[[C:.*]] = arith.constant 15 : i32 -// CHECK: %[[X:.*]] = llvm.extractelement %[[A]][%[[C]] : i32] : vector<16xf32> -// CHECK: return %[[X]] : f32 +// CHECK-LABEL: @print_scalar_ui32( +// CHECK-SAME: %[[A:.*]]: ui32) +// CHECK: %[[C:.*]] = builtin.unrealized_conversion_cast %[[A]] : ui32 to i32 +// CHECK: %[[S:.*]] = arith.extui %[[C]] : i32 to i64 +// CHECK: llvm.call @printU64(%[[S]]) : (i64) -> () // ----- -func.func @extractelement_from_vec_1d_f32_idx_as_i32_scalable(%arg0: vector<[16]xf32>) -> f32 { - %0 = arith.constant 15 : i32 - %1 = vector.extractelement %arg0[%0 : i32]: vector<[16]xf32> - return %1 : f32 +func.func @print_scalar_i40(%arg0: i40) { + vector.print %arg0 : i40 + return } -// CHECK-LABEL: @extractelement_from_vec_1d_f32_idx_as_i32_scalable( -// CHECK-SAME: %[[A:.*]]: vector<[16]xf32>) -// CHECK: %[[C:.*]] = arith.constant 15 : i32 -// CHECK: %[[X:.*]] = llvm.extractelement %[[A]][%[[C]] : i32] : vector<[16]xf32>
-// CHECK: return %[[X]] : f32 +// CHECK-LABEL: @print_scalar_i40( +// CHECK-SAME: %[[A:.*]]: i40) +// CHECK: %[[S:.*]] = arith.extsi %[[A]] : i40 to i64 +// CHECK: llvm.call @printI64(%[[S]]) : (i64) -> () +// CHECK: llvm.call @printNewline() : () -> () // ----- -func.func @extractelement_from_vec_1d_f32_idx_as_index(%arg0: vector<16xf32>) -> f32 { - %0 = arith.constant 15 : index - %1 = vector.extractelement %arg0[%0 : index]: vector<16xf32> - return %1 : f32 + +func.func @print_scalar_si40(%arg0: si40) { + vector.print %arg0 : si40 + return } -// CHECK-LABEL: @extractelement_from_vec_1d_f32_idx_as_index( -// CHECK-SAME: %[[A:.*]]: vector<16xf32>) -// CHECK: %[[C:.*]] = arith.constant 15 : index -// CHECK: %[[I:.*]] = builtin.unrealized_conversion_cast %[[C]] : index to i64 -// CHECK: %[[X:.*]] = llvm.extractelement %[[A]][%[[I]] : i64] : vector<16xf32> -// CHECK: return %[[X]] : f32 +// CHECK-LABEL: @print_scalar_si40( +// CHECK-SAME: %[[A:.*]]: si40) +// CHECK: %[[C:.*]] = builtin.unrealized_conversion_cast %[[A]] : si40 to i40 +// CHECK: %[[S:.*]] = arith.extsi %[[C]] : i40 to i64 +// CHECK: llvm.call @printI64(%[[S]]) : (i64) -> () +// CHECK: llvm.call @printNewline() : () -> () // ----- -func.func @extractelement_from_vec_1d_f32_idx_as_index_scalable(%arg0: vector<[16]xf32>) -> f32 { - %0 = arith.constant 15 : index - %1 = vector.extractelement %arg0[%0 : index]: vector<[16]xf32> - return %1 : f32 +func.func @print_scalar_ui40(%arg0: ui40) { + vector.print %arg0 : ui40 + return } -// CHECK-LABEL: @extractelement_from_vec_1d_f32_idx_as_index_scalable( -// CHECK-SAME: %[[A:.*]]: vector<[16]xf32>) -// CHECK: %[[C:.*]] = arith.constant 15 : index -// CHECK: %[[I:.*]] = builtin.unrealized_conversion_cast %[[C]] : index to i64 -// CHECK: %[[X:.*]] = llvm.extractelement %[[A]][%[[I]] : i64] : vector<[16]xf32> -// CHECK: return %[[X]] : f32 +// CHECK-LABEL: @print_scalar_ui40( +// CHECK-SAME: %[[A:.*]]: ui40) +// CHECK: %[[C:.*]] = builtin.unrealized_conversion_cast %[[A]] : ui40 to i40 +// CHECK: %[[S:.*]] = arith.extui %[[C]] : i40 to i64 +// CHECK: llvm.call @printU64(%[[S]]) : (i64) -> () +// CHECK: llvm.call @printNewline() : () -> () // ----- //===----------------------------------------------------------------------===// -// vector.extract +// vector.extract_strided_slice //===----------------------------------------------------------------------===// -func.func @extract_scalar_from_vec_1d_f32(%arg0: vector<16xf32>) -> f32 { - %0 = vector.extract %arg0[15]: f32 from vector<16xf32> - return %0 : f32 +func.func @extract_strided_slice_f32_1d_from_1d(%arg0: vector<4xf32>) -> vector<2xf32> { + %0 = vector.extract_strided_slice %arg0 {offsets = [2], sizes = [2], strides = [1]} : vector<4xf32> to vector<2xf32> + return %0 : vector<2xf32> } -// CHECK-LABEL: @extract_scalar_from_vec_1d_f32 -// CHECK: llvm.mlir.constant(15 : i64) : i64 -// CHECK: llvm.extractelement {{.*}}[{{.*}} : i64] : vector<16xf32> -// CHECK: return {{.*}} : f32 - -// ----- +// CHECK-LABEL: @extract_strided_slice_f32_1d_from_1d +// CHECK-SAME: %[[A:.*]]: vector<4xf32>) +// CHECK: %[[T0:.*]] = llvm.shufflevector %[[A]], %[[A]] [2, 3] : vector<4xf32> +// CHECK: return %[[T0]] : vector<2xf32> -func.func @extract_scalar_from_vec_1d_f32_poison_idx(%arg0: vector<16xf32>) -> f32 { - %0 = vector.extract %arg0[-1]: f32 from vector<16xf32> - return %0 : f32 -} -// CHECK-LABEL: @extract_scalar_from_vec_1d_f32_poison_idx -// CHECK: %[[UB:.*]] = ub.poison : f32 -// CHECK: return %[[UB]] : f32 +// NOTE: For scalable vectors we could 
only extract vector<[4]xf32> from vector<[4]xf32>, but that would be a NOP. // ----- -func.func @extract_scalar_from_vec_1d_f32_scalable(%arg0: vector<[16]xf32>) -> f32 { - %0 = vector.extract %arg0[15]: f32 from vector<[16]xf32> - return %0 : f32 +func.func @extract_strided_slice_index_1d_from_1d(%arg0: vector<4xindex>) -> vector<2xindex> { + %0 = vector.extract_strided_slice %arg0 {offsets = [2], sizes = [2], strides = [1]} : vector<4xindex> to vector<2xindex> + return %0 : vector<2xindex> } -// CHECK-LABEL: @extract_scalar_from_vec_1d_f32_scalable -// CHECK: llvm.mlir.constant(15 : i64) : i64 -// CHECK: llvm.extractelement {{.*}}[{{.*}} : i64] : vector<[16]xf32> -// CHECK: return {{.*}} : f32 - -// ----- +// CHECK-LABEL: @extract_strided_slice_index_1d_from_1d +// CHECK-SAME: %[[A:.*]]: vector<4xindex>) +// CHECK: %[[T0:.*]] = builtin.unrealized_conversion_cast %[[A]] : vector<4xindex> to vector<4xi64> +// CHECK: %[[T2:.*]] = llvm.shufflevector %[[T0]], %[[T0]] [2, 3] : vector<4xi64> +// CHECK: %[[T3:.*]] = builtin.unrealized_conversion_cast %[[T2]] : vector<2xi64> to vector<2xindex> +// CHECK: return %[[T3]] : vector<2xindex> -func.func @extract_vec_1e_from_vec_1d_f32(%arg0: vector<16xf32>) -> vector<1xf32> { - %0 = vector.extract %arg0[15]: vector<1xf32> from vector<16xf32> - return %0 : vector<1xf32> -} -// CHECK-LABEL: @extract_vec_1e_from_vec_1d_f32( -// CHECK-SAME: %[[A:.*]]: vector<16xf32>) -// CHECK: %[[T0:.*]] = llvm.mlir.constant(15 : i64) : i64 -// CHECK: %[[T1:.*]] = llvm.extractelement %[[A]][%[[T0]] : i64] : vector<16xf32> -// CHECK: %[[T2:.*]] = builtin.unrealized_conversion_cast %[[T1]] : f32 to vector<1xf32> -// CHECK: return %[[T2]] : vector<1xf32> +// NOTE: For scalable vectors we could only extract vector<[4]xindex> from vector<[4]xindex>, but that would be a NOP. 
// ----- -func.func @extract_vec_1e_from_vec_1d_f32_scalable(%arg0: vector<[16]xf32>) -> vector<1xf32> { - %0 = vector.extract %arg0[15]: vector<1xf32> from vector<[16]xf32> - return %0 : vector<1xf32> +func.func @extract_strided_slice_f32_1d_from_2d(%arg0: vector<4x8xf32>) -> vector<2x8xf32> { + %0 = vector.extract_strided_slice %arg0 {offsets = [2], sizes = [2], strides = [1]} : vector<4x8xf32> to vector<2x8xf32> + return %0 : vector<2x8xf32> } -// CHECK-LABEL: @extract_vec_1e_from_vec_1d_f32_scalable( -// CHECK-SAME: %[[A:.*]]: vector<[16]xf32>) -// CHECK: %[[T0:.*]] = llvm.mlir.constant(15 : i64) : i64 -// CHECK: %[[T1:.*]] = llvm.extractelement %[[A]][%[[T0]] : i64] : vector<[16]xf32> -// CHECK: %[[T2:.*]] = builtin.unrealized_conversion_cast %[[T1]] : f32 to vector<1xf32> -// CHECK: return %[[T2]] : vector<1xf32> +// CHECK-LABEL: @extract_strided_slice_f32_1d_from_2d( +// CHECK-SAME: %[[ARG:.*]]: vector<4x8xf32>) +// CHECK: %[[A:.*]] = builtin.unrealized_conversion_cast %[[ARG]] : vector<4x8xf32> to !llvm.array<4 x vector<8xf32>> +// CHECK: %[[T0:.*]] = llvm.mlir.poison : !llvm.array<2 x vector<8xf32>> +// CHECK: %[[T1:.*]] = llvm.extractvalue %[[A]][2] : !llvm.array<4 x vector<8xf32>> +// CHECK: %[[T2:.*]] = llvm.insertvalue %[[T1]], %[[T0]][0] : !llvm.array<2 x vector<8xf32>> +// CHECK: %[[T3:.*]] = llvm.extractvalue %[[A]][3] : !llvm.array<4 x vector<8xf32>> +// CHECK: %[[T4:.*]] = llvm.insertvalue %[[T3]], %[[T2]][1] : !llvm.array<2 x vector<8xf32>> +// CHECK: %[[T5:.*]] = builtin.unrealized_conversion_cast %[[T4]] : !llvm.array<2 x vector<8xf32>> to vector<2x8xf32> +// CHECK: return %[[T5]] // ----- -func.func @extract_scalar_from_vec_1d_index(%arg0: vector<16xindex>) -> index { - %0 = vector.extract %arg0[15]: index from vector<16xindex> - return %0 : index +func.func @extract_strided_slice_f32_1d_from_2d_scalable(%arg0: vector<4x[8]xf32>) -> vector<2x[8]xf32> { + %0 = vector.extract_strided_slice %arg0 {offsets = [2], sizes = [2], strides = [1]} : vector<4x[8]xf32> to vector<2x[8]xf32> + return %0 : vector<2x[8]xf32> } -// CHECK-LABEL: @extract_scalar_from_vec_1d_index( -// CHECK-SAME: %[[A:.*]]: vector<16xindex>) -// CHECK: %[[T0:.*]] = builtin.unrealized_conversion_cast %[[A]] : vector<16xindex> to vector<16xi64> -// CHECK: %[[T1:.*]] = llvm.mlir.constant(15 : i64) : i64 -// CHECK: %[[T2:.*]] = llvm.extractelement %[[T0]][%[[T1]] : i64] : vector<16xi64> -// CHECK: %[[T3:.*]] = builtin.unrealized_conversion_cast %[[T2]] : i64 to index -// CHECK: return %[[T3]] : index +// CHECK-LABEL: func.func @extract_strided_slice_f32_1d_from_2d_scalable( +// CHECK-SAME: %[[ARG:.*]]: vector<4x[8]xf32>) +// CHECK: %[[A:.*]] = builtin.unrealized_conversion_cast %[[ARG]] : vector<4x[8]xf32> to !llvm.array<4 x vector<[8]xf32>> +// CHECK: %[[CST:.*]] = arith.constant dense<0.000000e+00> : vector<2x[8]xf32> +// CHECK: %[[DST:.*]] = builtin.unrealized_conversion_cast %[[CST]] : vector<2x[8]xf32> to !llvm.array<2 x vector<[8]xf32>> +// CHECK: %[[E0:.*]] = llvm.extractvalue %[[A]][2] : !llvm.array<4 x vector<[8]xf32>> +// CHECK: %[[E1:.*]] = llvm.extractvalue %[[A]][3] : !llvm.array<4 x vector<[8]xf32>> +// CHECK: %[[I0:.*]] = llvm.insertvalue %[[E0]], %[[DST]][0] : !llvm.array<2 x vector<[8]xf32>> +// CHECK: %[[I1:.*]] = llvm.insertvalue %[[E1]], %[[I0]][1] : !llvm.array<2 x vector<[8]xf32>> +// CHECK: %[[RES:.*]] = builtin.unrealized_conversion_cast %[[I1]] : !llvm.array<2 x vector<[8]xf32>> to vector<2x[8]xf32> +// CHECK: return %[[RES]] // ----- -func.func 
@extract_scalar_from_vec_1d_index_scalable(%arg0: vector<[16]xindex>) -> index { - %0 = vector.extract %arg0[15]: index from vector<[16]xindex> - return %0 : index +func.func @extract_strided_slice_f32_2d_from_2d(%arg0: vector<4x8xf32>) -> vector<2x2xf32> { + %0 = vector.extract_strided_slice %arg0 {offsets = [2, 2], sizes = [2, 2], strides = [1, 1]} : vector<4x8xf32> to vector<2x2xf32> + return %0 : vector<2x2xf32> } -// CHECK-LABEL: @extract_scalar_from_vec_1d_index_scalable( -// CHECK-SAME: %[[A:.*]]: vector<[16]xindex>) -// CHECK: %[[T0:.*]] = builtin.unrealized_conversion_cast %[[A]] : vector<[16]xindex> to vector<[16]xi64> -// CHECK: %[[T1:.*]] = llvm.mlir.constant(15 : i64) : i64 -// CHECK: %[[T2:.*]] = llvm.extractelement %[[T0]][%[[T1]] : i64] : vector<[16]xi64> -// CHECK: %[[T3:.*]] = builtin.unrealized_conversion_cast %[[T2]] : i64 to index -// CHECK: return %[[T3]] : index +// CHECK-LABEL: @extract_strided_slice_f32_2d_from_2d( +// CHECK-SAME: %[[ARG:.*]]: vector<4x8xf32>) +// CHECK: %[[A:.*]] = builtin.unrealized_conversion_cast %[[ARG]] : vector<4x8xf32> to !llvm.array<4 x vector<8xf32>> +// CHECK: %[[VAL_2:.*]] = arith.constant dense<0.000000e+00> : vector<2x2xf32> +// CHECK: %[[VAL_6:.*]] = builtin.unrealized_conversion_cast %[[VAL_2]] : vector<2x2xf32> to !llvm.array<2 x vector<2xf32>> +// CHECK: %[[T2:.*]] = llvm.extractvalue %[[A]][2] : !llvm.array<4 x vector<8xf32>> +// CHECK: %[[T3:.*]] = llvm.shufflevector %[[T2]], %[[T2]] [2, 3] : vector<8xf32> +// CHECK: %[[T4:.*]] = llvm.insertvalue %[[T3]], %[[VAL_6]][0] : !llvm.array<2 x vector<2xf32>> +// CHECK: %[[T5:.*]] = llvm.extractvalue %[[A]][3] : !llvm.array<4 x vector<8xf32>> +// CHECK: %[[T6:.*]] = llvm.shufflevector %[[T5]], %[[T5]] [2, 3] : vector<8xf32> +// CHECK: %[[T7:.*]] = llvm.insertvalue %[[T6]], %[[T4]][1] : !llvm.array<2 x vector<2xf32>> +// CHECK: %[[VAL_12:.*]] = builtin.unrealized_conversion_cast %[[T7]] : !llvm.array<2 x vector<2xf32>> to vector<2x2xf32> +// CHECK: return %[[VAL_12]] : vector<2x2xf32> // ----- -func.func @extract_vec_2d_from_vec_3d_f32(%arg0: vector<4x3x16xf32>) -> vector<3x16xf32> { - %0 = vector.extract %arg0[0]: vector<3x16xf32> from vector<4x3x16xf32> - return %0 : vector<3x16xf32> +// NOTE: For scalable vectors, we can only extract "full" scalable dimensions +// (e.g. [8] from [8], but not [4] from [8]). 
+ +func.func @extract_strided_slice_f32_2d_from_2d_scalable(%arg0: vector<4x[8]xf32>) -> vector<2x[8]xf32> { + %0 = vector.extract_strided_slice %arg0 {offsets = [2, 0], sizes = [2, 8], strides = [1, 1]} : vector<4x[8]xf32> to vector<2x[8]xf32> + return %0 : vector<2x[8]xf32> } -// CHECK-LABEL: @extract_vec_2d_from_vec_3d_f32 -// CHECK: llvm.extractvalue {{.*}}[0] : !llvm.array<4 x array<3 x vector<16xf32>>> -// CHECK: return {{.*}} : vector<3x16xf32> - -// ----- - -func.func @extract_vec_2d_from_vec_3d_f32_poison_idx(%arg0: vector<4x3x16xf32>) -> vector<3x16xf32> { - %0 = vector.extract %arg0[-1]: vector<3x16xf32> from vector<4x3x16xf32> - return %0 : vector<3x16xf32> -} -// CHECK-LABEL: @extract_vec_2d_from_vec_3d_f32_poison_idx -// CHECK: %[[UB:.*]] = ub.poison : vector<3x16xf32> -// CHECK: return %[[UB]] : vector<3x16xf32> - -// ----- - -func.func @extract_vec_2d_from_vec_3d_f32_scalable(%arg0: vector<4x3x[16]xf32>) -> vector<3x[16]xf32> { - %0 = vector.extract %arg0[0]: vector<3x[16]xf32> from vector<4x3x[16]xf32> - return %0 : vector<3x[16]xf32> -} -// CHECK-LABEL: @extract_vec_2d_from_vec_3d_f32_scalable -// CHECK: llvm.extractvalue {{.*}}[0] : !llvm.array<4 x array<3 x vector<[16]xf32>>> -// CHECK: return {{.*}} : vector<3x[16]xf32> - -// ----- - -func.func @extract_vec_1d_from_vec_3d_f32(%arg0: vector<4x3x16xf32>) -> vector<16xf32> { - %0 = vector.extract %arg0[0, 0]: vector<16xf32> from vector<4x3x16xf32> - return %0 : vector<16xf32> -} -// CHECK-LABEL: @extract_vec_1d_from_vec_3d_f32 -// CHECK: llvm.extractvalue {{.*}}[0, 0] : !llvm.array<4 x array<3 x vector<16xf32>>> -// CHECK: return {{.*}} : vector<16xf32> - -// ----- - -func.func @extract_vec_1d_from_vec_3d_f32_scalable(%arg0: vector<4x3x[16]xf32>) -> vector<[16]xf32> { - %0 = vector.extract %arg0[0, 0]: vector<[16]xf32> from vector<4x3x[16]xf32> - return %0 : vector<[16]xf32> -} -// CHECK-LABEL: @extract_vec_1d_from_vec_3d_f32_scalable -// CHECK: llvm.extractvalue {{.*}}[0, 0] : !llvm.array<4 x array<3 x vector<[16]xf32>>> -// CHECK: return {{.*}} : vector<[16]xf32> - -// ----- - -func.func @extract_scalar_from_vec_3d_f32(%arg0: vector<4x3x16xf32>) -> f32 { - %0 = vector.extract %arg0[0, 0, 0]: f32 from vector<4x3x16xf32> - return %0 : f32 -} -// CHECK-LABEL: @extract_scalar_from_vec_3d_f32 -// CHECK: llvm.extractvalue {{.*}}[0, 0] : !llvm.array<4 x array<3 x vector<16xf32>>> -// CHECK: llvm.mlir.constant(0 : i64) : i64 -// CHECK: llvm.extractelement {{.*}}[{{.*}} : i64] : vector<16xf32> -// CHECK: return {{.*}} : f32 - -// ----- - -func.func @extract_scalar_from_vec_3d_f32_scalable(%arg0: vector<4x3x[16]xf32>) -> f32 { - %0 = vector.extract %arg0[0, 0, 0]: f32 from vector<4x3x[16]xf32> - return %0 : f32 -} -// CHECK-LABEL: @extract_scalar_from_vec_3d_f32_scalable -// CHECK: llvm.extractvalue {{.*}}[0, 0] : !llvm.array<4 x array<3 x vector<[16]xf32>>> -// CHECK: llvm.mlir.constant(0 : i64) : i64 -// CHECK: llvm.extractelement {{.*}}[{{.*}} : i64] : vector<[16]xf32> -// CHECK: return {{.*}} : f32 - -// ----- - -func.func @extract_scalar_from_vec_1d_f32_dynamic_idx(%arg0: vector<16xf32>, %arg1: index) -> f32 { - %0 = vector.extract %arg0[%arg1]: f32 from vector<16xf32> - return %0 : f32 -} -// CHECK-LABEL: @extract_scalar_from_vec_1d_f32_dynamic_idx -// CHECK-SAME: %[[VEC:.+]]: vector<16xf32>, %[[INDEX:.+]]: index -// CHECK: %[[UC:.+]] = builtin.unrealized_conversion_cast %[[INDEX]] : index to i64 -// CHECK: llvm.extractelement %[[VEC]][%[[UC]] : i64] : vector<16xf32> - -// ----- - -func.func 
@extract_scalar_from_vec_1d_f32_dynamic_idx_scalable(%arg0: vector<[16]xf32>, %arg1: index) -> f32 { - %0 = vector.extract %arg0[%arg1]: f32 from vector<[16]xf32> - return %0 : f32 -} -// CHECK-LABEL: @extract_scalar_from_vec_1d_f32_dynamic_idx_scalable -// CHECK-SAME: %[[VEC:.+]]: vector<[16]xf32>, %[[INDEX:.+]]: index -// CHECK: %[[UC:.+]] = builtin.unrealized_conversion_cast %[[INDEX]] : index to i64 -// CHECK: llvm.extractelement %[[VEC]][%[[UC]] : i64] : vector<[16]xf32> - -// ----- - -func.func @extract_scalar_from_vec_2d_f32_inner_dynamic_idx(%arg0: vector<1x16xf32>, %arg1: index) -> f32 { - %0 = vector.extract %arg0[0, %arg1]: f32 from vector<1x16xf32> - return %0 : f32 -} - -// Lowering supports extracting from multi-dim vectors with dynamic indices -// provided that only the trailing index is dynamic. - -// CHECK-LABEL: @extract_scalar_from_vec_2d_f32_inner_dynamic_idx( -// CHECK: llvm.extractvalue -// CHECK: llvm.extractelement - -func.func @extract_scalar_from_vec_2d_f32_inner_dynamic_idx_scalable(%arg0: vector<1x[16]xf32>, %arg1: index) -> f32 { - %0 = vector.extract %arg0[0, %arg1]: f32 from vector<1x[16]xf32> - return %0 : f32 -} - -// Lowering supports extracting from multi-dim vectors with dynamic indices -// provided that only the trailing index is dynamic. - -// CHECK-LABEL: @extract_scalar_from_vec_2d_f32_inner_dynamic_idx_scalable( -// CHECK: llvm.extractvalue -// CHECK: llvm.extractelement - -// ----- - -func.func @extract_scalar_from_vec_2d_f32_outer_dynamic_idx(%arg0: vector<1x16xf32>, %arg1: index) -> f32 { - %0 = vector.extract %arg0[%arg1, 0]: f32 from vector<1x16xf32> - return %0 : f32 -} - -// Lowering supports extracting from multi-dim vectors with dynamic indices -// provided that only the trailing index is dynamic. - -// CHECK-LABEL: @extract_scalar_from_vec_2d_f32_outer_dynamic_idx( -// CHECK: vector.extract - -func.func @extract_scalar_from_vec_2d_f32_outer_dynamic_idx_scalable(%arg0: vector<1x[16]xf32>, %arg1: index) -> f32 { - %0 = vector.extract %arg0[%arg1, 0]: f32 from vector<1x[16]xf32> - return %0 : f32 -} - -// Lowering does not support extracting from multi-dim vectors with non trailing -// dynamic index, but it shouldn't crash. 
- -// CHECK-LABEL: @extract_scalar_from_vec_2d_f32_outer_dynamic_idx_scalable( -// CHECK: vector.extract - -// ----- - -func.func @extract_scalar_from_vec_0d_index(%arg0: vector<index>) -> index { - %0 = vector.extract %arg0[]: index from vector<index> - return %0 : index -} -// CHECK-LABEL: @extract_scalar_from_vec_0d_index( -// CHECK-SAME: %[[A:.*]]: vector<index>) -// CHECK: %[[T0:.*]] = builtin.unrealized_conversion_cast %[[A]] : vector<index> to vector<1xi64> -// CHECK: %[[T1:.*]] = llvm.mlir.constant(0 : i64) : i64 -// CHECK: %[[T2:.*]] = llvm.extractelement %[[T0]][%[[T1]] : i64] : vector<1xi64> -// CHECK: %[[T3:.*]] = builtin.unrealized_conversion_cast %[[T2]] : i64 to index -// CHECK: return %[[T3]] : index - -// ----- - -//===----------------------------------------------------------------------===// -// vector.insertelement -//===----------------------------------------------------------------------===// - -func.func @insertelement_into_vec_0d_f32(%arg0: f32, %arg1: vector<f32>) -> vector<f32> { - %1 = vector.insertelement %arg0, %arg1[] : vector<f32> - return %1 : vector<f32> -} -// CHECK-LABEL: @insertelement_into_vec_0d_f32 -// CHECK-SAME: %[[A:.*]]: f32, -// CHECK: %[[B:.*]] = builtin.unrealized_conversion_cast %{{.*}} : -// CHECK: vector<f32> to vector<1xf32> -// CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : index) : i64 -// CHECK: %[[X:.*]] = llvm.insertelement %[[A]], %[[B]][%[[C0]] : {{.*}}] : vector<1xf32> - -// ----- - -func.func @insertelement_into_vec_1d_f32_idx_as_i32(%arg0: f32, %arg1: vector<4xf32>) -> vector<4xf32> { - %0 = arith.constant 3 : i32 - %1 = vector.insertelement %arg0, %arg1[%0 : i32] : vector<4xf32> - return %1 : vector<4xf32> -} -// CHECK-LABEL: @insertelement_into_vec_1d_f32_idx_as_i32( -// CHECK-SAME: %[[A:.*]]: f32, -// CHECK-SAME: %[[B:.*]]: vector<4xf32>) -// CHECK: %[[C:.*]] = arith.constant 3 : i32 -// CHECK: %[[X:.*]] = llvm.insertelement %[[A]], %[[B]][%[[C]] : i32] : vector<4xf32> -// CHECK: return %[[X]] : vector<4xf32> - -// ----- - -func.func @insertelement_into_vec_1d_f32_idx_as_i32_scalable(%arg0: f32, %arg1: vector<[4]xf32>) -> vector<[4]xf32> { - %0 = arith.constant 3 : i32 - %1 = vector.insertelement %arg0, %arg1[%0 : i32] : vector<[4]xf32> - return %1 : vector<[4]xf32> -} -// CHECK-LABEL: @insertelement_into_vec_1d_f32_idx_as_i32_scalable( -// CHECK-SAME: %[[A:.*]]: f32, -// CHECK-SAME: %[[B:.*]]: vector<[4]xf32>) -// CHECK: %[[C:.*]] = arith.constant 3 : i32 -// CHECK: %[[X:.*]] = llvm.insertelement %[[A]], %[[B]][%[[C]] : i32] : vector<[4]xf32> -// CHECK: return %[[X]] : vector<[4]xf32> - -// ----- - -func.func @insertelement_into_vec_1d_f32_scalable_idx_as_index(%arg0: f32, %arg1: vector<4xf32>) -> vector<4xf32> { - %0 = arith.constant 3 : index - %1 = vector.insertelement %arg0, %arg1[%0 : index] : vector<4xf32> - return %1 : vector<4xf32> -} -// CHECK-LABEL: @insertelement_into_vec_1d_f32_scalable_idx_as_index( -// CHECK-SAME: %[[A:.*]]: f32, -// CHECK-SAME: %[[B:.*]]: vector<4xf32>) -// CHECK: %[[C:.*]] = arith.constant 3 : index -// CHECK: %[[I:.*]] = builtin.unrealized_conversion_cast %[[C]] : index to i64 -// CHECK: %[[X:.*]] = llvm.insertelement %[[A]], %[[B]][%[[I]] : i64] : vector<4xf32> -// CHECK: return %[[X]] : vector<4xf32> - -// ----- - -func.func @insertelement_into_vec_1d_f32_scalable_idx_as_index_scalable(%arg0: f32, %arg1: vector<[4]xf32>) -> vector<[4]xf32> { - %0 = arith.constant 3 : index - %1 = vector.insertelement %arg0, %arg1[%0 : index] : vector<[4]xf32> - return %1 : vector<[4]xf32> -} -// CHECK-LABEL:
@insertelement_into_vec_1d_f32_scalable_idx_as_index_scalable( -// CHECK-SAME: %[[A:.*]]: f32, -// CHECK-SAME: %[[B:.*]]: vector<[4]xf32>) -// CHECK: %[[C:.*]] = arith.constant 3 : index -// CHECK: %[[I:.*]] = builtin.unrealized_conversion_cast %[[C]] : index to i64 -// CHECK: %[[X:.*]] = llvm.insertelement %[[A]], %[[B]][%[[I]] : i64] : vector<[4]xf32> -// CHECK: return %[[X]] : vector<[4]xf32> - -// ----- - -//===----------------------------------------------------------------------===// -// vector.insert -//===----------------------------------------------------------------------===// - -func.func @insert_scalar_into_vec_1d_f32(%arg0: f32, %arg1: vector<4xf32>) -> vector<4xf32> { - %0 = vector.insert %arg0, %arg1[3] : f32 into vector<4xf32> - return %0 : vector<4xf32> -} -// CHECK-LABEL: @insert_scalar_into_vec_1d_f32 -// CHECK: llvm.mlir.constant(3 : i64) : i64 -// CHECK: llvm.insertelement {{.*}}, {{.*}}[{{.*}} : i64] : vector<4xf32> -// CHECK: return {{.*}} : vector<4xf32> - -// ----- - -func.func @insert_scalar_into_vec_1d_f32_scalable(%arg0: f32, %arg1: vector<[4]xf32>) -> vector<[4]xf32> { - %0 = vector.insert %arg0, %arg1[3] : f32 into vector<[4]xf32> - return %0 : vector<[4]xf32> -} -// CHECK-LABEL: @insert_scalar_into_vec_1d_f32_scalable -// CHECK: llvm.mlir.constant(3 : i64) : i64 -// CHECK: llvm.insertelement {{.*}}, {{.*}}[{{.*}} : i64] : vector<[4]xf32> -// CHECK: return {{.*}} : vector<[4]xf32> - -// ----- - -func.func @insert_scalar_into_vec_1d_index(%arg0: index, %arg1: vector<4xindex>) -> vector<4xindex> { - %0 = vector.insert %arg0, %arg1[3] : index into vector<4xindex> - return %0 : vector<4xindex> -} -// CHECK-LABEL: @insert_scalar_into_vec_1d_index( -// CHECK-SAME: %[[A:.*]]: index, -// CHECK-SAME: %[[B:.*]]: vector<4xindex>) -// CHECK-DAG: %[[T0:.*]] = builtin.unrealized_conversion_cast %[[A]] : index to i64 -// CHECK-DAG: %[[T1:.*]] = builtin.unrealized_conversion_cast %[[B]] : vector<4xindex> to vector<4xi64> -// CHECK: %[[T3:.*]] = llvm.mlir.constant(3 : i64) : i64 -// CHECK: %[[T4:.*]] = llvm.insertelement %[[T0]], %[[T1]][%[[T3]] : i64] : vector<4xi64> -// CHECK: %[[T5:.*]] = builtin.unrealized_conversion_cast %[[T4]] : vector<4xi64> to vector<4xindex> -// CHECK: return %[[T5]] : vector<4xindex> - -// ----- - -func.func @insert_scalar_into_vec_1d_index_scalable(%arg0: index, %arg1: vector<[4]xindex>) -> vector<[4]xindex> { - %0 = vector.insert %arg0, %arg1[3] : index into vector<[4]xindex> - return %0 : vector<[4]xindex> -} -// CHECK-LABEL: @insert_scalar_into_vec_1d_index_scalable( -// CHECK-SAME: %[[A:.*]]: index, -// CHECK-SAME: %[[B:.*]]: vector<[4]xindex>) -// CHECK-DAG: %[[T0:.*]] = builtin.unrealized_conversion_cast %[[A]] : index to i64 -// CHECK-DAG: %[[T1:.*]] = builtin.unrealized_conversion_cast %[[B]] : vector<[4]xindex> to vector<[4]xi64> -// CHECK: %[[T3:.*]] = llvm.mlir.constant(3 : i64) : i64 -// CHECK: %[[T4:.*]] = llvm.insertelement %[[T0]], %[[T1]][%[[T3]] : i64] : vector<[4]xi64> -// CHECK: %[[T5:.*]] = builtin.unrealized_conversion_cast %[[T4]] : vector<[4]xi64> to vector<[4]xindex> -// CHECK: return %[[T5]] : vector<[4]xindex> - -// ----- - -func.func @insert_vec_2d_into_vec_3d_f32(%arg0: vector<8x16xf32>, %arg1: vector<4x8x16xf32>) -> vector<4x8x16xf32> { - %0 = vector.insert %arg0, %arg1[3] : vector<8x16xf32> into vector<4x8x16xf32> - return %0 : vector<4x8x16xf32> -} -// CHECK-LABEL: @insert_vec_2d_into_vec_3d_f32 -// CHECK: llvm.insertvalue {{.*}}, {{.*}}[3] : !llvm.array<4 x array<8 x vector<16xf32>>> -// CHECK: return {{.*}} : 
vector<4x8x16xf32> - -// ----- - -func.func @insert_vec_2d_into_vec_3d_f32_scalable(%arg0: vector<8x[16]xf32>, %arg1: vector<4x8x[16]xf32>) -> vector<4x8x[16]xf32> { - %0 = vector.insert %arg0, %arg1[3] : vector<8x[16]xf32> into vector<4x8x[16]xf32> - return %0 : vector<4x8x[16]xf32> -} -// CHECK-LABEL: @insert_vec_2d_into_vec_3d_f32_scalable -// CHECK: llvm.insertvalue {{.*}}, {{.*}}[3] : !llvm.array<4 x array<8 x vector<[16]xf32>>> -// CHECK: return {{.*}} : vector<4x8x[16]xf32> - -// ----- - -func.func @insert_vec_1d_into_vec_3d_f32(%arg0: vector<16xf32>, %arg1: vector<4x8x16xf32>) -> vector<4x8x16xf32> { - %0 = vector.insert %arg0, %arg1[3, 7] : vector<16xf32> into vector<4x8x16xf32> - return %0 : vector<4x8x16xf32> -} -// CHECK-LABEL: @insert_vec_1d_into_vec_3d_f32 -// CHECK: llvm.insertvalue {{.*}}, {{.*}}[3, 7] : !llvm.array<4 x array<8 x vector<16xf32>>> -// CHECK: return {{.*}} : vector<4x8x16xf32> - -// ----- - -func.func @insert_vec_1d_into_vec_3d_f32_scalable(%arg0: vector<[16]xf32>, %arg1: vector<4x8x[16]xf32>) -> vector<4x8x[16]xf32> { - %0 = vector.insert %arg0, %arg1[3, 7] : vector<[16]xf32> into vector<4x8x[16]xf32> - return %0 : vector<4x8x[16]xf32> -} -// CHECK-LABEL: @insert_vec_1d_into_vec_3d_f32_scalable -// CHECK: llvm.insertvalue {{.*}}, {{.*}}[3, 7] : !llvm.array<4 x array<8 x vector<[16]xf32>>> -// CHECK: return {{.*}} : vector<4x8x[16]xf32> - -// ----- - -func.func @insert_scalar_into_vec_3d_f32(%arg0: f32, %arg1: vector<4x8x16xf32>) -> vector<4x8x16xf32> { - %0 = vector.insert %arg0, %arg1[3, 7, 15] : f32 into vector<4x8x16xf32> - return %0 : vector<4x8x16xf32> -} -// CHECK-LABEL: @insert_scalar_into_vec_3d_f32 -// CHECK: llvm.extractvalue {{.*}}[3, 7] : !llvm.array<4 x array<8 x vector<16xf32>>> -// CHECK: llvm.mlir.constant(15 : i64) : i64 -// CHECK: llvm.insertelement {{.*}}, {{.*}}[{{.*}} : i64] : vector<16xf32> -// CHECK: llvm.insertvalue {{.*}}, {{.*}}[3, 7] : !llvm.array<4 x array<8 x vector<16xf32>>> -// CHECK: return {{.*}} : vector<4x8x16xf32> - -// ----- - -func.func @insert_scalar_into_vec_3d_f32_scalable(%arg0: f32, %arg1: vector<4x8x[16]xf32>) -> vector<4x8x[16]xf32> { - %0 = vector.insert %arg0, %arg1[3, 7, 15] : f32 into vector<4x8x[16]xf32> - return %0 : vector<4x8x[16]xf32> -} -// CHECK-LABEL: @insert_scalar_into_vec_3d_f32_scalable -// CHECK: llvm.extractvalue {{.*}}[3, 7] : !llvm.array<4 x array<8 x vector<[16]xf32>>> -// CHECK: llvm.mlir.constant(15 : i64) : i64 -// CHECK: llvm.insertelement {{.*}}, {{.*}}[{{.*}} : i64] : vector<[16]xf32> -// CHECK: llvm.insertvalue {{.*}}, {{.*}}[3, 7] : !llvm.array<4 x array<8 x vector<[16]xf32>>> -// CHECK: return {{.*}} : vector<4x8x[16]xf32> - -// ----- - -func.func @insert_scalar_into_vec_1d_f32_dynamic_idx(%arg0: vector<16xf32>, %arg1: f32, %arg2: index) - -> vector<16xf32> { - %0 = vector.insert %arg1, %arg0[%arg2]: f32 into vector<16xf32> - return %0 : vector<16xf32> -} - -// CHECK-LABEL: @insert_scalar_into_vec_1d_f32_dynamic_idx -// CHECK-SAME: %[[DST:.+]]: vector<16xf32>, %[[SRC:.+]]: f32, %[[INDEX:.+]]: index -// CHECK: %[[UC:.+]] = builtin.unrealized_conversion_cast %[[INDEX]] : index to i64 -// CHECK: llvm.insertelement %[[SRC]], %[[DST]][%[[UC]] : i64] : vector<16xf32> - -// ----- - -func.func @insert_scalar_into_vec_1d_f32_dynamic_idx_scalable(%arg0: vector<[16]xf32>, %arg1: f32, %arg2: index) - -> vector<[16]xf32> { - %0 = vector.insert %arg1, %arg0[%arg2]: f32 into vector<[16]xf32> - return %0 : vector<[16]xf32> -} - -// CHECK-LABEL: @insert_scalar_into_vec_1d_f32_dynamic_idx_scalable -// 
CHECK-SAME: %[[DST:.+]]: vector<[16]xf32>, %[[SRC:.+]]: f32, %[[INDEX:.+]]: index -// CHECK: %[[UC:.+]] = builtin.unrealized_conversion_cast %[[INDEX]] : index to i64 -// CHECK: llvm.insertelement %[[SRC]], %[[DST]][%[[UC]] : i64] : vector<[16]xf32> - -// ----- - -func.func @insert_scalar_into_vec_2d_f32_dynamic_idx(%arg0: vector<1x16xf32>, %arg1: f32, %idx: index) - -> vector<1x16xf32> { - %0 = vector.insert %arg1, %arg0[0, %idx]: f32 into vector<1x16xf32> - return %0 : vector<1x16xf32> -} - -// Multi-dim vectors are not supported but this test shouldn't crash. - -// CHECK-LABEL: @insert_scalar_into_vec_2d_f32_dynamic_idx( -// CHECK: vector.insert - -// ----- - -func.func @insert_scalar_into_vec_2d_f32_dynamic_idx_scalable(%arg0: vector<1x[16]xf32>, %arg1: f32, %idx: index) - -> vector<1x[16]xf32> { - %0 = vector.insert %arg1, %arg0[0, %idx]: f32 into vector<1x[16]xf32> - return %0 : vector<1x[16]xf32> -} - -// Multi-dim vectors are not supported but this test shouldn't crash. - -// CHECK-LABEL: @insert_scalar_into_vec_2d_f32_dynamic_idx_scalable( -// CHECK: vector.insert - -// ----- - -//===----------------------------------------------------------------------===// -// vector.type_cast -// -// TODO: Add tests for for vector.type_cast that would cover scalable vectors -//===----------------------------------------------------------------------===// - -func.func @type_cast_f32(%arg0: memref<8x8x8xf32>) -> memref<vector<8x8x8xf32>> { - %0 = vector.type_cast %arg0: memref<8x8x8xf32> to memref<vector<8x8x8xf32>> - return %0 : memref<vector<8x8x8xf32>> -} -// CHECK-LABEL: @type_cast_f32 -// CHECK: llvm.mlir.poison : !llvm.struct<(ptr, ptr, i64)> -// CHECK: %[[allocated:.*]] = llvm.extractvalue {{.*}}[0] : !llvm.struct<(ptr, ptr, i64, array<3 x i64>, array<3 x i64>)> -// CHECK: llvm.insertvalue %[[allocated]], {{.*}}[0] : !llvm.struct<(ptr, ptr, i64)> -// CHECK: %[[aligned:.*]] = llvm.extractvalue {{.*}}[1] : !llvm.struct<(ptr, ptr, i64, array<3 x i64>, array<3 x i64>)> -// CHECK: llvm.insertvalue %[[aligned]], {{.*}}[1] : !llvm.struct<(ptr, ptr, i64)> -// CHECK: llvm.mlir.constant(0 : index -// CHECK: llvm.insertvalue {{.*}}[2] : !llvm.struct<(ptr, ptr, i64)> - -// NOTE: No test for scalable vectors - the input memref is fixed size. - -// ----- - -func.func @type_cast_index(%arg0: memref<8x8x8xindex>) -> memref<vector<8x8x8xindex>> { - %0 = vector.type_cast %arg0: memref<8x8x8xindex> to memref<vector<8x8x8xindex>> - return %0 : memref<vector<8x8x8xindex>> -} -// CHECK-LABEL: @type_cast_index( -// CHECK-SAME: %[[A:.*]]: memref<8x8x8xindex>) -// CHECK: %{{.*}} = builtin.unrealized_conversion_cast %[[A]] : memref<8x8x8xindex> to !llvm.struct<(ptr, ptr, i64, array<3 x i64>, array<3 x i64>)> - -// CHECK: %{{.*}} = builtin.unrealized_conversion_cast %{{.*}} : !llvm.struct<(ptr, ptr, i64)> to memref<vector<8x8x8xindex>> - -// NOTE: No test for scalable vectors - the input memref is fixed size.
- -// ----- - -func.func @type_cast_non_zero_addrspace(%arg0: memref<8x8x8xf32, 3>) -> memref<vector<8x8x8xf32>, 3> { - %0 = vector.type_cast %arg0: memref<8x8x8xf32, 3> to memref<vector<8x8x8xf32>, 3> - return %0 : memref<vector<8x8x8xf32>, 3> -} -// CHECK-LABEL: @type_cast_non_zero_addrspace -// CHECK: llvm.mlir.poison : !llvm.struct<(ptr<3>, ptr<3>, i64)> -// CHECK: %[[allocated:.*]] = llvm.extractvalue {{.*}}[0] : !llvm.struct<(ptr<3>, ptr<3>, i64, array<3 x i64>, array<3 x i64>)> -// CHECK: llvm.insertvalue %[[allocated]], {{.*}}[0] : !llvm.struct<(ptr<3>, ptr<3>, i64)> -// CHECK: %[[aligned:.*]] = llvm.extractvalue {{.*}}[1] : !llvm.struct<(ptr<3>, ptr<3>, i64, array<3 x i64>, array<3 x i64>)> -// CHECK: llvm.insertvalue %[[aligned]], {{.*}}[1] : !llvm.struct<(ptr<3>, ptr<3>, i64)> -// CHECK: llvm.mlir.constant(0 : index -// CHECK: llvm.insertvalue {{.*}}[2] : !llvm.struct<(ptr<3>, ptr<3>, i64)> - -// NOTE: No test for scalable vectors - the input memref is fixed size. - -// ----- - -//===----------------------------------------------------------------------===// -// vector.print -//===----------------------------------------------------------------------===// - -func.func @print_scalar_i1(%arg0: i1) { - vector.print %arg0 : i1 - return -} -// -// Type "boolean" always uses zero extension. -// -// CHECK-LABEL: @print_scalar_i1( -// CHECK-SAME: %[[A:.*]]: i1) -// CHECK: %[[S:.*]] = arith.extui %[[A]] : i1 to i64 -// CHECK: llvm.call @printI64(%[[S]]) : (i64) -> () -// CHECK: llvm.call @printNewline() : () -> () - -// ----- - -func.func @print_scalar_i4(%arg0: i4) { - vector.print %arg0 : i4 - return -} -// CHECK-LABEL: @print_scalar_i4( -// CHECK-SAME: %[[A:.*]]: i4) -// CHECK: %[[S:.*]] = arith.extsi %[[A]] : i4 to i64 -// CHECK: llvm.call @printI64(%[[S]]) : (i64) -> () -// CHECK: llvm.call @printNewline() : () -> () - -// ----- - -func.func @print_scalar_si4(%arg0: si4) { - vector.print %arg0 : si4 - return -} -// CHECK-LABEL: @print_scalar_si4( -// CHECK-SAME: %[[A:.*]]: si4) -// CHECK: %[[C:.*]] = builtin.unrealized_conversion_cast %[[A]] : si4 to i4 -// CHECK: %[[S:.*]] = arith.extsi %[[C]] : i4 to i64 -// CHECK: llvm.call @printI64(%[[S]]) : (i64) -> () -// CHECK: llvm.call @printNewline() : () -> () - -// ----- - -func.func @print_scalar_ui4(%arg0: ui4) { - vector.print %arg0 : ui4 - return -} -// CHECK-LABEL: @print_scalar_ui4( -// CHECK-SAME: %[[A:.*]]: ui4) -// CHECK: %[[C:.*]] = builtin.unrealized_conversion_cast %[[A]] : ui4 to i4 -// CHECK: %[[S:.*]] = arith.extui %[[C]] : i4 to i64 -// CHECK: llvm.call @printU64(%[[S]]) : (i64) -> () -// CHECK: llvm.call @printNewline() : () -> () - -// ----- - -func.func @print_scalar_i32(%arg0: i32) { - vector.print %arg0 : i32 - return -} -// CHECK-LABEL: @print_scalar_i32( -// CHECK-SAME: %[[A:.*]]: i32) -// CHECK: %[[S:.*]] = arith.extsi %[[A]] : i32 to i64 -// CHECK: llvm.call @printI64(%[[S]]) : (i64) -> () -// CHECK: llvm.call @printNewline() : () -> () - -// ----- - -func.func @print_scalar_ui32(%arg0: ui32) { - vector.print %arg0 : ui32 - return -} -// CHECK-LABEL: @print_scalar_ui32( -// CHECK-SAME: %[[A:.*]]: ui32) -// CHECK: %[[C:.*]] = builtin.unrealized_conversion_cast %[[A]] : ui32 to i32 -// CHECK: %[[S:.*]] = arith.extui %[[C]] : i32 to i64 -// CHECK: llvm.call @printU64(%[[S]]) : (i64) -> () - -// ----- - -func.func @print_scalar_i40(%arg0: i40) { - vector.print %arg0 : i40 - return -} -// CHECK-LABEL: @print_scalar_i40( -// CHECK-SAME: %[[A:.*]]: i40) -// CHECK: %[[S:.*]] = arith.extsi %[[A]] : i40 to i64 -// CHECK: llvm.call @printI64(%[[S]]) : (i64) -> () -// CHECK:
llvm.call @printNewline() : () -> () - -// ----- - -func.func @print_scalar_si40(%arg0: si40) { - vector.print %arg0 : si40 - return -} -// CHECK-LABEL: @print_scalar_si40( -// CHECK-SAME: %[[A:.*]]: si40) -// CHECK: %[[C:.*]] = builtin.unrealized_conversion_cast %[[A]] : si40 to i40 -// CHECK: %[[S:.*]] = arith.extsi %[[C]] : i40 to i64 -// CHECK: llvm.call @printI64(%[[S]]) : (i64) -> () -// CHECK: llvm.call @printNewline() : () -> () - -// ----- - -func.func @print_scalar_ui40(%arg0: ui40) { - vector.print %arg0 : ui40 - return -} -// CHECK-LABEL: @print_scalar_ui40( -// CHECK-SAME: %[[A:.*]]: ui40) -// CHECK: %[[C:.*]] = builtin.unrealized_conversion_cast %[[A]] : ui40 to i40 -// CHECK: %[[S:.*]] = arith.extui %[[C]] : i40 to i64 -// CHECK: llvm.call @printU64(%[[S]]) : (i64) -> () -// CHECK: llvm.call @printNewline() : () -> () - -// ----- - -func.func @print_scalar_i64(%arg0: i64) { - vector.print %arg0 : i64 - return -} -// CHECK-LABEL: @print_scalar_i64( -// CHECK-SAME: %[[A:.*]]: i64) -// CHECK: llvm.call @printI64(%[[A]]) : (i64) -> () -// CHECK: llvm.call @printNewline() : () -> () - -// ----- - -func.func @print_scalar_ui64(%arg0: ui64) { - vector.print %arg0 : ui64 - return -} -// CHECK-LABEL: @print_scalar_ui64( -// CHECK-SAME: %[[A:.*]]: ui64) -// CHECK: %[[C:.*]] = builtin.unrealized_conversion_cast %[[A]] : ui64 to i64 -// CHECK: llvm.call @printU64(%[[C]]) : (i64) -> () -// CHECK: llvm.call @printNewline() : () -> () - -// ----- - -func.func @print_scalar_index(%arg0: index) { - vector.print %arg0 : index - return -} -// CHECK-LABEL: @print_scalar_index( -// CHECK-SAME: %[[A:.*]]: index) -// CHECK: %[[C:.*]] = builtin.unrealized_conversion_cast %[[A]] : index to i64 -// CHECK: llvm.call @printU64(%[[C]]) : (i64) -> () -// CHECK: llvm.call @printNewline() : () -> () - -// ----- - -func.func @print_scalar_f32(%arg0: f32) { - vector.print %arg0 : f32 - return -} -// CHECK-LABEL: @print_scalar_f32( -// CHECK-SAME: %[[A:.*]]: f32) -// CHECK: llvm.call @printF32(%[[A]]) : (f32) -> () -// CHECK: llvm.call @printNewline() : () -> () - -// ----- - -func.func @print_scalar_f64(%arg0: f64) { - vector.print %arg0 : f64 - return -} -// CHECK-LABEL: @print_scalar_f64( -// CHECK-SAME: %[[A:.*]]: f64) -// CHECK: llvm.call @printF64(%[[A]]) : (f64) -> () -// CHECK: llvm.call @printNewline() : () -> () - -// ----- - -// CHECK-LABEL: module { -// CHECK: llvm.func @printString(!llvm.ptr) -// CHECK: llvm.mlir.global private constant @[[GLOBAL_STR:.*]]({{.*}}) -// CHECK: @print_string -// CHECK-NEXT: %[[GLOBAL_ADDR:.*]] = llvm.mlir.addressof @[[GLOBAL_STR]] : !llvm.ptr -// CHECK-NEXT: %[[STR_PTR:.*]] = llvm.getelementptr %[[GLOBAL_ADDR]][0] : (!llvm.ptr) -> !llvm.ptr -// CHECK-NEXT: llvm.call @printString(%[[STR_PTR]]) : (!llvm.ptr) -> () -func.func @print_string() { - vector.print str "Hello, World!" 
- return -} - -// ----- - -//===----------------------------------------------------------------------===// -// vector.extract_strided_slice -//===----------------------------------------------------------------------===// - -func.func @extract_strided_slice_f32_1d_from_1d(%arg0: vector<4xf32>) -> vector<2xf32> { - %0 = vector.extract_strided_slice %arg0 {offsets = [2], sizes = [2], strides = [1]} : vector<4xf32> to vector<2xf32> - return %0 : vector<2xf32> -} -// CHECK-LABEL: @extract_strided_slice_f32_1d_from_1d -// CHECK-SAME: %[[A:.*]]: vector<4xf32>) -// CHECK: %[[T0:.*]] = llvm.shufflevector %[[A]], %[[A]] [2, 3] : vector<4xf32> -// CHECK: return %[[T0]] : vector<2xf32> - -// NOTE: For scalable vectors we could only extract vector<[4]xf32> from vector<[4]xf32>, but that would be a NOP. - -// ----- - -func.func @extract_strided_slice_index_1d_from_1d(%arg0: vector<4xindex>) -> vector<2xindex> { - %0 = vector.extract_strided_slice %arg0 {offsets = [2], sizes = [2], strides = [1]} : vector<4xindex> to vector<2xindex> - return %0 : vector<2xindex> -} -// CHECK-LABEL: @extract_strided_slice_index_1d_from_1d -// CHECK-SAME: %[[A:.*]]: vector<4xindex>) -// CHECK: %[[T0:.*]] = builtin.unrealized_conversion_cast %[[A]] : vector<4xindex> to vector<4xi64> -// CHECK: %[[T2:.*]] = llvm.shufflevector %[[T0]], %[[T0]] [2, 3] : vector<4xi64> -// CHECK: %[[T3:.*]] = builtin.unrealized_conversion_cast %[[T2]] : vector<2xi64> to vector<2xindex> -// CHECK: return %[[T3]] : vector<2xindex> - -// NOTE: For scalable vectors we could only extract vector<[4]xindex> from vector<[4]xindex>, but that would be a NOP. - -// ----- - -func.func @extract_strided_slice_f32_1d_from_2d(%arg0: vector<4x8xf32>) -> vector<2x8xf32> { - %0 = vector.extract_strided_slice %arg0 {offsets = [2], sizes = [2], strides = [1]} : vector<4x8xf32> to vector<2x8xf32> - return %0 : vector<2x8xf32> -} -// CHECK-LABEL: @extract_strided_slice_f32_1d_from_2d( -// CHECK-SAME: %[[ARG:.*]]: vector<4x8xf32>) -// CHECK: %[[A:.*]] = builtin.unrealized_conversion_cast %[[ARG]] : vector<4x8xf32> to !llvm.array<4 x vector<8xf32>> -// CHECK: %[[T0:.*]] = llvm.mlir.poison : !llvm.array<2 x vector<8xf32>> -// CHECK: %[[T1:.*]] = llvm.extractvalue %[[A]][2] : !llvm.array<4 x vector<8xf32>> -// CHECK: %[[T2:.*]] = llvm.insertvalue %[[T1]], %[[T0]][0] : !llvm.array<2 x vector<8xf32>> -// CHECK: %[[T3:.*]] = llvm.extractvalue %[[A]][3] : !llvm.array<4 x vector<8xf32>> -// CHECK: %[[T4:.*]] = llvm.insertvalue %[[T3]], %[[T2]][1] : !llvm.array<2 x vector<8xf32>> -// CHECK: %[[T5:.*]] = builtin.unrealized_conversion_cast %[[T4]] : !llvm.array<2 x vector<8xf32>> to vector<2x8xf32> -// CHECK: return %[[T5]] - -// ----- - -func.func @extract_strided_slice_f32_1d_from_2d_scalable(%arg0: vector<4x[8]xf32>) -> vector<2x[8]xf32> { - %0 = vector.extract_strided_slice %arg0 {offsets = [2], sizes = [2], strides = [1]} : vector<4x[8]xf32> to vector<2x[8]xf32> - return %0 : vector<2x[8]xf32> -} -// CHECK-LABEL: func.func @extract_strided_slice_f32_1d_from_2d_scalable( -// CHECK-SAME: %[[ARG:.*]]: vector<4x[8]xf32>) -// CHECK: %[[A:.*]] = builtin.unrealized_conversion_cast %[[ARG]] : vector<4x[8]xf32> to !llvm.array<4 x vector<[8]xf32>> -// CHECK: %[[CST:.*]] = arith.constant dense<0.000000e+00> : vector<2x[8]xf32> -// CHECK: %[[DST:.*]] = builtin.unrealized_conversion_cast %[[CST]] : vector<2x[8]xf32> to !llvm.array<2 x vector<[8]xf32>> -// CHECK: %[[E0:.*]] = llvm.extractvalue %[[A]][2] : !llvm.array<4 x vector<[8]xf32>> -// CHECK: %[[E1:.*]] = llvm.extractvalue 
%[[A]][3] : !llvm.array<4 x vector<[8]xf32>> -// CHECK: %[[I0:.*]] = llvm.insertvalue %[[E0]], %[[DST]][0] : !llvm.array<2 x vector<[8]xf32>> -// CHECK: %[[I1:.*]] = llvm.insertvalue %[[E1]], %[[I0]][1] : !llvm.array<2 x vector<[8]xf32>> -// CHECK: %[[RES:.*]] = builtin.unrealized_conversion_cast %[[I1]] : !llvm.array<2 x vector<[8]xf32>> to vector<2x[8]xf32> -// CHECK: return %[[RES]] - -// ----- - -func.func @extract_strided_slice_f32_2d_from_2d(%arg0: vector<4x8xf32>) -> vector<2x2xf32> { - %0 = vector.extract_strided_slice %arg0 {offsets = [2, 2], sizes = [2, 2], strides = [1, 1]} : vector<4x8xf32> to vector<2x2xf32> - return %0 : vector<2x2xf32> -} -// CHECK-LABEL: @extract_strided_slice_f32_2d_from_2d( -// CHECK-SAME: %[[ARG:.*]]: vector<4x8xf32>) -// CHECK: %[[A:.*]] = builtin.unrealized_conversion_cast %[[ARG]] : vector<4x8xf32> to !llvm.array<4 x vector<8xf32>> -// CHECK: %[[VAL_2:.*]] = arith.constant dense<0.000000e+00> : vector<2x2xf32> -// CHECK: %[[VAL_6:.*]] = builtin.unrealized_conversion_cast %[[VAL_2]] : vector<2x2xf32> to !llvm.array<2 x vector<2xf32>> -// CHECK: %[[T2:.*]] = llvm.extractvalue %[[A]][2] : !llvm.array<4 x vector<8xf32>> -// CHECK: %[[T3:.*]] = llvm.shufflevector %[[T2]], %[[T2]] [2, 3] : vector<8xf32> -// CHECK: %[[T4:.*]] = llvm.insertvalue %[[T3]], %[[VAL_6]][0] : !llvm.array<2 x vector<2xf32>> -// CHECK: %[[T5:.*]] = llvm.extractvalue %[[A]][3] : !llvm.array<4 x vector<8xf32>> -// CHECK: %[[T6:.*]] = llvm.shufflevector %[[T5]], %[[T5]] [2, 3] : vector<8xf32> -// CHECK: %[[T7:.*]] = llvm.insertvalue %[[T6]], %[[T4]][1] : !llvm.array<2 x vector<2xf32>> -// CHECK: %[[VAL_12:.*]] = builtin.unrealized_conversion_cast %[[T7]] : !llvm.array<2 x vector<2xf32>> to vector<2x2xf32> -// CHECK: return %[[VAL_12]] : vector<2x2xf32> - -// ----- - -// NOTE: For scalable vectors, we can only extract "full" scalable dimensions -// (e.g. [8] from [8], but not [4] from [8]). 
- -func.func @extract_strided_slice_f32_2d_from_2d_scalable(%arg0: vector<4x[8]xf32>) -> vector<2x[8]xf32> { - %0 = vector.extract_strided_slice %arg0 {offsets = [2, 0], sizes = [2, 8], strides = [1, 1]} : vector<4x[8]xf32> to vector<2x[8]xf32> - return %0 : vector<2x[8]xf32> -} -// CHECK-LABEL: @extract_strided_slice_f32_2d_from_2d_scalable( -// CHECK-SAME: %[[ARG:.*]]: vector<4x[8]xf32>) -// CHECK: %[[T1:.*]] = builtin.unrealized_conversion_cast %[[ARG]] : vector<4x[8]xf32> to !llvm.array<4 x vector<[8]xf32>> -// CHECK: %[[T3:.*]] = arith.constant dense<0.000000e+00> : vector<2x[8]xf32> -// CHECK: %[[T4:.*]] = builtin.unrealized_conversion_cast %[[T3]] : vector<2x[8]xf32> to !llvm.array<2 x vector<[8]xf32>> -// CHECK: %[[T5:.*]] = llvm.extractvalue %[[T1]][2] : !llvm.array<4 x vector<[8]xf32>> -// CHECK: %[[T6:.*]] = llvm.insertvalue %[[T5]], %[[T4]][0] : !llvm.array<2 x vector<[8]xf32>> -// CHECK: %[[T7:.*]] = llvm.extractvalue %[[T1]][3] : !llvm.array<4 x vector<[8]xf32>> -// CHECK: %[[T8:.*]] = llvm.insertvalue %[[T7]], %[[T6]][1] : !llvm.array<2 x vector<[8]xf32>> -// CHECK: %[[T9:.*]] = builtin.unrealized_conversion_cast %[[T8]] : !llvm.array<2 x vector<[8]xf32>> to vector<2x[8]xf32> -// CHECK: return %[[T9]] : vector<2x[8]xf32> +// CHECK-LABEL: @extract_strided_slice_f32_2d_from_2d_scalable( +// CHECK-SAME: %[[ARG:.*]]: vector<4x[8]xf32>) +// CHECK: %[[T1:.*]] = builtin.unrealized_conversion_cast %[[ARG]] : vector<4x[8]xf32> to !llvm.array<4 x vector<[8]xf32>> +// CHECK: %[[T3:.*]] = arith.constant dense<0.000000e+00> : vector<2x[8]xf32> +// CHECK: %[[T4:.*]] = builtin.unrealized_conversion_cast %[[T3]] : vector<2x[8]xf32> to !llvm.array<2 x vector<[8]xf32>> +// CHECK: %[[T5:.*]] = llvm.extractvalue %[[T1]][2] : !llvm.array<4 x vector<[8]xf32>> +// CHECK: %[[T6:.*]] = llvm.insertvalue %[[T5]], %[[T4]][0] : !llvm.array<2 x vector<[8]xf32>> +// CHECK: %[[T7:.*]] = llvm.extractvalue %[[T1]][3] : !llvm.array<4 x vector<[8]xf32>> +// CHECK: %[[T8:.*]] = llvm.insertvalue %[[T7]], %[[T6]][1] : !llvm.array<2 x vector<[8]xf32>> +// CHECK: %[[T9:.*]] = builtin.unrealized_conversion_cast %[[T8]] : !llvm.array<2 x vector<[8]xf32>> to vector<2x[8]xf32> +// CHECK: return %[[T9]] : vector<2x[8]xf32> // ----- @@ -2293,691 +1424,6 @@ func.func @fma_scalable(%vec_1d: vector<[8]xf32>, %vec_2d: vector<2x[4]xf32>, %v return %0, %1, %2: vector<[8]xf32>, vector<2x[4]xf32>, vector<1x1x[1]xf32> } - -// ----- - -//===----------------------------------------------------------------------===// -// vector.reduction -//===----------------------------------------------------------------------===// - -func.func @reduce_0d_f32(%arg0: vector<f32>) -> f32 { - %0 = vector.reduction <add>, %arg0 : vector<f32> into f32 - return %0 : f32 -} -// CHECK-LABEL: @reduce_0d_f32( -// CHECK-SAME: %[[A:.*]]: vector<f32>) -// CHECK: %[[CA:.*]] = builtin.unrealized_conversion_cast %[[A]] : vector<f32> to vector<1xf32> -// CHECK: %[[C:.*]] = llvm.mlir.constant(0.000000e+00 : f32) : f32 -// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.fadd"(%[[C]], %[[CA]]) -// CHECK-SAME: <{fastmathFlags = #llvm.fastmath<none>}> : (f32, vector<1xf32>) -> f32 -// CHECK: return %[[V]] : f32 - -// ----- - -func.func @reduce_f16(%arg0: vector<16xf16>) -> f16 { - %0 = vector.reduction <add>, %arg0 : vector<16xf16> into f16 - return %0 : f16 -} -// CHECK-LABEL: @reduce_f16( -// CHECK-SAME: %[[A:.*]]: vector<16xf16>) -// CHECK: %[[C:.*]] = llvm.mlir.constant(0.000000e+00 : f16) : f16 -// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.fadd"(%[[C]], %[[A]]) -// CHECK-SAME: <{fastmathFlags =
#llvm.fastmath<none>}> : (f16, vector<16xf16>) -> f16 -// CHECK: return %[[V]] : f16 - -// ----- - -func.func @reduce_f16_scalable(%arg0: vector<[16]xf16>) -> f16 { - %0 = vector.reduction <add>, %arg0 : vector<[16]xf16> into f16 - return %0 : f16 -} -// CHECK-LABEL: @reduce_f16_scalable( -// CHECK-SAME: %[[A:.*]]: vector<[16]xf16>) -// CHECK: %[[C:.*]] = llvm.mlir.constant(0.000000e+00 : f16) : f16 -// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.fadd"(%[[C]], %[[A]]) -// CHECK-SAME: <{fastmathFlags = #llvm.fastmath<none>}> : (f16, vector<[16]xf16>) -> f16 -// CHECK: return %[[V]] : f16 - -// ----- - -func.func @reduce_f32(%arg0: vector<16xf32>) -> f32 { - %0 = vector.reduction <add>, %arg0 : vector<16xf32> into f32 - return %0 : f32 -} -// CHECK-LABEL: @reduce_f32( -// CHECK-SAME: %[[A:.*]]: vector<16xf32>) -// CHECK: %[[C:.*]] = llvm.mlir.constant(0.000000e+00 : f32) : f32 -// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.fadd"(%[[C]], %[[A]]) -// CHECK-SAME: <{fastmathFlags = #llvm.fastmath<none>}> : (f32, vector<16xf32>) -> f32 -// CHECK: return %[[V]] : f32 - -// ----- - -func.func @reduce_f32_scalable(%arg0: vector<[16]xf32>) -> f32 { - %0 = vector.reduction <add>, %arg0 : vector<[16]xf32> into f32 - return %0 : f32 -} -// CHECK-LABEL: @reduce_f32_scalable( -// CHECK-SAME: %[[A:.*]]: vector<[16]xf32>) -// CHECK: %[[C:.*]] = llvm.mlir.constant(0.000000e+00 : f32) : f32 -// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.fadd"(%[[C]], %[[A]]) -// CHECK-SAME: <{fastmathFlags = #llvm.fastmath<none>}> : (f32, vector<[16]xf32>) -> f32 -// CHECK: return %[[V]] : f32 - -// ----- - -func.func @reduce_f64(%arg0: vector<16xf64>) -> f64 { - %0 = vector.reduction <add>, %arg0 : vector<16xf64> into f64 - return %0 : f64 -} -// CHECK-LABEL: @reduce_f64( -// CHECK-SAME: %[[A:.*]]: vector<16xf64>) -// CHECK: %[[C:.*]] = llvm.mlir.constant(0.000000e+00 : f64) : f64 -// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.fadd"(%[[C]], %[[A]]) -// CHECK-SAME: <{fastmathFlags = #llvm.fastmath<none>}> : (f64, vector<16xf64>) -> f64 -// CHECK: return %[[V]] : f64 - -// ----- - -func.func @reduce_f64_scalable(%arg0: vector<[16]xf64>) -> f64 { - %0 = vector.reduction <add>, %arg0 : vector<[16]xf64> into f64 - return %0 : f64 -} -// CHECK-LABEL: @reduce_f64_scalable( -// CHECK-SAME: %[[A:.*]]: vector<[16]xf64>) -// CHECK: %[[C:.*]] = llvm.mlir.constant(0.000000e+00 : f64) : f64 -// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.fadd"(%[[C]], %[[A]]) -// CHECK-SAME: <{fastmathFlags = #llvm.fastmath<none>}> : (f64, vector<[16]xf64>) -> f64 -// CHECK: return %[[V]] : f64 - -// ----- - -func.func @reduce_i8(%arg0: vector<16xi8>) -> i8 { - %0 = vector.reduction <add>, %arg0 : vector<16xi8> into i8 - return %0 : i8 -} -// CHECK-LABEL: @reduce_i8( -// CHECK-SAME: %[[A:.*]]: vector<16xi8>) -// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.add"(%[[A]]) -// CHECK: return %[[V]] : i8 - -// ----- - -func.func @reduce_i8_scalable(%arg0: vector<[16]xi8>) -> i8 { - %0 = vector.reduction <add>, %arg0 : vector<[16]xi8> into i8 - return %0 : i8 -} -// CHECK-LABEL: @reduce_i8_scalable( -// CHECK-SAME: %[[A:.*]]: vector<[16]xi8>) -// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.add"(%[[A]]) -// CHECK: return %[[V]] : i8 - -// ----- - -func.func @reduce_i32(%arg0: vector<16xi32>) -> i32 { - %0 = vector.reduction <add>, %arg0 : vector<16xi32> into i32 - return %0 : i32 -} -// CHECK-LABEL: @reduce_i32( -// CHECK-SAME: %[[A:.*]]: vector<16xi32>) -// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.add"(%[[A]]) -// CHECK: return %[[V]] : i32 - -// ----- - -func.func @reduce_i32_scalable(%arg0: vector<[16]xi32>) -> i32 { - %0 =
vector.reduction <add>, %arg0 : vector<[16]xi32> into i32 - return %0 : i32 -} -// CHECK-LABEL: @reduce_i32_scalable( -// CHECK-SAME: %[[A:.*]]: vector<[16]xi32>) -// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.add"(%[[A]]) -// CHECK: return %[[V]] : i32 - -// ----- - -func.func @reduce_acc_i32(%arg0: vector<16xi32>, %arg1 : i32) -> i32 { - %0 = vector.reduction <add>, %arg0, %arg1 : vector<16xi32> into i32 - return %0 : i32 -} -// CHECK-LABEL: @reduce_acc_i32( -// CHECK-SAME: %[[A:.*]]: vector<16xi32>, %[[ACC:.*]]: i32) -// CHECK: %[[R:.*]] = "llvm.intr.vector.reduce.add"(%[[A]]) -// CHECK: %[[V:.*]] = llvm.add %[[ACC]], %[[R]] -// CHECK: return %[[V]] : i32 - -// ----- - -func.func @reduce_acc_i32_scalable(%arg0: vector<[16]xi32>, %arg1 : i32) -> i32 { - %0 = vector.reduction <add>, %arg0, %arg1 : vector<[16]xi32> into i32 - return %0 : i32 -} -// CHECK-LABEL: @reduce_acc_i32_scalable( -// CHECK-SAME: %[[A:.*]]: vector<[16]xi32>, %[[ACC:.*]]: i32) -// CHECK: %[[R:.*]] = "llvm.intr.vector.reduce.add"(%[[A]]) -// CHECK: %[[V:.*]] = llvm.add %[[ACC]], %[[R]] -// CHECK: return %[[V]] : i32 - -// ----- - -func.func @reduce_mul_i32(%arg0: vector<16xi32>) -> i32 { - %0 = vector.reduction <mul>, %arg0 : vector<16xi32> into i32 - return %0 : i32 -} -// CHECK-LABEL: @reduce_mul_i32( -// CHECK-SAME: %[[A:.*]]: vector<16xi32>) -// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.mul"(%[[A]]) -// CHECK: return %[[V]] : i32 - -// ----- - -func.func @reduce_mul_i32_scalable(%arg0: vector<[16]xi32>) -> i32 { - %0 = vector.reduction <mul>, %arg0 : vector<[16]xi32> into i32 - return %0 : i32 -} -// CHECK-LABEL: @reduce_mul_i32_scalable( -// CHECK-SAME: %[[A:.*]]: vector<[16]xi32>) -// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.mul"(%[[A]]) -// CHECK: return %[[V]] : i32 - -// ----- - -func.func @reduce_mul_acc_i32(%arg0: vector<16xi32>, %arg1 : i32) -> i32 { - %0 = vector.reduction <mul>, %arg0, %arg1 : vector<16xi32> into i32 - return %0 : i32 -} -// CHECK-LABEL: @reduce_mul_acc_i32( -// CHECK-SAME: %[[A:.*]]: vector<16xi32>, %[[ACC:.*]]: i32) -// CHECK: %[[R:.*]] = "llvm.intr.vector.reduce.mul"(%[[A]]) -// CHECK: %[[V:.*]] = llvm.mul %[[ACC]], %[[R]] -// CHECK: return %[[V]] : i32 - -// ----- - -func.func @reduce_mul_acc_i32_scalable(%arg0: vector<[16]xi32>, %arg1 : i32) -> i32 { - %0 = vector.reduction <mul>, %arg0, %arg1 : vector<[16]xi32> into i32 - return %0 : i32 -} -// CHECK-LABEL: @reduce_mul_acc_i32_scalable( -// CHECK-SAME: %[[A:.*]]: vector<[16]xi32>, %[[ACC:.*]]: i32) -// CHECK: %[[R:.*]] = "llvm.intr.vector.reduce.mul"(%[[A]]) -// CHECK: %[[V:.*]] = llvm.mul %[[ACC]], %[[R]] -// CHECK: return %[[V]] : i32 - -// ----- - -func.func @reduce_fmaximum_f32(%arg0: vector<16xf32>, %arg1: f32) -> f32 { - %0 = vector.reduction <maximumf>, %arg0, %arg1 : vector<16xf32> into f32 - return %0 : f32 -} -// CHECK-LABEL: @reduce_fmaximum_f32( -// CHECK-SAME: %[[A:.*]]: vector<16xf32>, %[[B:.*]]: f32) -// CHECK: %[[V:.*]] = llvm.intr.vector.reduce.fmaximum(%[[A]]) : (vector<16xf32>) -> f32 -// CHECK: %[[R:.*]] = llvm.intr.maximum(%[[V]], %[[B]]) : (f32, f32) -> f32 -// CHECK: return %[[R]] : f32 - -// ----- - -func.func @reduce_fmaximum_f32_scalable(%arg0: vector<[16]xf32>, %arg1: f32) -> f32 { - %0 = vector.reduction <maximumf>, %arg0, %arg1 : vector<[16]xf32> into f32 - return %0 : f32 -} -// CHECK-LABEL: @reduce_fmaximum_f32_scalable( -// CHECK-SAME: %[[A:.*]]: vector<[16]xf32>, %[[B:.*]]: f32) -// CHECK: %[[V:.*]] = llvm.intr.vector.reduce.fmaximum(%[[A]]) : (vector<[16]xf32>) -> f32 -// CHECK: %[[R:.*]] = llvm.intr.maximum(%[[V]], %[[B]]) : (f32, f32) -> f32 -//
CHECK: return %[[R]] : f32 - -// ----- - -func.func @reduce_fminimum_f32(%arg0: vector<16xf32>, %arg1: f32) -> f32 { - %0 = vector.reduction <minimumf>, %arg0, %arg1 : vector<16xf32> into f32 - return %0 : f32 -} -// CHECK-LABEL: @reduce_fminimum_f32( -// CHECK-SAME: %[[A:.*]]: vector<16xf32>, %[[B:.*]]: f32) -// CHECK: %[[V:.*]] = llvm.intr.vector.reduce.fminimum(%[[A]]) : (vector<16xf32>) -> f32 -// CHECK: %[[R:.*]] = llvm.intr.minimum(%[[V]], %[[B]]) : (f32, f32) -> f32 -// CHECK: return %[[R]] : f32 - -// ----- - -func.func @reduce_fminimum_f32_scalable(%arg0: vector<[16]xf32>, %arg1: f32) -> f32 { - %0 = vector.reduction <minimumf>, %arg0, %arg1 : vector<[16]xf32> into f32 - return %0 : f32 -} -// CHECK-LABEL: @reduce_fminimum_f32_scalable( -// CHECK-SAME: %[[A:.*]]: vector<[16]xf32>, %[[B:.*]]: f32) -// CHECK: %[[V:.*]] = llvm.intr.vector.reduce.fminimum(%[[A]]) : (vector<[16]xf32>) -> f32 -// CHECK: %[[R:.*]] = llvm.intr.minimum(%[[V]], %[[B]]) : (f32, f32) -> f32 -// CHECK: return %[[R]] : f32 - -// ----- - -func.func @reduce_fmax_f32(%arg0: vector<16xf32>, %arg1: f32) -> f32 { - %0 = vector.reduction <maxnumf>, %arg0, %arg1 : vector<16xf32> into f32 - return %0 : f32 -} -// CHECK-LABEL: @reduce_fmax_f32( -// CHECK-SAME: %[[A:.*]]: vector<16xf32>, %[[B:.*]]: f32) -// CHECK: %[[V:.*]] = llvm.intr.vector.reduce.fmax(%[[A]]) : (vector<16xf32>) -> f32 -// CHECK: %[[R:.*]] = llvm.intr.maxnum(%[[V]], %[[B]]) : (f32, f32) -> f32 -// CHECK: return %[[R]] : f32 - -// ----- - -func.func @reduce_fmax_f32_scalable(%arg0: vector<[16]xf32>, %arg1: f32) -> f32 { - %0 = vector.reduction <maxnumf>, %arg0, %arg1 : vector<[16]xf32> into f32 - return %0 : f32 -} -// CHECK-LABEL: @reduce_fmax_f32_scalable( -// CHECK-SAME: %[[A:.*]]: vector<[16]xf32>, %[[B:.*]]: f32) -// CHECK: %[[V:.*]] = llvm.intr.vector.reduce.fmax(%[[A]]) : (vector<[16]xf32>) -> f32 -// CHECK: %[[R:.*]] = llvm.intr.maxnum(%[[V]], %[[B]]) : (f32, f32) -> f32 -// CHECK: return %[[R]] : f32 - -// ----- - -func.func @reduce_fmin_f32(%arg0: vector<16xf32>, %arg1: f32) -> f32 { - %0 = vector.reduction <minnumf>, %arg0, %arg1 : vector<16xf32> into f32 - return %0 : f32 -} -// CHECK-LABEL: @reduce_fmin_f32( -// CHECK-SAME: %[[A:.*]]: vector<16xf32>, %[[B:.*]]: f32) -// CHECK: %[[V:.*]] = llvm.intr.vector.reduce.fmin(%[[A]]) : (vector<16xf32>) -> f32 -// CHECK: %[[R:.*]] = llvm.intr.minnum(%[[V]], %[[B]]) : (f32, f32) -> f32 -// CHECK: return %[[R]] : f32 - -// ----- - -func.func @reduce_fmin_f32_scalable(%arg0: vector<[16]xf32>, %arg1: f32) -> f32 { - %0 = vector.reduction <minnumf>, %arg0, %arg1 : vector<[16]xf32> into f32 - return %0 : f32 -} -// CHECK-LABEL: @reduce_fmin_f32_scalable( -// CHECK-SAME: %[[A:.*]]: vector<[16]xf32>, %[[B:.*]]: f32) -// CHECK: %[[V:.*]] = llvm.intr.vector.reduce.fmin(%[[A]]) : (vector<[16]xf32>) -> f32 -// CHECK: %[[R:.*]] = llvm.intr.minnum(%[[V]], %[[B]]) : (f32, f32) -> f32 -// CHECK: return %[[R]] : f32 - -// ----- - -func.func @reduce_minui_i32(%arg0: vector<16xi32>) -> i32 { - %0 = vector.reduction <minui>, %arg0 : vector<16xi32> into i32 - return %0 : i32 -} -// CHECK-LABEL: @reduce_minui_i32( -// CHECK-SAME: %[[A:.*]]: vector<16xi32>) -// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.umin"(%[[A]]) -// CHECK: return %[[V]] : i32 - -// ----- - -func.func @reduce_minui_i32_scalable(%arg0: vector<[16]xi32>) -> i32 { - %0 = vector.reduction <minui>, %arg0 : vector<[16]xi32> into i32 - return %0 : i32 -} -// CHECK-LABEL: @reduce_minui_i32_scalable( -// CHECK-SAME: %[[A:.*]]: vector<[16]xi32>) -// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.umin"(%[[A]]) -// CHECK: return %[[V]]
: i32 - -// ----- - -func.func @reduce_minui_acc_i32(%arg0: vector<16xi32>, %arg1 : i32) -> i32 { - %0 = vector.reduction , %arg0, %arg1 : vector<16xi32> into i32 - return %0 : i32 -} -// CHECK-LABEL: @reduce_minui_acc_i32( -// CHECK-SAME: %[[A:.*]]: vector<16xi32>, %[[ACC:.*]]: i32) -// CHECK: %[[R:.*]] = "llvm.intr.vector.reduce.umin"(%[[A]]) -// CHECK: %[[S:.*]] = llvm.icmp "ule" %[[ACC]], %[[R]] -// CHECK: %[[V:.*]] = llvm.select %[[S]], %[[ACC]], %[[R]] -// CHECK: return %[[V]] : i32 - -// ----- - -func.func @reduce_minui_acc_i32_scalable(%arg0: vector<[16]xi32>, %arg1 : i32) -> i32 { - %0 = vector.reduction , %arg0, %arg1 : vector<[16]xi32> into i32 - return %0 : i32 -} -// CHECK-LABEL: @reduce_minui_acc_i32_scalable( -// CHECK-SAME: %[[A:.*]]: vector<[16]xi32>, %[[ACC:.*]]: i32) -// CHECK: %[[R:.*]] = "llvm.intr.vector.reduce.umin"(%[[A]]) -// CHECK: %[[S:.*]] = llvm.icmp "ule" %[[ACC]], %[[R]] -// CHECK: %[[V:.*]] = llvm.select %[[S]], %[[ACC]], %[[R]] -// CHECK: return %[[V]] : i32 - -// ----- - -func.func @reduce_maxui_i32(%arg0: vector<16xi32>) -> i32 { - %0 = vector.reduction , %arg0 : vector<16xi32> into i32 - return %0 : i32 -} -// CHECK-LABEL: @reduce_maxui_i32( -// CHECK-SAME: %[[A:.*]]: vector<16xi32>) -// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.umax"(%[[A]]) -// CHECK: return %[[V]] : i32 - -// ----- - -func.func @reduce_maxui_i32_scalable(%arg0: vector<[16]xi32>) -> i32 { - %0 = vector.reduction , %arg0 : vector<[16]xi32> into i32 - return %0 : i32 -} -// CHECK-LABEL: @reduce_maxui_i32_scalable( -// CHECK-SAME: %[[A:.*]]: vector<[16]xi32>) -// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.umax"(%[[A]]) -// CHECK: return %[[V]] : i32 - -// ----- - -func.func @reduce_maxui_acc_i32(%arg0: vector<16xi32>, %arg1 : i32) -> i32 { - %0 = vector.reduction , %arg0, %arg1 : vector<16xi32> into i32 - return %0 : i32 -} -// CHECK-LABEL: @reduce_maxui_acc_i32( -// CHECK-SAME: %[[A:.*]]: vector<16xi32>, %[[ACC:.*]]: i32) -// CHECK: %[[R:.*]] = "llvm.intr.vector.reduce.umax"(%[[A]]) -// CHECK: %[[S:.*]] = llvm.icmp "uge" %[[ACC]], %[[R]] -// CHECK: %[[V:.*]] = llvm.select %[[S]], %[[ACC]], %[[R]] -// CHECK: return %[[V]] : i32 - -// ----- - -func.func @reduce_maxui_acc_i32_scalable(%arg0: vector<[16]xi32>, %arg1 : i32) -> i32 { - %0 = vector.reduction , %arg0, %arg1 : vector<[16]xi32> into i32 - return %0 : i32 -} -// CHECK-LABEL: @reduce_maxui_acc_i32_scalable( -// CHECK-SAME: %[[A:.*]]: vector<[16]xi32>, %[[ACC:.*]]: i32) -// CHECK: %[[R:.*]] = "llvm.intr.vector.reduce.umax"(%[[A]]) -// CHECK: %[[S:.*]] = llvm.icmp "uge" %[[ACC]], %[[R]] -// CHECK: %[[V:.*]] = llvm.select %[[S]], %[[ACC]], %[[R]] -// CHECK: return %[[V]] : i32 - -// ----- - -func.func @reduce_minsi_i32(%arg0: vector<16xi32>) -> i32 { - %0 = vector.reduction , %arg0 : vector<16xi32> into i32 - return %0 : i32 -} -// CHECK-LABEL: @reduce_minsi_i32( -// CHECK-SAME: %[[A:.*]]: vector<16xi32>) -// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.smin"(%[[A]]) -// CHECK: return %[[V]] : i32 - -// ----- - -func.func @reduce_minsi_i32_scalable(%arg0: vector<[16]xi32>) -> i32 { - %0 = vector.reduction , %arg0 : vector<[16]xi32> into i32 - return %0 : i32 -} -// CHECK-LABEL: @reduce_minsi_i32_scalable( -// CHECK-SAME: %[[A:.*]]: vector<[16]xi32>) -// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.smin"(%[[A]]) -// CHECK: return %[[V]] : i32 - -// ----- - -func.func @reduce_minsi_acc_i32(%arg0: vector<16xi32>, %arg1 : i32) -> i32 { - %0 = vector.reduction , %arg0, %arg1 : vector<16xi32> into i32 - return %0 : i32 -} -// CHECK-LABEL: 
@reduce_minsi_acc_i32( -// CHECK-SAME: %[[A:.*]]: vector<16xi32>, %[[ACC:.*]]: i32) -// CHECK: %[[R:.*]] = "llvm.intr.vector.reduce.smin"(%[[A]]) -// CHECK: %[[S:.*]] = llvm.icmp "sle" %[[ACC]], %[[R]] -// CHECK: %[[V:.*]] = llvm.select %[[S]], %[[ACC]], %[[R]] -// CHECK: return %[[V]] : i32 - -// ----- - -func.func @reduce_minsi_acc_i32_scalable(%arg0: vector<[16]xi32>, %arg1 : i32) -> i32 { - %0 = vector.reduction , %arg0, %arg1 : vector<[16]xi32> into i32 - return %0 : i32 -} -// CHECK-LABEL: @reduce_minsi_acc_i32_scalable( -// CHECK-SAME: %[[A:.*]]: vector<[16]xi32>, %[[ACC:.*]]: i32) -// CHECK: %[[R:.*]] = "llvm.intr.vector.reduce.smin"(%[[A]]) -// CHECK: %[[S:.*]] = llvm.icmp "sle" %[[ACC]], %[[R]] -// CHECK: %[[V:.*]] = llvm.select %[[S]], %[[ACC]], %[[R]] -// CHECK: return %[[V]] : i32 - -// ----- - -func.func @reduce_maxsi_i32(%arg0: vector<16xi32>) -> i32 { - %0 = vector.reduction , %arg0 : vector<16xi32> into i32 - return %0 : i32 -} -// CHECK-LABEL: @reduce_maxsi_i32( -// CHECK-SAME: %[[A:.*]]: vector<16xi32>) -// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.smax"(%[[A]]) -// CHECK: return %[[V]] : i32 - -// ----- - -func.func @reduce_maxsi_i32_scalable(%arg0: vector<[16]xi32>) -> i32 { - %0 = vector.reduction , %arg0 : vector<[16]xi32> into i32 - return %0 : i32 -} -// CHECK-LABEL: @reduce_maxsi_i32_scalable( -// CHECK-SAME: %[[A:.*]]: vector<[16]xi32>) -// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.smax"(%[[A]]) -// CHECK: return %[[V]] : i32 - -// ----- - -func.func @reduce_maxsi_acc_i32(%arg0: vector<16xi32>, %arg1 : i32) -> i32 { - %0 = vector.reduction , %arg0, %arg1 : vector<16xi32> into i32 - return %0 : i32 -} -// CHECK-LABEL: @reduce_maxsi_acc_i32( -// CHECK-SAME: %[[A:.*]]: vector<16xi32>, %[[ACC:.*]]: i32) -// CHECK: %[[R:.*]] = "llvm.intr.vector.reduce.smax"(%[[A]]) -// CHECK: %[[S:.*]] = llvm.icmp "sge" %[[ACC]], %[[R]] -// CHECK: %[[V:.*]] = llvm.select %[[S]], %[[ACC]], %[[R]] -// CHECK: return %[[V]] : i32 - -// ----- - -func.func @reduce_maxsi_acc_i32_scalable(%arg0: vector<[16]xi32>, %arg1 : i32) -> i32 { - %0 = vector.reduction , %arg0, %arg1 : vector<[16]xi32> into i32 - return %0 : i32 -} -// CHECK-LABEL: @reduce_maxsi_acc_i32_scalable( -// CHECK-SAME: %[[A:.*]]: vector<[16]xi32>, %[[ACC:.*]]: i32) -// CHECK: %[[R:.*]] = "llvm.intr.vector.reduce.smax"(%[[A]]) -// CHECK: %[[S:.*]] = llvm.icmp "sge" %[[ACC]], %[[R]] -// CHECK: %[[V:.*]] = llvm.select %[[S]], %[[ACC]], %[[R]] -// CHECK: return %[[V]] : i32 - -// ----- - -func.func @reduce_and_i32(%arg0: vector<16xi32>) -> i32 { - %0 = vector.reduction , %arg0 : vector<16xi32> into i32 - return %0 : i32 -} -// CHECK-LABEL: @reduce_and_i32( -// CHECK-SAME: %[[A:.*]]: vector<16xi32>) -// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.and"(%[[A]]) -// CHECK: return %[[V]] : i32 - -// ----- - -func.func @reduce_and_i32_scalable(%arg0: vector<[16]xi32>) -> i32 { - %0 = vector.reduction , %arg0 : vector<[16]xi32> into i32 - return %0 : i32 -} -// CHECK-LABEL: @reduce_and_i32_scalable( -// CHECK-SAME: %[[A:.*]]: vector<[16]xi32>) -// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.and"(%[[A]]) -// CHECK: return %[[V]] : i32 - -// ----- - -func.func @reduce_and_acc_i32(%arg0: vector<16xi32>, %arg1 : i32) -> i32 { - %0 = vector.reduction , %arg0, %arg1 : vector<16xi32> into i32 - return %0 : i32 -} -// CHECK-LABEL: @reduce_and_acc_i32( -// CHECK-SAME: %[[A:.*]]: vector<16xi32>, %[[ACC:.*]]: i32) -// CHECK: %[[R:.*]] = "llvm.intr.vector.reduce.and"(%[[A]]) -// CHECK: %[[V:.*]] = llvm.and %[[ACC]], %[[R]] -// CHECK: return 
%[[V]] : i32 - -// ----- - -func.func @reduce_and_acc_i32_scalable(%arg0: vector<[16]xi32>, %arg1 : i32) -> i32 { - %0 = vector.reduction , %arg0, %arg1 : vector<[16]xi32> into i32 - return %0 : i32 -} -// CHECK-LABEL: @reduce_and_acc_i32_scalable( -// CHECK-SAME: %[[A:.*]]: vector<[16]xi32>, %[[ACC:.*]]: i32) -// CHECK: %[[R:.*]] = "llvm.intr.vector.reduce.and"(%[[A]]) -// CHECK: %[[V:.*]] = llvm.and %[[ACC]], %[[R]] -// CHECK: return %[[V]] : i32 - -// ----- - -func.func @reduce_or_i32(%arg0: vector<16xi32>) -> i32 { - %0 = vector.reduction , %arg0 : vector<16xi32> into i32 - return %0 : i32 -} -// CHECK-LABEL: @reduce_or_i32( -// CHECK-SAME: %[[A:.*]]: vector<16xi32>) -// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.or"(%[[A]]) -// CHECK: return %[[V]] : i32 - -// ----- - -func.func @reduce_or_i32_scalable(%arg0: vector<[16]xi32>) -> i32 { - %0 = vector.reduction , %arg0 : vector<[16]xi32> into i32 - return %0 : i32 -} -// CHECK-LABEL: @reduce_or_i32_scalable( -// CHECK-SAME: %[[A:.*]]: vector<[16]xi32>) -// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.or"(%[[A]]) -// CHECK: return %[[V]] : i32 - -// ----- - -func.func @reduce_or_acc_i32(%arg0: vector<16xi32>, %arg1 : i32) -> i32 { - %0 = vector.reduction , %arg0, %arg1 : vector<16xi32> into i32 - return %0 : i32 -} -// CHECK-LABEL: @reduce_or_acc_i32( -// CHECK-SAME: %[[A:.*]]: vector<16xi32>, %[[ACC:.*]]: i32) -// CHECK: %[[R:.*]] = "llvm.intr.vector.reduce.or"(%[[A]]) -// CHECK: %[[V:.*]] = llvm.or %[[ACC]], %[[R]] -// CHECK: return %[[V]] : i32 - -// ----- - -func.func @reduce_or_acc_i32_scalable(%arg0: vector<[16]xi32>, %arg1 : i32) -> i32 { - %0 = vector.reduction , %arg0, %arg1 : vector<[16]xi32> into i32 - return %0 : i32 -} -// CHECK-LABEL: @reduce_or_acc_i32_scalable( -// CHECK-SAME: %[[A:.*]]: vector<[16]xi32>, %[[ACC:.*]]: i32) -// CHECK: %[[R:.*]] = "llvm.intr.vector.reduce.or"(%[[A]]) -// CHECK: %[[V:.*]] = llvm.or %[[ACC]], %[[R]] -// CHECK: return %[[V]] : i32 - -// ----- - -func.func @reduce_xor_i32(%arg0: vector<16xi32>) -> i32 { - %0 = vector.reduction , %arg0 : vector<16xi32> into i32 - return %0 : i32 -} -// CHECK-LABEL: @reduce_xor_i32( -// CHECK-SAME: %[[A:.*]]: vector<16xi32>) -// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.xor"(%[[A]]) -// CHECK: return %[[V]] : i32 - -// ----- - -func.func @reduce_xor_i32_scalable(%arg0: vector<[16]xi32>) -> i32 { - %0 = vector.reduction , %arg0 : vector<[16]xi32> into i32 - return %0 : i32 -} -// CHECK-LABEL: @reduce_xor_i32_scalable( -// CHECK-SAME: %[[A:.*]]: vector<[16]xi32>) -// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.xor"(%[[A]]) -// CHECK: return %[[V]] : i32 - -// ----- - -func.func @reduce_xor_acc_i32(%arg0: vector<16xi32>, %arg1 : i32) -> i32 { - %0 = vector.reduction , %arg0, %arg1 : vector<16xi32> into i32 - return %0 : i32 -} -// CHECK-LABEL: @reduce_xor_acc_i32( -// CHECK-SAME: %[[A:.*]]: vector<16xi32>, %[[ACC:.*]]: i32) -// CHECK: %[[R:.*]] = "llvm.intr.vector.reduce.xor"(%[[A]]) -// CHECK: %[[V:.*]] = llvm.xor %[[ACC]], %[[R]] -// CHECK: return %[[V]] : i32 - -// ----- - -func.func @reduce_xor_acc_i32_scalable(%arg0: vector<[16]xi32>, %arg1 : i32) -> i32 { - %0 = vector.reduction , %arg0, %arg1 : vector<[16]xi32> into i32 - return %0 : i32 -} -// CHECK-LABEL: @reduce_xor_acc_i32_scalable( -// CHECK-SAME: %[[A:.*]]: vector<[16]xi32>, %[[ACC:.*]]: i32) -// CHECK: %[[R:.*]] = "llvm.intr.vector.reduce.xor"(%[[A]]) -// CHECK: %[[V:.*]] = llvm.xor %[[ACC]], %[[R]] -// CHECK: return %[[V]] : i32 - -// ----- - -func.func @reduce_i64(%arg0: vector<16xi64>) -> i64 { - 
%0 = vector.reduction , %arg0 : vector<16xi64> into i64 - return %0 : i64 -} -// CHECK-LABEL: @reduce_i64( -// CHECK-SAME: %[[A:.*]]: vector<16xi64>) -// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.add"(%[[A]]) -// CHECK: return %[[V]] : i64 - -// ----- - -func.func @reduce_i64_scalable(%arg0: vector<[16]xi64>) -> i64 { - %0 = vector.reduction , %arg0 : vector<[16]xi64> into i64 - return %0 : i64 -} -// CHECK-LABEL: @reduce_i64_scalable( -// CHECK-SAME: %[[A:.*]]: vector<[16]xi64>) -// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.add"(%[[A]]) -// CHECK: return %[[V]] : i64 - -// ----- - -func.func @reduce_index(%arg0: vector<16xindex>) -> index { - %0 = vector.reduction , %arg0 : vector<16xindex> into index - return %0 : index -} -// CHECK-LABEL: @reduce_index( -// CHECK-SAME: %[[A:.*]]: vector<16xindex>) -// CHECK: %[[T0:.*]] = builtin.unrealized_conversion_cast %[[A]] : vector<16xindex> to vector<16xi64> -// CHECK: %[[T1:.*]] = "llvm.intr.vector.reduce.add"(%[[T0]]) -// CHECK: %[[T2:.*]] = builtin.unrealized_conversion_cast %[[T1]] : i64 to index -// CHECK: return %[[T2]] : index - -// ----- - -func.func @reduce_index_scalable(%arg0: vector<[16]xindex>) -> index { - %0 = vector.reduction , %arg0 : vector<[16]xindex> into index - return %0 : index -} -// CHECK-LABEL: @reduce_index_scalable( -// CHECK-SAME: %[[A:.*]]: vector<[16]xindex>) -// CHECK: %[[T0:.*]] = builtin.unrealized_conversion_cast %[[A]] : vector<[16]xindex> to vector<[16]xi64> -// CHECK: %[[T1:.*]] = "llvm.intr.vector.reduce.add"(%[[T0]]) -// CHECK: %[[T2:.*]] = builtin.unrealized_conversion_cast %[[T1]] : i64 to index -// CHECK: return %[[T2]] : index - // ----- //===----------------------------------------------------------------------===// @@ -3081,591 +1527,143 @@ func.func @constant_mask_2d() -> vector<4x4xi1> { // ----- -func.func @constant_mask_2d_trailing_scalable() -> vector<4x[4]xi1> { - %0 = vector.constant_mask [2, 4] : vector<4x[4]xi1> - return %0 : vector<4x[4]xi1> -} -// CHECK-LABEL: func.func @constant_mask_2d_trailing_scalable -// CHECK: %[[VAL_0:.*]] = arith.constant dense : vector<[4]xi1> -// CHECK: %[[VAL_1:.*]] = arith.constant dense : vector<4x[4]xi1> -// CHECK: %[[VAL_2:.*]] = builtin.unrealized_conversion_cast %[[VAL_1]] : vector<4x[4]xi1> to !llvm.array<4 x vector<[4]xi1>> -// CHECK: %[[VAL_3:.*]] = llvm.insertvalue %[[VAL_0]], %[[VAL_2]][0] : !llvm.array<4 x vector<[4]xi1>> -// CHECK: %[[VAL_4:.*]] = llvm.insertvalue %[[VAL_0]], %[[VAL_3]][1] : !llvm.array<4 x vector<[4]xi1>> -// CHECK: %[[VAL_5:.*]] = builtin.unrealized_conversion_cast %[[VAL_4]] : !llvm.array<4 x vector<[4]xi1>> to vector<4x[4]xi1> -// CHECK: return %[[VAL_5]] : vector<4x[4]xi1> - -// ----- - -/// Currently, this is not supported as generating the mask would require -/// unrolling the leading scalable dimension at compile time. 
-func.func @negative_constant_mask_2d_leading_scalable() -> vector<[4]x4xi1> { - %0 = vector.constant_mask [4, 2] : vector<[4]x4xi1> - return %0 : vector<[4]x4xi1> -} -// CHECK-LABEL: func.func @negative_constant_mask_2d_leading_scalable -// CHECK: %[[VAL_0:.*]] = vector.constant_mask [4, 2] : vector<[4]x4xi1> -// CHECK: return %[[VAL_0]] : vector<[4]x4xi1> - -// ----- - -//===----------------------------------------------------------------------===// -// vector.create_mask -//===----------------------------------------------------------------------===// - -func.func @create_mask_0d(%num_elems : index) -> vector { - %v = vector.create_mask %num_elems : vector - return %v: vector -} - -// CHECK-LABEL: func @create_mask_0d -// CHECK-SAME: %[[NUM_ELEMS:.*]]: index -// CHECK: %[[INDICES:.*]] = arith.constant dense<0> : vector -// CHECK: %[[NUM_ELEMS_i32:.*]] = arith.index_cast %[[NUM_ELEMS]] : index to i32 -// CHECK: %[[BOUNDS:.*]] = llvm.insertelement %[[NUM_ELEMS_i32]] -// CHECK: %[[BOUNDS_CAST:.*]] = builtin.unrealized_conversion_cast %[[BOUNDS]] : vector<1xi32> to vector -// CHECK: %[[RESULT:.*]] = arith.cmpi sgt, %[[BOUNDS_CAST]], %[[INDICES]] : vector -// CHECK: return %[[RESULT]] : vector - -// ----- - -func.func @create_mask_1d(%num_elems : index) -> vector<4xi1> { - %v = vector.create_mask %num_elems : vector<4xi1> - return %v: vector<4xi1> -} - -// CHECK-LABEL: func @create_mask_1d -// CHECK-SAME: %[[NUM_ELEMS:.*]]: index -// CHECK: %[[INDICES:.*]] = arith.constant dense<[0, 1, 2, 3]> : vector<4xi32> -// CHECK: %[[NUM_ELEMS_i32:.*]] = arith.index_cast %[[NUM_ELEMS]] : index to i32 -// CHECK: %[[BOUNDS_INSERT:.*]] = llvm.insertelement %[[NUM_ELEMS_i32]] -// CHECK: %[[BOUNDS:.*]] = llvm.shufflevector %[[BOUNDS_INSERT]] -// CHECK: %[[RESULT:.*]] = arith.cmpi sgt, %[[BOUNDS]], %[[INDICES]] : vector<4xi32> -// CHECK: return %[[RESULT]] : vector<4xi1> - -// ----- - -func.func @create_mask_1d_scalable(%num_elems : index) -> vector<[4]xi1> { - %v = vector.create_mask %num_elems : vector<[4]xi1> - return %v: vector<[4]xi1> -} - -// CHECK-LABEL: func @create_mask_1d_scalable -// CHECK-SAME: %[[NUM_ELEMS:.*]]: index -// CHECK: %[[INDICES:.*]] = llvm.intr.stepvector : vector<[4]xi32> -// CHECK: %[[NUM_ELEMS_i32:.*]] = arith.index_cast %[[NUM_ELEMS]] : index to i32 -// CHECK: %[[BOUNDS_INSERT:.*]] = llvm.insertelement %[[NUM_ELEMS_i32]], {{.*}} : vector<[4]xi32> -// CHECK: %[[BOUNDS:.*]] = llvm.shufflevector %[[BOUNDS_INSERT]], {{.*}} : vector<[4]xi32> -// CHECK: %[[RESULT:.*]] = arith.cmpi slt, %[[INDICES]], %[[BOUNDS]] : vector<[4]xi32> -// CHECK: return %[[RESULT]] : vector<[4]xi1> - -// ----- - -//===----------------------------------------------------------------------===// -// vector.transpose -//===----------------------------------------------------------------------===// - -func.func @transpose_0d(%arg0: vector) -> vector { - %0 = vector.transpose %arg0, [] : vector to vector - return %0 : vector -} - -// CHECK-LABEL: func @transpose_0d -// CHECK-SAME: %[[A:.*]]: vector -// CHECK: return %[[A]] : vector - -// ----- - -//===----------------------------------------------------------------------===// -// vector.flat_transpose -//===----------------------------------------------------------------------===// - -func.func @flat_transpose(%arg0: vector<16xf32>) -> vector<16xf32> { - %0 = vector.flat_transpose %arg0 { rows = 4: i32, columns = 4: i32 } - : vector<16xf32> -> vector<16xf32> - return %0 : vector<16xf32> -} - -// CHECK-LABEL: func @flat_transpose -// CHECK-SAME: %[[A:.*]]: 
vector<16xf32> -// CHECK: %[[T:.*]] = llvm.intr.matrix.transpose %[[A]] -// CHECK-SAME: {columns = 4 : i32, rows = 4 : i32} : -// CHECK-SAME: vector<16xf32> into vector<16xf32> -// CHECK: return %[[T]] : vector<16xf32> - -// ----- - -func.func @flat_transpose_index(%arg0: vector<16xindex>) -> vector<16xindex> { - %0 = vector.flat_transpose %arg0 { rows = 4: i32, columns = 4: i32 } - : vector<16xindex> -> vector<16xindex> - return %0 : vector<16xindex> -} -// CHECK-LABEL: func @flat_transpose_index -// CHECK-SAME: %[[A:.*]]: vector<16xindex> -// CHECK: %[[T0:.*]] = builtin.unrealized_conversion_cast %[[A]] : vector<16xindex> to vector<16xi64> -// CHECK: %[[T1:.*]] = llvm.intr.matrix.transpose %[[T0]] -// CHECK-SAME: {columns = 4 : i32, rows = 4 : i32} : -// CHECK-SAME: vector<16xi64> into vector<16xi64> -// CHECK: %[[T2:.*]] = builtin.unrealized_conversion_cast %[[T1]] : vector<16xi64> to vector<16xindex> -// CHECK: return %[[T2]] : vector<16xindex> - -// ----- - -func.func @flat_transpose(%arg0: vector<16xf32>) -> vector<16xf32> { - %0 = vector.flat_transpose %arg0 { rows = 4: i32, columns = 4: i32 } - : vector<16xf32> -> vector<16xf32> - return %0 : vector<16xf32> -} - -// CHECK-LABEL: func @flat_transpose -// CHECK-SAME: %[[A:.*]]: vector<16xf32> -// CHECK: %[[T:.*]] = llvm.intr.matrix.transpose %[[A]] -// CHECK-SAME: {columns = 4 : i32, rows = 4 : i32} : -// CHECK-SAME: vector<16xf32> into vector<16xf32> -// CHECK: return %[[T]] : vector<16xf32> - -// ----- - -//===----------------------------------------------------------------------===// -// vector.load -//===----------------------------------------------------------------------===// - -func.func @load(%memref : memref<200x100xf32>, %i : index, %j : index) -> vector<8xf32> { - %0 = vector.load %memref[%i, %j] : memref<200x100xf32>, vector<8xf32> - return %0 : vector<8xf32> -} - -// CHECK-LABEL: func @load -// CHECK: %[[C100:.*]] = llvm.mlir.constant(100 : index) : i64 -// CHECK: %[[MUL:.*]] = llvm.mul %{{.*}}, %[[C100]] : i64 -// CHECK: %[[ADD:.*]] = llvm.add %[[MUL]], %{{.*}} : i64 -// CHECK: %[[GEP:.*]] = llvm.getelementptr %{{.*}}[%[[ADD]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32 -// CHECK: llvm.load %[[GEP]] {alignment = 4 : i64} : !llvm.ptr -> vector<8xf32> - -// ----- - -func.func @load_scalable(%memref : memref<200x100xf32>, %i : index, %j : index) -> vector<[8]xf32> { - %0 = vector.load %memref[%i, %j] : memref<200x100xf32>, vector<[8]xf32> - return %0 : vector<[8]xf32> -} - -// CHECK-LABEL: func @load_scalable -// CHECK: %[[C100:.*]] = llvm.mlir.constant(100 : index) : i64 -// CHECK: %[[MUL:.*]] = llvm.mul %{{.*}}, %[[C100]] : i64 -// CHECK: %[[ADD:.*]] = llvm.add %[[MUL]], %{{.*}} : i64 -// CHECK: %[[GEP:.*]] = llvm.getelementptr %{{.*}}[%[[ADD]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32 -// CHECK: llvm.load %[[GEP]] {alignment = 4 : i64} : !llvm.ptr -> vector<[8]xf32> - -// ----- - -func.func @load_nontemporal(%memref : memref<200x100xf32>, %i : index, %j : index) -> vector<8xf32> { - %0 = vector.load %memref[%i, %j] {nontemporal = true} : memref<200x100xf32>, vector<8xf32> - return %0 : vector<8xf32> -} - -// CHECK-LABEL: func @load_nontemporal -// CHECK: %[[C100:.*]] = llvm.mlir.constant(100 : index) : i64 -// CHECK: %[[MUL:.*]] = llvm.mul %{{.*}}, %[[C100]] : i64 -// CHECK: %[[ADD:.*]] = llvm.add %[[MUL]], %{{.*}} : i64 -// CHECK: %[[GEP:.*]] = llvm.getelementptr %{{.*}}[%[[ADD]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32 -// CHECK: llvm.load %[[GEP]] {alignment = 4 : i64, nontemporal} : !llvm.ptr -> vector<8xf32> - -// ----- - 
-func.func @load_nontemporal_scalable(%memref : memref<200x100xf32>, %i : index, %j : index) -> vector<[8]xf32> { - %0 = vector.load %memref[%i, %j] {nontemporal = true} : memref<200x100xf32>, vector<[8]xf32> - return %0 : vector<[8]xf32> -} - -// CHECK-LABEL: func @load_nontemporal_scalable -// CHECK: %[[C100:.*]] = llvm.mlir.constant(100 : index) : i64 -// CHECK: %[[MUL:.*]] = llvm.mul %{{.*}}, %[[C100]] : i64 -// CHECK: %[[ADD:.*]] = llvm.add %[[MUL]], %{{.*}} : i64 -// CHECK: %[[GEP:.*]] = llvm.getelementptr %{{.*}}[%[[ADD]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32 -// CHECK: llvm.load %[[GEP]] {alignment = 4 : i64, nontemporal} : !llvm.ptr -> vector<[8]xf32> - -// ----- - -func.func @load_index(%memref : memref<200x100xindex>, %i : index, %j : index) -> vector<8xindex> { - %0 = vector.load %memref[%i, %j] : memref<200x100xindex>, vector<8xindex> - return %0 : vector<8xindex> -} -// CHECK-LABEL: func @load_index -// CHECK: %[[T0:.*]] = llvm.load %{{.*}} {alignment = 8 : i64} : !llvm.ptr -> vector<8xi64> -// CHECK: %[[T1:.*]] = builtin.unrealized_conversion_cast %[[T0]] : vector<8xi64> to vector<8xindex> -// CHECK: return %[[T1]] : vector<8xindex> - -// ----- - -func.func @load_index_scalable(%memref : memref<200x100xindex>, %i : index, %j : index) -> vector<[8]xindex> { - %0 = vector.load %memref[%i, %j] : memref<200x100xindex>, vector<[8]xindex> - return %0 : vector<[8]xindex> -} -// CHECK-LABEL: func @load_index_scalable -// CHECK: %[[T0:.*]] = llvm.load %{{.*}} {alignment = 8 : i64} : !llvm.ptr -> vector<[8]xi64> -// CHECK: %[[T1:.*]] = builtin.unrealized_conversion_cast %[[T0]] : vector<[8]xi64> to vector<[8]xindex> -// CHECK: return %[[T1]] : vector<[8]xindex> - -// ----- - -func.func @load_0d(%memref : memref<200x100xf32>, %i : index, %j : index) -> vector { - %0 = vector.load %memref[%i, %j] : memref<200x100xf32>, vector - return %0 : vector -} - -// CHECK-LABEL: func @load_0d -// CHECK: %[[J:.*]] = builtin.unrealized_conversion_cast %{{.*}} : index to i64 -// CHECK: %[[I:.*]] = builtin.unrealized_conversion_cast %{{.*}} : index to i64 -// CHECK: %[[CAST_MEMREF:.*]] = builtin.unrealized_conversion_cast %{{.*}} : memref<200x100xf32> to !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> -// CHECK: %[[REF:.*]] = llvm.extractvalue %[[CAST_MEMREF]][1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> -// CHECK: %[[C100:.*]] = llvm.mlir.constant(100 : index) : i64 -// CHECK: %[[MUL:.*]] = llvm.mul %[[I]], %[[C100]] : i64 -// CHECK: %[[ADD:.*]] = llvm.add %[[MUL]], %[[J]] : i64 -// CHECK: %[[ADDR:.*]] = llvm.getelementptr %[[REF]][%[[ADD]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32 -// CHECK: %[[LOAD:.*]] = llvm.load %[[ADDR]] {alignment = 4 : i64} : !llvm.ptr -> vector<1xf32> -// CHECK: %[[RES:.*]] = builtin.unrealized_conversion_cast %[[LOAD]] : vector<1xf32> to vector -// CHECK: return %[[RES]] : vector -// ----- - -//===----------------------------------------------------------------------===// -// vector.store -//===----------------------------------------------------------------------===// - -func.func @store(%memref : memref<200x100xf32>, %i : index, %j : index) { - %val = arith.constant dense<11.0> : vector<4xf32> - vector.store %val, %memref[%i, %j] : memref<200x100xf32>, vector<4xf32> - return -} - -// CHECK-LABEL: func @store -// CHECK: %[[C100:.*]] = llvm.mlir.constant(100 : index) : i64 -// CHECK: %[[MUL:.*]] = llvm.mul %{{.*}}, %[[C100]] : i64 -// CHECK: %[[ADD:.*]] = llvm.add %[[MUL]], %{{.*}} : i64 -// CHECK: %[[GEP:.*]] = llvm.getelementptr 
%{{.*}}[%[[ADD]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32 -// CHECK: llvm.store %{{.*}}, %[[GEP]] {alignment = 4 : i64} : vector<4xf32>, !llvm.ptr - -// ----- - -func.func @store_scalable(%memref : memref<200x100xf32>, %i : index, %j : index) { - %val = arith.constant dense<11.0> : vector<[4]xf32> - vector.store %val, %memref[%i, %j] : memref<200x100xf32>, vector<[4]xf32> - return -} - -// CHECK-LABEL: func @store_scalable -// CHECK: %[[C100:.*]] = llvm.mlir.constant(100 : index) : i64 -// CHECK: %[[MUL:.*]] = llvm.mul %{{.*}}, %[[C100]] : i64 -// CHECK: %[[ADD:.*]] = llvm.add %[[MUL]], %{{.*}} : i64 -// CHECK: %[[GEP:.*]] = llvm.getelementptr %{{.*}}[%[[ADD]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32 -// CHECK: llvm.store %{{.*}}, %[[GEP]] {alignment = 4 : i64} : vector<[4]xf32>, !llvm.ptr - -// ----- - -func.func @store_nontemporal(%memref : memref<200x100xf32>, %i : index, %j : index) { - %val = arith.constant dense<11.0> : vector<4xf32> - vector.store %val, %memref[%i, %j] {nontemporal = true} : memref<200x100xf32>, vector<4xf32> - return -} - -// CHECK-LABEL: func @store_nontemporal -// CHECK: %[[C100:.*]] = llvm.mlir.constant(100 : index) : i64 -// CHECK: %[[MUL:.*]] = llvm.mul %{{.*}}, %[[C100]] : i64 -// CHECK: %[[ADD:.*]] = llvm.add %[[MUL]], %{{.*}} : i64 -// CHECK: %[[GEP:.*]] = llvm.getelementptr %{{.*}}[%[[ADD]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32 -// CHECK: llvm.store %{{.*}}, %[[GEP]] {alignment = 4 : i64, nontemporal} : vector<4xf32>, !llvm.ptr - -// ----- - -func.func @store_nontemporal_scalable(%memref : memref<200x100xf32>, %i : index, %j : index) { - %val = arith.constant dense<11.0> : vector<[4]xf32> - vector.store %val, %memref[%i, %j] {nontemporal = true} : memref<200x100xf32>, vector<[4]xf32> - return -} - -// CHECK-LABEL: func @store_nontemporal_scalable -// CHECK: %[[C100:.*]] = llvm.mlir.constant(100 : index) : i64 -// CHECK: %[[MUL:.*]] = llvm.mul %{{.*}}, %[[C100]] : i64 -// CHECK: %[[ADD:.*]] = llvm.add %[[MUL]], %{{.*}} : i64 -// CHECK: %[[GEP:.*]] = llvm.getelementptr %{{.*}}[%[[ADD]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32 -// CHECK: llvm.store %{{.*}}, %[[GEP]] {alignment = 4 : i64, nontemporal} : vector<[4]xf32>, !llvm.ptr - -// ----- - -func.func @store_index(%memref : memref<200x100xindex>, %i : index, %j : index) { - %val = arith.constant dense<11> : vector<4xindex> - vector.store %val, %memref[%i, %j] : memref<200x100xindex>, vector<4xindex> - return -} -// CHECK-LABEL: func @store_index -// CHECK: llvm.store %{{.*}}, %{{.*}} {alignment = 8 : i64} : vector<4xi64>, !llvm.ptr - -// ----- - -func.func @store_index_scalable(%memref : memref<200x100xindex>, %i : index, %j : index) { - %val = arith.constant dense<11> : vector<[4]xindex> - vector.store %val, %memref[%i, %j] : memref<200x100xindex>, vector<[4]xindex> - return -} -// CHECK-LABEL: func @store_index_scalable -// CHECK: llvm.store %{{.*}}, %{{.*}} {alignment = 8 : i64} : vector<[4]xi64>, !llvm.ptr - -// ----- - -func.func @store_0d(%memref : memref<200x100xf32>, %i : index, %j : index) { - %val = arith.constant dense<11.0> : vector - vector.store %val, %memref[%i, %j] : memref<200x100xf32>, vector - return -} - -// CHECK-LABEL: func @store_0d -// CHECK: %[[J:.*]] = builtin.unrealized_conversion_cast %{{.*}} : index to i64 -// CHECK: %[[I:.*]] = builtin.unrealized_conversion_cast %{{.*}} : index to i64 -// CHECK: %[[CAST_MEMREF:.*]] = builtin.unrealized_conversion_cast %{{.*}} : memref<200x100xf32> to !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> -// CHECK: %[[CST:.*]] = arith.constant 
dense<1.100000e+01> : vector -// CHECK: %[[VAL:.*]] = builtin.unrealized_conversion_cast %[[CST]] : vector to vector<1xf32> -// CHECK: %[[REF:.*]] = llvm.extractvalue %[[CAST_MEMREF]][1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> -// CHECK: %[[C100:.*]] = llvm.mlir.constant(100 : index) : i64 -// CHECK: %[[MUL:.*]] = llvm.mul %[[I]], %[[C100]] : i64 -// CHECK: %[[ADD:.*]] = llvm.add %[[MUL]], %[[J]] : i64 -// CHECK: %[[ADDR:.*]] = llvm.getelementptr %[[REF]][%[[ADD]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32 -// CHECK: llvm.store %[[VAL]], %[[ADDR]] {alignment = 4 : i64} : vector<1xf32>, !llvm.ptr -// CHECK: return - -// ----- - -//===----------------------------------------------------------------------===// -// vector.maskedload -//===----------------------------------------------------------------------===// - -func.func @masked_load(%arg0: memref, %arg1: vector<16xi1>, %arg2: vector<16xf32>) -> vector<16xf32> { - %c0 = arith.constant 0: index - %0 = vector.maskedload %arg0[%c0], %arg1, %arg2 : memref, vector<16xi1>, vector<16xf32> into vector<16xf32> - return %0 : vector<16xf32> -} - -// CHECK-LABEL: func @masked_load -// CHECK: %[[CO:.*]] = arith.constant 0 : index -// CHECK: %[[C:.*]] = builtin.unrealized_conversion_cast %[[CO]] : index to i64 -// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%[[C]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32 -// CHECK: %[[L:.*]] = llvm.intr.masked.load %[[P]], %{{.*}}, %{{.*}} {alignment = 4 : i32} : (!llvm.ptr, vector<16xi1>, vector<16xf32>) -> vector<16xf32> -// CHECK: return %[[L]] : vector<16xf32> - -// ----- - -func.func @masked_load_scalable(%arg0: memref, %arg1: vector<[16]xi1>, %arg2: vector<[16]xf32>) -> vector<[16]xf32> { - %c0 = arith.constant 0: index - %0 = vector.maskedload %arg0[%c0], %arg1, %arg2 : memref, vector<[16]xi1>, vector<[16]xf32> into vector<[16]xf32> - return %0 : vector<[16]xf32> -} - -// CHECK-LABEL: func @masked_load_scalable -// CHECK: %[[CO:.*]] = arith.constant 0 : index -// CHECK: %[[C:.*]] = builtin.unrealized_conversion_cast %[[CO]] : index to i64 -// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%[[C]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32 -// CHECK: %[[L:.*]] = llvm.intr.masked.load %[[P]], %{{.*}}, %{{.*}} {alignment = 4 : i32} : (!llvm.ptr, vector<[16]xi1>, vector<[16]xf32>) -> vector<[16]xf32> -// CHECK: return %[[L]] : vector<[16]xf32> - -// ----- - -func.func @masked_load_index(%arg0: memref, %arg1: vector<16xi1>, %arg2: vector<16xindex>) -> vector<16xindex> { - %c0 = arith.constant 0: index - %0 = vector.maskedload %arg0[%c0], %arg1, %arg2 : memref, vector<16xi1>, vector<16xindex> into vector<16xindex> - return %0 : vector<16xindex> -} -// CHECK-LABEL: func @masked_load_index -// CHECK: %{{.*}} = llvm.intr.masked.load %{{.*}}, %{{.*}}, %{{.*}} {alignment = 8 : i32} : (!llvm.ptr, vector<16xi1>, vector<16xi64>) -> vector<16xi64> - -// ----- - -func.func @masked_load_index_scalable(%arg0: memref, %arg1: vector<[16]xi1>, %arg2: vector<[16]xindex>) -> vector<[16]xindex> { - %c0 = arith.constant 0: index - %0 = vector.maskedload %arg0[%c0], %arg1, %arg2 : memref, vector<[16]xi1>, vector<[16]xindex> into vector<[16]xindex> - return %0 : vector<[16]xindex> -} -// CHECK-LABEL: func @masked_load_index_scalable -// CHECK: %{{.*}} = llvm.intr.masked.load %{{.*}}, %{{.*}}, %{{.*}} {alignment = 8 : i32} : (!llvm.ptr, vector<[16]xi1>, vector<[16]xi64>) -> vector<[16]xi64> - -// ----- - -//===----------------------------------------------------------------------===// -// vector.maskedstore 
-//===----------------------------------------------------------------------===//
-
-func.func @masked_store(%arg0: memref<?xf32>, %arg1: vector<16xi1>, %arg2: vector<16xf32>) {
-  %c0 = arith.constant 0: index
-  vector.maskedstore %arg0[%c0], %arg1, %arg2 : memref<?xf32>, vector<16xi1>, vector<16xf32>
-  return
-}
-
-// CHECK-LABEL: func @masked_store
-// CHECK: %[[CO:.*]] = arith.constant 0 : index
-// CHECK: %[[C:.*]] = builtin.unrealized_conversion_cast %[[CO]] : index to i64
-// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%[[C]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32
-// CHECK: llvm.intr.masked.store %{{.*}}, %[[P]], %{{.*}} {alignment = 4 : i32} : vector<16xf32>, vector<16xi1> into !llvm.ptr
-
-// -----
-
-func.func @masked_store_scalable(%arg0: memref<?xf32>, %arg1: vector<[16]xi1>, %arg2: vector<[16]xf32>) {
-  %c0 = arith.constant 0: index
-  vector.maskedstore %arg0[%c0], %arg1, %arg2 : memref<?xf32>, vector<[16]xi1>, vector<[16]xf32>
-  return
-}
-
-// CHECK-LABEL: func @masked_store_scalable
-// CHECK: %[[CO:.*]] = arith.constant 0 : index
-// CHECK: %[[C:.*]] = builtin.unrealized_conversion_cast %[[CO]] : index to i64
-// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%[[C]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32
-// CHECK: llvm.intr.masked.store %{{.*}}, %[[P]], %{{.*}} {alignment = 4 : i32} : vector<[16]xf32>, vector<[16]xi1> into !llvm.ptr
-
-// -----
-
-func.func @masked_store_index(%arg0: memref<?xindex>, %arg1: vector<16xi1>, %arg2: vector<16xindex>) {
-  %c0 = arith.constant 0: index
-  vector.maskedstore %arg0[%c0], %arg1, %arg2 : memref<?xindex>, vector<16xi1>, vector<16xindex>
-  return
+func.func @constant_mask_2d_trailing_scalable() -> vector<4x[4]xi1> {
+  %0 = vector.constant_mask [2, 4] : vector<4x[4]xi1>
+  return %0 : vector<4x[4]xi1>
 }
-// CHECK-LABEL: func @masked_store_index
-// CHECK: llvm.intr.masked.store %{{.*}}, %{{.*}}, %{{.*}} {alignment = 8 : i32} : vector<16xi64>, vector<16xi1> into !llvm.ptr
+// CHECK-LABEL: func.func @constant_mask_2d_trailing_scalable
+// CHECK: %[[VAL_0:.*]] = arith.constant dense<true> : vector<[4]xi1>
+// CHECK: %[[VAL_1:.*]] = arith.constant dense<false> : vector<4x[4]xi1>
+// CHECK: %[[VAL_2:.*]] = builtin.unrealized_conversion_cast %[[VAL_1]] : vector<4x[4]xi1> to !llvm.array<4 x vector<[4]xi1>>
+// CHECK: %[[VAL_3:.*]] = llvm.insertvalue %[[VAL_0]], %[[VAL_2]][0] : !llvm.array<4 x vector<[4]xi1>>
+// CHECK: %[[VAL_4:.*]] = llvm.insertvalue %[[VAL_0]], %[[VAL_3]][1] : !llvm.array<4 x vector<[4]xi1>>
+// CHECK: %[[VAL_5:.*]] = builtin.unrealized_conversion_cast %[[VAL_4]] : !llvm.array<4 x vector<[4]xi1>> to vector<4x[4]xi1>
+// CHECK: return %[[VAL_5]] : vector<4x[4]xi1>
 // -----
-func.func @masked_store_index_scalable(%arg0: memref<?xindex>, %arg1: vector<[16]xi1>, %arg2: vector<[16]xindex>) {
-  %c0 = arith.constant 0: index
-  vector.maskedstore %arg0[%c0], %arg1, %arg2 : memref<?xindex>, vector<[16]xi1>, vector<[16]xindex>
-  return
+/// Currently, this is not supported as generating the mask would require
+/// unrolling the leading scalable dimension at compile time.
+func.func @negative_constant_mask_2d_leading_scalable() -> vector<[4]x4xi1> {
+  %0 = vector.constant_mask [4, 2] : vector<[4]x4xi1>
+  return %0 : vector<[4]x4xi1>
 }
-// CHECK-LABEL: func @masked_store_index_scalable
-// CHECK: llvm.intr.masked.store %{{.*}}, %{{.*}}, %{{.*}} {alignment = 8 : i32} : vector<[16]xi64>, vector<[16]xi1> into !llvm.ptr
+// CHECK-LABEL: func.func @negative_constant_mask_2d_leading_scalable
+// CHECK: %[[VAL_0:.*]] = vector.constant_mask [4, 2] : vector<[4]x4xi1>
+// CHECK: return %[[VAL_0]] : vector<[4]x4xi1>
 // -----
 //===----------------------------------------------------------------------===//
-// vector.gather
+// vector.create_mask
 //===----------------------------------------------------------------------===//
-func.func @gather(%arg0: memref<?xf32>, %arg1: vector<3xi32>, %arg2: vector<3xi1>, %arg3: vector<3xf32>) -> vector<3xf32> {
-  %0 = arith.constant 0: index
-  %1 = vector.gather %arg0[%0][%arg1], %arg2, %arg3 : memref<?xf32>, vector<3xi32>, vector<3xi1>, vector<3xf32> into vector<3xf32>
-  return %1 : vector<3xf32>
-}
-
-// CHECK-LABEL: func @gather
-// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%{{.*}}] : (!llvm.ptr, vector<3xi32>) -> !llvm.vec<3 x ptr>, f32
-// CHECK: %[[G:.*]] = llvm.intr.masked.gather %[[P]], %{{.*}}, %{{.*}} {alignment = 4 : i32} : (!llvm.vec<3 x ptr>, vector<3xi1>, vector<3xf32>) -> vector<3xf32>
-// CHECK: return %[[G]] : vector<3xf32>
-
-// -----
-
-func.func @gather_scalable(%arg0: memref<?xf32>, %arg1: vector<[3]xi32>, %arg2: vector<[3]xi1>, %arg3: vector<[3]xf32>) -> vector<[3]xf32> {
-  %0 = arith.constant 0: index
-  %1 = vector.gather %arg0[%0][%arg1], %arg2, %arg3 : memref<?xf32>, vector<[3]xi32>, vector<[3]xi1>, vector<[3]xf32> into vector<[3]xf32>
-  return %1 : vector<[3]xf32>
+func.func @create_mask_0d(%num_elems : index) -> vector<i1> {
+  %v = vector.create_mask %num_elems : vector<i1>
+  return %v: vector<i1>
 }
-// CHECK-LABEL: func @gather_scalable
-// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%{{.*}}] : (!llvm.ptr, vector<[3]xi32>) -> !llvm.vec<? x 3 x ptr>, f32
-// CHECK: %[[G:.*]] = llvm.intr.masked.gather %[[P]], %{{.*}}, %{{.*}} {alignment = 4 : i32} : (!llvm.vec<? x 3 x ptr>, vector<[3]xi1>, vector<[3]xf32>) -> vector<[3]xf32>
-// CHECK: return %[[G]] : vector<[3]xf32>
+// CHECK-LABEL: func @create_mask_0d
+// CHECK-SAME: %[[NUM_ELEMS:.*]]: index
+// CHECK: %[[INDICES:.*]] = arith.constant dense<0> : vector<i32>
+// CHECK: %[[NUM_ELEMS_i32:.*]] = arith.index_cast %[[NUM_ELEMS]] : index to i32
+// CHECK: %[[BOUNDS:.*]] = llvm.insertelement %[[NUM_ELEMS_i32]]
+// CHECK: %[[BOUNDS_CAST:.*]] = builtin.unrealized_conversion_cast %[[BOUNDS]] : vector<1xi32> to vector<i32>
+// CHECK: %[[RESULT:.*]] = arith.cmpi sgt, %[[BOUNDS_CAST]], %[[INDICES]] : vector<i32>
+// CHECK: return %[[RESULT]] : vector<i1>
 // -----
-func.func @gather_global_memory(%arg0: memref<?xf32, 1>, %arg1: vector<3xi32>, %arg2: vector<3xi1>, %arg3: vector<3xf32>) -> vector<3xf32> {
-  %0 = arith.constant 0: index
-  %1 = vector.gather %arg0[%0][%arg1], %arg2, %arg3 : memref<?xf32, 1>, vector<3xi32>, vector<3xi1>, vector<3xf32> into vector<3xf32>
-  return %1 : vector<3xf32>
+func.func @create_mask_1d(%num_elems : index) -> vector<4xi1> {
+  %v = vector.create_mask %num_elems : vector<4xi1>
+  return %v: vector<4xi1>
 }
-// CHECK-LABEL: func @gather_global_memory
-// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%{{.*}}] : (!llvm.ptr<1>, vector<3xi32>) -> !llvm.vec<3 x ptr<1>>, f32
-// CHECK: %[[G:.*]] = llvm.intr.masked.gather %[[P]], %{{.*}}, %{{.*}} {alignment = 4 : i32} : (!llvm.vec<3 x ptr<1>>, vector<3xi1>, vector<3xf32>) -> vector<3xf32>
-//
CHECK: return %[[G]] : vector<3xf32> +// CHECK-LABEL: func @create_mask_1d +// CHECK-SAME: %[[NUM_ELEMS:.*]]: index +// CHECK: %[[INDICES:.*]] = arith.constant dense<[0, 1, 2, 3]> : vector<4xi32> +// CHECK: %[[NUM_ELEMS_i32:.*]] = arith.index_cast %[[NUM_ELEMS]] : index to i32 +// CHECK: %[[BOUNDS_INSERT:.*]] = llvm.insertelement %[[NUM_ELEMS_i32]] +// CHECK: %[[BOUNDS:.*]] = llvm.shufflevector %[[BOUNDS_INSERT]] +// CHECK: %[[RESULT:.*]] = arith.cmpi sgt, %[[BOUNDS]], %[[INDICES]] : vector<4xi32> +// CHECK: return %[[RESULT]] : vector<4xi1> // ----- -func.func @gather_global_memory_scalable(%arg0: memref, %arg1: vector<[3]xi32>, %arg2: vector<[3]xi1>, %arg3: vector<[3]xf32>) -> vector<[3]xf32> { - %0 = arith.constant 0: index - %1 = vector.gather %arg0[%0][%arg1], %arg2, %arg3 : memref, vector<[3]xi32>, vector<[3]xi1>, vector<[3]xf32> into vector<[3]xf32> - return %1 : vector<[3]xf32> +func.func @create_mask_1d_scalable(%num_elems : index) -> vector<[4]xi1> { + %v = vector.create_mask %num_elems : vector<[4]xi1> + return %v: vector<[4]xi1> } -// CHECK-LABEL: func @gather_global_memory_scalable -// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%{{.*}}] : (!llvm.ptr<1>, vector<[3]xi32>) -> !llvm.vec>, f32 -// CHECK: %[[G:.*]] = llvm.intr.masked.gather %[[P]], %{{.*}}, %{{.*}} {alignment = 4 : i32} : (!llvm.vec>, vector<[3]xi1>, vector<[3]xf32>) -> vector<[3]xf32> -// CHECK: return %[[G]] : vector<[3]xf32> +// CHECK-LABEL: func @create_mask_1d_scalable +// CHECK-SAME: %[[NUM_ELEMS:.*]]: index +// CHECK: %[[INDICES:.*]] = llvm.intr.stepvector : vector<[4]xi32> +// CHECK: %[[NUM_ELEMS_i32:.*]] = arith.index_cast %[[NUM_ELEMS]] : index to i32 +// CHECK: %[[BOUNDS_INSERT:.*]] = llvm.insertelement %[[NUM_ELEMS_i32]], {{.*}} : vector<[4]xi32> +// CHECK: %[[BOUNDS:.*]] = llvm.shufflevector %[[BOUNDS_INSERT]], {{.*}} : vector<[4]xi32> +// CHECK: %[[RESULT:.*]] = arith.cmpi slt, %[[INDICES]], %[[BOUNDS]] : vector<[4]xi32> +// CHECK: return %[[RESULT]] : vector<[4]xi1> // ----- +//===----------------------------------------------------------------------===// +// vector.flat_transpose +//===----------------------------------------------------------------------===// -func.func @gather_index(%arg0: memref, %arg1: vector<3xindex>, %arg2: vector<3xi1>, %arg3: vector<3xindex>) -> vector<3xindex> { - %0 = arith.constant 0: index - %1 = vector.gather %arg0[%0][%arg1], %arg2, %arg3 : memref, vector<3xindex>, vector<3xi1>, vector<3xindex> into vector<3xindex> - return %1 : vector<3xindex> +func.func @flat_transpose(%arg0: vector<16xf32>) -> vector<16xf32> { + %0 = vector.flat_transpose %arg0 { rows = 4: i32, columns = 4: i32 } + : vector<16xf32> -> vector<16xf32> + return %0 : vector<16xf32> } -// CHECK-LABEL: func @gather_index -// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%{{.*}}] : (!llvm.ptr, vector<3xi64>) -> !llvm.vec<3 x ptr>, i64 -// CHECK: %[[G:.*]] = llvm.intr.masked.gather %{{.*}}, %{{.*}}, %{{.*}} {alignment = 8 : i32} : (!llvm.vec<3 x ptr>, vector<3xi1>, vector<3xi64>) -> vector<3xi64> -// CHECK: %{{.*}} = builtin.unrealized_conversion_cast %[[G]] : vector<3xi64> to vector<3xindex> +// CHECK-LABEL: func @flat_transpose +// CHECK-SAME: %[[A:.*]]: vector<16xf32> +// CHECK: %[[T:.*]] = llvm.intr.matrix.transpose %[[A]] +// CHECK-SAME: {columns = 4 : i32, rows = 4 : i32} : +// CHECK-SAME: vector<16xf32> into vector<16xf32> +// CHECK: return %[[T]] : vector<16xf32> // ----- -func.func @gather_index_scalable(%arg0: memref, %arg1: vector<[3]xindex>, %arg2: vector<[3]xi1>, %arg3: vector<[3]xindex>) -> 
vector<[3]xindex> { - %0 = arith.constant 0: index - %1 = vector.gather %arg0[%0][%arg1], %arg2, %arg3 : memref, vector<[3]xindex>, vector<[3]xi1>, vector<[3]xindex> into vector<[3]xindex> - return %1 : vector<[3]xindex> +func.func @flat_transpose_index(%arg0: vector<16xindex>) -> vector<16xindex> { + %0 = vector.flat_transpose %arg0 { rows = 4: i32, columns = 4: i32 } + : vector<16xindex> -> vector<16xindex> + return %0 : vector<16xindex> } - -// CHECK-LABEL: func @gather_index_scalable -// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%{{.*}}] : (!llvm.ptr, vector<[3]xi64>) -> !llvm.vec, i64 -// CHECK: %[[G:.*]] = llvm.intr.masked.gather %{{.*}}, %{{.*}}, %{{.*}} {alignment = 8 : i32} : (!llvm.vec, vector<[3]xi1>, vector<[3]xi64>) -> vector<[3]xi64> -// CHECK: %{{.*}} = builtin.unrealized_conversion_cast %[[G]] : vector<[3]xi64> to vector<[3]xindex> +// CHECK-LABEL: func @flat_transpose_index +// CHECK-SAME: %[[A:.*]]: vector<16xindex> +// CHECK: %[[T0:.*]] = builtin.unrealized_conversion_cast %[[A]] : vector<16xindex> to vector<16xi64> +// CHECK: %[[T1:.*]] = llvm.intr.matrix.transpose %[[T0]] +// CHECK-SAME: {columns = 4 : i32, rows = 4 : i32} : +// CHECK-SAME: vector<16xi64> into vector<16xi64> +// CHECK: %[[T2:.*]] = builtin.unrealized_conversion_cast %[[T1]] : vector<16xi64> to vector<16xindex> +// CHECK: return %[[T2]] : vector<16xindex> // ----- -func.func @gather_2d_from_1d(%arg0: memref, %arg1: vector<2x3xi32>, %arg2: vector<2x3xi1>, %arg3: vector<2x3xf32>) -> vector<2x3xf32> { - %0 = arith.constant 0: index - %1 = vector.gather %arg0[%0][%arg1], %arg2, %arg3 : memref, vector<2x3xi32>, vector<2x3xi1>, vector<2x3xf32> into vector<2x3xf32> - return %1 : vector<2x3xf32> +func.func @flat_transpose(%arg0: vector<16xf32>) -> vector<16xf32> { + %0 = vector.flat_transpose %arg0 { rows = 4: i32, columns = 4: i32 } + : vector<16xf32> -> vector<16xf32> + return %0 : vector<16xf32> } -// CHECK-LABEL: func @gather_2d_from_1d -// CHECK: %[[B:.*]] = llvm.getelementptr %{{.*}} : (!llvm.ptr, i64) -> !llvm.ptr, f32 -// CHECK: %[[I0:.*]] = llvm.extractvalue %{{.*}}[0] : !llvm.array<2 x vector<3xi32>> -// CHECK: %[[M0:.*]] = llvm.extractvalue %{{.*}}[0] : !llvm.array<2 x vector<3xi1>> -// CHECK: %[[S0:.*]] = llvm.extractvalue %{{.*}}[0] : !llvm.array<2 x vector<3xf32>> -// CHECK: %[[P0:.*]] = llvm.getelementptr %[[B]][%[[I0]]] : (!llvm.ptr, vector<3xi32>) -> !llvm.vec<3 x ptr>, f32 -// CHECK: %[[G0:.*]] = llvm.intr.masked.gather %[[P0]], %[[M0]], %[[S0]] {alignment = 4 : i32} : (!llvm.vec<3 x ptr>, vector<3xi1>, vector<3xf32>) -> vector<3xf32> -// CHECK: %{{.*}} = llvm.insertvalue %[[G0]], %{{.*}}[0] : !llvm.array<2 x vector<3xf32>> -// CHECK: %[[I1:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.array<2 x vector<3xi32>> -// CHECK: %[[M1:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.array<2 x vector<3xi1>> -// CHECK: %[[S1:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.array<2 x vector<3xf32>> -// CHECK: %[[P1:.*]] = llvm.getelementptr %[[B]][%[[I1]]] : (!llvm.ptr, vector<3xi32>) -> !llvm.vec<3 x ptr>, f32 -// CHECK: %[[G1:.*]] = llvm.intr.masked.gather %[[P1]], %[[M1]], %[[S1]] {alignment = 4 : i32} : (!llvm.vec<3 x ptr>, vector<3xi1>, vector<3xf32>) -> vector<3xf32> -// CHECK: %{{.*}} = llvm.insertvalue %[[G1]], %{{.*}}[1] : !llvm.array<2 x vector<3xf32>> +// CHECK-LABEL: func @flat_transpose +// CHECK-SAME: %[[A:.*]]: vector<16xf32> +// CHECK: %[[T:.*]] = llvm.intr.matrix.transpose %[[A]] +// CHECK-SAME: {columns = 4 : i32, rows = 4 : i32} : +// CHECK-SAME: vector<16xf32> into vector<16xf32> +// CHECK: 
return %[[T]] : vector<16xf32> // ----- -func.func @gather_2d_from_1d_scalable(%arg0: memref, %arg1: vector<2x[3]xi32>, %arg2: vector<2x[3]xi1>, %arg3: vector<2x[3]xf32>) -> vector<2x[3]xf32> { - %0 = arith.constant 0: index - %1 = vector.gather %arg0[%0][%arg1], %arg2, %arg3 : memref, vector<2x[3]xi32>, vector<2x[3]xi1>, vector<2x[3]xf32> into vector<2x[3]xf32> - return %1 : vector<2x[3]xf32> -} - -// CHECK-LABEL: func @gather_2d_from_1d_scalable -// CHECK: %[[B:.*]] = llvm.getelementptr %{{.*}} : (!llvm.ptr, i64) -> !llvm.ptr, f32 -// CHECK: %[[I0:.*]] = llvm.extractvalue %{{.*}}[0] : !llvm.array<2 x vector<[3]xi32>> -// CHECK: %[[M0:.*]] = llvm.extractvalue %{{.*}}[0] : !llvm.array<2 x vector<[3]xi1>> -// CHECK: %[[S0:.*]] = llvm.extractvalue %{{.*}}[0] : !llvm.array<2 x vector<[3]xf32>> -// CHECK: %[[P0:.*]] = llvm.getelementptr %[[B]][%[[I0]]] : (!llvm.ptr, vector<[3]xi32>) -> !llvm.vec, f32 -// CHECK: %[[G0:.*]] = llvm.intr.masked.gather %[[P0]], %[[M0]], %[[S0]] {alignment = 4 : i32} : (!llvm.vec, vector<[3]xi1>, vector<[3]xf32>) -> vector<[3]xf32> -// CHECK: %{{.*}} = llvm.insertvalue %[[G0]], %{{.*}}[0] : !llvm.array<2 x vector<[3]xf32>> -// CHECK: %[[I1:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.array<2 x vector<[3]xi32>> -// CHECK: %[[M1:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.array<2 x vector<[3]xi1>> -// CHECK: %[[S1:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.array<2 x vector<[3]xf32>> -// CHECK: %[[P1:.*]] = llvm.getelementptr %[[B]][%[[I1]]] : (!llvm.ptr, vector<[3]xi32>) -> !llvm.vec, f32 -// CHECK: %[[G1:.*]] = llvm.intr.masked.gather %[[P1]], %[[M1]], %[[S1]] {alignment = 4 : i32} : (!llvm.vec, vector<[3]xi1>, vector<[3]xf32>) -> vector<[3]xf32> -// CHECK: %{{.*}} = llvm.insertvalue %[[G1]], %{{.*}}[1] : !llvm.array<2 x vector<[3]xf32>> +//===----------------------------------------------------------------------===// +// vector.gather +// +// NOTE: vector.constant_mask won't lower with +// * --convert-to-llvm="filter-dialects=vector", +// hence testing here. 
+//===----------------------------------------------------------------------===// -// ----- func.func @gather_with_mask(%arg0: memref, %arg1: vector<2x3xi32>, %arg2: vector<2x3xf32>) -> vector<2x3xf32> { %0 = arith.constant 0: index @@ -3725,290 +1723,10 @@ func.func @gather_with_zero_mask_scalable(%arg0: memref, %arg1: vector<2x // ----- -func.func @gather_1d_from_2d(%arg0: memref<4x4xf32>, %arg1: vector<4xi32>, %arg2: vector<4xi1>, %arg3: vector<4xf32>) -> vector<4xf32> { - %0 = arith.constant 3 : index - %1 = vector.gather %arg0[%0, %0][%arg1], %arg2, %arg3 : memref<4x4xf32>, vector<4xi32>, vector<4xi1>, vector<4xf32> into vector<4xf32> - return %1 : vector<4xf32> -} - -// CHECK-LABEL: func @gather_1d_from_2d -// CHECK: %[[B:.*]] = llvm.getelementptr %{{.*}}[%{{.*}}] : (!llvm.ptr, i64) -> !llvm.ptr, f32 -// CHECK: %[[P:.*]] = llvm.getelementptr %[[B]][%{{.*}}] : (!llvm.ptr, vector<4xi32>) -> !llvm.vec<4 x ptr>, f32 -// CHECK: %[[G:.*]] = llvm.intr.masked.gather %[[P]], %{{.*}}, %{{.*}} {alignment = 4 : i32} : (!llvm.vec<4 x ptr>, vector<4xi1>, vector<4xf32>) -> vector<4xf32> -// CHECK: return %[[G]] : vector<4xf32> - -// ----- - -func.func @gather_1d_from_2d_scalable(%arg0: memref<4x?xf32>, %arg1: vector<[4]xi32>, %arg2: vector<[4]xi1>, %arg3: vector<[4]xf32>) -> vector<[4]xf32> { - %0 = arith.constant 3 : index - %1 = vector.gather %arg0[%0, %0][%arg1], %arg2, %arg3 : memref<4x?xf32>, vector<[4]xi32>, vector<[4]xi1>, vector<[4]xf32> into vector<[4]xf32> - return %1 : vector<[4]xf32> -} - -// CHECK-LABEL: func @gather_1d_from_2d_scalable -// CHECK: %[[B:.*]] = llvm.getelementptr %{{.*}}[%{{.*}}] : (!llvm.ptr, i64) -> !llvm.ptr, f32 -// CHECK: %[[P:.*]] = llvm.getelementptr %[[B]][%{{.*}}] : (!llvm.ptr, vector<[4]xi32>) -> !llvm.vec, f32 -// CHECK: %[[G:.*]] = llvm.intr.masked.gather %[[P]], %{{.*}}, %{{.*}} {alignment = 4 : i32} : (!llvm.vec, vector<[4]xi1>, vector<[4]xf32>) -> vector<[4]xf32> -// CHECK: return %[[G]] : vector<[4]xf32> - -// ----- - -//===----------------------------------------------------------------------===// -// vector.scatter -//===----------------------------------------------------------------------===// - -func.func @scatter(%arg0: memref, %arg1: vector<3xi32>, %arg2: vector<3xi1>, %arg3: vector<3xf32>) { - %0 = arith.constant 0: index - vector.scatter %arg0[%0][%arg1], %arg2, %arg3 : memref, vector<3xi32>, vector<3xi1>, vector<3xf32> - return -} - -// CHECK-LABEL: func @scatter -// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%{{.*}}] : (!llvm.ptr, vector<3xi32>) -> !llvm.vec<3 x ptr>, f32 -// CHECK: llvm.intr.masked.scatter %{{.*}}, %[[P]], %{{.*}} {alignment = 4 : i32} : vector<3xf32>, vector<3xi1> into !llvm.vec<3 x ptr> - -// ----- - -func.func @scatter_scalable(%arg0: memref, %arg1: vector<[3]xi32>, %arg2: vector<[3]xi1>, %arg3: vector<[3]xf32>) { - %0 = arith.constant 0: index - vector.scatter %arg0[%0][%arg1], %arg2, %arg3 : memref, vector<[3]xi32>, vector<[3]xi1>, vector<[3]xf32> - return -} - -// CHECK-LABEL: func @scatter_scalable -// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%{{.*}}] : (!llvm.ptr, vector<[3]xi32>) -> !llvm.vec, f32 -// CHECK: llvm.intr.masked.scatter %{{.*}}, %[[P]], %{{.*}} {alignment = 4 : i32} : vector<[3]xf32>, vector<[3]xi1> into !llvm.vec - -// ----- - -func.func @scatter_index(%arg0: memref, %arg1: vector<3xindex>, %arg2: vector<3xi1>, %arg3: vector<3xindex>) { - %0 = arith.constant 0: index - vector.scatter %arg0[%0][%arg1], %arg2, %arg3 : memref, vector<3xindex>, vector<3xi1>, vector<3xindex> - return -} - -// 
CHECK-LABEL: func @scatter_index -// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%{{.*}}] : (!llvm.ptr, vector<3xi64>) -> !llvm.vec<3 x ptr>, i64 -// CHECK: llvm.intr.masked.scatter %{{.*}}, %[[P]], %{{.*}} {alignment = 8 : i32} : vector<3xi64>, vector<3xi1> into !llvm.vec<3 x ptr> - -// ----- - -func.func @scatter_index_scalable(%arg0: memref, %arg1: vector<[3]xindex>, %arg2: vector<[3]xi1>, %arg3: vector<[3]xindex>) { - %0 = arith.constant 0: index - vector.scatter %arg0[%0][%arg1], %arg2, %arg3 : memref, vector<[3]xindex>, vector<[3]xi1>, vector<[3]xindex> - return -} - -// CHECK-LABEL: func @scatter_index_scalable -// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%{{.*}}] : (!llvm.ptr, vector<[3]xi64>) -> !llvm.vec, i64 -// CHECK: llvm.intr.masked.scatter %{{.*}}, %[[P]], %{{.*}} {alignment = 8 : i32} : vector<[3]xi64>, vector<[3]xi1> into !llvm.vec - -// ----- - -func.func @scatter_1d_into_2d(%arg0: memref<4x4xf32>, %arg1: vector<4xi32>, %arg2: vector<4xi1>, %arg3: vector<4xf32>) { - %0 = arith.constant 3 : index - vector.scatter %arg0[%0, %0][%arg1], %arg2, %arg3 : memref<4x4xf32>, vector<4xi32>, vector<4xi1>, vector<4xf32> - return -} - -// CHECK-LABEL: func @scatter_1d_into_2d -// CHECK: %[[B:.*]] = llvm.getelementptr %{{.*}}[%{{.*}}] : (!llvm.ptr, i64) -> !llvm.ptr, f32 -// CHECK: %[[P:.*]] = llvm.getelementptr %[[B]][%{{.*}}] : (!llvm.ptr, vector<4xi32>) -> !llvm.vec<4 x ptr>, f32 -// CHECK: llvm.intr.masked.scatter %{{.*}}, %[[P]], %{{.*}} {alignment = 4 : i32} : vector<4xf32>, vector<4xi1> into !llvm.vec<4 x ptr> - -// ----- - -func.func @scatter_1d_into_2d_scalable(%arg0: memref<4x?xf32>, %arg1: vector<[4]xi32>, %arg2: vector<[4]xi1>, %arg3: vector<[4]xf32>) { - %0 = arith.constant 3 : index - vector.scatter %arg0[%0, %0][%arg1], %arg2, %arg3 : memref<4x?xf32>, vector<[4]xi32>, vector<[4]xi1>, vector<[4]xf32> - return -} - -// CHECK-LABEL: func @scatter_1d_into_2d_scalable -// CHECK: %[[B:.*]] = llvm.getelementptr %{{.*}}[%{{.*}}] : (!llvm.ptr, i64) -> !llvm.ptr, f32 -// CHECK: %[[P:.*]] = llvm.getelementptr %[[B]][%{{.*}}] : (!llvm.ptr, vector<[4]xi32>) -> !llvm.vec, f32 -// CHECK: llvm.intr.masked.scatter %{{.*}}, %[[P]], %{{.*}} {alignment = 4 : i32} : vector<[4]xf32>, vector<[4]xi1> into !llvm.vec - -// ----- - -//===----------------------------------------------------------------------===// -// vector.expandload -//===----------------------------------------------------------------------===// - -func.func @expand_load_op(%arg0: memref, %arg1: vector<11xi1>, %arg2: vector<11xf32>) -> vector<11xf32> { - %c0 = arith.constant 0: index - %0 = vector.expandload %arg0[%c0], %arg1, %arg2 : memref, vector<11xi1>, vector<11xf32> into vector<11xf32> - return %0 : vector<11xf32> -} - -// CHECK-LABEL: func @expand_load_op -// CHECK: %[[CO:.*]] = arith.constant 0 : index -// CHECK: %[[C:.*]] = builtin.unrealized_conversion_cast %[[CO]] : index to i64 -// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%[[C]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32 -// CHECK: %[[E:.*]] = "llvm.intr.masked.expandload"(%[[P]], %{{.*}}, %{{.*}}) : (!llvm.ptr, vector<11xi1>, vector<11xf32>) -> vector<11xf32> -// CHECK: return %[[E]] : vector<11xf32> - -// ----- - -func.func @expand_load_op_index(%arg0: memref, %arg1: vector<11xi1>, %arg2: vector<11xindex>) -> vector<11xindex> { - %c0 = arith.constant 0: index - %0 = vector.expandload %arg0[%c0], %arg1, %arg2 : memref, vector<11xi1>, vector<11xindex> into vector<11xindex> - return %0 : vector<11xindex> -} -// CHECK-LABEL: func @expand_load_op_index -// CHECK: 
%{{.*}} = "llvm.intr.masked.expandload"(%{{.*}}, %{{.*}}, %{{.*}}) : (!llvm.ptr, vector<11xi1>, vector<11xi64>) -> vector<11xi64> - -// ----- - -//===----------------------------------------------------------------------===// -// vector.compressstore -//===----------------------------------------------------------------------===// - -func.func @compress_store_op(%arg0: memref, %arg1: vector<11xi1>, %arg2: vector<11xf32>) { - %c0 = arith.constant 0: index - vector.compressstore %arg0[%c0], %arg1, %arg2 : memref, vector<11xi1>, vector<11xf32> - return -} - -// CHECK-LABEL: func @compress_store_op -// CHECK: %[[CO:.*]] = arith.constant 0 : index -// CHECK: %[[C:.*]] = builtin.unrealized_conversion_cast %[[CO]] : index to i64 -// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%[[C]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32 -// CHECK: "llvm.intr.masked.compressstore"(%{{.*}}, %[[P]], %{{.*}}) : (vector<11xf32>, !llvm.ptr, vector<11xi1>) -> () - -// ----- - -func.func @compress_store_op_index(%arg0: memref, %arg1: vector<11xi1>, %arg2: vector<11xindex>) { - %c0 = arith.constant 0: index - vector.compressstore %arg0[%c0], %arg1, %arg2 : memref, vector<11xi1>, vector<11xindex> - return -} -// CHECK-LABEL: func @compress_store_op_index -// CHECK: "llvm.intr.masked.compressstore"(%{{.*}}, %{{.*}}, %{{.*}}) : (vector<11xi64>, !llvm.ptr, vector<11xi1>) -> () - -// ----- - -//===----------------------------------------------------------------------===// -// vector.splat -//===----------------------------------------------------------------------===// - -// CHECK-LABEL: @splat_0d -// CHECK-SAME: %[[ELT:.*]]: f32 -func.func @splat_0d(%elt: f32) -> vector { - %v = vector.splat %elt : vector - return %v : vector -} -// CHECK-NEXT: %[[POISON:[0-9]+]] = llvm.mlir.poison : vector<1xf32> -// CHECK-NEXT: %[[ZERO:[0-9]+]] = llvm.mlir.constant(0 : i32) : i32 -// CHECK-NEXT: %[[V:[0-9]+]] = llvm.insertelement %[[ELT]], %[[POISON]][%[[ZERO]] : i32] : vector<1xf32> -// CHECK-NEXT: %[[VCAST:[0-9]+]] = builtin.unrealized_conversion_cast %[[V]] : vector<1xf32> to vector -// CHECK-NEXT: return %[[VCAST]] : vector - -// ----- - -// CHECK-LABEL: @splat -// CHECK-SAME: %[[VEC:[0-9a-zA-Z]+]]: vector<4xf32> -// CHECK-SAME: %[[ELT:[0-9a-zA-Z]+]]: f32 -func.func @splat(%vec: vector<4xf32>, %elt: f32) -> vector<4xf32> { - %vb = vector.splat %elt : vector<4xf32> - %r = arith.mulf %vec, %vb : vector<4xf32> - return %r : vector<4xf32> -} -// CHECK-NEXT: %[[POISON:[0-9]+]] = llvm.mlir.poison : vector<4xf32> -// CHECK-NEXT: %[[ZERO:[0-9]+]] = llvm.mlir.constant(0 : i32) : i32 -// CHECK-NEXT: %[[V:[0-9]+]] = llvm.insertelement %[[ELT]], %[[POISON]][%[[ZERO]] : i32] : vector<4xf32> -// CHECK-NEXT: %[[SPLAT:[0-9]+]] = llvm.shufflevector %[[V]], %[[POISON]] [0, 0, 0, 0] -// CHECK-NEXT: %[[SCALE:[0-9]+]] = arith.mulf %[[VEC]], %[[SPLAT]] : vector<4xf32> -// CHECK-NEXT: return %[[SCALE]] : vector<4xf32> - -// ----- - -// CHECK-LABEL: @splat_scalable -// CHECK-SAME: %[[VEC:[0-9a-zA-Z]+]]: vector<[4]xf32> -// CHECK-SAME: %[[ELT:[0-9a-zA-Z]+]]: f32 -func.func @splat_scalable(%vec: vector<[4]xf32>, %elt: f32) -> vector<[4]xf32> { - %vb = vector.splat %elt : vector<[4]xf32> - %r = arith.mulf %vec, %vb : vector<[4]xf32> - return %r : vector<[4]xf32> -} -// CHECK-NEXT: %[[POISON:[0-9]+]] = llvm.mlir.poison : vector<[4]xf32> -// CHECK-NEXT: %[[ZERO:[0-9]+]] = llvm.mlir.constant(0 : i32) : i32 -// CHECK-NEXT: %[[V:[0-9]+]] = llvm.insertelement %[[ELT]], %[[POISON]][%[[ZERO]] : i32] : vector<[4]xf32> -// CHECK-NEXT: %[[SPLAT:[0-9]+]] = 
llvm.shufflevector %[[V]], %[[POISON]] [0, 0, 0, 0] -// CHECK-NEXT: %[[SCALE:[0-9]+]] = arith.mulf %[[VEC]], %[[SPLAT]] : vector<[4]xf32> -// CHECK-NEXT: return %[[SCALE]] : vector<[4]xf32> - -// ----- - -//===----------------------------------------------------------------------===// -// vector.scalable_insert -//===----------------------------------------------------------------------===// - -// CHECK-LABEL: @scalable_insert -// CHECK-SAME: %[[SUB:.*]]: vector<4xf32>, %[[SV:.*]]: vector<[4]xf32> -func.func @scalable_insert(%sub: vector<4xf32>, %dsv: vector<[4]xf32>) -> vector<[4]xf32> { - // CHECK-NEXT: %[[TMP:.*]] = llvm.intr.vector.insert %[[SUB]], %[[SV]][0] : vector<4xf32> into vector<[4]xf32> - %0 = vector.scalable.insert %sub, %dsv[0] : vector<4xf32> into vector<[4]xf32> - // CHECK-NEXT: llvm.intr.vector.insert %[[SUB]], %[[TMP]][4] : vector<4xf32> into vector<[4]xf32> - %1 = vector.scalable.insert %sub, %0[4] : vector<4xf32> into vector<[4]xf32> - return %1 : vector<[4]xf32> -} - -// ----- - -//===----------------------------------------------------------------------===// -// vector.scalable_extract -//===----------------------------------------------------------------------===// - -// CHECK-LABEL: @scalable_extract -// CHECK-SAME: %[[VEC:.*]]: vector<[4]xf32> -func.func @scalable_extract(%vec: vector<[4]xf32>) -> vector<8xf32> { - // CHECK-NEXT: %{{.*}} = llvm.intr.vector.extract %[[VEC]][0] : vector<8xf32> from vector<[4]xf32> - %0 = vector.scalable.extract %vec[0] : vector<8xf32> from vector<[4]xf32> - return %0 : vector<8xf32> -} - -// ----- - //===----------------------------------------------------------------------===// // vector.interleave //===----------------------------------------------------------------------===// -// CHECK-LABEL: @interleave_0d -// CHECK-SAME: %[[LHS:.*]]: vector, %[[RHS:.*]]: vector) -func.func @interleave_0d(%a: vector, %b: vector) -> vector<2xi8> { - // CHECK-DAG: %[[LHS_RANK1:.*]] = builtin.unrealized_conversion_cast %[[LHS]] : vector to vector<1xi8> - // CHECK-DAG: %[[RHS_RANK1:.*]] = builtin.unrealized_conversion_cast %[[RHS]] : vector to vector<1xi8> - // CHECK: %[[ZIP:.*]] = llvm.shufflevector %[[LHS_RANK1]], %[[RHS_RANK1]] [0, 1] : vector<1xi8> - // CHECK: return %[[ZIP]] - %0 = vector.interleave %a, %b : vector -> vector<2xi8> - return %0 : vector<2xi8> -} - -// ----- - -// CHECK-LABEL: @interleave_1d -// CHECK-SAME: %[[LHS:.*]]: vector<8xf32>, %[[RHS:.*]]: vector<8xf32>) -func.func @interleave_1d(%a: vector<8xf32>, %b: vector<8xf32>) -> vector<16xf32> { - // CHECK: %[[ZIP:.*]] = llvm.shufflevector %[[LHS]], %[[RHS]] [0, 8, 1, 9, 2, 10, 3, 11, 4, 12, 5, 13, 6, 14, 7, 15] : vector<8xf32> - // CHECK: return %[[ZIP]] - %0 = vector.interleave %a, %b : vector<8xf32> -> vector<16xf32> - return %0 : vector<16xf32> -} - -// ----- - -// CHECK-LABEL: @interleave_1d_scalable -// CHECK-SAME: %[[LHS:.*]]: vector<[4]xi32>, %[[RHS:.*]]: vector<[4]xi32>) -func.func @interleave_1d_scalable(%a: vector<[4]xi32>, %b: vector<[4]xi32>) -> vector<[8]xi32> { - // CHECK: %[[ZIP:.*]] = "llvm.intr.vector.interleave2"(%[[LHS]], %[[RHS]]) : (vector<[4]xi32>, vector<[4]xi32>) -> vector<[8]xi32> - // CHECK: return %[[ZIP]] - %0 = vector.interleave %a, %b : vector<[4]xi32> -> vector<[8]xi32> - return %0 : vector<[8]xi32> -} - -// ----- - // CHECK-LABEL: @interleave_2d // CHECK-SAME: %[[LHS:.*]]: vector<2x3xi8>, %[[RHS:.*]]: vector<2x3xi8>) func.func @interleave_2d(%a: vector<2x3xi8>, %b: vector<2x3xi8>) -> vector<2x6xi8> { @@ -4035,30 +1753,6 @@ func.func 
@interleave_2d_scalable(%a: vector<2x[8]xi16>, %b: vector<2x[8]xi16>) // vector.deinterleave //===----------------------------------------------------------------------===// -// CHECK-LABEL: @deinterleave_1d -// CHECK-SAME: (%[[ARG:.*]]: vector<4xi32>) -> (vector<2xi32>, vector<2xi32>) -func.func @deinterleave_1d(%arg: vector<4xi32>) -> (vector<2xi32>, vector<2xi32>) { - // CHECK: %[[POISON:.*]] = llvm.mlir.poison : vector<4xi32> - // CHECK: llvm.shufflevector %[[ARG]], %[[POISON]] [0, 2] : vector<4xi32> - // CHECK: llvm.shufflevector %[[ARG]], %[[POISON]] [1, 3] : vector<4xi32> - %0, %1 = vector.deinterleave %arg : vector<4xi32> -> vector<2xi32> - return %0, %1 : vector<2xi32>, vector<2xi32> -} - -// ----- - -// CHECK-LABEL: @deinterleave_1d_scalable -// CHECK-SAME: %[[ARG:.*]]: vector<[4]xi32>) -> (vector<[2]xi32>, vector<[2]xi32>) -func.func @deinterleave_1d_scalable(%arg: vector<[4]xi32>) -> (vector<[2]xi32>, vector<[2]xi32>) { - // CHECK: %[[RES:.*]] = "llvm.intr.vector.deinterleave2"(%[[ARG]]) : (vector<[4]xi32>) -> !llvm.struct<(vector<[2]xi32>, vector<[2]xi32>)> - // CHECK: llvm.extractvalue %[[RES]][0] : !llvm.struct<(vector<[2]xi32>, vector<[2]xi32>)> - // CHECK: llvm.extractvalue %[[RES]][1] : !llvm.struct<(vector<[2]xi32>, vector<[2]xi32>)> - %0, %1 = vector.deinterleave %arg : vector<[4]xi32> -> vector<[2]xi32> - return %0, %1 : vector<[2]xi32>, vector<[2]xi32> -} - -// ----- - // CHECK-LABEL: @deinterleave_2d // CHECK-SAME: %[[ARG:.*]]: vector<2x8xf32>) -> (vector<2x4xf32>, vector<2x4xf32>) func.func @deinterleave_2d(%arg: vector<2x8xf32>) -> (vector<2x4xf32>, vector<2x4xf32>) { @@ -4077,40 +1771,6 @@ func.func @deinterleave_2d_scalable(%arg: vector<2x[8]xf32>) -> (vector<2x[4]xf3 return %0, %1 : vector<2x[4]xf32>, vector<2x[4]xf32> } -// ----- - -//===----------------------------------------------------------------------===// -// vector.from_elements -//===----------------------------------------------------------------------===// - -// CHECK-LABEL: func.func @from_elements_1d( -// CHECK-SAME: %[[ARG_0:.*]]: f32, %[[ARG_1:.*]]: f32) -// CHECK: %[[POISON:.*]] = llvm.mlir.poison : vector<3xf32> -// CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : i64) : i64 -// CHECK: %[[INSERT0:.*]] = llvm.insertelement %[[ARG_0]], %[[POISON]][%[[C0]] : i64] : vector<3xf32> -// CHECK: %[[C1:.*]] = llvm.mlir.constant(1 : i64) : i64 -// CHECK: %[[INSERT1:.*]] = llvm.insertelement %[[ARG_1]], %[[INSERT0]][%[[C1]] : i64] : vector<3xf32> -// CHECK: %[[C2:.*]] = llvm.mlir.constant(2 : i64) : i64 -// CHECK: %[[INSERT2:.*]] = llvm.insertelement %[[ARG_0]], %[[INSERT1]][%[[C2]] : i64] : vector<3xf32> -// CHECK: return %[[INSERT2]] -func.func @from_elements_1d(%arg0: f32, %arg1: f32) -> vector<3xf32> { - %0 = vector.from_elements %arg0, %arg1, %arg0 : vector<3xf32> - return %0 : vector<3xf32> -} - -// ----- - -// CHECK-LABEL: func.func @from_elements_0d( -// CHECK-SAME: %[[ARG_0:.*]]: f32) -// CHECK: %[[POISON:.*]] = llvm.mlir.poison : vector<1xf32> -// CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : i64) : i64 -// CHECK: %[[INSERT0:.*]] = llvm.insertelement %[[ARG_0]], %[[POISON]][%[[C0]] : i64] : vector<1xf32> -// CHECK: %[[CAST:.*]] = builtin.unrealized_conversion_cast %[[INSERT0]] : vector<1xf32> to vector -// CHECK: return %[[CAST]] -func.func @from_elements_0d(%arg0: f32) -> vector { - %0 = vector.from_elements %arg0 : vector - return %0 : vector -} // ----- @@ -4118,6 +1778,8 @@ func.func @from_elements_0d(%arg0: f32) -> vector { // vector.step 
//===----------------------------------------------------------------------===// +// TODO: Investigate why this wouldn't lower with --convert-to-llvm="filter-dialects=vector" + // CHECK-LABEL: @step // CHECK: %[[CST:.+]] = arith.constant dense<[0, 1, 2, 3]> : vector<4xindex> // CHECK: return %[[CST]] : vector<4xindex> @@ -4125,14 +1787,3 @@ func.func @step() -> vector<4xindex> { %0 = vector.step : vector<4xindex> return %0 : vector<4xindex> } - -// ----- - -// CHECK-LABEL: @step_scalable -// CHECK: %[[STEPVECTOR:.*]] = llvm.intr.stepvector : vector<[4]xi64> -// CHECK: %[[CAST:.*]] = builtin.unrealized_conversion_cast %[[STEPVECTOR]] : vector<[4]xi64> to vector<[4]xindex> -// CHECK: return %[[CAST]] : vector<[4]xindex> -func.func @step_scalable() -> vector<[4]xindex> { - %0 = vector.step : vector<[4]xindex> - return %0 : vector<[4]xindex> -} diff --git a/mlir/test/Conversion/VectorToSPIRV/vector-to-spirv.mlir b/mlir/test/Conversion/VectorToSPIRV/vector-to-spirv.mlir index 3f0bf1962e299..4701ac5d96009 100644 --- a/mlir/test/Conversion/VectorToSPIRV/vector-to-spirv.mlir +++ b/mlir/test/Conversion/VectorToSPIRV/vector-to-spirv.mlir @@ -1004,6 +1004,27 @@ func.func @vector_load(%arg0 : memref<4xf32, #spirv.storage_class return %0: vector<4xf32> } + +// CHECK-LABEL: @vector_load_single_elem +// CHECK-SAME: (%[[ARG0:.*]]: memref<4xf32, #spirv.storage_class>) +// CHECK: %[[S0:.+]] = builtin.unrealized_conversion_cast %[[ARG0]] : memref<4xf32, #spirv.storage_class> to !spirv.ptr [0])>, StorageBuffer> +// CHECK: %[[C0:.+]] = arith.constant 0 : index +// CHECK: %[[S1:.+]] = builtin.unrealized_conversion_cast %[[C0]] : index to i32 +// CHECK: %[[CST1:.+]] = spirv.Constant 0 : i32 +// CHECK: %[[CST2:.+]] = spirv.Constant 0 : i32 +// CHECK: %[[CST3:.+]] = spirv.Constant 1 : i32 +// CHECK: %[[S4:.+]] = spirv.AccessChain %[[S0]][%[[CST1]], %[[S1]]] : !spirv.ptr [0])>, StorageBuffer>, i32, i32 +// CHECK: %[[S5:.+]] = spirv.Load "StorageBuffer" %[[S4]] : f32 +// CHECK: %[[R0:.+]] = builtin.unrealized_conversion_cast %[[S5]] : f32 to vector<1xf32> +// CHECK: return %[[R0]] : vector<1xf32> +func.func @vector_load_single_elem(%arg0 : memref<4xf32, #spirv.storage_class>) -> vector<1xf32> { + %idx = arith.constant 0 : index + %cst_0 = arith.constant 0.000000e+00 : f32 + %0 = vector.load %arg0[%idx] : memref<4xf32, #spirv.storage_class>, vector<1xf32> + return %0: vector<1xf32> +} + + // CHECK-LABEL: @vector_load_2d // CHECK-SAME: (%[[ARG0:.*]]: memref<4x4xf32, #spirv.storage_class>) -> vector<4xf32> { // CHECK: %[[S0:.+]] = builtin.unrealized_conversion_cast %[[ARG0]] : memref<4x4xf32, #spirv.storage_class> to !spirv.ptr [0])>, StorageBuffer> @@ -1046,6 +1067,24 @@ func.func @vector_store(%arg0 : memref<4xf32, #spirv.storage_class> +// CHECK-SAME: %[[ARG1:.*]]: vector<1xf32> +// CHECK: %[[S0:.+]] = builtin.unrealized_conversion_cast %[[ARG0]] : memref<4xf32, #spirv.storage_class> to !spirv.ptr [0])>, StorageBuffer> +// CHECK: %[[S1:.+]] = builtin.unrealized_conversion_cast %[[ARG1]] : vector<1xf32> to f32 +// CHECK: %[[C0:.+]] = arith.constant 0 : index +// CHECK: %[[S2:.+]] = builtin.unrealized_conversion_cast %[[C0]] : index to i32 +// CHECK: %[[CST1:.+]] = spirv.Constant 0 : i32 +// CHECK: %[[CST2:.+]] = spirv.Constant 0 : i32 +// CHECK: %[[CST3:.+]] = spirv.Constant 1 : i32 +// CHECK: %[[S4:.+]] = spirv.AccessChain %[[S0]][%[[CST1]], %[[S2]]] : !spirv.ptr [0])>, StorageBuffer>, i32, i32 -> !spirv.ptr +// CHECK: spirv.Store "StorageBuffer" %[[S4]], %[[S1]] : f32 +func.func @vector_store_single_elem(%arg0 : 
memref<4xf32, #spirv.storage_class>, %arg1 : vector<1xf32>) { + %idx = arith.constant 0 : index + vector.store %arg1, %arg0[%idx] : memref<4xf32, #spirv.storage_class>, vector<1xf32> + return +} + // CHECK-LABEL: @vector_store_2d // CHECK-SAME: (%[[ARG0:.*]]: memref<4x4xf32, #spirv.storage_class> // CHECK-SAME: %[[ARG1:.*]]: vector<4xf32> diff --git a/mlir/test/Dialect/Affine/loop-fusion-4.mlir b/mlir/test/Dialect/Affine/loop-fusion-4.mlir index ea144f73bb21c..2830235431c76 100644 --- a/mlir/test/Dialect/Affine/loop-fusion-4.mlir +++ b/mlir/test/Dialect/Affine/loop-fusion-4.mlir @@ -285,3 +285,63 @@ module { spirv.ReturnValue %3 : !spirv.array<8192 x f32> } } + +// ----- + +// PRODUCER-CONSUMER-LABEL: func @same_memref_load_store +func.func @same_memref_load_store(%producer : memref<32xf32>, %consumer: memref<16xf32>){ + %cst = arith.constant 2.000000e+00 : f32 + // Source isn't removed. + // PRODUCER-CONSUMER: affine.for %{{.*}} = 0 to 32 + affine.for %arg3 = 0 to 32 { + %0 = affine.load %producer[%arg3] : memref<32xf32> + %2 = arith.mulf %0, %cst : f32 + affine.store %2, %producer[%arg3] : memref<32xf32> + } + affine.for %arg3 = 0 to 16 { + %0 = affine.load %producer[%arg3] : memref<32xf32> + %2 = arith.addf %0, %cst : f32 + affine.store %2, %consumer[%arg3] : memref<16xf32> + } + // Fused nest. + // PRODUCER-CONSUMER: affine.for %{{.*}} = 0 to 16 + // PRODUCER-CONSUMER-NEXT: affine.load %{{.*}}[%{{.*}}] : memref<32xf32> + // PRODUCER-CONSUMER-NEXT: arith.mulf + // PRODUCER-CONSUMER-NEXT: affine.store %{{.*}}, %{{.*}}[0] : memref<1xf32> + // PRODUCER-CONSUMER-NEXT: affine.load %{{.*}}[0] : memref<1xf32> + // PRODUCER-CONSUMER-NEXT: arith.addf + // PRODUCER-CONSUMER-NEXT: affine.store + // PRODUCER-CONSUMER-NEXT: } + return +} + +// PRODUCER-CONSUMER-LABEL: func @same_memref_load_multiple_stores +func.func @same_memref_load_multiple_stores(%producer : memref<32xf32>, %producer_2 : memref<32xf32>, %consumer: memref<16xf32>){ + %cst = arith.constant 2.000000e+00 : f32 + // Source isn't removed. + // PRODUCER-CONSUMER: affine.for %{{.*}} = 0 to 32 + affine.for %arg3 = 0 to 32 { + %0 = affine.load %producer[%arg3] : memref<32xf32> + %2 = arith.mulf %0, %cst : f32 + affine.store %2, %producer[%arg3] : memref<32xf32> + affine.store %2, %producer_2[%arg3] : memref<32xf32> + } + affine.for %arg3 = 0 to 16 { + %0 = affine.load %producer[%arg3] : memref<32xf32> + %1 = affine.load %producer_2[%arg3] : memref<32xf32> + %2 = arith.addf %0, %1 : f32 + affine.store %2, %consumer[%arg3] : memref<16xf32> + } + // Fused nest. 
+ // PRODUCER-CONSUMER: affine.for %{{.*}} = 0 to 16 + // PRODUCER-CONSUMER-NEXT: affine.load %{{.*}}[%{{.*}}] : memref<32xf32> + // PRODUCER-CONSUMER-NEXT: arith.mulf + // PRODUCER-CONSUMER-NEXT: affine.store %{{.*}}, %{{.*}}[0] : memref<1xf32> + // PRODUCER-CONSUMER-NEXT: affine.store %{{.*}}, %{{.*}}[0] : memref<1xf32> + // PRODUCER-CONSUMER-NEXT: affine.load %{{.*}}[0] : memref<1xf32> + // PRODUCER-CONSUMER-NEXT: affine.load %{{.*}}[0] : memref<1xf32> + // PRODUCER-CONSUMER-NEXT: arith.addf + // PRODUCER-CONSUMER-NEXT: affine.store + // PRODUCER-CONSUMER-NEXT: } + return +} diff --git a/mlir/test/Dialect/Linalg/vectorization-unsupported.mlir b/mlir/test/Dialect/Linalg/vectorization-unsupported.mlir index be0180fcf1763..8fbc74ec345c6 100644 --- a/mlir/test/Dialect/Linalg/vectorization-unsupported.mlir +++ b/mlir/test/Dialect/Linalg/vectorization-unsupported.mlir @@ -280,26 +280,3 @@ module attributes {transform.with_named_sequence} { transform.yield } } - -// ----- - -// One of the _destination_ dimensions is dynamic (but _source_ dimensions are static). - -func.func private @insert_slice_dynamic_dest_dim(%source: tensor, %size: index) -> tensor { - %c2 = arith.constant 2 : index - %init = tensor.empty(%size) : tensor - - %source_slice = tensor.extract_slice %source[0, %c2, 0, 0] [1, 1, 5, 1] [1, 1, 1, 1] : tensor to tensor<5x1xi32> - // expected-error @+1 {{Attempted to vectorize, but failed}} - %res = tensor.insert_slice %source_slice into %init[0, %c2] [5, 1] [1, 1] : tensor<5x1xi32> into tensor - - return %res : tensor -} - - module attributes {transform.with_named_sequence} { - transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) { - %0 = transform.structured.match ops{["tensor.insert_slice"]} in %arg0 : (!transform.any_op) -> !transform.any_op - transform.structured.vectorize %0 vector_sizes [8, 1] : !transform.any_op - transform.yield - } - } diff --git a/mlir/test/Dialect/Linalg/vectorization.mlir b/mlir/test/Dialect/Linalg/vectorization.mlir index a660144ab87fb..6d39262945de5 100644 --- a/mlir/test/Dialect/Linalg/vectorization.mlir +++ b/mlir/test/Dialect/Linalg/vectorization.mlir @@ -1130,14 +1130,14 @@ func.func private @insert_slice_static_sizes(%source: tensor) -> te // CHECK: %[[C_2:.*]] = arith.constant 2 : index // CHECK: %[[INIT:.*]] = tensor.empty() : tensor<5x3xi32> // CHECK: %[[SRC_SLICE:.*]] = tensor.extract_slice %[[SEC]][0, %[[C_2]], 0, 0] [1, 1, 5, 1] [1, 1, 1, 1] : tensor to tensor<5x1xi32> -// CHECK: %[[PAD:.*]] = arith.constant 0 : i32 -// CHECK: %[[C0:.*]] = arith.constant 0 : index -// CHECK: %[[C_5:.*]] = arith.constant 5 : index -// CHECK: %[[C_1:.*]] = arith.constant 1 : index +// CHECK-DAG: %[[PAD:.*]] = arith.constant 0 : i32 +// CHECK-DAG: %[[C_5:.*]] = arith.constant 5 : index +// CHECK-DAG: %[[C_1:.*]] = arith.constant 1 : index // CHECK: %[[MASK:.*]] = vector.create_mask %[[C_5]], %[[C_1]] : vector<8x1xi1> +// CHECK: %[[C0:.*]] = arith.constant 0 : index // CHECK: %[[READ:.*]] = vector.mask %[[MASK]] { vector.transfer_read %[[SRC_SLICE]][%[[C0]], %[[C0]]], %[[PAD]] : tensor<5x1xi32>, vector<8x1xi32> } : vector<8x1xi1> -> vector<8x1xi32> // CHECK: %[[C_0:.*]] = arith.constant 0 : index -// CHECK: %[[RES:.*]] = vector.transfer_write %[[READ]], %[[INIT]][%[[C_0]], %[[C_2]]] : vector<8x1xi32>, tensor<5x3xi32> +// CHECK: %[[RES:.*]] = vector.mask %[[MASK]] { vector.transfer_write %[[READ]], %[[INIT]][%[[C_0]], %[[C_2]]] : vector<8x1xi32>, tensor<5x3xi32> } : vector<8x1xi1> -> tensor<5x3xi32> // CHECK: return %[[RES]] 
: tensor<5x3xi32> module attributes {transform.with_named_sequence} { @@ -1170,11 +1170,11 @@ func.func private @insert_slice_dynamic_src_dim(%source: tensor, %s // CHECK: %[[SRC_SLICE:.*]] = tensor.extract_slice %[[SRC]][0, %[[C_2]], 0, 0] [1, 1, %[[SIZE]], 1] [1, 1, 1, 1] : tensor to tensor // CHECK-DAG: %[[PAD:.*]] = arith.constant 0 : i32 // CHECK-DAG: %[[C_1:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[C_0:.*]] = arith.constant 0 : index // CHECK: %[[MASK:.*]] = vector.create_mask %[[SIZE]], %[[C_1]] : vector<8x1xi1> +// CHECK: %[[C_0:.*]] = arith.constant 0 : index // CHECK: %[[READ:.*]] = vector.mask %[[MASK]] { vector.transfer_read %[[SRC_SLICE]][%[[C_0]], %[[C_0]]], %[[PAD]] : tensor, vector<8x1xi32> } : vector<8x1xi1> -> vector<8x1xi32> // CHECK: %[[C_0_1:.*]] = arith.constant 0 : index -// CHECK: %[[RES:.*]] = vector.transfer_write %[[READ]], %[[INIT]][%[[C_0_1]], %[[C_2]]] : vector<8x1xi32>, tensor<5x3xi32> +// CHECK: %[[RES:.*]] = vector.mask %[[MASK]] { vector.transfer_write %[[READ]], %[[INIT]][%[[C_0_1]], %[[C_2]]] : vector<8x1xi32>, tensor<5x3xi32> } : vector<8x1xi1> -> tensor<5x3xi32> // CHECK: return %[[RES]] : tensor<5x3xi32> module attributes {transform.with_named_sequence} { @@ -1184,3 +1184,78 @@ func.func private @insert_slice_dynamic_src_dim(%source: tensor, %s transform.yield } } + +// ----- + +// One of the _destination_ dimensions is dynamic (but _source_ dimensions are static). + +func.func private @insert_slice_dynamic_dest_dim(%source: tensor, %size: index) -> tensor { + %c2 = arith.constant 2 : index + %init = tensor.empty(%size) : tensor + + %source_slice = tensor.extract_slice %source[0, %c2, 0, 0] [1, 1, 5, 1] [1, 1, 1, 1] : tensor to tensor<5x1xi32> + %res = tensor.insert_slice %source_slice into %init[0, %c2] [5, 1] [1, 1] : tensor<5x1xi32> into tensor + + return %res : tensor +} + +// CHECK-LABEL: func.func private @insert_slice_dynamic_dest_dim( +// CHECK-SAME: %[[SRC:.*]]: tensor, +// CHECK-SAME: %[[SIZE:.*]]: index) -> tensor { +// CHECK: %[[C_2:.*]] = arith.constant 2 : index +// CHECK: %[[INIT:.*]] = tensor.empty(%[[SIZE]]) : tensor +// CHECK: %[[SRC_SLICE:.*]] = tensor.extract_slice %[[SRC]][0, %[[C_2]], 0, 0] [1, 1, 5, 1] [1, 1, 1, 1] : tensor to tensor<5x1xi32> +// CHECK: %[[PAD:.*]] = arith.constant 0 : i32 +// CHECK: %[[C_5:.*]] = arith.constant 5 : index +// CHECK: %[[C_1:.*]] = arith.constant 1 : index +// CHECK: %[[MASK:.*]] = vector.create_mask %[[C_5]], %[[C_1]] : vector<8x1xi1> +// CHECK: %[[C_0:.*]] = arith.constant 0 : index +// CHECK: %[[READ:.*]] = vector.mask %[[MASK]] { vector.transfer_read %[[SRC_SLICE]][%[[C_0]], %[[C_0]]], %[[PAD]] : tensor<5x1xi32>, vector<8x1xi32> } : vector<8x1xi1> -> vector<8x1xi32> +// CHECK: %[[C_0_1:.*]] = arith.constant 0 : index +// CHECK: %[[WRITE:.*]] = vector.mask %[[MASK]] { vector.transfer_write %[[READ]], %[[INIT]][%[[C_0_1]], %[[C_2]]] : vector<8x1xi32>, tensor } : vector<8x1xi1> -> tensor +// CHECK: return %[[WRITE]] : tensor + + module attributes {transform.with_named_sequence} { + transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) { + %0 = transform.structured.match ops{["tensor.insert_slice"]} in %arg0 : (!transform.any_op) -> !transform.any_op + transform.structured.vectorize %0 vector_sizes [8, 1] : !transform.any_op + transform.yield + } + } + +// ----- + +// At least one _source_ and one _destination_ dimensions are dynamic. 
+ +func.func private @insert_slice_dynamic_source_and_dest_dim(%source: tensor, %size: index) -> tensor { + %c2 = arith.constant 2 : index + %init = tensor.empty(%size) : tensor + + %source_slice = tensor.extract_slice %source[0, %c2, 0, 0] [1, 1, %size, 1] [1, 1, 1, 1] : tensor to tensor + %res = tensor.insert_slice %source_slice into %init[0, %c2] [%size, 1] [1, 1] : tensor into tensor + + return %res : tensor +} + +// CHECK-LABEL: func.func private @insert_slice_dynamic_source_and_dest_dim( +// CHECK-SAME: %[[SRC:.*]]: tensor, +// CHECK-SAME: %[[SIZE:.*]]: index) -> tensor { +// CHECK: %[[C_2:.*]] = arith.constant 2 : index +// CHECK: %[[INIT:.*]] = tensor.empty(%[[SIZE]]) : tensor +// CHECK: %[[SRC_SIZE:.*]] = tensor.extract_slice %[[SRC]][0, %[[C_2]], 0, 0] [1, 1, %[[SIZE]], 1] [1, 1, 1, 1] : tensor to tensor +// CHECK: %[[PAD:.*]] = arith.constant 0 : i32 +// CHECK: %[[C1:.*]] = arith.constant 1 : index +// CHECK: %[[MASK:.*]] = vector.create_mask %[[SIZE]], %[[C1]] : vector<8x1xi1> +// CHECK: %[[C0:.*]] = arith.constant 0 : index +// CHECK: %[[READ:.*]] = vector.mask %[[MASK]] { vector.transfer_read %[[SRC_SIZE]]{{\[}}%[[C0]], %[[C0]]], %[[PAD]] : tensor, vector<8x1xi32> } : vector<8x1xi1> -> vector<8x1xi32> +// CHECK: %[[C_0_1:.*]] = arith.constant 0 : index +// CHECK: %[[WRITE:.*]] = vector.mask %[[MASK]] { vector.transfer_write %[[READ]], %[[INIT]]{{\[}}%[[C_0_1]], %[[C_2]]] : vector<8x1xi32>, tensor } : vector<8x1xi1> -> tensor +// CHECK: return %[[WRITE]] : tensor + + module attributes {transform.with_named_sequence} { + transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) { + %0 = transform.structured.match ops{["tensor.insert_slice"]} in %arg0 : (!transform.any_op) -> !transform.any_op + transform.structured.vectorize %0 vector_sizes [8, 1] : !transform.any_op + transform.yield + } + } diff --git a/mlir/test/Dialect/SPIRV/IR/group-ops.mlir b/mlir/test/Dialect/SPIRV/IR/group-ops.mlir index c879b901311f2..d9957ad804161 100644 --- a/mlir/test/Dialect/SPIRV/IR/group-ops.mlir +++ b/mlir/test/Dialect/SPIRV/IR/group-ops.mlir @@ -196,3 +196,63 @@ func.func @group_fmul(%value: f32) -> f32 { %0 = spirv.KHR.GroupFMul %value : f32 return %0: f32 } + +// ----- + +//===----------------------------------------------------------------------===// +// spirv.GroupNonUniformBallotBitCount +//===----------------------------------------------------------------------===// + +func.func @group_non_uniform_ballot_bit_count(%value: vector<4xi32>) -> i32 { + // CHECK: {{%.*}} = spirv.GroupNonUniformBallotBitCount {{%.*}} : vector<4xi32> -> i32 + %0 = spirv.GroupNonUniformBallotBitCount %value : vector<4xi32> -> i32 + return %0: i32 +} + +// ----- + +func.func @group_non_uniform_ballot_bit_count_wrong_scope(%value: vector<4xi32>) -> i32 { + // expected-error @+1 {{execution_scope must be Scope of value Subgroup}} + %0 = spirv.GroupNonUniformBallotBitCount %value : vector<4xi32> -> i32 + return %0: i32 +} + +// ----- + +func.func @group_non_uniform_ballot_bit_count_wrong_value_len(%value: vector<3xi32>) -> i32 { + // expected-error @+1 {{operand #0 must be vector of 32-bit signless/unsigned integer values of length 4, but got 'vector<3xi32>'}} + %0 = spirv.GroupNonUniformBallotBitCount %value : vector<3xi32> -> i32 + return %0: i32 +} + +// ----- + +func.func @group_non_uniform_ballot_bit_count_wrong_value_type(%value: vector<4xi8>) -> i32 { + // expected-error @+1 {{operand #0 must be vector of 32-bit signless/unsigned integer values of length 4, but got 
'vector<4xi8>'}} + %0 = spirv.GroupNonUniformBallotBitCount %value : vector<4xi8> -> i32 + return %0: i32 +} + +// ----- + +func.func @group_non_uniform_ballot_bit_count_value_sign(%value: vector<4xsi32>) -> i32 { + // expected-error @+1 {{operand #0 must be vector of 32-bit signless/unsigned integer values of length 4, but got 'vector<4xsi32>'}} + %0 = spirv.GroupNonUniformBallotBitCount %value : vector<4xsi32> -> i32 + return %0: i32 +} + +// ----- + +func.func @group_non_uniform_ballot_bit_count_wrong_result_type(%value: vector<4xi32>) -> f32 { + // expected-error @+1 {{result #0 must be 8/16/32/64-bit signless/unsigned integer, but got 'f32'}} + %0 = spirv.GroupNonUniformBallotBitCount %value : vector<4xi32> -> f32 + return %0: f32 +} + +// ----- + +func.func @group_non_uniform_ballot_bit_count_wrong_result_sign(%value: vector<4xi32>) -> si32 { + // expected-error @+1 {{result #0 must be 8/16/32/64-bit signless/unsigned integer, but got 'si32'}} + %0 = spirv.GroupNonUniformBallotBitCount %value : vector<4xi32> -> si32 + return %0: si32 +} diff --git a/mlir/test/Dialect/Tosa/canonicalize.mlir b/mlir/test/Dialect/Tosa/canonicalize.mlir index e0e1de6a94d10..582fd77cd7bc8 100644 --- a/mlir/test/Dialect/Tosa/canonicalize.mlir +++ b/mlir/test/Dialect/Tosa/canonicalize.mlir @@ -542,17 +542,20 @@ func.func @reduce_sum_nofold(%arg0: tensor) -> tensor { // CHECK-LABEL: @reshape_canonicalize func.func @reshape_canonicalize(%arg0: tensor) -> tensor { // CHECK: return %arg0 - %0 = tosa.reshape %arg0 {new_shape = array}: (tensor) -> tensor - return %0 : tensor + %0 = "tosa.const_shape"() {value = dense<[-1, 10]> : tensor<2xindex>} : () -> !tosa.shape<2> + %1 = tosa.reshape %arg0, %0 : (tensor, !tosa.shape<2>) -> tensor + return %1 : tensor } // ----- // CHECK-LABEL: @reshape_canonicalize_dyn_nofold func.func @reshape_canonicalize_dyn_nofold(%arg0: tensor) -> tensor { - // CHECK: %[[VAR0:.+]] = tosa.reshape %arg0 {new_shape = array} : (tensor) -> tensor + // CHECK: %[[SHAPE:.+]] = tosa.const_shape {value = dense<[-1, 2, 10]> : tensor<3xindex>} : () -> !tosa.shape<3> + // CHECK: %[[VAR0:.+]] = tosa.reshape %arg0, %[[SHAPE]] : (tensor, !tosa.shape<3>) -> tensor // CHECK: return %[[VAR0]] : tensor - %0 = tosa.reshape %arg0 {new_shape = array} : (tensor) -> tensor + %s = "tosa.const_shape"() {value = dense<[-1, 2, 10]> : tensor<3xindex>} : () -> !tosa.shape<3> + %0 = tosa.reshape %arg0, %s : (tensor, !tosa.shape<3>) -> tensor return %0 : tensor } @@ -560,10 +563,13 @@ func.func @reshape_canonicalize_dyn_nofold(%arg0: tensor) -> tensor< // CHECK-LABEL: @reshape_canonicalize_double func.func @reshape_canonicalize_double(%arg0: tensor) -> tensor { - // CHECK: %[[VAL_1:.*]] = tosa.reshape %arg0 {new_shape = array} + // CHECK: %[[VAL_0:.*]] = tosa.const_shape {value = dense<[-1, 5]> : tensor<2xindex>} : () -> !tosa.shape<2> + // CHECK: %[[VAL_1:.*]] = tosa.reshape %arg0, %[[VAL_0]] // CHECK: return %[[VAL_1]] - %0 = tosa.reshape %arg0 {new_shape = array}: (tensor) -> tensor<5x?xf32> - %1 = tosa.reshape %0 {new_shape = array}: (tensor<5x?xf32>) -> tensor + %cst0 = "tosa.const_shape"() <{value = dense<[5, -1]> : tensor<2xindex>}> : () -> !tosa.shape<2> + %0 = tosa.reshape %arg0, %cst0 : (tensor, !tosa.shape<2>) -> tensor<5x?xf32> + %cst1 = "tosa.const_shape"() <{value = dense<[-1, 5]> : tensor<2xindex>}> : () -> !tosa.shape<2> + %1 = tosa.reshape %0, %cst1 : (tensor<5x?xf32>, !tosa.shape<2>) -> tensor return %1 : tensor } @@ -574,8 +580,9 @@ func.func @reshape_canonicalize_const() -> tensor<1x5xi32> { // CHECK: 
%[[VAR0:.+]] = "tosa.const"() <{value = dense<{{\[\[}}0, 1, 2, 3, 4]]> : tensor<1x5xi32>} // CHECK: return %[[VAR0]] %0 = "tosa.const"() {value = dense<[0, 1, 2, 3, 4]> : tensor<5xi32>} : () -> tensor<5xi32> - %1 = tosa.reshape %0 {new_shape = array} : (tensor<5xi32>) -> tensor<1x5xi32> - return %1 : tensor<1x5xi32> + %1 = "tosa.const_shape"() {value = dense<[1, 5]> : tensor<2xindex>} : () -> !tosa.shape<2> + %2 = tosa.reshape %0, %1 : (tensor<5xi32>, !tosa.shape<2>) -> tensor<1x5xi32> + return %2 : tensor<1x5xi32> } // ----- @@ -584,7 +591,8 @@ func.func @reshape_canonicalize_const() -> tensor<1x5xi32> { func.func @reshape_canonicalize_const_dynamic() -> tensor<1x?xi32> { // CHECK: tosa.reshape %0 = "tosa.const"() {value = dense<[0, 1, 2, 3, 4]> : tensor<5xi32>} : () -> tensor<5xi32> - %1 = tosa.reshape %0 {new_shape = array} : (tensor<5xi32>) -> tensor<1x?xi32> + %2 = "tosa.const_shape"() {value = dense<[1, 5]> : tensor<2xindex>} : () -> !tosa.shape<2> + %1 = tosa.reshape %0, %2 : (tensor<5xi32>, !tosa.shape<2>) -> tensor<1x?xi32> return %1 : tensor<1x?xi32> } @@ -596,7 +604,8 @@ func.func @reshape_canonicalize_const_splat() -> (tensor<10xi32>, tensor<1x10xi3 // CHECK-DAG: %[[VAR1:.+]] = "tosa.const"() <{value = dense<0> : tensor<1x10xi32>} // CHECK: return %[[VAR0]], %[[VAR1]] %0 = "tosa.const"() {value = dense<0> : tensor<10xi32>} : () -> tensor<10xi32> - %1 = tosa.reshape %0 {new_shape = array} : (tensor<10xi32>) -> tensor<1x10xi32> + %2 = "tosa.const_shape"() {value = dense<[1, 10]> : tensor<2xindex>} : () -> !tosa.shape<2> + %1 = tosa.reshape %0, %2 : (tensor<10xi32>, !tosa.shape<2>) -> tensor<1x10xi32> return %0 , %1 : tensor<10xi32>, tensor<1x10xi32> } @@ -606,7 +615,8 @@ func.func @reshape_canonicalize_const_splat() -> (tensor<10xi32>, tensor<1x10xi3 func.func @reshape_canonicalize_const_sparse() -> (tensor<3xi32>, tensor<1x3xi32>) { // CHECK: tosa.reshape %0 = "tosa.const"() {value = dense<[1, 2, 3]> : tensor<3xi32>} : ()-> tensor<3xi32> - %1 = tosa.reshape %0 {new_shape = array} : (tensor<3xi32>) -> tensor<1x3xi32> + %2 = "tosa.const_shape"() {value = dense<[1, 3]> : tensor<2xindex>} : () -> !tosa.shape<2> + %1 = tosa.reshape %0, %2 : (tensor<3xi32>, !tosa.shape<2>) -> tensor<1x3xi32> return %0 , %1 : tensor<3xi32>, tensor<1x3xi32> } @@ -616,9 +626,10 @@ func.func @reshape_canonicalize_const_sparse() -> (tensor<3xi32>, tensor<1x3xi32 func.func @reshape_canonicalize_quant_nofold() -> (tensor<1x3x!quant.uniform>) { // disabled folding for quantized element types // CHECK{LITERAL}: "tosa.const"() <{value = dense<[1, 2, 3]> : tensor<3xi8>}> : () -> tensor<3x!quant.uniform> - // CHECK{LITERAL}: tosa.reshape %0 {new_shape = array} : (tensor<3x!quant.uniform>) -> tensor<1x3x!quant.uniform> + // CHECK{LITERAL}: tosa.reshape %0, %1 : (tensor<3x!quant.uniform>, !tosa.shape<2>) -> tensor<1x3x!quant.uniform> %0 = "tosa.const"() {value = dense<[1, 2, 3]> : tensor<3xi8>} : ()-> tensor<3x!quant.uniform> - %1 = tosa.reshape %0 {new_shape = array} : (tensor<3x!quant.uniform>) -> tensor<1x3x!quant.uniform> + %2 = "tosa.const_shape"() {value = dense<[1, 3]> : tensor<2xindex>} : () -> !tosa.shape<2> + %1 = tosa.reshape %0, %2 : (tensor<3x!quant.uniform>, !tosa.shape<2>) -> tensor<1x3x!quant.uniform> return %1 : tensor<1x3x!quant.uniform> } @@ -626,8 +637,9 @@ func.func @reshape_canonicalize_quant_nofold() -> (tensor<1x3x!quant.uniform (tensor<2x1x3x!quant.uniform>) { - // CHECK: "tosa.const"() <{value = dense<0> : tensor<1x2x3xi8>}> : () -> tensor<1x2x3x!quant.uniform> - // CHECK: tosa.reshape %0 
{new_shape = array} : (tensor<1x2x3x!quant.uniform>) -> tensor<2x1x3x!quant.uniform> + // CHECK-DAG: tosa.const_shape {value = dense<[2, 1, 3]> : tensor<3xindex>} : () -> !tosa.shape<3> + // CHECK-DAG: "tosa.const"() <{value = dense<0> : tensor<1x2x3xi8>}> : () -> tensor<1x2x3x!quant.uniform> + // CHECK: tosa.reshape %0, %1 : (tensor<1x2x3x!quant.uniform>, !tosa.shape<3>) -> tensor<2x1x3x!quant.uniform> %perms = "tosa.const"() {value = dense<[1, 0, 2]> : tensor<3xi32>} : () -> tensor<3xi32> %0 = "tosa.const"() {value = dense<0> : tensor<1x2x3xi8>} : ()-> tensor<1x2x3x!quant.uniform> %1 = tosa.transpose %0, %perms : (tensor<1x2x3x!quant.uniform>, tensor<3xi32>) -> tensor<2x1x3x!quant.uniform> @@ -691,7 +703,8 @@ func.func @transpose_no_op(%arg0: tensor<3x4x5x6xf32>) -> tensor<3x4x5x6xf32> { // CHECK-LABEL: @transpose_is_reshape func.func @transpose_is_reshape(%arg0: tensor<1x4x5x1xf32>) -> tensor<1x4x1x5xf32> { - // CHECK: tosa.reshape %arg0 {new_shape = array} : (tensor<1x4x5x1xf32>) -> tensor<1x4x1x5xf32> + // CHECK: %[[CONST0:.+]] = tosa.const_shape {value = dense<[1, 4, 1, 5]> : tensor<4xindex>} : () -> !tosa.shape<4> + // CHECK: tosa.reshape %arg0, %[[CONST0]] %perms = "tosa.const"() <{value = dense<[3, 1, 0, 2]> : tensor<4xi32>}> : () -> tensor<4xi32> %0 = tosa.transpose %arg0, %perms : (tensor<1x4x5x1xf32>, tensor<4xi32>) -> tensor<1x4x1x5xf32> return %0 : tensor<1x4x1x5xf32> @@ -704,7 +717,8 @@ func.func @transpose_is_reshape(%arg0: tensor<1x4x5x1xf32>) -> tensor<1x4x1x5xf3 func.func @single_bit_reshape() -> tensor<1xi1> { // CHECK: "tosa.const"() <{value = dense : tensor<1xi1>} %0 = arith.constant dense : tensor<1x1xi1> - %1 = tosa.reshape %0 {new_shape = array} : (tensor<1x1xi1>) -> tensor<1xi1> + %2 = "tosa.const_shape"() <{value = dense<1> : tensor<1xindex>}> : () -> !tosa.shape<1> + %1 = tosa.reshape %0, %2 : (tensor<1x1xi1>, !tosa.shape<1>) -> tensor<1xi1> return %1 : tensor<1xi1> } @@ -870,8 +884,11 @@ func.func nested @fold_tile_rank_zero() -> tensor { // check that segfault is fixed func.func @reshape_quant_nofold() -> tensor<1x1x1x1xi32> { %0 = "tosa.const"() {value = dense<127> : tensor} : () -> tensor> - %1 = tosa.reshape %0 {new_shape = array} : (tensor>) -> tensor<1x1x1x1x!quant.uniform> - %2 = tosa.rescale %1 {double_round = true, input_zp = -128 : i32, multiplier = array, output_zp = 0 : i32, per_channel = false, scale32 = true, shift = array} : (tensor<1x1x1x1x!quant.uniform>) -> tensor<1x1x1x1xi32> + %cst0 = "tosa.const_shape"() {value = dense<[1, 1, 1, 1]> : tensor<4xindex>} : () -> !tosa.shape<4> + %1 = tosa.reshape %0, %cst0 : (tensor>, !tosa.shape<4>) -> tensor<1x1x1x1x!quant.uniform> + %multiplier = "tosa.const"() {value = dense<1073741824> : tensor<1xi32> } : () -> tensor<1xi32> + %shift = "tosa.const"() {value = dense<30> : tensor<1xi8> } : () -> tensor<1xi8> + %2 = tosa.rescale %1 {double_round = true, input_zp = -128 : i32, multiplier = array, output_zp = 0 : i32, per_channel = false, scale32 = true, shift = array, input_unsigned = false, output_unsigned = false} : (tensor<1x1x1x1x!quant.uniform>) -> tensor<1x1x1x1xi32> return %2 : tensor<1x1x1x1xi32> } diff --git a/mlir/test/Dialect/Tosa/constant-op-fold.mlir b/mlir/test/Dialect/Tosa/constant-op-fold.mlir index 32677f06e2252..40469987d89d0 100644 --- a/mlir/test/Dialect/Tosa/constant-op-fold.mlir +++ b/mlir/test/Dialect/Tosa/constant-op-fold.mlir @@ -500,7 +500,8 @@ func.func @fold_eq_i32(%arg0 : tensor<10xi32>) -> (tensor<10xi1>) { func.func @reshape_splat() -> tensor<6x5x4xi32> { // CHECK: %[[SPLAT:.+]] 
= "tosa.const"() <{value = dense<42> : tensor<6x5x4xi32>} %splat = "tosa.const"() {value = dense<42> : tensor<4x5x6xi32>} : () -> tensor<4x5x6xi32> - %reshape = tosa.reshape %splat { new_shape = array } : (tensor<4x5x6xi32>) -> tensor<6x5x4xi32> + %const = tosa.const_shape {value = dense<[6, 5, 4]> : tensor<3xindex>} : () -> !tosa.shape<3> + %reshape = tosa.reshape %splat, %const : (tensor<4x5x6xi32>, !tosa.shape<3>) -> tensor<6x5x4xi32> // CHECK: return %[[SPLAT]] return %reshape : tensor<6x5x4xi32> } diff --git a/mlir/test/Dialect/Tosa/inlining.mlir b/mlir/test/Dialect/Tosa/inlining.mlir index e892fdaa27750..2a3065e80d0ea 100644 --- a/mlir/test/Dialect/Tosa/inlining.mlir +++ b/mlir/test/Dialect/Tosa/inlining.mlir @@ -47,7 +47,8 @@ func.func @inlined_while_fn(%arg0: tensor, %arg1: tensor, %arg2: tenso } func.func private @while_body_50(%arg0: tensor, %arg1: tensor, %arg2: tensor, %arg3: tensor<10xi32>) -> (tensor, tensor, tensor, tensor<10xi32>) { %1 = "tosa.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - %3 = "tosa.reshape"(%1) {new_shape = array} : (tensor) -> tensor<1xi32> + %4 = "tosa.const_shape"() {value = dense<1> : tensor<1xindex>} : () -> !tosa.shape<1> + %3 = "tosa.reshape"(%1, %4) : (tensor, !tosa.shape<1>) -> tensor<1xi32> %2 = "tosa.add"(%arg3, %3) : (tensor<10xi32>, tensor<1xi32>) -> tensor<10xi32> return %1, %arg1, %arg2, %2: tensor, tensor, tensor, tensor<10xi32> } diff --git a/mlir/test/Dialect/Tosa/invalid.mlir b/mlir/test/Dialect/Tosa/invalid.mlir index 006c5bd52a9f6..2165e1f7ae3ba 100644 --- a/mlir/test/Dialect/Tosa/invalid.mlir +++ b/mlir/test/Dialect/Tosa/invalid.mlir @@ -316,7 +316,8 @@ func.func @test_transpose_element_type_mismatch(%arg0: tensor<2x3xi32>) -> tenso func.func @test_fully_connected_non_const(%arg0: tensor<13x21x3xf32>, %arg1: tensor<2x3xf32>) -> tensor<273x2xf32> { %0 = "tosa.const"() {value = dense<0.000000e+00> : tensor<2xf32>} : () -> tensor<2xf32> - %1 = tosa.reshape %arg0 {new_shape = array} : (tensor<13x21x3xf32>) -> tensor<273x3xf32> + %3 = tosa.const_shape {value = dense<[273, 3]> : tensor<2xindex>} : () -> !tosa.shape<2> + %1 = tosa.reshape %arg0, %3 : (tensor<13x21x3xf32>, !tosa.shape<2>) -> tensor<273x3xf32> // expected-error@+1 {{'tosa.fully_connected' op weight of fully_connected is not constant}} %2 = tosa.fully_connected %1, %arg1, %0 : (tensor<273x3xf32>, tensor<2x3xf32>, tensor<2xf32>) -> tensor<273x2xf32> return %2 : tensor<273x2xf32> @@ -326,7 +327,8 @@ func.func @test_fully_connected_non_const(%arg0: tensor<13x21x3xf32>, %arg1: ten func.func @test_fully_connected_non_const(%arg0: tensor<13x21x3xf32>, %arg1: tensor<2xf32>) -> tensor<273x2xf32> { %0 = "tosa.const"() {value = dense<[[-0.613216758, -0.63714242, -0.73500061], [0.180762768, 0.773053169, -0.933686495]]> : tensor<2x3xf32>} : () -> tensor<2x3xf32> - %1 = tosa.reshape %arg0 {new_shape = array} : (tensor<13x21x3xf32>) -> tensor<273x3xf32> + %3 = tosa.const_shape {value = dense<[273, 3]> : tensor<2xindex>} : () -> !tosa.shape<2> + %1 = tosa.reshape %arg0, %3 : (tensor<13x21x3xf32>, !tosa.shape<2>) -> tensor<273x3xf32> // expected-error@+1 {{'tosa.fully_connected' op bias of fully_connected is not constant}} %2 = tosa.fully_connected %1, %0, %arg1 : (tensor<273x3xf32>, tensor<2x3xf32>, tensor<2xf32>) -> tensor<273x2xf32> return %2 : tensor<273x2xf32> @@ -426,81 +428,91 @@ func.func @test_reduce_min_invalid_output_rank(%arg0 : tensor) -> () { // ----- func.func @test_reshape_type_mismatch(%arg0 : tensor<13x21x3xf32>) -> () { + %1 = tosa.const_shape {value = dense<[13, 21, 3, 
1]> : tensor<4xindex>} : () -> !tosa.shape<4> // expected-error@+2 {{failed to infer returned types}} // expected-error@+1 {{'tosa.reshape' op inferred type(s) 'tensor<13x21x3x1xf32>' are incompatible with return type(s) of operation 'tensor<13x21x3x1xi32>'}} - %0 = tosa.reshape %arg0 {new_shape = array} : (tensor<13x21x3xf32>) -> tensor<13x21x3x1xi32> + %0 = tosa.reshape %arg0, %1 : (tensor<13x21x3xf32>, !tosa.shape<4>) -> tensor<13x21x3x1xi32> return } // ----- func.func @test_reshape_static_zero_dim_input(%arg0 : tensor<13x0x3xf32>) -> () { + %s = tosa.const_shape {value = dense<[13, 21, 3]> : tensor<3xindex>} : () -> !tosa.shape<3> // expected-error@+1 {{'tosa.reshape' op operand #0 must be tosa-conformant tensor of number values, but got 'tensor<13x0x3xf32>'}} - %0 = "tosa.reshape"(%arg0) {new_shape = array} : (tensor<13x0x3xf32>) -> tensor<13x0x3xf32> + %0 = "tosa.reshape"(%arg0, %s) : (tensor<13x0x3xf32>, !tosa.shape<3>) -> tensor<13x0x3xf32> return } // ----- func.func @test_reshape_zero_dim_input(%arg0 : tensor) -> () { + %s = tosa.const_shape {value = dense<[13, 21, 3]> : tensor<3xindex>} : () -> !tosa.shape<3> // expected-error@+1 {{'tosa.reshape' op operand #0 must be tosa-conformant tensor of number values, but got 'tensor'}} - %0 = "tosa.reshape"(%arg0) {new_shape = array} : (tensor) -> tensor<13x0x3xf32> + %0 = "tosa.reshape"(%arg0, %s) : (tensor, !tosa.shape<3>) -> tensor<13x0x3xf32> return } // ----- func.func @test_reshape_rank_mismatch(%arg0 : tensor) -> () { + %s = tosa.const_shape {value = dense<[2, 4]> : tensor<2xindex>} : () -> !tosa.shape<2> // expected-error@+1 {{'tosa.reshape' op new shape does not match result rank}} - %0 = "tosa.reshape"(%arg0) {new_shape = array} : (tensor) -> tensor + %0 = "tosa.reshape"(%arg0, %s) : (tensor, !tosa.shape<2>) -> tensor return } // ----- func.func @test_reshape_inconsistent_result_type(%arg0 : tensor) -> () { + %s = tosa.const_shape {value = dense<[2, 4, -1]> : tensor<3xindex>} : () -> !tosa.shape<3> // expected-error@+1 {{'tosa.reshape' op new shape is inconsistent with result shape}} - %0 = "tosa.reshape"(%arg0) {new_shape = array} : (tensor) -> tensor + %0 = "tosa.reshape"(%arg0, %s) : (tensor, !tosa.shape<3>) -> tensor return } // ----- func.func @test_reshape_invalid_size(%arg0 : tensor<2x4xf32>) -> () { + %s = tosa.const_shape {value = dense<[3, 5]> : tensor<2xindex>} : () -> !tosa.shape<2> // expected-error@+1 {{'tosa.reshape' op cannot reshape 8 elements into 15}} - %0 = "tosa.reshape"(%arg0) {new_shape = array} : (tensor<2x4xf32>) -> tensor<3x5xf32> + %0 = "tosa.reshape"(%arg0, %s) : (tensor<2x4xf32>, !tosa.shape<2>) -> tensor<3x5xf32> return } // ----- func.func @test_reshape_invalid_newshape(%arg0 : tensor<1xf32>) -> () { + %s = tosa.const_shape {value = dense<[-1, 4]> : tensor<2xindex>} : () -> !tosa.shape<2> // expected-error@+1 {{'tosa.reshape' op cannot reshape 1 elements into 4}} - %0 = "tosa.reshape"(%arg0) {new_shape = array} : (tensor<1xf32>) -> tensor + %0 = "tosa.reshape"(%arg0, %s) : (tensor<1xf32>, !tosa.shape<2>) -> tensor return } // ----- func.func @test_reshape_invalid_newshape(%arg0 : tensor<8xf32>) -> () { + %s = tosa.const_shape {value = dense<[1, 4]> : tensor<2xindex>} : () -> !tosa.shape<2> // expected-error@+1 {{'tosa.reshape' op cannot reshape 8 elements into 4}} - %0 = "tosa.reshape"(%arg0) {new_shape = array} : (tensor<8xf32>) -> tensor + %0 = "tosa.reshape"(%arg0, %s) : (tensor<8xf32>, !tosa.shape<2>) -> tensor return } // ----- func.func @test_reshape_invalid_placeholders(%arg0 : tensor) -> 
() { + %s = tosa.const_shape {value = dense<[2, -1, -1]> : tensor<3xindex>} : () -> !tosa.shape<3> // expected-error@+1 {{'tosa.reshape' op expected at most one target dimension to be -1}} - %0 = "tosa.reshape"(%arg0) {new_shape = array} : (tensor) -> tensor<2x?x?xf32> + %0 = "tosa.reshape"(%arg0, %s) : (tensor, !tosa.shape<3>) -> tensor<2x?x?xf32> return } // ----- func.func @test_reshape_invalid_tensor_dim(%arg0 : tensor<4x?xf32>) -> () { + %s = tosa.const_shape {value = dense<[-2, -1]> : tensor<2xindex>} : () -> !tosa.shape<2> // expected-error@+1 {{'tosa.reshape' op new shape has invalid tensor dimension size -2}} - %0 = "tosa.reshape" (%arg0) {new_shape = array} : (tensor<4x?xf32>) -> tensor + %0 = "tosa.reshape" (%arg0, %s) : (tensor<4x?xf32>, !tosa.shape<2>) -> tensor return } @@ -514,6 +526,15 @@ func.func @test_reverse_axis_out_of_range(%arg0 : tensor<13x21x3xf32>) -> () { // ----- +func.func @test_reshape_zero_dim_input(%arg0 : tensor) -> () { + %1 = tosa.const_shape {value = dense<[13, 21, 3]> : tensor<3xindex>} : () -> !tosa.shape<3> + // expected-error@+1 {{'tosa.reshape' op operand #0 must be tosa-conformant tensor of number values, but got 'tensor'}} + %0 = "tosa.reshape"(%arg0, %1) : (tensor, !tosa.shape<3>) -> tensor<13x0x3xf32> + return +} + +// ----- + func.func @test_const_attribute_type_mismatch() -> tensor<100x100xf32> { // expected-error@+1 {{'tosa.const' op failed to verify that all of {value, output} have same shape}} %0 = "tosa.const"() {value = dense<0.000000e+00> : tensor<1x1xf32>} : () -> tensor<100x100xf32> diff --git a/mlir/test/Dialect/Tosa/level_check.mlir b/mlir/test/Dialect/Tosa/level_check.mlir index 26bebdd898a0d..a7f76f2d0fa64 100644 --- a/mlir/test/Dialect/Tosa/level_check.mlir +++ b/mlir/test/Dialect/Tosa/level_check.mlir @@ -70,8 +70,9 @@ func.func @test_concat(%arg0: tensor<1x1x1x13x21x3x8xf32>, %arg1: tensor<1x1x1x1 // ----- func.func @test_reshape(%arg0: tensor<13x21x3xf32>) -> tensor<1x1x1x1x1x1x819xf32> { + %1 = tosa.const_shape {value = dense<[1, 1, 1, 1, 1, 1, 819]> : tensor<7xindex>} : () -> !tosa.shape<7> // expected-error@+1 {{'tosa.reshape' op failed level check: result rank(shape) <= MAX_RANK}} - %0 = "tosa.reshape"(%arg0) {new_shape = array} : (tensor<13x21x3xf32>) -> tensor<1x1x1x1x1x1x819xf32> + %0 = "tosa.reshape"(%arg0, %1) : (tensor<13x21x3xf32>, !tosa.shape<7>) -> tensor<1x1x1x1x1x1x819xf32> return %0 : tensor<1x1x1x1x1x1x819xf32> } diff --git a/mlir/test/Dialect/Tosa/ops.mlir b/mlir/test/Dialect/Tosa/ops.mlir index d00230d12aab1..baf09e089aa30 100644 --- a/mlir/test/Dialect/Tosa/ops.mlir +++ b/mlir/test/Dialect/Tosa/ops.mlir @@ -504,7 +504,8 @@ func.func @test_greater_equal(%arg0: tensor<13x1x3xf32>, %arg1: tensor<13x21x3xf // CHECK-LABEL: reduce_all func.func @test_reduce_all(%arg0: tensor<13x21x3xi1>) -> tensor<21x3xi1> { %0 = tosa.reduce_all %arg0 {axis = 0 : i32} : (tensor<13x21x3xi1>) -> tensor<1x21x3xi1> - %1 = tosa.reshape %0 {new_shape = array} : (tensor<1x21x3xi1>) -> tensor<21x3xi1> + %2 = tosa.const_shape {value = dense<[21, 3]> : tensor<2xindex>} : () -> !tosa.shape<2> + %1 = tosa.reshape %0, %2 : (tensor<1x21x3xi1>, !tosa.shape<2>) -> tensor<21x3xi1> return %1 : tensor<21x3xi1> } @@ -512,7 +513,8 @@ func.func @test_reduce_all(%arg0: tensor<13x21x3xi1>) -> tensor<21x3xi1> { // CHECK-LABEL: reduce_any func.func @test_reduce_any(%arg0: tensor<13x21x3xi1>) -> tensor<21x3xi1> { %0 = tosa.reduce_any %arg0 {axis = 0 : i32} : (tensor<13x21x3xi1>) -> tensor<1x21x3xi1> - %1 = tosa.reshape %0 {new_shape = array} : 
(tensor<1x21x3xi1>) -> tensor<21x3xi1> + %2 = tosa.const_shape {value = dense<[21, 3]> : tensor<2xindex>} : () -> !tosa.shape<2> + %1 = tosa.reshape %0, %2 : (tensor<1x21x3xi1>, !tosa.shape<2>) -> tensor<21x3xi1> return %1 : tensor<21x3xi1> } @@ -520,7 +522,8 @@ func.func @test_reduce_any(%arg0: tensor<13x21x3xi1>) -> tensor<21x3xi1> { // CHECK-LABEL: reduce_max func.func @test_reduce_max(%arg0: tensor<13x21x3xf32>) -> tensor<21x3xf32> { %0 = tosa.reduce_max %arg0 {axis = 0 : i32} : (tensor<13x21x3xf32>) -> tensor<1x21x3xf32> - %1 = tosa.reshape %0 {new_shape = array} : (tensor<1x21x3xf32>) -> tensor<21x3xf32> + %2 = tosa.const_shape {value = dense<[21, 3]> : tensor<2xindex>} : () -> !tosa.shape<2> + %1 = tosa.reshape %0, %2 : (tensor<1x21x3xf32>, !tosa.shape<2>) -> tensor<21x3xf32> return %1 : tensor<21x3xf32> } @@ -528,7 +531,8 @@ func.func @test_reduce_max(%arg0: tensor<13x21x3xf32>) -> tensor<21x3xf32> { // CHECK-LABEL: reduce_min func.func @test_reduce_min(%arg0: tensor<13x21x3xf32>) -> tensor<21x3xf32> { %0 = tosa.reduce_min %arg0 {axis = 0 : i32} : (tensor<13x21x3xf32>) -> tensor<1x21x3xf32> - %1 = tosa.reshape %0 {new_shape = array} : (tensor<1x21x3xf32>) -> tensor<21x3xf32> + %2 = tosa.const_shape {value = dense<[21, 3]> : tensor<2xindex>} : () -> !tosa.shape<2> + %1 = tosa.reshape %0, %2 : (tensor<1x21x3xf32>, !tosa.shape<2>) -> tensor<21x3xf32> return %1 : tensor<21x3xf32> } @@ -536,7 +540,8 @@ func.func @test_reduce_min(%arg0: tensor<13x21x3xf32>) -> tensor<21x3xf32> { // CHECK-LABEL: reduce_product func.func @test_reduce_product(%arg0: tensor<13x21x3xf32>) -> tensor<21x3xf32> { %0 = tosa.reduce_prod %arg0 {axis = 0 : i32} : (tensor<13x21x3xf32>) -> tensor<1x21x3xf32> - %1 = tosa.reshape %0 {new_shape = array} : (tensor<1x21x3xf32>) -> tensor<21x3xf32> + %2 = tosa.const_shape {value = dense<[21, 3]> : tensor<2xindex>} : () -> !tosa.shape<2> + %1 = tosa.reshape %0, %2 : (tensor<1x21x3xf32>, !tosa.shape<2>) -> tensor<21x3xf32> return %1 : tensor<21x3xf32> } @@ -544,7 +549,8 @@ func.func @test_reduce_product(%arg0: tensor<13x21x3xf32>) -> tensor<21x3xf32> { // CHECK-LABEL: reduce_sum func.func @test_reduce_sum(%arg0: tensor<13x21x3xf32>) -> tensor<21x3xf32> { %0 = tosa.reduce_sum %arg0 {axis = 0 : i32} : (tensor<13x21x3xf32>) -> tensor<1x21x3xf32> - %1 = tosa.reshape %0 {new_shape = array} : (tensor<1x21x3xf32>) -> tensor<21x3xf32> + %2 = tosa.const_shape {value = dense<[21, 3]> : tensor<2xindex>} : () -> !tosa.shape<2> + %1 = tosa.reshape %0, %2 : (tensor<1x21x3xf32>, !tosa.shape<2>) -> tensor<21x3xf32> return %1 : tensor<21x3xf32> } @@ -575,7 +581,8 @@ func.func @test_pad_explicit_value(%arg0: tensor<13x21x3xf32>) -> tensor<13x21x3 // ----- // CHECK-LABEL: reshape func.func @test_reshape(%arg0: tensor<13x21x3xf32>) -> tensor<1x819xf32> { - %0 = tosa.reshape %arg0 {new_shape = array} : (tensor<13x21x3xf32>) -> tensor<1x819xf32> + %1 = tosa.const_shape {value = dense<[1, 819]> : tensor<2xindex>} : () -> !tosa.shape<2> + %0 = tosa.reshape %arg0, %1 : (tensor<13x21x3xf32>, !tosa.shape<2>) -> tensor<1x819xf32> return %0 : tensor<1x819xf32> } @@ -724,7 +731,8 @@ func.func @test_while_loop(%arg0: tensor<10xi32>, %arg1: tensor) { ^bb0(%arg2: tensor, %arg3: tensor, %arg4: tensor<10xi32>): %2 = "tosa.const"() {value = dense<1> : tensor} : () -> tensor %3 = tosa.add %arg3, %2 : (tensor, tensor) -> tensor - %4 = tosa.reshape %2 {new_shape = array} : (tensor) -> tensor<1xi32> + %7 = tosa.const_shape {value = dense<[1]> : tensor<1xindex>} : () -> !tosa.shape<1> + %4 = tosa.reshape %2, %7 : 
(tensor, !tosa.shape<1>) -> tensor<1xi32> %5 = tosa.add %arg4, %4 : (tensor<10xi32>, tensor<1xi32>) -> tensor<10xi32> %6 = tosa.add %arg2, %2 : (tensor, tensor) -> tensor tosa.yield %6, %3, %5 : tensor, tensor, tensor<10xi32> diff --git a/mlir/test/Dialect/Tosa/tosa-decompose-conv2d.mlir b/mlir/test/Dialect/Tosa/tosa-decompose-conv2d.mlir index e4a2897908072..9aade2fe45eb6 100644 --- a/mlir/test/Dialect/Tosa/tosa-decompose-conv2d.mlir +++ b/mlir/test/Dialect/Tosa/tosa-decompose-conv2d.mlir @@ -5,13 +5,16 @@ // CHECK-LABEL: @conv2d_as_fully_connected func.func @conv2d_as_fully_connected(%arg0: tensor<4x10x10x2xf32>, %arg1: tensor<3x1x1x2xf32>, %arg2: tensor<3xf32>) -> tensor<4x10x10x3xf32> { // CHECK-NOT: tosa.conv2d - // CHECK: %[[VAR0:.*]] = tosa.reshape %arg0 {new_shape = array} + // CHECK-DAG: %[[CONST0:.*]] = tosa.const_shape {value = dense<[400, 2]> : tensor<2xindex>} : () -> !tosa.shape<2> + // CHECK-DAG: %[[CONST1:.*]] = tosa.const_shape {value = dense<[3, 2]> : tensor<2xindex>} : () -> !tosa.shape<2> + // CHECK-DAG: %[[CONST2:.*]] = tosa.const_shape {value = dense<[4, 10, 10, 3]> : tensor<4xindex>} : () -> !tosa.shape<4> + // CHECK: %[[VAR0:.*]] = tosa.reshape %arg0, %[[CONST0]] // CHECK-SAME: -> tensor<400x2xf32> - // CHECK: %[[VAR1:.*]] = tosa.reshape %arg1 {new_shape = array} + // CHECK: %[[VAR1:.*]] = tosa.reshape %arg1, %[[CONST1]] // CHECK-SAME: -> tensor<3x2xf32> // CHECK: %[[VAR2:.*]] = tosa.fully_connected %[[VAR0]], %[[VAR1]], %arg2 // CHECK-SAME: -> tensor<400x3xf32> - // CHECK: %[[VAR3:.*]] = tosa.reshape %[[VAR2]] {new_shape = array} + // CHECK: %[[VAR3:.*]] = tosa.reshape %[[VAR2]], %[[CONST2]] // CHECK-SAME: -> tensor<4x10x10x3xf32> // CHECK: return %[[VAR3]] %0 = tosa.conv2d %arg0, %arg1, %arg2 {acc_type = f32, pad = array, stride = array, dilation = array} : (tensor<4x10x10x2xf32>, tensor<3x1x1x2xf32>, tensor<3xf32>) -> tensor<4x10x10x3xf32> @@ -23,14 +26,17 @@ func.func @conv2d_as_fully_connected(%arg0: tensor<4x10x10x2xf32>, %arg1: tensor // CHECK-LABEL: @conv2d_as_fully_connected_quant func.func @conv2d_as_fully_connected_quant(%arg0: tensor<4x10x10x2xi8>, %arg1: tensor<3x1x1x2xi8>, %arg2: tensor<3xi32>) -> tensor<4x10x10x3xi32> { // CHECK-NOT: tosa.conv2d - // CHECK: %[[VAR0:.*]] = tosa.reshape %arg0 {new_shape = array} + // CHECK-DAG: %[[CONST0:.*]] = tosa.const_shape {value = dense<[400, 2]> : tensor<2xindex>} : () -> !tosa.shape<2> + // CHECK-DAG: %[[CONST1:.*]] = tosa.const_shape {value = dense<[3, 2]> : tensor<2xindex>} : () -> !tosa.shape<2> + // CHECK-DAG: %[[CONST2:.*]] = tosa.const_shape {value = dense<[4, 10, 10, 3]> : tensor<4xindex>} : () -> !tosa.shape<4> + // CHECK: %[[VAR0:.*]] = tosa.reshape %arg0, %[[CONST0]] // CHECK-SAME: -> tensor<400x2xi8> - // CHECK: %[[VAR1:.*]] = tosa.reshape %arg1 {new_shape = array} + // CHECK: %[[VAR1:.*]] = tosa.reshape %arg1, %[[CONST1]] // CHECK-SAME: -> tensor<3x2xi8> // CHECK: %[[VAR2:.*]] = tosa.fully_connected %[[VAR0]], %[[VAR1]], %arg2 // CHECK-SAME: {input_zp = 42 : i32, weight_zp = 24 : i32} // CHECK-SAME: -> tensor<400x3xi32> - // CHECK: %[[VAR3:.*]] = tosa.reshape %[[VAR2]] {new_shape = array} + // CHECK: %[[VAR3:.*]] = tosa.reshape %[[VAR2]], %[[CONST2]] // CHECK-SAME: -> tensor<4x10x10x3xi32> // CHECK: return %[[VAR3]] %input_zp = "tosa.const"() {value = dense<42> : tensor<1xi8>} : () -> tensor<1xi8> @@ -42,14 +48,14 @@ func.func @conv2d_as_fully_connected_quant(%arg0: tensor<4x10x10x2xi8>, %arg1: t // ----- // CHECK-LABEL: func.func @conv_with_dynamic_dim( -// CHECK-SAME: %[[VAL_0:.*]]: tensor, -// 
CHECK-SAME: %[[VAL_1:.*]]: tensor<384x1x1x64xi8>, -// CHECK-SAME: %[[VAL_2:.*]]: tensor<384xi32>) -> tensor { func.func @conv_with_dynamic_dim(%arg0: tensor, %arg1: tensor<384x1x1x64xi8>, %arg2: tensor<384xi32>) -> tensor { -// CHECK: %[[VAL_3:.*]] = tosa.reshape %[[VAL_0]] {new_shape = array} : (tensor) -> tensor -// CHECK: %[[VAL_4:.*]] = tosa.reshape %[[VAL_1]] {new_shape = array} : (tensor<384x1x1x64xi8>) -> tensor<384x64xi8> -// CHECK: %[[VAL_5:.*]] = tosa.fully_connected %[[VAL_3]], %[[VAL_4]], %[[VAL_2]] {input_zp = -6 : i32, weight_zp = 11 : i32} : (tensor, tensor<384x64xi8>, tensor<384xi32>) -> tensor -// CHECK: %[[VAL_6:.*]] = tosa.reshape %[[VAL_5]] {new_shape = array} : (tensor) -> tensor +// CHECK-DAG: %[[CONST0:.*]] = tosa.const_shape {value = dense<[-1, 64]> : tensor<2xindex>} : () -> !tosa.shape<2> +// CHECK-DAG: %[[CONST1:.*]] = tosa.const_shape {value = dense<[384, 64]> : tensor<2xindex>} : () -> !tosa.shape<2> +// CHECK-DAG: %[[CONST2:.*]] = tosa.const_shape {value = dense<[-1, 14, 14, 384]> : tensor<4xindex>} : () -> !tosa.shape<4> +// CHECK: %[[VAL_3:.*]] = tosa.reshape %arg0, %[[CONST0]] +// CHECK: %[[VAL_4:.*]] = tosa.reshape %arg1, %[[CONST1]] : (tensor<384x1x1x64xi8>, !tosa.shape<2>) -> tensor<384x64xi8> +// CHECK: %[[VAL_5:.*]] = tosa.fully_connected %[[VAL_3]], %[[VAL_4]], %arg2 {input_zp = -6 : i32, weight_zp = 11 : i32} : (tensor, tensor<384x64xi8>, tensor<384xi32>) -> tensor +// CHECK: %[[VAL_6:.*]] = tosa.reshape %[[VAL_5]], %[[CONST2]] : (tensor, !tosa.shape<4>) -> tensor // CHECK: return %[[VAL_6]] : tensor // CHECK: } %input_zp = "tosa.const"() {value = dense<-6> : tensor<1xi8>} : () -> tensor<1xi8> @@ -62,15 +68,19 @@ func.func @conv_with_dynamic_dim(%arg0: tensor, %arg1: tensor<384 // CHECK-LABEL: @conv2d_as_fully_connected_padded func.func @conv2d_as_fully_connected_padded(%arg0: tensor<4x10x10x2xi8>, %arg1: tensor<3x1x1x2xi8>, %arg2: tensor<3xi32>) -> tensor<4x12x12x3xi32> { + // CHECK-DAG: %[[FULLY_NEW_SHAPE:.+]] = tosa.const_shape {value = dense<[4, 12, 12, 3]> : tensor<4xindex>} + // CHECK-DAG: %[[INPUT_NEW_SHAPE:.+]] = tosa.const_shape {value = dense<[576, 2]> : tensor<2xindex>} + // CHECK-DAG: %[[FILTER_NEW_SHAPE:.+]] = tosa.const_shape {value = dense<[3, 2]> : tensor<2xindex>} // CHECK-DAG: %[[PAD_SHAPE:.+]] = tosa.const_shape {value = dense<[0, 0, 1, 1, 1, 1, 0, 0]> : tensor<8xindex>} : () -> !tosa.shape<8> // CHECK-DAG: %[[PAD_VAL:.+]] = "tosa.const"() <{value = dense<42> : tensor} // CHECK-DAG: %[[PAD:.+]] = tosa.pad %arg0, %[[PAD_SHAPE]], %[[PAD_VAL]] : (tensor<4x10x10x2xi8>, !tosa.shape<8>, tensor) -> tensor<4x12x12x2xi8> - // CHECK-DAG: %[[RESHAPE_INPUT:.+]] = tosa.reshape %[[PAD]] {new_shape = array} - // CHECK-DAG: %[[RESHAPE_FILTER:.+]] = tosa.reshape %arg1 {new_shape = array} + // CHECK-DAG: %[[RESHAPE_INPUT:.+]] = tosa.reshape %[[PAD]], %[[INPUT_NEW_SHAPE]] + // CHECK-DAG: %[[RESHAPE_FILTER:.+]] = tosa.reshape %arg1, %[[FILTER_NEW_SHAPE]] // CHECK-DAG: %[[FULLY:.+]] = tosa.fully_connected %[[RESHAPE_INPUT]], %[[RESHAPE_FILTER]], %arg2 {input_zp = 42 : i32, weight_zp = 24 : i32} - // CHECK: %[[RESHAPE:.+]] = tosa.reshape %[[FULLY]] {new_shape = array} + // CHECK: %[[RESHAPE:.+]] = tosa.reshape %[[FULLY]], %[[FULLY_NEW_SHAPE]] %input_zp = "tosa.const"() {value = dense<42> : tensor<1xi8>} : () -> tensor<1xi8> %weight_zp = "tosa.const"() {value = dense<24> : tensor<1xi8>} : () -> tensor<1xi8> %0 = tosa.conv2d %arg0, %arg1, %arg2, %input_zp, %weight_zp {acc_type = i32, pad = array, stride = array, dilation = array} : (tensor<4x10x10x2xi8>, 
tensor<3x1x1x2xi8>, tensor<3xi32>, tensor<1xi8>, tensor<1xi8>) -> tensor<4x12x12x3xi32> return %0 : tensor<4x12x12x3xi32> } + diff --git a/mlir/test/Dialect/Tosa/tosa-decompose-depthwise.mlir b/mlir/test/Dialect/Tosa/tosa-decompose-depthwise.mlir index ce29d1a498b4f..6562a7c2ab55c 100644 --- a/mlir/test/Dialect/Tosa/tosa-decompose-depthwise.mlir +++ b/mlir/test/Dialect/Tosa/tosa-decompose-depthwise.mlir @@ -5,15 +5,19 @@ // CHECK-LABEL: @depthwise_conv2d_as_mul func.func @depthwise_conv2d_as_mul(%arg0: tensor<4x10x10x2xf32>, %arg1: tensor<1x1x2x3xf32>, %arg2: tensor<6xf32>) -> tensor<4x10x10x6xf32> { // CHECK-NOT: tosa.depthwise_conv2d - // CHECK: %[[VAR0:.*]] = tosa.reshape %arg0 {new_shape = array} + // CHECK-DAG: %[[CONST0:.+]] = tosa.const_shape {value = dense<[4, 10, 10, 2, 1]> : tensor<5xindex> + // CHECK-DAG: %[[CONST1:.+]] = tosa.const_shape {value = dense<[1, 1, 1, 2, 3]> : tensor<5xindex> + // CHECK-DAG: %[[CONST2:.+]] = tosa.const_shape {value = dense<[4, 10, 10, 6]> : tensor<4xindex> + // CHECK-DAG: %[[CONST3:.+]] = tosa.const_shape {value = dense<[1, 1, 1, 6]> : tensor<4xindex> + // CHECK-DAG: %[[VAR0:.*]] = tosa.reshape %arg0, %[[CONST0]] // CHECK-SAME: -> tensor<4x10x10x2x1xf32> - // CHECK: %[[VAR1:.*]] = tosa.reshape %arg1 {new_shape = array} + // CHECK: %[[VAR1:.*]] = tosa.reshape %arg1, %[[CONST1]] // CHECK-SAME: -> tensor<1x1x1x2x3xf32> // CHECK: %[[VAR2:.*]] = tosa.mul %[[VAR0]], %[[VAR1]] // CHECK-SAME: -> tensor<4x10x10x2x3xf32> - // CHECK: %[[VAR3:.*]] = tosa.reshape %[[VAR2]] {new_shape = array} + // CHECK: %[[VAR3:.*]] = tosa.reshape %[[VAR2]], %[[CONST2]] // CHECK-SAME: -> tensor<4x10x10x6xf32> - // CHECK: %[[VAR4:.*]] = tosa.reshape %arg2 {new_shape = array} + // CHECK: %[[VAR4:.*]] = tosa.reshape %arg2, %[[CONST3]] // CHECK-SAME: -> tensor<1x1x1x6xf32> // CHECK: %[[VAR5:.*]] = tosa.add %[[VAR3]], %[[VAR4]] // CHECK-SAME: -> tensor<4x10x10x6xf32> @@ -26,17 +30,22 @@ func.func @depthwise_conv2d_as_mul(%arg0: tensor<4x10x10x2xf32>, %arg1: tensor<1 // CHECK-LABEL: @depthwise_conv2d_as_mul_q func.func @depthwise_conv2d_as_mul_q(%arg0: tensor<4x10x10x2xi8>, %arg1: tensor<1x1x2x3xi8>, %arg2: tensor<6xi32>) -> tensor<4x10x10x6xi32> { + // CHECK-DAG: %[[CONST0:.+]] = tosa.const_shape {value = dense<[4, 10, 10, 2, 1]> : tensor<5xindex> // CHECK-DAG: %[[iZp:.+]] = "tosa.const"() <{value = dense<7> : tensor<1x1x1x1x1xi32>} // CHECK-DAG: %[[wZp:.+]] = "tosa.const"() <{value = dense<11> : tensor<1x1x1x1xi32>} - // CHECK: %[[rIn:.+]] = tosa.reshape %arg0 {new_shape = array} + // CHECK-DAG: %[[CONST3:.+]] = tosa.const_shape {value = dense<[1, 1, 1, 2, 3]> : tensor<5xindex> + // CHECK-DAG: %[[CONST4:.+]] = tosa.const_shape {value = dense<[4, 10, 10, 6]> : tensor<4xindex> + // CHECK-DAG: %[[CONST5:.+]] = tosa.const_shape {value = dense<[1, 1, 1, 6]> : tensor<4xindex> + // CHECK-DAG: %[[SHIFT:.*]] = "tosa.const"() <{value = dense<0> : tensor<1xi8>}> : () -> tensor<1xi8> + // CHECK: %[[rIn:.+]] = tosa.reshape %arg0, %[[CONST0]] // CHECK: %[[cIn:.+]] = tosa.cast %[[rIn]] : (tensor<4x10x10x2x1xi8>) -> tensor<4x10x10x2x1xi32> // CHECK: %[[cWe:.+]] = tosa.cast %arg1 : (tensor<1x1x2x3xi8>) -> tensor<1x1x2x3xi32> // CHECK: %[[sIn:.+]] = tosa.sub %[[cIn]], %[[iZp]] // CHECK: %[[sWe:.+]] = tosa.sub %[[cWe]], %[[wZp]] - // CHECK: %[[resWe:.+]] = tosa.reshape %[[sWe]] {new_shape = array} - // CHECK: %[[mul:.+]] = tosa.mul %[[sIn]], %[[resWe]] - // CHECK: %[[reO:.+]] = tosa.reshape %[[mul]] {new_shape = array} - // CHECK: %[[reArg2:.+]] = tosa.reshape %arg2 {new_shape = array} + // CHECK: 
%[[resWe:.+]] = tosa.reshape %[[sWe]], %[[CONST3]] + // CHECK: %[[mul:.+]] = tosa.mul %[[sIn]], %[[resWe]], %[[SHIFT]] + // CHECK: %[[reO:.+]] = tosa.reshape %[[mul]], %[[CONST4]] + // CHECK: %[[reArg2:.+]] = tosa.reshape %arg2, %[[CONST5]] // CHECK: %[[add:.+]] = tosa.add %[[reO]], %[[reArg2]] %input_zp = "tosa.const"() {value = dense<7> : tensor<1xi8>} : () -> tensor<1xi8> %weight_zp = "tosa.const"() {value = dense<11> : tensor<1xi8>} : () -> tensor<1xi8> @@ -48,14 +57,19 @@ func.func @depthwise_conv2d_as_mul_q(%arg0: tensor<4x10x10x2xi8>, %arg1: tensor< // CHECK-LABEL: @depthwise_conv2d_as_mul_padded func.func @depthwise_conv2d_as_mul_padded(%arg0: tensor<4x10x10x2xf32>, %arg1: tensor<1x1x2x3xf32>, %arg2: tensor<6xf32>) -> tensor<4x12x12x6xf32> { - // CHECK-DAG: %[[pad:.+]] = tosa.const_shape {value = dense<[0, 0, 1, 1, 1, 1, 0, 0, 0, 0]> : tensor<10xindex>} : () -> !tosa.shape<10> + // CHECK-DAG: %[[CONST0:.+]] = tosa.const_shape {value = dense<[4, 10, 10, 2, 1]> : tensor<5xindex>} + // CHECK-DAG: %[[pad:.+]] = tosa.const_shape {value = dense<[0, 0, 1, 1, 1, 1, 0, 0, 0, 0]> : tensor<10xindex>} : () -> !tosa.shape<10> // CHECK-DAG: %[[zero:.+]] = "tosa.const"() <{value = dense<0.000000e+00> : tensor} - // CHECK: %[[reIn:.+]] = tosa.reshape %arg0 {new_shape = array} + // CHECK-DAG: %[[CONST3:.+]] = tosa.const_shape {value = dense<[1, 1, 1, 2, 3]> : tensor<5xindex>} + // CHECK-DAG: %[[CONST4:.+]] = tosa.const_shape {value = dense<[4, 12, 12, 6]> : tensor<4xindex>} + // CHECK-DAG: %[[CONST5:.+]] = tosa.const_shape {value = dense<[1, 1, 1, 6]> : tensor<4xindex>} + // CHECK-DAG: %[[SHIFT:.*]] = "tosa.const"() <{value = dense<0> : tensor<1xi8>}> : () -> tensor<1xi8> + // CHECK: %[[reIn:.+]] = tosa.reshape %arg0, %[[CONST0]] // CHECK: %[[padded:.+]] = tosa.pad %[[reIn]], %[[pad]], %[[zero]] : (tensor<4x10x10x2x1xf32>, !tosa.shape<10>, tensor) -> tensor<4x12x12x2x1xf32> - // CHECK: %[[reArg1:.+]] = tosa.reshape %arg1 {new_shape = array} - // CHECK: %[[mul:.+]] = tosa.mul %[[padded]], %[[reArg1]] - // CHECK: %[[reOut:.+]] = tosa.reshape %[[mul]] {new_shape = array} - // CHECK: %[[reArg2:.+]] = tosa.reshape %arg2 {new_shape = array} + // CHECK: %[[reArg1:.+]] = tosa.reshape %arg1, %[[CONST3]] + // CHECK: %[[mul:.+]] = tosa.mul %[[padded]], %[[reArg1]], %[[SHIFT]] + // CHECK: %[[reOut:.+]] = tosa.reshape %[[mul]], %[[CONST4]] + // CHECK: %[[reArg2:.+]] = tosa.reshape %arg2, %[[CONST5]] // CHECK: %[[add:.+]] = tosa.add %[[reOut]], %[[reArg2]] %0 = tosa.depthwise_conv2d %arg0, %arg1, %arg2 {acc_type = f32, pad = array, stride = array, dilation = array} : (tensor<4x10x10x2xf32>, tensor<1x1x2x3xf32>, tensor<6xf32>) -> tensor<4x12x12x6xf32> return %0 : tensor<4x12x12x6xf32> diff --git a/mlir/test/Dialect/Tosa/tosa-decompose-transpose-conv.mlir b/mlir/test/Dialect/Tosa/tosa-decompose-transpose-conv.mlir index 82838cc7e1545..bd18b7ea0fdff 100644 --- a/mlir/test/Dialect/Tosa/tosa-decompose-transpose-conv.mlir +++ b/mlir/test/Dialect/Tosa/tosa-decompose-transpose-conv.mlir @@ -56,11 +56,15 @@ func.func @transpose_conv2d_strided(%arg0: tensor<2x17x15x3xf32>, %arg1: tensor< // CHECK-DAG: %[[PADV:.+]] = tosa.const_shape {value = dense<[0, 0, 0, 1, 0, 1, 0, 0]> : tensor<8xindex>} : () -> !tosa.shape<8> // CHECK-DAG: %[[TRANSV:.+]] = "tosa.const"() <{value = dense<[2, 4, 0, 1, 3, 5]> : tensor<6xi32>} // CHECK-DAG: %[[PADW:.+]] = tosa.pad %arg1, %[[PADV]] - // CHECK-DAG: %[[RESW1:.+]] = tosa.reshape %[[PADW]] {new_shape = array} + // CHECK-DAG: %[[CONST1:.+]] = tosa.const_shape {value = dense<[5, 2, 2, 2, 3, 3]> 
: tensor<6xindex>} + // CHECK-DAG: %[[RESW1:.+]] = tosa.reshape %[[PADW]], %[[CONST1]] // CHECK-DAG: %[[TRANS:.+]] = tosa.transpose %[[RESW1]], %[[TRANSV]] - // CHECK-DAG: %[[RESW2:.+]] = tosa.reshape %[[TRANS]] {new_shape = array} + // CHECK-DAG: %[[CONST3:.+]] = tosa.const_shape {value = dense<[30, 2, 2, 3]> : tensor<4xindex>} + // CHECK-DAG: %[[RESW2:.+]] = tosa.reshape %[[TRANS]], %[[CONST3]] // CHECK-DAG: %[[REV1:.+]] = tosa.reverse %[[RESW2]] {axis = 1 : i32} // CHECK-DAG: %[[NEWWEIGHT:.+]] = tosa.reverse %[[REV1]] {axis = 2 : i32} + // CHECK-DAG: %[[SIZE:.*]] = tosa.const_shape {value = dense<[2, 35, 47, 5]> : tensor<4xindex>} : () -> !tosa.shape<4> + // CHECK-DAG: %[[START:.*]] = tosa.const_shape {value = dense<0> : tensor<4xindex>} : () -> !tosa.shape<4> // Pad out the input matrix to handle the transpose conv. // CHECK-DAG: %[[PAD:.+]] = tosa.const_shape {value = dense<[0, 0, 1, 1, 1, 1, 0, 0]> : tensor<8xindex>} : () -> !tosa.shape<8> @@ -70,13 +74,14 @@ func.func @transpose_conv2d_strided(%arg0: tensor<2x17x15x3xf32>, %arg1: tensor< // Manipulate the final shape. // CHECK-DAG: %[[BIAS:.+]] = "tosa.const"() <{value = dense<0.000000e+00> : tensor<30xf32>} // CHECK-DAG: %[[CONV:.+]] = tosa.conv2d %[[NEWINPUT]], %[[NEWWEIGHT]], %[[BIAS]] {acc_type = f32, dilation = array, pad = array, stride = array} - // CHECK-DAG: %[[RESHAPE_OUT_1:.+]] = tosa.reshape %[[CONV]] {new_shape = array} + // CHECK-DAG: %[[CONST6:.+]] = tosa.const_shape {value = dense<[2, 18, 16, 2, 3, 5]> : tensor<6xindex>} + // CHECK-DAG: %[[RESHAPE_OUT_1:.+]] = tosa.reshape %[[CONV]], %[[CONST6]] // CHECK-DAG: %[[TRANS_OUT:.+]] = tosa.transpose %[[RESHAPE_OUT_1]], %[[TRANS2]] - // CHECK-DAG: %[[RESHAPE_OUT_2:.+]] = tosa.reshape %[[TRANS_OUT]] - // CHECK-DAG: %[[START:.*]] = tosa.const_shape {value = dense<0> : tensor<4xindex>} : () -> !tosa.shape<4> - // CHECK-DAG: %[[SIZE:.*]] = tosa.const_shape {value = dense<[2, 35, 47, 5]> : tensor<4xindex>} : () -> !tosa.shape<4> - // CHECK-DAG: %[[SLICE:.*]] = tosa.slice %[[RESHAPE_OUT_2]], %[[START]], %[[SIZE]] - // CHECK-DAG: %[[RESHAPE_ARG2:.+]] = tosa.reshape %arg2 + // CHECK-DAG: %[[CONST8:.+]] = tosa.const_shape {value = dense<[2, 36, 48, 5]> : tensor<4xindex> + // CHECK-DAG: %[[RESHAPE_OUT_2:.+]] = tosa.reshape %[[TRANS_OUT]], %[[CONST8]] + // CHECK-DAG: %[[SLICE:.+]] = tosa.slice %[[RESHAPE_OUT_2]], %[[START]], %[[SIZE]] + // CHECK-DAG: %[[CONST9:.+]] = tosa.const_shape {value = dense<[1, 1, 1, 5]> : tensor<4xindex>} + // CHECK-DAG: %[[RESHAPE_ARG2:.+]] = tosa.reshape %arg2, %[[CONST9]] // CHECK: %[[ADD:.+]] = tosa.add %[[SLICE]], %[[RESHAPE_ARG2]] %0 = tosa.transpose_conv2d %arg0, %arg1, %arg2{acc_type = f32, out_pad = array, out_shape = array, stride = array} : (tensor<2x17x15x3xf32>, tensor<5x3x5x3xf32>, tensor<5xf32>) -> tensor<2x35x47x5xf32> %1 = tensor.cast %0 : tensor<2x35x47x5xf32> to tensor<2x?x?x5xf32> @@ -92,11 +97,15 @@ func.func @transpose_conv2d_strided_quantized(%arg0: tensor<2x17x15x3xi8>, %arg1 // CHECK-DAG: %[[PADV:.+]] = tosa.const_shape {value = dense<[0, 0, 0, 1, 0, 1, 0, 0]> : tensor<8xindex>} : () -> !tosa.shape<8> // CHECK-DAG: %[[TRANSV:.+]] = "tosa.const"() <{value = dense<[2, 4, 0, 1, 3, 5]> : tensor<6xi32>} // CHECK-DAG: %[[PADW:.+]] = tosa.pad %arg1, %[[PADV]] {input_zp = 42 : i32} - // CHECK-DAG: %[[RESW1:.+]] = tosa.reshape %[[PADW]] {new_shape = array} + // CHECK-DAG: %[[CONST1:.+]] = tosa.const_shape {value = dense<[5, 2, 2, 2, 3, 3]> : tensor<6xindex>} + // CHECK-DAG: %[[RESW1:.+]] = tosa.reshape %[[PADW]], %[[CONST1]] // CHECK-DAG: 
%[[TRANS:.+]] = tosa.transpose %[[RESW1]], %[[TRANSV]] - // CHECK-DAG: %[[RESW2:.+]] = tosa.reshape %[[TRANS]] {new_shape = array} + // CHECK-DAG: %[[CONST3:.+]] = tosa.const_shape {value = dense<[30, 2, 2, 3]> : tensor<4xindex>} + // CHECK-DAG: %[[RESW2:.+]] = tosa.reshape %[[TRANS]], %[[CONST3]] // CHECK-DAG: %[[REV1:.+]] = tosa.reverse %[[RESW2]] {axis = 1 : i32} // CHECK-DAG: %[[NEWWEIGHT:.+]] = tosa.reverse %[[REV1]] {axis = 2 : i32} + // CHECK-DAG: %[[SIZE:.*]] = tosa.const_shape {value = dense<[2, 35, 47, 5]> : tensor<4xindex>} : () -> !tosa.shape<4> + // CHECK-DAG: %[[START:.*]] = tosa.const_shape {value = dense<0> : tensor<4xindex>} : () -> !tosa.shape<4> // Pad out the input matrix to handle the transpose conv. // CHECK-DAG: %[[PAD:.+]] = tosa.const_shape {value = dense<[0, 0, 1, 1, 1, 1, 0, 0]> : tensor<8xindex>} : () -> !tosa.shape<8> @@ -108,13 +117,14 @@ func.func @transpose_conv2d_strided_quantized(%arg0: tensor<2x17x15x3xi8>, %arg1 // CHECK-DAG: %[[INPUT_ZP:.+]] = "tosa.const"() <{value = dense<-22> : tensor<1xi8>} // CHECK-DAG: %[[WEIGHT_ZP:.+]] = "tosa.const"() <{value = dense<42> : tensor<1xi8>} // CHECK-DAG: %[[CONV:.+]] = tosa.conv2d %[[NEWINPUT]], %[[NEWWEIGHT]], %[[BIAS]], %[[INPUT_ZP]], %[[WEIGHT_ZP]] {acc_type = i32, dilation = array, pad = array, stride = array} - // CHECK-DAG: %[[RESHAPE_OUT_1:.+]] = tosa.reshape %[[CONV]] {new_shape = array} + // CHECK-DAG: %[[CONV_NEW_SHAPE:.*]] = tosa.const_shape {value = dense<[2, 18, 16, 2, 3, 5]> : tensor<6xindex>} + // CHECK-DAG: %[[RESHAPE_OUT_1:.+]] = tosa.reshape %[[CONV]], %[[CONV_NEW_SHAPE]] // CHECK-DAG: %[[TRANS_OUT:.+]] = tosa.transpose %[[RESHAPE_OUT_1]], %[[TRANS2]] - // CHECK-DAG: %[[RESHAPE_OUT_2:.+]] = tosa.reshape %[[TRANS_OUT]] - // CHECK-DAG: %[[START:.*]] = tosa.const_shape {value = dense<0> : tensor<4xindex>} - // CHECK-DAG: %[[SIZE:.*]] = tosa.const_shape {value = dense<[2, 35, 47, 5]> : tensor<4xindex>} - // CHECK-DAG: %[[SLICE:.*]] = tosa.slice %[[RESHAPE_OUT_2]], %[[START]], %[[SIZE]] - // CHECK-DAG: %[[RESHAPE_ARG2:.+]] = tosa.reshape %arg2 + // CHECK-DAG: %[[TRANS_NEW_SHAPE:.+]] = tosa.const_shape {value = dense<[2, 36, 48, 5]> : tensor<4xindex>} + // CHECK-DAG: %[[RESHAPE_OUT_2:.+]] = tosa.reshape %[[TRANS_OUT]], %[[TRANS_NEW_SHAPE]] + // CHECK-DAG: %[[SLICE:.+]] = tosa.slice %[[RESHAPE_OUT_2]], %[[START]], %[[SIZE]] + // CHECK-DAG: %[[ARG2_NEW_SHAPE:.+]] = tosa.const_shape {value = dense<[1, 1, 1, 5]> : tensor<4xindex>} + // CHECK-DAG: %[[RESHAPE_ARG2:.+]] = tosa.reshape %arg2, %[[ARG2_NEW_SHAPE]] // CHECK: %[[ADD:.+]] = tosa.add %[[SLICE]], %[[RESHAPE_ARG2]] %input_zp = "tosa.const"() {value = dense<-22> : tensor<1xi8>} : () -> tensor<1xi8> %weight_zp = "tosa.const"() {value = dense<42> : tensor<1xi8>} : () -> tensor<1xi8> @@ -126,25 +136,31 @@ func.func @transpose_conv2d_strided_quantized(%arg0: tensor<2x17x15x3xi8>, %arg1 // CHECK-LABEL: @transpose_conv2d_strided_overpad func.func @transpose_conv2d_strided_overpad(%arg0 : tensor<1x16x1x1xi8>, %arg1 : tensor<1x2x1x1xi8>, %arg2 : tensor<1xi32>) -> (tensor<1x19x2x1xi32>) { - // CHECK-DAG: %[[WEIGHT_PAD:.+]] = tosa.const_shape {value = dense<[0, 0, 0, 0, 0, 1, 0, 0]> : tensor<8xindex>} : () -> !tosa.shape<8> + // CHECK-DAG: %[[WEIGHT_PAD:.+]] = tosa.const_shape {value = dense<[0, 0, 0, 0, 0, 1, 0, 0]> : tensor<8xindex>} + // CHECK-DAG: %[[CONST1:.+]] = tosa.const_shape {value = dense<[1, 2, 1, 1, 2, 1]> : tensor<6xindex>} // CHECK-DAG: %[[WEIGHT_PERMS:.+]] = "tosa.const"() <{value = dense<[2, 4, 0, 1, 3, 5]> : tensor<6xi32>} - // CHECK-DAG: 
%[[INPUT_PAD:.+]] = tosa.const_shape {value = dense<[0, 0, 1, 1, 0, 0, 0, 0]> : tensor<8xindex>} : () -> !tosa.shape<8> + // CHECK-DAG: %[[CONST3:.+]] = tosa.const_shape {value = dense<[2, 2, 1, 1]> : tensor<4xindex>} + // CHECK-DAG: %[[INPUT_PAD:.+]] = tosa.const_shape {value = dense<[0, 0, 1, 1, 0, 0, 0, 0]> : tensor<8xindex>} // CHECK-DAG: %[[ZERO:.+]] = "tosa.const"() <{value = dense<0> : tensor<2xi32>} + // CHECK-DAG: %[[CONST6:.+]] = tosa.const_shape {value = dense<[1, 17, 1, 1, 2, 1]> : tensor<6xindex>} // CHECK-DAG: %[[RESULT_PERMS:.+]] = "tosa.const"() <{value = dense<[0, 1, 3, 2, 4, 5]> : tensor<6xi32>} - // CHECK-DAG: %[[RESULT_PAD:.+]] = tosa.const_shape {value = dense<[0, 0, 2, 0, 0, 0, 0, 0]> : tensor<8xindex>} : () -> !tosa.shape<8> + // CHECK-DAG: %[[CONST8:.+]] = tosa.const_shape {value = dense<[1, 17, 2, 1]> : tensor<4xindex>} + // CHECK-DAG: %[[RESULT_PAD:.+]] = tosa.const_shape {value = dense<[0, 0, 2, 0, 0, 0, 0, 0]> : tensor<8xindex>} + // CHECK-DAG: %[[CONST10:.+]] = tosa.const_shape {value = dense<1> : tensor<4xindex>} + // CHECK-DAG: %[[INPUT_ZP:.*]] = "tosa.const"() <{value = dense<-103> : tensor<1xi8>}> + // CHECK-DAG: %[[WEIGHT_ZP:.*]] = "tosa.const"() <{value = dense<93> : tensor<1xi8>}> // CHECK: %[[PAD_WEIGHT:.+]] = tosa.pad %arg1, %[[WEIGHT_PAD]] {input_zp = 93 : i32} - // CHECK: %[[RESHAPE_WEIGHT_0:.+]] = tosa.reshape %[[PAD_WEIGHT]] {new_shape = array} + // CHECK: %[[RESHAPE_WEIGHT_0:.+]] = tosa.reshape %[[PAD_WEIGHT]], %[[CONST1]] // CHECK: %[[TRANSPOSE_WEIGHT:.+]] = tosa.transpose %[[RESHAPE_WEIGHT_0]], %[[WEIGHT_PERMS]] - // CHECK: %[[RESHAPE_WEIGHT_1:.+]] = tosa.reshape %[[TRANSPOSE_WEIGHT]] {new_shape = array} + // CHECK: %[[RESHAPE_WEIGHT_1:.+]] = tosa.reshape %[[TRANSPOSE_WEIGHT]], %[[CONST3]] // CHECK: %[[REVERSE:.+]] = tosa.reverse %[[RESHAPE_WEIGHT_1]] {axis = 1 : i32} // CHECK: %[[PAD_INPUT:.+]] = tosa.pad %arg0, %[[INPUT_PAD]] {input_zp = -103 : i32} - // CHECK: %[[CONV:.+]] = tosa.conv2d %[[PAD_INPUT]], %[[REVERSE]], %[[ZERO]] - // CHECK-SAME{literal}: dilation = [1, 1], pad = [0, 0, 0, 0], input_zp = -103 : i32, weight_zp = 93 : i32, stride = [1, 1]} - // CHECK: %[[RESHAPE_RESULT_0:.+]] = tosa.reshape %[[CONV]] {new_shape = array} + // CHECK: %[[CONV:.+]] = tosa.conv2d %[[PAD_INPUT]], %[[REVERSE]], %[[ZERO]], %[[INPUT_ZP]], %[[WEIGHT_ZP]] {acc_type = i32, dilation = array, pad = array, stride = array} + // CHECK: %[[RESHAPE_RESULT_0:.+]] = tosa.reshape %[[CONV]], %[[CONST6]] // CHECK: %[[TRANSPOSE_RESULT:.+]] = tosa.transpose %[[RESHAPE_RESULT_0]], %[[RESULT_PERMS]] - // CHECK: %[[RESHAPE_RESULT_1:.+]] = tosa.reshape %[[TRANSPOSE_RESULT]] {new_shape = array} + // CHECK: %[[RESHAPE_RESULT_1:.+]] = tosa.reshape %[[TRANSPOSE_RESULT]], %[[CONST8]] // CHECK: %[[PAD_RESULT:.+]] = tosa.pad %[[RESHAPE_RESULT_1]], %[[RESULT_PAD]] - // CHECK: %[[RESHAPE_ARG2:.+]] = tosa.reshape %arg2 {new_shape = array} + // CHECK: %[[RESHAPE_ARG2:.+]] = tosa.reshape %arg2, %[[CONST10]] // CHECK: %[[ADD:.+]] = tosa.add %[[PAD_RESULT]], %[[RESHAPE_ARG2]] %input_zp = "tosa.const"() {value = dense<-103> : tensor<1xi8>} : () -> tensor<1xi8> %weight_zp = "tosa.const"() {value = dense<93> : tensor<1xi8>} : () -> tensor<1xi8> diff --git a/mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir b/mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir index 73eabab657f38..bdd403567a4ed 100644 --- a/mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir +++ b/mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir @@ -376,29 +376,42 @@ func.func @test_table_dynamic(%arg0 : tensor<4x?xi16>, %arg1 : tensor<513xi16>) 
// CHECK-LABEL: @test_static_reshape func.func @test_static_reshape(%arg0 : tensor<4x4xi32>) -> () { - // CHECK: tosa.reshape %arg0 {new_shape = array} : (tensor<4x4xi32>) -> tensor<16xi32> - %0 = tosa.reshape %arg0 {new_shape = array} : (tensor<4x4xi32>) -> tensor + // CHECK: %[[CONST3:.+]] = tosa.const_shape {value = dense<16> : tensor<1xindex>} : () -> !tosa.shape<1> + %3 = tosa.const_shape {value = dense<16> : tensor<1xindex>} : () -> !tosa.shape<1> + // CHECK: tosa.reshape %arg0, %[[CONST3]] : (tensor<4x4xi32>, !tosa.shape<1>) -> tensor<16xi32> + %0 = tosa.reshape %arg0, %3 : (tensor<4x4xi32>, !tosa.shape<1>) -> tensor<16xi32> - // CHECK: tosa.reshape %arg0 {new_shape = array} : (tensor<4x4xi32>) -> tensor<16xi32> - %1 = tosa.reshape %arg0 {new_shape = array} : (tensor<4x4xi32>) -> tensor + // CHECK: %[[CONST4:.+]] = tosa.const_shape {value = dense<-1> : tensor<1xindex>} : () -> !tosa.shape<1> + // CHECK: tosa.reshape %arg0, %[[CONST4]] : (tensor<4x4xi32>, !tosa.shape<1>) -> tensor<16xi32> + %4 = tosa.const_shape {value = dense<-1> : tensor<1xindex>} : () -> !tosa.shape<1> + %1 = tosa.reshape %arg0, %4 : (tensor<4x4xi32>, !tosa.shape<1>) -> tensor<16xi32> - // CHECK: tosa.reshape %arg0 {new_shape = array} : (tensor<4x4xi32>) -> tensor<2x8xi32> - %2 = tosa.reshape %arg0 {new_shape = array} : (tensor<4x4xi32>) -> tensor + // CHECK: %[[CONST5:.+]] = tosa.const_shape {value = dense<[2, -1]> : tensor<2xindex>} : () -> !tosa.shape<2> + // CHECK: tosa.reshape %arg0, %[[CONST5]] : (tensor<4x4xi32>, !tosa.shape<2>) -> tensor<2x8xi32> + %5 = tosa.const_shape {value = dense<[2, -1]> : tensor<2xindex>} : () -> !tosa.shape<2> + %2 = tosa.reshape %arg0, %5 : (tensor<4x4xi32>, !tosa.shape<2>) -> tensor<2x8xi32> return } + // ----- // CHECK-LABEL: @test_dynamic_reshape func.func @test_dynamic_reshape(%arg0 : tensor<4x?xi32>) -> () { - // CHECK: %0 = tosa.reshape %arg0 {new_shape = array} : (tensor<4x?xi32>) -> tensor<16xi32> - %0 = tosa.reshape %arg0 {new_shape = array} : (tensor<4x?xi32>) -> tensor - - // CHECK: %1 = tosa.reshape %arg0 {new_shape = array} : (tensor<4x?xi32>) -> tensor - %1 = tosa.reshape %arg0 {new_shape = array} : (tensor<4x?xi32>) -> tensor - - // CHECK: %2 = tosa.reshape %arg0 {new_shape = array} : (tensor<4x?xi32>) -> tensor<2x?xi32> - %2 = tosa.reshape %arg0 {new_shape = array} : (tensor<4x?xi32>) -> tensor + // CHECK: %0 = tosa.const_shape {value = dense<16> : tensor<1xindex>} : () -> !tosa.shape<1> + %0 = tosa.const_shape {value = dense<16> : tensor<1xindex>} : () -> !tosa.shape<1> + // CHECK: %1 = tosa.reshape %arg0, %0 : (tensor<4x?xi32>, !tosa.shape<1>) -> tensor<16xi32> + %1 = tosa.reshape %arg0, %0 : (tensor<4x?xi32>, !tosa.shape<1>) -> tensor + + // CHECK: %2 = tosa.const_shape {value = dense<-1> : tensor<1xindex>} : () -> !tosa.shape<1> + %2 = tosa.const_shape {value = dense<-1> : tensor<1xindex>} : () -> !tosa.shape<1> + // CHECK: %3 = tosa.reshape %arg0, %2 : (tensor<4x?xi32>, !tosa.shape<1>) -> tensor + %3 = tosa.reshape %arg0, %2 : (tensor<4x?xi32>, !tosa.shape<1>) -> tensor + + // CHECK: %4 = tosa.const_shape {value = dense<[2, -1]> : tensor<2xindex>} : () -> !tosa.shape<2> + %4 = tosa.const_shape {value = dense<[2, -1]> : tensor<2xindex>} : () -> !tosa.shape<2> + // CHECK: %5 = tosa.reshape %arg0, %4 : (tensor<4x?xi32>, !tosa.shape<2>) -> tensor<2x?xi32> + %5 = tosa.reshape %arg0, %4 : (tensor<4x?xi32>, !tosa.shape<2>) -> tensor return } diff --git a/mlir/test/Dialect/Tosa/tosa-reduce-transposes.mlir b/mlir/test/Dialect/Tosa/tosa-reduce-transposes.mlir index 
f274eb9c10a81..947335e45a9d9 100644 --- a/mlir/test/Dialect/Tosa/tosa-reduce-transposes.mlir +++ b/mlir/test/Dialect/Tosa/tosa-reduce-transposes.mlir @@ -141,12 +141,14 @@ func.func @test_mulop_conversion(%arg0: tensor<1x2x3x4xi32>, %arg1: tensor<1x2x3 // COM: this case is a reshape we don't convert, since can't fold the transpose into it. // COM: a transform actually occurs underneath the hood, but it results in identical IR. // CHECK-LABEL: @test_basic_non_broadcasting_reshape -// CHECK: "tosa.const"() <{value = dense<[0, 2, 1]> : tensor<3xi32>}> : () -> tensor<3xi32> -// CHECK: tosa.reshape %arg0 {new_shape = array} : (tensor<2x3xi32>) -> tensor<1x3x2xi32> -// CHECK: tosa.transpose %1, %0 : (tensor<1x3x2xi32>, tensor<3xi32>) -> tensor<1x2x3xi32> +// CHECK-DAG: %[[VAL_1:.*]] = tosa.const_shape {value = dense<[1, 3, 2]> : tensor<3xindex>} +// CHECK-DAG: %[[VAL_2:.*]] = "tosa.const"() <{value = dense<[0, 2, 1]> : tensor<3xi32>}> +// CHECK: %[[VAL_3:.*]] = tosa.reshape %arg0, %[[VAL_1]] : (tensor<2x3xi32>, !tosa.shape<3>) -> tensor<1x3x2xi32> +// CHECK: %[[VAL_4:.*]] = tosa.transpose %[[VAL_3]], %[[VAL_2]] : (tensor<1x3x2xi32>, tensor<3xi32>) -> tensor<1x2x3xi32> func.func @test_basic_non_broadcasting_reshape(%arg0: tensor<2x3xi32>) -> tensor<1x2x3xi32> { + %shape = tosa.const_shape {value = dense<[1, 3, 2]> : tensor<3xindex>} : () -> !tosa.shape<3> %perms = "tosa.const"() {value = dense<[0, 2, 1]> : tensor<3xi32>} : () -> tensor<3xi32> - %1 = tosa.reshape %arg0 {new_shape = array} : (tensor<2x3xi32>) -> tensor<1x3x2xi32> + %1 = tosa.reshape %arg0, %shape : (tensor<2x3xi32>, !tosa.shape<3>) -> tensor<1x3x2xi32> %2 = tosa.transpose %1, %perms : (tensor<1x3x2xi32>, tensor<3xi32>) -> tensor<1x2x3xi32> return %2 : tensor<1x2x3xi32> } @@ -154,11 +156,13 @@ func.func @test_basic_non_broadcasting_reshape(%arg0: tensor<2x3xi32>) -> tensor // ----- // CHECK-LABEL: @test_dynamic_broadcasting_reshape -// CHECK: %[[RES:.*]] = tosa.reshape %arg0 {new_shape = array} : (tensor) -> tensor<1x1x?xi32> +// CHECK-DAG: %[[SHAPE:.*]] = tosa.const_shape {value = dense<[1, 1, -1]> : tensor<3xindex>} +// CHECK: %[[RES:.*]] = tosa.reshape %arg0, %[[SHAPE]] : (tensor, !tosa.shape<3>) -> tensor<1x1x?xi32> // CHECK: return %[[RES]] func.func @test_dynamic_broadcasting_reshape(%arg0: tensor) -> tensor<1x1x?xi32> { + %shape = tosa.const_shape {value = dense<[1, -1, 1]> : tensor<3xindex>} : () -> !tosa.shape<3> %perms = "tosa.const"() {value = dense<[0, 2, 1]> : tensor<3xi32>} : () -> tensor<3xi32> - %1 = tosa.reshape %arg0 {new_shape = array} : (tensor) -> tensor<1x?x1xi32> + %1 = tosa.reshape %arg0, %shape : (tensor, !tosa.shape<3>) -> tensor<1x?x1xi32> %2 = tosa.transpose %1, %perms : (tensor<1x?x1xi32>, tensor<3xi32>) -> tensor<1x1x?xi32> return %2 : tensor<1x1x?xi32> } @@ -167,12 +171,14 @@ func.func @test_dynamic_broadcasting_reshape(%arg0: tensor) -> tensor<1x1 // CHECK-LABEL: @test_reshape_for_broadcast // CHECK-DAG: %[[RESHAPE_INPUT:.*]] = "tosa.const"() <{value = dense<[1, 2, 3, 4]> -// CHECK-DAG: %[[RESHAPE:.*]] = tosa.reshape %[[RESHAPE_INPUT]] {new_shape = array} -// CHECK-DAG: %[[ADD:.*]] = tosa.add %arg0, %[[RESHAPE]] +// CHECK-DAG: %[[SHAPE:.*]] = tosa.const_shape {value = dense<[4, 1, 1]> : tensor<3xindex>} +// CHECK: %[[RESHAPE:.*]] = tosa.reshape %[[RESHAPE_INPUT]], %[[SHAPE]] : (tensor<4xi32>, !tosa.shape<3>) -> tensor<4x1x1xi32> +// CHECK: %[[ADD:.*]] = tosa.add %arg0, %[[RESHAPE]] // CHECK: return %[[ADD]] func.func @test_reshape_for_broadcast(%arg0: tensor<4x3x2xi32>) -> tensor<4x3x2xi32> { %0 = 
"tosa.const"() {value = dense<[1,2,3,4]> : tensor<4xi32>} : () -> tensor<4xi32> - %reshape = tosa.reshape %0 {new_shape = array} : (tensor<4xi32>) -> tensor<1x1x4xi32> + %1 = tosa.const_shape {value = dense<[1, 1, 4]> : tensor<3xindex>} : () -> !tosa.shape<3> + %reshape = tosa.reshape %0, %1 : (tensor<4xi32>, !tosa.shape<3>) -> tensor<1x1x4xi32> %perms0 = "tosa.const"() {value = dense<[2, 1, 0]> : tensor<3xi32>} : () -> tensor<3xi32> %transpose0 = tosa.transpose %arg0, %perms0 : (tensor<4x3x2xi32>, tensor<3xi32>) -> tensor<2x3x4xi32> %add = tosa.add %transpose0, %reshape : (tensor<2x3x4xi32>, tensor<1x1x4xi32>) -> tensor<2x3x4xi32> @@ -187,25 +193,28 @@ func.func @test_reshape_for_broadcast(%arg0: tensor<4x3x2xi32>) -> tensor<4x3x2x // CHECK-LABEL: @test_resnet18_common_case // COM: note that %74 is now represented by %arg2 -// CHECK-DAG: %[[VAL_2:.*]] = "tosa.const"() <{value = dense_resource : tensor<64xf32>}> : () -> tensor<64xf32> -// CHECK-DAG: %[[VAL_3:.*]] = "tosa.const"() <{value = dense_resource : tensor<64xf32>}> : () -> tensor<64xf32> -// CHECK-DAG: %[[VAL_4:.*]] = "tosa.const"() <{value = dense<9.99999974E-6> : tensor<1xf32>}> : () -> tensor<1xf32> -// CHECK-DAG: %[[VAL_5:.*]] = "tosa.const"() <{value = dense<5.000000e-01> : tensor<1xf32>}> : () -> tensor<1xf32> -// CHECK-DAG: %[[VAL_6:.*]] = tosa.add %arg1, %[[VAL_4]] : (tensor<64xf32>, tensor<1xf32>) -> tensor<64xf32> -// CHECK-DAG: %[[VAL_7:.*]] = tosa.pow %[[VAL_6]], %[[VAL_5]] : (tensor<64xf32>, tensor<1xf32>) -> tensor<64xf32> -// CHECK-DAG: %[[VAL_8:.*]] = tosa.reciprocal %[[VAL_7]] : (tensor<64xf32>) -> tensor<64xf32> -// CHECK-DAG: %[[VAL_9:.*]] = tosa.reshape %arg0 {new_shape = array} : (tensor<64xf32>) -> tensor<1x1x1x64xf32> -// CHECK-DAG: %[[VAL_10:.*]] = tosa.sub %arg2, %[[VAL_9]] : (tensor<1x112x112x64xf32>, tensor<1x1x1x64xf32>) -> tensor<1x112x112x64xf32> -// CHECK-DAG: %[[VAL_11:.*]] = tosa.reshape %[[VAL_8]] {new_shape = array} : (tensor<64xf32>) -> tensor<1x1x1x64xf32> -// CHECK-DAG: %[[VAL_12:.*]] = tosa.mul %[[VAL_10]], %[[VAL_11]] : (tensor<1x112x112x64xf32>, tensor<1x1x1x64xf32>) -> tensor<1x112x112x64xf32> -// CHECK-DAG: %[[VAL_13:.*]] = tosa.reshape %[[VAL_3]] {new_shape = array} : (tensor<64xf32>) -> tensor<1x1x1x64xf32> -// CHECK-DAG: %[[VAL_14:.*]] = tosa.mul %[[VAL_12]], %[[VAL_13]] : (tensor<1x112x112x64xf32>, tensor<1x1x1x64xf32>) -> tensor<1x112x112x64xf32> -// CHECK-DAG: %[[VAL_15:.*]] = tosa.reshape %[[VAL_2]] {new_shape = array} : (tensor<64xf32>) -> tensor<1x1x1x64xf32> -// CHECK-DAG: %[[VAL_16:.*]] = tosa.add %[[VAL_14]], %[[VAL_15]] : (tensor<1x112x112x64xf32>, tensor<1x1x1x64xf32>) -> tensor<1x112x112x64xf32> -// CHECK-DAG: %[[VAL_17:.*]] = tosa.clamp %[[VAL_16]] {max_fp = 3.40282347E+38 : f32, max_int = 2147483647 : i64, min_fp = 0.000000e+00 : f32, min_int = 0 : i64} : (tensor<1x112x112x64xf32>) -> tensor<1x112x112x64xf32> -// CHECK: return %[[VAL_17]] : tensor<1x112x112x64xf32> - +// CHECK-DAG: %[[VAL_3:.*]] = "tosa.const"() <{value = dense_resource : tensor<64xf32>}> : () -> tensor<64xf32> +// CHECK-DAG: %[[VAL_4:.*]] = "tosa.const"() <{value = dense_resource : tensor<64xf32>}> : () -> tensor<64xf32> +// CHECK-DAG: %[[VAL_5:.*]] = "tosa.const"() <{value = dense<9.99999974E-6> : tensor<1xf32>}> : () -> tensor<1xf32> +// CHECK-DAG: %[[VAL_6:.*]] = "tosa.const"() <{value = dense<5.000000e-01> : tensor<1xf32>}> : () -> tensor<1xf32> +// CHECK-DAG: %[[VAL_7:.*]] = tosa.add %arg1, %[[VAL_5]] : (tensor<64xf32>, tensor<1xf32>) -> tensor<64xf32> +// CHECK-DAG: %[[VAL_8:.*]] = tosa.pow 
%[[VAL_7]], %[[VAL_6]] : (tensor<64xf32>, tensor<1xf32>) -> tensor<64xf32> +// CHECK-DAG: %[[VAL_9:.*]] = tosa.reciprocal %[[VAL_8]] : (tensor<64xf32>) -> tensor<64xf32> +// CHECK-DAG: %[[VAL_10:.*]] = tosa.const_shape {value = dense<[1, 1, 1, 64]> : tensor<4xindex>} : () -> !tosa.shape<4> +// CHECK-DAG: %[[VAL_11:.*]] = tosa.reshape %arg0, %[[VAL_10]] : (tensor<64xf32>, !tosa.shape<4>) -> tensor<1x1x1x64xf32> +// CHECK-DAG: %[[VAL_12:.*]] = tosa.sub %arg2, %[[VAL_11]] : (tensor<1x112x112x64xf32>, tensor<1x1x1x64xf32>) -> tensor<1x112x112x64xf32> +// CHECK-DAG: %[[VAL_13:.*]] = tosa.const_shape {value = dense<[1, 1, 1, 64]> : tensor<4xindex>} : () -> !tosa.shape<4> +// CHECK-DAG: %[[VAL_14:.*]] = tosa.reshape %[[VAL_9]], %[[VAL_13]] : (tensor<64xf32>, !tosa.shape<4>) -> tensor<1x1x1x64xf32> +// CHECK-DAG: %[[VAL_15:.*]] = tosa.mul %[[VAL_12]], %[[VAL_14]] : (tensor<1x112x112x64xf32>, tensor<1x1x1x64xf32>) -> tensor<1x112x112x64xf32> +// CHECK-DAG: %[[VAL_16:.*]] = tosa.const_shape {value = dense<[1, 1, 1, 64]> : tensor<4xindex>} : () -> !tosa.shape<4> +// CHECK-DAG: %[[VAL_17:.*]] = tosa.reshape %[[VAL_4]], %[[VAL_16]] : (tensor<64xf32>, !tosa.shape<4>) -> tensor<1x1x1x64xf32> +// CHECK-DAG: %[[VAL_18:.*]] = tosa.mul %[[VAL_15]], %[[VAL_17]] : (tensor<1x112x112x64xf32>, tensor<1x1x1x64xf32>) -> tensor<1x112x112x64xf32> +// CHECK-DAG: %[[VAL_19:.*]] = tosa.const_shape {value = dense<[1, 1, 1, 64]> : tensor<4xindex>} : () -> !tosa.shape<4> +// CHECK-DAG: %[[VAL_20:.*]] = tosa.reshape %[[VAL_3]], %[[VAL_19]] : (tensor<64xf32>, !tosa.shape<4>) -> tensor<1x1x1x64xf32> +// CHECK-DAG: %[[VAL_21:.*]] = tosa.add %[[VAL_18]], %[[VAL_20]] : (tensor<1x112x112x64xf32>, tensor<1x1x1x64xf32>) -> tensor<1x112x112x64xf32> +// CHECK-DAG: %[[VAL_22:.*]] = tosa.clamp %[[VAL_21]] {max_fp = 3.40282347E+38 : f32, max_int = 2147483647 : i64, min_fp = 0.000000e+00 : f32, min_int = 0 : i64} : (tensor<1x112x112x64xf32>) -> tensor<1x112x112x64xf32> func.func @test_resnet18_common_case(%arg0: tensor<64xf32>, %arg1: tensor<64xf32>, %74: tensor<1x112x112x64xf32>) -> tensor<1x112x112x64xf32> { + %58 = tosa.const_shape {value = dense<[1, 64, 1, 1]> : tensor<4xindex>} : () -> !tosa.shape<4> %59 = "tosa.const"() <{value = dense_resource : tensor<64xf32>}> : () -> tensor<64xf32> %60 = "tosa.const"() <{value = dense_resource : tensor<64xf32>}> : () -> tensor<64xf32> %63 = "tosa.const"() <{value = dense<[0, 2, 3, 1]> : tensor<4xi32>}> : () -> tensor<4xi32> @@ -216,13 +225,13 @@ func.func @test_resnet18_common_case(%arg0: tensor<64xf32>, %arg1: tensor<64xf32 %76 = tosa.add %arg1, %69 : (tensor<64xf32>, tensor<1xf32>) -> tensor<64xf32> %77 = tosa.pow %76, %70 : (tensor<64xf32>, tensor<1xf32>) -> tensor<64xf32> %78 = tosa.reciprocal %77 : (tensor<64xf32>) -> tensor<64xf32> - %79 = tosa.reshape %arg0 {new_shape = array} : (tensor<64xf32>) -> tensor<1x64x1x1xf32> + %79 = tosa.reshape %arg0, %58 : (tensor<64xf32>, !tosa.shape<4>) -> tensor<1x64x1x1xf32> %80 = tosa.sub %75, %79 : (tensor<1x64x112x112xf32>, tensor<1x64x1x1xf32>) -> tensor<1x64x112x112xf32> - %81 = tosa.reshape %78 {new_shape = array} : (tensor<64xf32>) -> tensor<1x64x1x1xf32> + %81 = tosa.reshape %78, %58 : (tensor<64xf32>, !tosa.shape<4>) -> tensor<1x64x1x1xf32> %82 = tosa.mul %80, %81 : (tensor<1x64x112x112xf32>, tensor<1x64x1x1xf32>) -> tensor<1x64x112x112xf32> - %83 = tosa.reshape %60 {new_shape = array} : (tensor<64xf32>) -> tensor<1x64x1x1xf32> + %83 = tosa.reshape %60, %58 : (tensor<64xf32>, !tosa.shape<4>) -> tensor<1x64x1x1xf32> %84 = tosa.mul %82, %83 : 
(tensor<1x64x112x112xf32>, tensor<1x64x1x1xf32>) -> tensor<1x64x112x112xf32> - %85 = tosa.reshape %59 {new_shape = array} : (tensor<64xf32>) -> tensor<1x64x1x1xf32> + %85 = tosa.reshape %59, %58 : (tensor<64xf32>, !tosa.shape<4>) -> tensor<1x64x1x1xf32> %86 = tosa.add %84, %85 : (tensor<1x64x112x112xf32>, tensor<1x64x1x1xf32>) -> tensor<1x64x112x112xf32> %87 = tosa.clamp %86 {max_fp = 3.40282347E+38 : f32, max_int = 2147483647 : i64, min_fp = 0.000000e+00 : f32, min_int = 0 : i64} : (tensor<1x64x112x112xf32>) -> tensor<1x64x112x112xf32> %88 = tosa.transpose %87, %63 : (tensor<1x64x112x112xf32>, tensor<4xi32>) -> tensor<1x112x112x64xf32> @@ -285,7 +294,8 @@ func.func @test_no_transform_if_outside_fan_in_cone(%arg0: tensor<3x3x3x3xi32>) // CHECK: return %[[RESHAPE]], %[[CLAMP]] func.func @test_two_different_downstream_converge_to_reshape_same_perms(%arg0: tensor<64xf32>) -> (tensor<1x1x64xf32>, tensor<1x1x64xf32>) { %0 = "tosa.const"() <{value = dense<[0, 2, 1]> : tensor<3xi32>}> : () -> tensor<3xi32> - %1 = tosa.reshape %arg0 {new_shape = array} : (tensor<64xf32>) -> tensor<1x64x1xf32> + %shape = tosa.const_shape {value = dense<[1, 64, 1]> : tensor<3xindex>} : () -> !tosa.shape<3> + %1 = tosa.reshape %arg0, %shape : (tensor<64xf32>, !tosa.shape<3>) -> tensor<1x64x1xf32> %2 = tosa.clamp %1 {max_fp = 3.40282347E+38 : f32, max_int = 2147483647 : i64, min_fp = 0.000000e+00 : f32, min_int = 0 : i64} : (tensor<1x64x1xf32>) -> tensor<1x64x1xf32> %3 = tosa.transpose %1, %0 : (tensor<1x64x1xf32>, tensor<3xi32>) -> tensor<1x1x64xf32> %4 = tosa.transpose %2, %0 : (tensor<1x64x1xf32>, tensor<3xi32>) -> tensor<1x1x64xf32> @@ -305,7 +315,8 @@ func.func @test_two_different_downstream_converge_to_reshape_same_perms(%arg0: t func.func @test_two_different_downstream_converge_to_reshape_different_perms(%arg0: tensor<64xf32>) -> (tensor<1x1x64xf32>, tensor<64x1x1xf32>) { %0 = "tosa.const"() <{value = dense<[1, 2, 0]> : tensor<3xi32>}> : () -> tensor<3xi32> %1 = "tosa.const"() <{value = dense<[0, 2, 1]> : tensor<3xi32>}> : () -> tensor<3xi32> - %2 = tosa.reshape %arg0 {new_shape = array} : (tensor<64xf32>) -> tensor<1x64x1xf32> + %shape = tosa.const_shape {value = dense<[1, 64, 1]> : tensor<3xindex>} : () -> !tosa.shape<3> + %2 = tosa.reshape %arg0, %shape : (tensor<64xf32>, !tosa.shape<3>) -> tensor<1x64x1xf32> %3 = tosa.clamp %2 {max_fp = 3.40282347E+38 : f32, max_int = 2147483647 : i64, min_fp = 0.000000e+00 : f32, min_int = 0 : i64} : (tensor<1x64x1xf32>) -> tensor<1x64x1xf32> %4 = tosa.transpose %2, %1 : (tensor<1x64x1xf32>, tensor<3xi32>) -> tensor<1x1x64xf32> %5 = tosa.transpose %3, %0 : (tensor<1x64x1xf32>, tensor<3xi32>) -> tensor<64x1x1xf32> diff --git a/mlir/test/Dialect/Vector/canonicalize.mlir b/mlir/test/Dialect/Vector/canonicalize.mlir index 61e858f5f226a..a74e562ad2f68 100644 --- a/mlir/test/Dialect/Vector/canonicalize.mlir +++ b/mlir/test/Dialect/Vector/canonicalize.mlir @@ -132,10 +132,35 @@ func.func @extract_from_create_mask_dynamic_position(%dim0: index, %index: index // ----- +// CHECK-LABEL: @extract_scalar_poison +func.func @extract_scalar_poison() -> f32 { + // CHECK-NEXT: %[[UB:.*]] = ub.poison : f32 + // CHECK-NOT: vector.extract + // CHECK-NEXT: return %[[UB]] : f32 + %0 = ub.poison : vector<4x8xf32> + %1 = vector.extract %0[2, 4] : f32 from vector<4x8xf32> + return %1 : f32 +} + +// ----- + +// CHECK-LABEL: @extract_vector_poison +func.func @extract_vector_poison() -> vector<8xf32> { + // CHECK-NEXT: %[[UB:.*]] = ub.poison : vector<8xf32> + // CHECK-NOT: vector.extract + // 
CHECK-NEXT: return %[[UB]] : vector<8xf32> + %0 = ub.poison : vector<4x8xf32> + %1 = vector.extract %0[2] : vector<8xf32> from vector<4x8xf32> + return %1 : vector<8xf32> +} + +// ----- + // CHECK-LABEL: @extract_scalar_poison_idx func.func @extract_scalar_poison_idx(%a: vector<4x5xf32>) -> f32 { + // CHECK-NEXT: %[[UB:.*]] = ub.poison : f32 // CHECK-NOT: vector.extract - // CHECK-NEXT: ub.poison : f32 + // CHECK-NEXT: return %[[UB]] : f32 %0 = vector.extract %a[-1, 0] : f32 from vector<4x5xf32> return %0 : f32 } @@ -144,8 +169,9 @@ func.func @extract_scalar_poison_idx(%a: vector<4x5xf32>) -> f32 { // CHECK-LABEL: @extract_vector_poison_idx func.func @extract_vector_poison_idx(%a: vector<4x5xf32>) -> vector<5xf32> { + // CHECK-NEXT: %[[UB:.*]] = ub.poison : vector<5xf32> // CHECK-NOT: vector.extract - // CHECK-NEXT: ub.poison : vector<5xf32> + // CHECK-NEXT: return %[[UB]] : vector<5xf32> %0 = vector.extract %a[-1] : vector<5xf32> from vector<4x5xf32> return %0 : vector<5xf32> } @@ -155,8 +181,9 @@ func.func @extract_vector_poison_idx(%a: vector<4x5xf32>) -> vector<5xf32> { // CHECK-LABEL: @extract_multiple_poison_idx func.func @extract_multiple_poison_idx(%a: vector<4x5x8xf32>) -> vector<8xf32> { + // CHECK-NEXT: %[[UB:.*]] = ub.poison : vector<8xf32> // CHECK-NOT: vector.extract - // CHECK-NEXT: ub.poison : vector<8xf32> + // CHECK-NEXT: return %[[UB]] : vector<8xf32> %0 = vector.extract %a[-1, -1] : vector<8xf32> from vector<4x5x8xf32> return %0 : vector<8xf32> } @@ -2886,13 +2913,47 @@ func.func @vector_insert_const_regression(%arg0: i8) -> vector<4xi8> { return %1 : vector<4xi8> } +// ----- + +// Inserting a poison value shouldn't be folded as the resulting vector is not +// fully poison. + +// CHECK-LABEL: @insert_scalar_poison +func.func @insert_scalar_poison(%a: vector<4x8xf32>) + -> vector<4x8xf32> { + // CHECK-NEXT: %[[UB:.*]] = ub.poison : f32 + // CHECK-NEXT: %[[RES:.*]] = vector.insert %[[UB]] + // CHECK-NEXT: return %[[RES]] : vector<4x8xf32> + %0 = ub.poison : f32 + %1 = vector.insert %0, %a[2, 3] : f32 into vector<4x8xf32> + return %1 : vector<4x8xf32> +} + +// ----- + +// Inserting a poison value shouldn't be folded as the resulting vector is not +// fully poison. 
+ +// CHECK-LABEL: @insert_vector_poison +func.func @insert_vector_poison(%a: vector<4x8xf32>) + -> vector<4x8xf32> { + // CHECK-NEXT: %[[UB:.*]] = ub.poison : vector<8xf32> + // CHECK-NEXT: %[[RES:.*]] = vector.insert %[[UB]] + // CHECK-NEXT: return %[[RES]] : vector<4x8xf32> + %0 = ub.poison : vector<8xf32> + %1 = vector.insert %0, %a[2] : vector<8xf32> into vector<4x8xf32> + return %1 : vector<4x8xf32> +} + + // ----- // CHECK-LABEL: @insert_scalar_poison_idx func.func @insert_scalar_poison_idx(%a: vector<4x5xf32>, %b: f32) -> vector<4x5xf32> { + // CHECK-NEXT: %[[UB:.*]] = ub.poison : vector<4x5xf32> // CHECK-NOT: vector.insert - // CHECK-NEXT: ub.poison : vector<4x5xf32> + // CHECK-NEXT: return %[[UB]] : vector<4x5xf32> %0 = vector.insert %b, %a[-1, 0] : f32 into vector<4x5xf32> return %0 : vector<4x5xf32> } @@ -2902,8 +2963,9 @@ func.func @insert_scalar_poison_idx(%a: vector<4x5xf32>, %b: f32) // CHECK-LABEL: @insert_vector_poison_idx func.func @insert_vector_poison_idx(%a: vector<4x5xf32>, %b: vector<5xf32>) -> vector<4x5xf32> { + // CHECK-NEXT: %[[UB:.*]] = ub.poison : vector<4x5xf32> // CHECK-NOT: vector.insert - // CHECK-NEXT: ub.poison : vector<4x5xf32> + // CHECK-NEXT: return %[[UB]] : vector<4x5xf32> %0 = vector.insert %b, %a[-1] : vector<5xf32> into vector<4x5xf32> return %0 : vector<4x5xf32> } @@ -2913,8 +2975,9 @@ func.func @insert_vector_poison_idx(%a: vector<4x5xf32>, %b: vector<5xf32>) // CHECK-LABEL: @insert_multiple_poison_idx func.func @insert_multiple_poison_idx(%a: vector<4x5x8xf32>, %b: vector<8xf32>) -> vector<4x5x8xf32> { + // CHECK-NEXT: %[[UB:.*]] = ub.poison : vector<4x5x8xf32> // CHECK-NOT: vector.insert - // CHECK-NEXT: ub.poison : vector<4x5x8xf32> + // CHECK-NEXT: return %[[UB]] : vector<4x5x8xf32> %0 = vector.insert %b, %a[-1, -1] : vector<8xf32> into vector<4x5x8xf32> return %0 : vector<4x5x8xf32> } diff --git a/mlir/test/Dialect/Vector/vector-bitcast-lowering-transforms.mlir b/mlir/test/Dialect/Vector/vector-bitcast-lowering-transforms.mlir index 346291019451c..29e7007666e87 100644 --- a/mlir/test/Dialect/Vector/vector-bitcast-lowering-transforms.mlir +++ b/mlir/test/Dialect/Vector/vector-bitcast-lowering-transforms.mlir @@ -24,7 +24,7 @@ func.func @vector_bitcast_2d(%arg0: vector<2x4xi32>) -> vector<2x2xi64> { } // CHECK-LABEL: func.func @vector_bitcast_2d // CHECK-SAME: %[[IN:[a-zA-Z0-9]+]] -// CHECK: %[[INIT:.+]] = arith.constant {{.+}} : vector<2x2xi64> +// CHECK: %[[INIT:.+]] = ub.poison : vector<2x2xi64> // CHECK: %[[V1:.+]] = vector.extract %[[IN]][0] : vector<4xi32> from vector<2x4xi32> // CHECK: %[[B1:.+]] = vector.bitcast %[[V1]] : vector<4xi32> to vector<2xi64> // CHECK: %[[R1:.+]] = vector.insert %[[B1]], %[[INIT]] [0] @@ -39,7 +39,7 @@ func.func @vector_bitcast_4d_with_scalable_dim(%arg0: vector<1x2x[3]x4xi64>) -> } // CHECK-LABEL: func.func @vector_bitcast_4d_with_scalable_dim // CHECK-SAME: %[[IN:[a-zA-Z0-9]+]] -// CHECK: %[[INIT:.+]] = arith.constant dense<0> : vector<1x2x[3]x8xi32> +// CHECK: %[[INIT:.+]] = ub.poison : vector<1x2x[3]x8xi32> // CHECK: %[[V1:.+]] = vector.extract %[[IN]][0, 0] : vector<[3]x4xi64> from vector<1x2x[3]x4xi64> // CHECK: %[[B1:.+]] = vector.bitcast %[[V1]] : vector<[3]x4xi64> to vector<[3]x8xi32> // CHECK: %[[R1:.+]] = vector.insert %[[B1]], %[[INIT]] [0, 0] : vector<[3]x8xi32> into vector<1x2x[3]x8xi32> @@ -54,7 +54,7 @@ func.func @vector_bitcast_2d_trailing_scalable_dim(%arg0: vector<2x[2]xi64>) -> } // CHECK-LABEL: func.func @vector_bitcast_2d_trailing_scalable_dim // CHECK-SAME: %[[IN:[a-zA-Z0-9]+]] -// 
CHECK: %[[INIT:.+]] = arith.constant dense<0> : vector<2x[4]xi32> +// CHECK: %[[INIT:.+]] = ub.poison : vector<2x[4]xi32> // CHECK: %[[V1:.+]] = vector.extract %[[IN]][0] : vector<[2]xi64> from vector<2x[2]xi64> // CHECK: %[[B1:.+]] = vector.bitcast %[[V1]] : vector<[2]xi64> to vector<[4]xi32> // CHECK: %[[R1:.+]] = vector.insert %[[B1]], %[[INIT]] [0] : vector<[4]xi32> into vector<2x[4]xi32> diff --git a/mlir/test/Dialect/Vector/vector-broadcast-lowering-transforms.mlir b/mlir/test/Dialect/Vector/vector-broadcast-lowering-transforms.mlir index 4a5ea439134cf..8e167a520260f 100644 --- a/mlir/test/Dialect/Vector/vector-broadcast-lowering-transforms.mlir +++ b/mlir/test/Dialect/Vector/vector-broadcast-lowering-transforms.mlir @@ -41,8 +41,8 @@ func.func @broadcast_vec1d_from_vec1d(%arg0: vector<2xf32>) -> vector<2xf32> { // CHECK-LABEL: func @broadcast_vec2d_from_vec1d // CHECK-SAME: %[[A:.*0]]: vector<2xf32> -// CHECK: %[[C0:.*]] = arith.constant dense<0.000000e+00> : vector<3x2xf32> -// CHECK: %[[T0:.*]] = vector.insert %[[A]], %[[C0]] [0] : vector<2xf32> into vector<3x2xf32> +// CHECK: %[[U0:.*]] = ub.poison : vector<3x2xf32> +// CHECK: %[[T0:.*]] = vector.insert %[[A]], %[[U0]] [0] : vector<2xf32> into vector<3x2xf32> // CHECK: %[[T1:.*]] = vector.insert %[[A]], %[[T0]] [1] : vector<2xf32> into vector<3x2xf32> // CHECK: %[[T2:.*]] = vector.insert %[[A]], %[[T1]] [2] : vector<2xf32> into vector<3x2xf32> // CHECK: return %[[T2]] : vector<3x2xf32> @@ -54,12 +54,12 @@ func.func @broadcast_vec2d_from_vec1d(%arg0: vector<2xf32>) -> vector<3x2xf32> { // CHECK-LABEL: func @broadcast_vec3d_from_vec1d // CHECK-SAME: %[[A:.*0]]: vector<2xf32> -// CHECK-DAG: %[[C0:.*]] = arith.constant dense<0.000000e+00> : vector<3x2xf32> -// CHECK-DAG: %[[C1:.*]] = arith.constant dense<0.000000e+00> : vector<4x3x2xf32> -// CHECK: %[[T0:.*]] = vector.insert %[[A]], %[[C0]] [0] : vector<2xf32> into vector<3x2xf32> +// CHECK-DAG: %[[U0:.*]] = ub.poison : vector<3x2xf32> +// CHECK-DAG: %[[U1:.*]] = ub.poison : vector<4x3x2xf32> +// CHECK: %[[T0:.*]] = vector.insert %[[A]], %[[U0]] [0] : vector<2xf32> into vector<3x2xf32> // CHECK: %[[T1:.*]] = vector.insert %[[A]], %[[T0]] [1] : vector<2xf32> into vector<3x2xf32> // CHECK: %[[T2:.*]] = vector.insert %[[A]], %[[T1]] [2] : vector<2xf32> into vector<3x2xf32> -// CHECK: %[[T3:.*]] = vector.insert %[[T2]], %[[C1]] [0] : vector<3x2xf32> into vector<4x3x2xf32> +// CHECK: %[[T3:.*]] = vector.insert %[[T2]], %[[U1]] [0] : vector<3x2xf32> into vector<4x3x2xf32> // CHECK: %[[T4:.*]] = vector.insert %[[T2]], %[[T3]] [1] : vector<3x2xf32> into vector<4x3x2xf32> // CHECK: %[[T5:.*]] = vector.insert %[[T2]], %[[T4]] [2] : vector<3x2xf32> into vector<4x3x2xf32> // CHECK: %[[T6:.*]] = vector.insert %[[T2]], %[[T5]] [3] : vector<3x2xf32> into vector<4x3x2xf32> @@ -72,8 +72,8 @@ func.func @broadcast_vec3d_from_vec1d(%arg0: vector<2xf32>) -> vector<4x3x2xf32> // CHECK-LABEL: func @broadcast_vec3d_from_vec2d // CHECK-SAME: %[[A:.*0]]: vector<3x2xf32> -// CHECK: %[[C0:.*]] = arith.constant dense<0.000000e+00> : vector<4x3x2xf32> -// CHECK: %[[T0:.*]] = vector.insert %[[A]], %[[C0]] [0] : vector<3x2xf32> into vector<4x3x2xf32> +// CHECK: %[[U0:.*]] = ub.poison : vector<4x3x2xf32> +// CHECK: %[[T0:.*]] = vector.insert %[[A]], %[[U0]] [0] : vector<3x2xf32> into vector<4x3x2xf32> // CHECK: %[[T1:.*]] = vector.insert %[[A]], %[[T0]] [1] : vector<3x2xf32> into vector<4x3x2xf32> // CHECK: %[[T2:.*]] = vector.insert %[[A]], %[[T1]] [2] : vector<3x2xf32> into vector<4x3x2xf32> // CHECK: %[[T3:.*]] = 
vector.insert %[[A]], %[[T2]] [3] : vector<3x2xf32> into vector<4x3x2xf32> @@ -97,9 +97,9 @@ func.func @broadcast_stretch(%arg0: vector<1xf32>) -> vector<4xf32> { // CHECK-LABEL: func @broadcast_stretch_at_start // CHECK-SAME: %[[A:.*0]]: vector<1x4xf32> -// CHECK: %[[C0:.*]] = arith.constant dense<0.000000e+00> : vector<3x4xf32> +// CHECK: %[[U0:.*]] = ub.poison : vector<3x4xf32> // CHECK: %[[T0:.*]] = vector.extract %[[A]][0] : vector<4xf32> from vector<1x4xf32> -// CHECK: %[[T1:.*]] = vector.insert %[[T0]], %[[C0]] [0] : vector<4xf32> into vector<3x4xf32> +// CHECK: %[[T1:.*]] = vector.insert %[[T0]], %[[U0]] [0] : vector<4xf32> into vector<3x4xf32> // CHECK: %[[T2:.*]] = vector.insert %[[T0]], %[[T1]] [1] : vector<4xf32> into vector<3x4xf32> // CHECK: %[[T3:.*]] = vector.insert %[[T0]], %[[T2]] [2] : vector<4xf32> into vector<3x4xf32> // CHECK: return %[[T3]] : vector<3x4xf32> @@ -111,10 +111,10 @@ func.func @broadcast_stretch_at_start(%arg0: vector<1x4xf32>) -> vector<3x4xf32> // CHECK-LABEL: func @broadcast_stretch_at_end // CHECK-SAME: %[[A:.*0]]: vector<4x1xf32> -// CHECK: %[[C0:.*]] = arith.constant dense<0.000000e+00> : vector<4x3xf32> +// CHECK: %[[U0:.*]] = ub.poison : vector<4x3xf32> // CHECK: %[[T0:.*]] = vector.extract %[[A]][0, 0] : f32 from vector<4x1xf32> // CHECK: %[[T2:.*]] = vector.splat %[[T0]] : vector<3xf32> -// CHECK: %[[T3:.*]] = vector.insert %[[T2]], %[[C0]] [0] : vector<3xf32> into vector<4x3xf32> +// CHECK: %[[T3:.*]] = vector.insert %[[T2]], %[[U0]] [0] : vector<3xf32> into vector<4x3xf32> // CHECK: %[[T4:.*]] = vector.extract %[[A]][1, 0] : f32 from vector<4x1xf32> // CHECK: %[[T6:.*]] = vector.splat %[[T4]] : vector<3xf32> // CHECK: %[[T7:.*]] = vector.insert %[[T6]], %[[T3]] [1] : vector<3xf32> into vector<4x3xf32> @@ -133,25 +133,25 @@ func.func @broadcast_stretch_at_end(%arg0: vector<4x1xf32>) -> vector<4x3xf32> { // CHECK-LABEL: func @broadcast_stretch_in_middle // CHECK-SAME: %[[A:.*0]]: vector<4x1x2xf32> -// CHECK: %[[C0:.*]] = arith.constant dense<0.000000e+00> : vector<4x3x2xf32> -// CHECK: %[[C1:.*]] = arith.constant dense<0.000000e+00> : vector<3x2xf32> +// CHECK: %[[U0:.*]] = ub.poison : vector<4x3x2xf32> +// CHECK: %[[U1:.*]] = ub.poison : vector<3x2xf32> // CHECK: %[[T0:.*]] = vector.extract %[[A]][0, 0] : vector<2xf32> from vector<4x1x2xf32> -// CHECK: %[[T2:.*]] = vector.insert %[[T0]], %[[C1]] [0] : vector<2xf32> into vector<3x2xf32> +// CHECK: %[[T2:.*]] = vector.insert %[[T0]], %[[U1]] [0] : vector<2xf32> into vector<3x2xf32> // CHECK: %[[T3:.*]] = vector.insert %[[T0]], %[[T2]] [1] : vector<2xf32> into vector<3x2xf32> // CHECK: %[[T4:.*]] = vector.insert %[[T0]], %[[T3]] [2] : vector<2xf32> into vector<3x2xf32> -// CHECK: %[[T5:.*]] = vector.insert %[[T4]], %[[C0]] [0] : vector<3x2xf32> into vector<4x3x2xf32> +// CHECK: %[[T5:.*]] = vector.insert %[[T4]], %[[U0]] [0] : vector<3x2xf32> into vector<4x3x2xf32> // CHECK: %[[T6:.*]] = vector.extract %[[A]][1, 0] : vector<2xf32> from vector<4x1x2xf32> -// CHECK: %[[T8:.*]] = vector.insert %[[T6]], %[[C1]] [0] : vector<2xf32> into vector<3x2xf32> +// CHECK: %[[T8:.*]] = vector.insert %[[T6]], %[[U1]] [0] : vector<2xf32> into vector<3x2xf32> // CHECK: %[[T9:.*]] = vector.insert %[[T6]], %[[T8]] [1] : vector<2xf32> into vector<3x2xf32> // CHECK: %[[T10:.*]] = vector.insert %[[T6]], %[[T9]] [2] : vector<2xf32> into vector<3x2xf32> // CHECK: %[[T11:.*]] = vector.insert %[[T10]], %[[T5]] [1] : vector<3x2xf32> into vector<4x3x2xf32> // CHECK: %[[T12:.*]] = vector.extract %[[A]][2, 0] : vector<2xf32> 
from vector<4x1x2xf32> -// CHECK: %[[T14:.*]] = vector.insert %[[T12]], %[[C1]] [0] : vector<2xf32> into vector<3x2xf32> +// CHECK: %[[T14:.*]] = vector.insert %[[T12]], %[[U1]] [0] : vector<2xf32> into vector<3x2xf32> // CHECK: %[[T15:.*]] = vector.insert %[[T12]], %[[T14]] [1] : vector<2xf32> into vector<3x2xf32> // CHECK: %[[T16:.*]] = vector.insert %[[T12]], %[[T15]] [2] : vector<2xf32> into vector<3x2xf32> // CHECK: %[[T17:.*]] = vector.insert %[[T16]], %[[T11]] [2] : vector<3x2xf32> into vector<4x3x2xf32> // CHECK: %[[T18:.*]] = vector.extract %[[A]][3, 0] : vector<2xf32> from vector<4x1x2xf32> -// CHECK: %[[T20:.*]] = vector.insert %[[T18]], %[[C1]] [0] : vector<2xf32> into vector<3x2xf32> +// CHECK: %[[T20:.*]] = vector.insert %[[T18]], %[[U1]] [0] : vector<2xf32> into vector<3x2xf32> // CHECK: %[[T21:.*]] = vector.insert %[[T18]], %[[T20]] [1] : vector<2xf32> into vector<3x2xf32> // CHECK: %[[T22:.*]] = vector.insert %[[T18]], %[[T21]] [2] : vector<2xf32> into vector<3x2xf32> // CHECK: %[[T23:.*]] = vector.insert %[[T22]], %[[T17]] [3] : vector<3x2xf32> into vector<4x3x2xf32> @@ -164,8 +164,8 @@ func.func @broadcast_stretch_in_middle(%arg0: vector<4x1x2xf32>) -> vector<4x3x2 // CHECK-LABEL: func.func @broadcast_scalable_duplication // CHECK-SAME: %[[ARG0:.*]]: vector<[32]xf32>) -// CHECK: %[[CST:.*]] = arith.constant dense<0.000000e+00> : vector<1x[32]xf32> -// CHECK: %[[RES:.*]] = vector.insert %[[ARG0]], %[[CST]] [0] : vector<[32]xf32> into vector<1x[32]xf32> +// CHECK: %[[INIT:.*]] = ub.poison : vector<1x[32]xf32> +// CHECK: %[[RES:.*]] = vector.insert %[[ARG0]], %[[INIT]] [0] : vector<[32]xf32> into vector<1x[32]xf32> // CHECK: return %[[RES]] : vector<1x[32]xf32> func.func @broadcast_scalable_duplication(%arg0: vector<[32]xf32>) -> vector<1x[32]xf32> { diff --git a/mlir/test/Dialect/Vector/vector-contract-to-matrix-intrinsics-transforms.mlir b/mlir/test/Dialect/Vector/vector-contract-to-matrix-intrinsics-transforms.mlir index 4867a416e5d14..08ac2ac5bb7d5 100644 --- a/mlir/test/Dialect/Vector/vector-contract-to-matrix-intrinsics-transforms.mlir +++ b/mlir/test/Dialect/Vector/vector-contract-to-matrix-intrinsics-transforms.mlir @@ -14,15 +14,15 @@ // CHECK-SAME: %[[A:[a-zA-Z0-9]*]]: vector<2x4xf32>, // CHECK-SAME: %[[B:[a-zA-Z0-9]*]]: vector<4x3xf32>, // CHECK-SAME: %[[C:[a-zA-Z0-9]*]]: vector<2x3xf32> -// CHECK-DAG: %[[vcst:.*]] = arith.constant dense<0.000000e+00> : vector<8xf32> -// CHECK-DAG: %[[vcst_0:.*]] = arith.constant dense<0.000000e+00> : vector<12xf32> -// CHECK-DAG: %[[vcst_1:.*]] = arith.constant dense<0.000000e+00> : vector<2x3xf32> +// CHECK-DAG: %[[ub:.*]] = ub.poison : vector<8xf32> +// CHECK-DAG: %[[ub_0:.*]] = ub.poison : vector<12xf32> +// CHECK-DAG: %[[ub_1:.*]] = ub.poison : vector<2x3xf32> // CHECK: %[[a0:.*]] = vector.extract %[[A]][0] : vector<4xf32> from vector<2x4xf32> -// CHECK: %[[a1:.*]] = vector.insert_strided_slice %[[a0]], %[[vcst]] {offsets = [0], strides = [1]} : vector<4xf32> into vector<8xf32> +// CHECK: %[[a1:.*]] = vector.insert_strided_slice %[[a0]], %[[ub]] {offsets = [0], strides = [1]} : vector<4xf32> into vector<8xf32> // CHECK: %[[a2:.*]] = vector.extract %[[A]][1] : vector<4xf32> from vector<2x4xf32> // CHECK: %[[a3:.*]] = vector.insert_strided_slice %[[a2]], %[[a1]] {offsets = [4], strides = [1]} : vector<4xf32> into vector<8xf32> // CHECK: %[[b0:.*]] = vector.extract %[[B]][0] : vector<3xf32> from vector<4x3xf32> -// CHECK: %[[b1:.*]] = vector.insert_strided_slice %[[b0]], %[[vcst_0]] {offsets = [0], strides = [1]} : 
vector<3xf32> into vector<12xf32> +// CHECK: %[[b1:.*]] = vector.insert_strided_slice %[[b0]], %[[ub_0]] {offsets = [0], strides = [1]} : vector<3xf32> into vector<12xf32> // CHECK: %[[b2:.*]] = vector.extract %[[B]][1] : vector<3xf32> from vector<4x3xf32> // CHECK: %[[b3:.*]] = vector.insert_strided_slice %[[b2]], %[[b1]] {offsets = [3], strides = [1]} : vector<3xf32> into vector<12xf32> // CHECK: %[[b4:.*]] = vector.extract %[[B]][2] : vector<3xf32> from vector<4x3xf32> @@ -31,7 +31,7 @@ // CHECK: %[[b7:.*]] = vector.insert_strided_slice %[[b6]], %[[b5]] {offsets = [9], strides = [1]} : vector<3xf32> into vector<12xf32> // CHECK: %[[mm1:.*]] = vector.matrix_multiply %[[a3]], %[[b7]] {lhs_columns = 4 : i32, lhs_rows = 2 : i32, rhs_columns = 3 : i32} : (vector<8xf32>, vector<12xf32>) -> vector<6xf32> // CHECK: %[[mm2:.*]] = vector.extract_strided_slice %[[mm1]] {offsets = [0], sizes = [3], strides = [1]} : vector<6xf32> to vector<3xf32> -// CHECK: %[[mm3:.*]] = vector.insert %[[mm2]], %[[vcst_1]] [0] : vector<3xf32> into vector<2x3xf32> +// CHECK: %[[mm3:.*]] = vector.insert %[[mm2]], %[[ub_1]] [0] : vector<3xf32> into vector<2x3xf32> // CHECK: %[[mm4:.*]] = vector.extract_strided_slice %[[mm1]] {offsets = [3], sizes = [3], strides = [1]} : vector<6xf32> to vector<3xf32> // CHECK: %[[mm5:.*]] = vector.insert %[[mm4]], %[[mm3]] [1] : vector<3xf32> into vector<2x3xf32> // CHECK: %[[mm6:.*]] = arith.addf %[[C]], %[[mm5]] : vector<2x3xf32> diff --git a/mlir/test/Dialect/Vector/vector-shape-cast-lowering-scalable-vectors.mlir b/mlir/test/Dialect/Vector/vector-shape-cast-lowering-scalable-vectors.mlir index fde6ce9102446..f4becad3c79c1 100644 --- a/mlir/test/Dialect/Vector/vector-shape-cast-lowering-scalable-vectors.mlir +++ b/mlir/test/Dialect/Vector/vector-shape-cast-lowering-scalable-vectors.mlir @@ -7,9 +7,9 @@ // CHECK-SAME: %[[arg0:.*]]: vector<2x1x[4]xi32> func.func @i32_3d_to_1d_last_dim_scalable(%arg0: vector<2x1x[4]xi32>) -> vector<[8]xi32> { - // CHECK-NEXT: %[[cst:.*]] = arith.constant dense<0> : vector<[8]xi32> + // CHECK-NEXT: %[[ub:.*]] = ub.poison : vector<[8]xi32> // CHECK-NEXT: %[[subvec0:.*]] = vector.extract %[[arg0]][0, 0] : vector<[4]xi32> from vector<2x1x[4]xi32> - // CHECK-NEXT: %[[res0:.*]] = vector.scalable.insert %[[subvec0]], %[[cst]][0] : vector<[4]xi32> into vector<[8]xi32> + // CHECK-NEXT: %[[res0:.*]] = vector.scalable.insert %[[subvec0]], %[[ub]][0] : vector<[4]xi32> into vector<[8]xi32> // CHECK-NEXT: %[[subvec1:.*]] = vector.extract %[[arg0]][1, 0] : vector<[4]xi32> from vector<2x1x[4]xi32> // CHECK-NEXT: %[[res1:.*]] = vector.scalable.insert %[[subvec1]], %[[res0]][4] : vector<[4]xi32> into vector<[8]xi32> %flat = vector.shape_cast %arg0 : vector<2x1x[4]xi32> to vector<[8]xi32> @@ -22,9 +22,9 @@ func.func @i32_3d_to_1d_last_dim_scalable(%arg0: vector<2x1x[4]xi32>) -> vector< // CHECK-LABEL: i32_1d_to_3d_last_dim_scalable // CHECK-SAME: %[[arg0:.*]]: vector<[8]xi32> func.func @i32_1d_to_3d_last_dim_scalable(%arg0: vector<[8]xi32>) -> vector<2x1x[4]xi32> { - // CHECK-NEXT: %[[cst:.*]] = arith.constant dense<0> : vector<2x1x[4]xi32> + // CHECK-NEXT: %[[ub:.*]] = ub.poison : vector<2x1x[4]xi32> // CHECK-NEXT: %[[subvec0:.*]] = vector.scalable.extract %[[arg0]][0] : vector<[4]xi32> from vector<[8]xi32> - // CHECK-NEXT: %[[res0:.*]] = vector.insert %[[subvec0]], %[[cst]] [0, 0] : vector<[4]xi32> into vector<2x1x[4]xi32> + // CHECK-NEXT: %[[res0:.*]] = vector.insert %[[subvec0]], %[[ub]] [0, 0] : vector<[4]xi32> into vector<2x1x[4]xi32> // CHECK-NEXT: %[[subvec1:.*]] 
= vector.scalable.extract %[[arg0]][4] : vector<[4]xi32> from vector<[8]xi32> // CHECK-NEXT: %[[res1:.*]] = vector.insert %[[subvec1]], %[[res0]] [1, 0] : vector<[4]xi32> into vector<2x1x[4]xi32> %unflat = vector.shape_cast %arg0 : vector<[8]xi32> to vector<2x1x[4]xi32> @@ -37,9 +37,9 @@ func.func @i32_1d_to_3d_last_dim_scalable(%arg0: vector<[8]xi32>) -> vector<2x1x // CHECK-LABEL: i8_2d_to_1d_last_dim_scalable // CHECK-SAME: %[[arg0:.*]]: vector<4x[8]xi8> func.func @i8_2d_to_1d_last_dim_scalable(%arg0: vector<4x[8]xi8>) -> vector<[32]xi8> { - // CHECK-NEXT: %[[cst:.*]] = arith.constant dense<0> : vector<[32]xi8> + // CHECK-NEXT: %[[ub:.*]] = ub.poison : vector<[32]xi8> // CHECK-NEXT: %[[subvec0:.*]] = vector.extract %[[arg0]][0] : vector<[8]xi8> from vector<4x[8]xi8> - // CHECK-NEXT: %[[res0:.*]] = vector.scalable.insert %[[subvec0]], %[[cst]][0] : vector<[8]xi8> into vector<[32]xi8> + // CHECK-NEXT: %[[res0:.*]] = vector.scalable.insert %[[subvec0]], %[[ub]][0] : vector<[8]xi8> into vector<[32]xi8> // CHECK-NEXT: %[[subvec1:.*]] = vector.extract %[[arg0]][1] : vector<[8]xi8> from vector<4x[8]xi8> // CHECK-NEXT: %[[res1:.*]] = vector.scalable.insert %[[subvec1]], %[[res0]][8] : vector<[8]xi8> into vector<[32]xi8> // CHECK-NEXT: %[[subvec2:.*]] = vector.extract %[[arg0]][2] : vector<[8]xi8> from vector<4x[8]xi8> @@ -56,9 +56,9 @@ func.func @i8_2d_to_1d_last_dim_scalable(%arg0: vector<4x[8]xi8>) -> vector<[32] // CHECK-LABEL: i8_1d_to_2d_last_dim_scalable // CHECK-SAME: %[[arg0:.*]]: vector<[32]xi8> func.func @i8_1d_to_2d_last_dim_scalable(%arg0: vector<[32]xi8>) -> vector<4x[8]xi8> { - // CHECK-NEXT: %[[cst:.*]] = arith.constant dense<0> : vector<4x[8]xi8> + // CHECK-NEXT: %[[ub:.*]] = ub.poison : vector<4x[8]xi8> // CHECK-NEXT: %[[subvec0:.*]] = vector.scalable.extract %[[arg0]][0] : vector<[8]xi8> from vector<[32]xi8> - // CHECK-NEXT: %[[res0:.*]] = vector.insert %[[subvec0]], %[[cst]] [0] : vector<[8]xi8> into vector<4x[8]xi8> + // CHECK-NEXT: %[[res0:.*]] = vector.insert %[[subvec0]], %[[ub]] [0] : vector<[8]xi8> into vector<4x[8]xi8> // CHECK-NEXT: %[[subvec1:.*]] = vector.scalable.extract %[[arg0]][8] : vector<[8]xi8> from vector<[32]xi8> // CHECK-NEXT: %[[res1:.*]] = vector.insert %[[subvec1]], %[[res0]] [1] : vector<[8]xi8> into vector<4x[8]xi8> // CHECK-NEXT: %[[subvec2:.*]] = vector.scalable.extract %[[arg0]][16] : vector<[8]xi8> from vector<[32]xi8> @@ -75,9 +75,9 @@ func.func @i8_1d_to_2d_last_dim_scalable(%arg0: vector<[32]xi8>) -> vector<4x[8] // CHECK-LABEL: f32_permute_leading_non_scalable_dims // CHECK-SAME: %[[arg0:.*]]: vector<2x3x[4]xf32> func.func @f32_permute_leading_non_scalable_dims(%arg0: vector<2x3x[4]xf32>) -> vector<3x2x[4]xf32> { - // CHECK-NEXT: %[[cst:.*]] = arith.constant dense<0.000000e+00> : vector<3x2x[4]xf32> + // CHECK-NEXT: %[[ub:.*]] = ub.poison : vector<3x2x[4]xf32> // CHECK-NEXT: %[[subvec0:.*]] = vector.extract %[[arg0]][0, 0] : vector<[4]xf32> from vector<2x3x[4]xf32> - // CHECK-NEXT: %[[res0:.*]] = vector.insert %[[subvec0]], %[[cst]] [0, 0] : vector<[4]xf32> into vector<3x2x[4]xf32> + // CHECK-NEXT: %[[res0:.*]] = vector.insert %[[subvec0]], %[[ub]] [0, 0] : vector<[4]xf32> into vector<3x2x[4]xf32> // CHECK-NEXT: %[[subvec1:.*]] = vector.extract %[[arg0]][0, 1] : vector<[4]xf32> from vector<2x3x[4]xf32> // CHECK-NEXT: %[[res1:.*]] = vector.insert %[[subvec1]], %[[res0]] [0, 1] : vector<[4]xf32> into vector<3x2x[4]xf32> // CHECK-NEXT: %[[subvec2:.*]] = vector.extract %[[arg0]][0, 2] : vector<[4]xf32> from vector<2x3x[4]xf32> @@ -99,9 +99,9 @@ 
func.func @f32_permute_leading_non_scalable_dims(%arg0: vector<2x3x[4]xf32>) -> // CHECK-SAME: %[[arg0:.*]]: vector<2x2x[2]xf64> func.func @f64_flatten_leading_non_scalable_dims(%arg0: vector<2x2x[2]xf64>) -> vector<4x[2]xf64> { - // CHECK-NEXT: %[[cst:.*]] = arith.constant dense<0.000000e+00> : vector<4x[2]xf64> + // CHECK-NEXT: %[[ub:.*]] = ub.poison : vector<4x[2]xf64> // CHECK-NEXT: %[[subvec0:.*]] = vector.extract %[[arg0]][0, 0] : vector<[2]xf64> from vector<2x2x[2]xf64> - // CHECK-NEXT: %[[res0:.*]] = vector.insert %[[subvec0]], %[[cst]] [0] : vector<[2]xf64> into vector<4x[2]xf64> + // CHECK-NEXT: %[[res0:.*]] = vector.insert %[[subvec0]], %[[ub]] [0] : vector<[2]xf64> into vector<4x[2]xf64> // CHECK-NEXT: %[[subvec1:.*]] = vector.extract %[[arg0]][0, 1] : vector<[2]xf64> from vector<2x2x[2]xf64> // CHECK-NEXT: %[[res1:.*]] = vector.insert %[[subvec1]], %[[res0]] [1] : vector<[2]xf64> into vector<4x[2]xf64> // CHECK-NEXT: %[[subvec2:.*]] = vector.extract %[[arg0]][1, 0] : vector<[2]xf64> from vector<2x2x[2]xf64> @@ -109,7 +109,7 @@ func.func @f64_flatten_leading_non_scalable_dims(%arg0: vector<2x2x[2]xf64>) -> // CHECK-NEXT: %[[subvec3:.*]] = vector.extract %[[arg0]][1, 1] : vector<[2]xf64> from vector<2x2x[2]xf64> // CHECK-NEXT: %[[res3:.*]] = vector.insert %[[subvec3]], %[[res2]] [3] : vector<[2]xf64> into vector<4x[2]xf64> %res = vector.shape_cast %arg0: vector<2x2x[2]xf64> to vector<4x[2]xf64> - // CHECK-NEXT: return %7 : vector<4x[2]xf64> + // CHECK-NEXT: return %[[res3:.*]] : vector<4x[2]xf64> return %res : vector<4x[2]xf64> } @@ -119,10 +119,10 @@ func.func @f64_flatten_leading_non_scalable_dims(%arg0: vector<2x2x[2]xf64>) -> // CHECK-SAME: %[[arg0:.*]]: vector<3x[4]xf32> func.func @f32_reduce_trailing_scalable_dim(%arg0: vector<3x[4]xf32>) -> vector<6x[2]xf32> { - // CHECK-NEXT: %[[cst:.*]] = arith.constant dense<0.000000e+00> : vector<6x[2]xf32> + // CHECK-NEXT: %[[ub:.*]] = ub.poison : vector<6x[2]xf32> // CHECK-NEXT: %[[srcvec0:.*]] = vector.extract %[[arg0]][0] : vector<[4]xf32> from vector<3x[4]xf32> // CHECK-NEXT: %[[subvec0:.*]] = vector.scalable.extract %[[srcvec0]][0] : vector<[2]xf32> from vector<[4]xf32> - // CHECK-NEXT: %[[res0:.*]] = vector.insert %[[subvec0]], %[[cst]] [0] : vector<[2]xf32> into vector<6x[2]xf32> + // CHECK-NEXT: %[[res0:.*]] = vector.insert %[[subvec0]], %[[ub]] [0] : vector<[2]xf32> into vector<6x[2]xf32> // CHECK-NEXT: %[[subvec1:.*]] = vector.scalable.extract %[[srcvec0]][2] : vector<[2]xf32> from vector<[4]xf32> // CHECK-NEXT: %[[res1:.*]] = vector.insert %[[subvec1]], %[[res0]] [1] : vector<[2]xf32> into vector<6x[2]xf32> // CHECK-NEXT: %[[srcvec1:.*]] = vector.extract %[[arg0]][1] : vector<[4]xf32> from vector<3x[4]xf32> @@ -146,16 +146,15 @@ func.func @f32_reduce_trailing_scalable_dim(%arg0: vector<3x[4]xf32>) -> vector< // CHECK-SAME: %[[arg0:.*]]: vector<4x[2]xf32> func.func @f32_increase_trailing_scalable_dim(%arg0: vector<4x[2]xf32>) -> vector<2x[4]xf32> { - // CHECK-NEXT: %[[cst:.*]] = arith.constant dense<0.000000e+00> : vector<2x[4]xf32> + // CHECK-DAG: %[[ub0:.*]] = ub.poison : vector<2x[4]xf32> + // CHECK-DAG: %[[ub1:.*]] = ub.poison : vector<[4]xf32> // CHECK-NEXT: %[[subvec0:.*]] = vector.extract %[[arg0]][0] : vector<[2]xf32> from vector<4x[2]xf32> - // CHECK-NEXT: %[[resvec0:.*]] = vector.extract %[[cst]][0] : vector<[4]xf32> from vector<2x[4]xf32> - // CHECK-NEXT: %[[resvec1:.*]] = vector.scalable.insert %[[subvec0]], %[[resvec0]][0] : vector<[2]xf32> into vector<[4]xf32> + // CHECK-NEXT: %[[resvec1:.*]] = 
vector.scalable.insert %[[subvec0]], %[[ub1]][0] : vector<[2]xf32> into vector<[4]xf32> // CHECK-NEXT: %[[subvec1:.*]] = vector.extract %[[arg0]][1] : vector<[2]xf32> from vector<4x[2]xf32> // CHECK-NEXT: %[[resvec2:.*]] = vector.scalable.insert %[[subvec1]], %[[resvec1]][2] : vector<[2]xf32> into vector<[4]xf32> - // CHECK-NEXT: %[[res0:.*]] = vector.insert %[[resvec2]], %[[cst]] [0] : vector<[4]xf32> into vector<2x[4]xf32> + // CHECK-NEXT: %[[res0:.*]] = vector.insert %[[resvec2]], %[[ub0]] [0] : vector<[4]xf32> into vector<2x[4]xf32> // CHECK-NEXT: %[[subvec3:.*]] = vector.extract %[[arg0]][2] : vector<[2]xf32> from vector<4x[2]xf32> - // CHECK-NEXT: %[[resvec3:.*]] = vector.extract %[[cst]][1] : vector<[4]xf32> from vector<2x[4]xf32> - // CHECK-NEXT: %[[resvec4:.*]] = vector.scalable.insert %[[subvec3]], %[[resvec3]][0] : vector<[2]xf32> into vector<[4]xf32> + // CHECK-NEXT: %[[resvec4:.*]] = vector.scalable.insert %[[subvec3]], %[[ub1]][0] : vector<[2]xf32> into vector<[4]xf32> // CHECK-NEXT: %[[subvec4:.*]] = vector.extract %[[arg0]][3] : vector<[2]xf32> from vector<4x[2]xf32> // CHECK-NEXT: %[[resvec5:.*]] = vector.scalable.insert %[[subvec4]], %[[resvec4]][2] : vector<[2]xf32> into vector<[4]xf32> // CHECK-NEXT: %[[res1:.*]] = vector.insert %[[resvec5]], %[[res0]] [1] : vector<[4]xf32> into vector<2x[4]xf32> diff --git a/mlir/test/Dialect/Vector/vector-shape-cast-lowering-transforms.mlir b/mlir/test/Dialect/Vector/vector-shape-cast-lowering-transforms.mlir index b4c52d5533116..ab30acf68b30b 100644 --- a/mlir/test/Dialect/Vector/vector-shape-cast-lowering-transforms.mlir +++ b/mlir/test/Dialect/Vector/vector-shape-cast-lowering-transforms.mlir @@ -22,11 +22,11 @@ func.func @cancel_shape_cast(%arg0: vector<16xf32>) -> vector<16xf32> { // llvm.matrix operations // CHECK-LABEL: func @shape_casts func.func @shape_casts(%a: vector<2x2xf32>) -> (vector<4xf32>, vector<2x2xf32>) { - // CHECK-DAG: %[[cst22:.*]] = arith.constant dense<0.000000e+00> : vector<2x2xf32> - // CHECK-DAG: %[[cst:.*]] = arith.constant dense<0.000000e+00> : vector<4xf32> + // CHECK-DAG: %[[ub22:.*]] = ub.poison : vector<2x2xf32> + // CHECK-DAG: %[[ub:.*]] = ub.poison : vector<4xf32> // CHECK: %[[ex0:.*]] = vector.extract %{{.*}}[0] : vector<2xf32> from vector<2x2xf32> // - // CHECK: %[[in0:.*]] = vector.insert_strided_slice %[[ex0]], %[[cst]] + // CHECK: %[[in0:.*]] = vector.insert_strided_slice %[[ex0]], %[[ub]] // CHECK-SAME: {offsets = [0], strides = [1]} : vector<2xf32> into vector<4xf32> // // CHECK: %[[ex1:.*]] = vector.extract %{{.*}}[1] : vector<2xf32> from vector<2x2xf32> @@ -42,7 +42,7 @@ func.func @shape_casts(%a: vector<2x2xf32>) -> (vector<4xf32>, vector<2x2xf32>) // CHECK-SAME: {offsets = [0], sizes = [2], strides = [1]} : // CHECK-SAME: vector<4xf32> to vector<2xf32> // - // CHECK: %[[res0:.*]] = vector.insert %[[ss0]], %[[cst22]] [0] : + // CHECK: %[[res0:.*]] = vector.insert %[[ss0]], %[[ub22]] [0] : // CHECK-SAME: vector<2xf32> into vector<2x2xf32> // // CHECK: %[[s2:.*]] = vector.extract_strided_slice %[[add]] @@ -59,9 +59,9 @@ func.func @shape_casts(%a: vector<2x2xf32>) -> (vector<4xf32>, vector<2x2xf32>) // CHECK-LABEL: func @shape_cast_2d2d // CHECK-SAME: %[[A:.*]]: vector<3x2xf32> -// CHECK: %[[C:.*]] = arith.constant dense<0.000000e+00> : vector<2x3xf32> +// CHECK: %[[UB:.*]] = ub.poison : vector<2x3xf32> // CHECK: %[[T0:.*]] = vector.extract %[[A]][0, 0] : f32 from vector<3x2xf32> -// CHECK: %[[T1:.*]] = vector.insert %[[T0]], %[[C]] [0, 0] : f32 into vector<2x3xf32> +// CHECK: %[[T1:.*]] = 
vector.insert %[[T0]], %[[UB]] [0, 0] : f32 into vector<2x3xf32> // CHECK: %[[T2:.*]] = vector.extract %[[A]][0, 1] : f32 from vector<3x2xf32> // CHECK: %[[T3:.*]] = vector.insert %[[T2]], %[[T1]] [0, 1] : f32 into vector<2x3xf32> // CHECK: %[[T4:.*]] = vector.extract %[[A]][1, 0] : f32 from vector<3x2xf32> @@ -81,9 +81,9 @@ func.func @shape_cast_2d2d(%arg0 : vector<3x2xf32>) -> vector<2x3xf32> { // CHECK-LABEL: func @shape_cast_3d1d // CHECK-SAME: %[[A:.*]]: vector<1x3x2xf32> -// CHECK: %[[C:.*]] = arith.constant dense<0.000000e+00> : vector<6xf32> +// CHECK: %[[UB:.*]] = ub.poison : vector<6xf32> // CHECK: %[[T0:.*]] = vector.extract %[[A]][0, 0] : vector<2xf32> from vector<1x3x2xf32> -// CHECK: %[[T1:.*]] = vector.insert_strided_slice %[[T0]], %[[C]] +// CHECK: %[[T1:.*]] = vector.insert_strided_slice %[[T0]], %[[UB]] // CHECK-SAME: {offsets = [0], strides = [1]} : vector<2xf32> into vector<6xf32> // CHECK: %[[T2:.*]] = vector.extract %[[A]][0, 1] : vector<2xf32> from vector<1x3x2xf32> // CHECK: %[[T3:.*]] = vector.insert_strided_slice %[[T2]], %[[T1]] @@ -100,10 +100,10 @@ func.func @shape_cast_3d1d(%arg0 : vector<1x3x2xf32>) -> vector<6xf32> { // CHECK-LABEL: func @shape_cast_1d3d // CHECK-SAME: %[[A:.*]]: vector<6xf32> -// CHECK: %[[C:.*]] = arith.constant dense<0.000000e+00> : vector<2x1x3xf32> +// CHECK: %[[UB:.*]] = ub.poison : vector<2x1x3xf32> // CHECK: %[[T0:.*]] = vector.extract_strided_slice %[[A]] // CHECK-SAME: {offsets = [0], sizes = [3], strides = [1]} : vector<6xf32> to vector<3xf32> -// CHECK: %[[T1:.*]] = vector.insert %[[T0]], %[[C]] [0, 0] : vector<3xf32> into vector<2x1x3xf32> +// CHECK: %[[T1:.*]] = vector.insert %[[T0]], %[[UB]] [0, 0] : vector<3xf32> into vector<2x1x3xf32> // CHECK: %[[T2:.*]] = vector.extract_strided_slice %[[A]] // CHECK: {offsets = [3], sizes = [3], strides = [1]} : vector<6xf32> to vector<3xf32> // CHECK: %[[T3:.*]] = vector.insert %[[T2]], %[[T1]] [1, 0] : vector<3xf32> into vector<2x1x3xf32> @@ -115,11 +115,11 @@ func.func @shape_cast_1d3d(%arg0 : vector<6xf32>) -> vector<2x1x3xf32> { } // CHECK-LABEL: func.func @shape_cast_0d1d( -// CHECK-SAME: %[[VAL_0:.*]]: vector) -> vector<1xf32> { -// CHECK: %[[VAL_1:.*]] = arith.constant dense<0.000000e+00> : vector<1xf32> -// CHECK: %[[VAL_2:.*]] = vector.extractelement %[[VAL_0]][] : vector -// CHECK: %[[VAL_3:.*]] = vector.insert %[[VAL_2]], %[[VAL_1]] [0] : f32 into vector<1xf32> -// CHECK: return %[[VAL_3]] : vector<1xf32> +// CHECK-SAME: %[[ARG0:.*]]: vector) -> vector<1xf32> { +// CHECK: %[[UB:.*]] = ub.poison : vector<1xf32> +// CHECK: %[[EXTRACT0:.*]] = vector.extractelement %[[ARG0]][] : vector +// CHECK: %[[RES:.*]] = vector.insert %[[EXTRACT0]], %[[UB]] [0] : f32 into vector<1xf32> +// CHECK: return %[[RES]] : vector<1xf32> // CHECK: } func.func @shape_cast_0d1d(%arg0 : vector) -> vector<1xf32> { @@ -128,11 +128,11 @@ func.func @shape_cast_0d1d(%arg0 : vector) -> vector<1xf32> { } // CHECK-LABEL: func.func @shape_cast_1d0d( -// CHECK-SAME: %[[VAL_0:.*]]: vector<1xf32>) -> vector { -// CHECK: %[[VAL_1:.*]] = arith.constant dense<0.000000e+00> : vector -// CHECK: %[[VAL_2:.*]] = vector.extract %[[VAL_0]][0] : f32 from vector<1xf32> -// CHECK: %[[VAL_3:.*]] = vector.insertelement %[[VAL_2]], %[[VAL_1]][] : vector -// CHECK: return %[[VAL_3]] : vector +// CHECK-SAME: %[[ARG0:.*]]: vector<1xf32>) -> vector { +// CHECK: %[[UB:.*]] = ub.poison : vector +// CHECK: %[[EXTRACT0:.*]] = vector.extract %[[ARG0]][0] : f32 from vector<1xf32> +// CHECK: %[[RES:.*]] = vector.insertelement %[[EXTRACT0]], 
%[[UB]][] : vector +// CHECK: return %[[RES]] : vector // CHECK: } func.func @shape_cast_1d0d(%arg0 : vector<1xf32>) -> vector { diff --git a/mlir/test/Dialect/Vector/vector-transpose-lowering.mlir b/mlir/test/Dialect/Vector/vector-transpose-lowering.mlir index 219a72df52a19..83395504e8c74 100644 --- a/mlir/test/Dialect/Vector/vector-transpose-lowering.mlir +++ b/mlir/test/Dialect/Vector/vector-transpose-lowering.mlir @@ -2,9 +2,9 @@ // CHECK-LABEL: func @transpose23 // CHECK-SAME: %[[A:.*]]: vector<2x3xf32> -// CHECK: %[[Z:.*]] = arith.constant dense<0.000000e+00> : vector<3x2xf32> +// CHECK: %[[UB:.*]] = ub.poison : vector<3x2xf32> // CHECK: %[[T0:.*]] = vector.extract %[[A]][0, 0] : f32 from vector<2x3xf32> -// CHECK: %[[T1:.*]] = vector.insert %[[T0]], %[[Z]] [0, 0] : f32 into vector<3x2xf32> +// CHECK: %[[T1:.*]] = vector.insert %[[T0]], %[[UB]] [0, 0] : f32 into vector<3x2xf32> // CHECK: %[[T2:.*]] = vector.extract %[[A]][0, 1] : f32 from vector<2x3xf32> // CHECK: %[[T3:.*]] = vector.insert %[[T2]], %[[T1]] [1, 0] : f32 into vector<3x2xf32> // CHECK: %[[T4:.*]] = vector.extract %[[A]][0, 2] : f32 from vector<2x3xf32> diff --git a/mlir/test/Dialect/XeGPU/XeGPUOps.mlir b/mlir/test/Dialect/XeGPU/XeGPUOps.mlir index dcd6b01974cf3..8af1b600ad0a4 100644 --- a/mlir/test/Dialect/XeGPU/XeGPUOps.mlir +++ b/mlir/test/Dialect/XeGPU/XeGPUOps.mlir @@ -97,6 +97,16 @@ gpu.func @test_load_nd_vc_3(%src: memref<24x32xf32>) { gpu.return } +// CHECK: func @test_load_nd_vc_4(%[[arg0:.*]]: memref<24x32xf32>) { +gpu.func @test_load_nd_vc_4(%src: memref<24x32xf32>) { + // CHECK: %[[R0:.*]] = xegpu.create_nd_tdesc %arg0[0, 0] : memref<24x32xf32> -> !xegpu.tensor_desc<32xf32, #xegpu.sg_map> + %1 = xegpu.create_nd_tdesc %src[0, 0] : memref<24x32xf32> -> + !xegpu.tensor_desc<32xf32, #xegpu.sg_map> + // CHECK: %[[R1:.*]] = xegpu.load_nd %[[R0]] <{l1_hint = #xegpu.cache_hint, l2_hint = #xegpu.cache_hint}> : !xegpu.tensor_desc<32xf32, #xegpu.sg_map> -> vector<2xf32> + %2 = xegpu.load_nd %1 <{l1_hint = #xegpu.cache_hint, l2_hint = #xegpu.cache_hint}> : !xegpu.tensor_desc<32xf32, #xegpu.sg_map> -> vector<2xf32> + gpu.return +} + // CHECK: func @test_store_nd_vc(%[[arg0:.*]]: memref<24x32xf16>) { gpu.func @test_store_nd_vc(%dst: memref<24x32xf16>) { // CHECK: %[[C:.*]] = arith.constant dense<1.000000e+00> : vector<24x32xf16> @@ -132,6 +142,18 @@ gpu.func @test_store_nd_vc_3(%src: memref<24x32xf16>) { gpu.return } +// CHECK: func @test_store_nd_vc_4(%[[arg0:.*]]: memref<24x32xf16>) { +gpu.func @test_store_nd_vc_4(%src: memref<24x32xf16>) { + // CHECK: %[[C:.*]] = arith.constant dense<1.000000e+00> : vector<2xf16> + %1 = arith.constant dense<1.0>: vector<2xf16> + // CHECK: %[[R0:.*]] = xegpu.create_nd_tdesc %arg0[0, 0] : memref<24x32xf16> -> !xegpu.tensor_desc<32xf16, #xegpu.sg_map> + %2 = xegpu.create_nd_tdesc %src[0, 0] : memref<24x32xf16> -> + !xegpu.tensor_desc<32xf16, #xegpu.sg_map> + // CHECK: xegpu.store_nd %[[C]], %[[R0]] <{l1_hint = #xegpu.cache_hint, l2_hint = #xegpu.cache_hint}> : vector<2xf16>, !xegpu.tensor_desc<32xf16, #xegpu.sg_map> + xegpu.store_nd %1, %2 <{l1_hint = #xegpu.cache_hint, l2_hint = #xegpu.cache_hint}>: vector<2xf16>, !xegpu.tensor_desc<32xf16, #xegpu.sg_map> + gpu.return +} + // CHECK: gpu.func @test_create_update_nd_tdesc_vc(%[[arg0:.*]]: memref<24x32xf32>) { gpu.func @test_create_update_nd_tdesc_vc(%src: memref<24x32xf32>) { // CHECK: %[[REG:.*]] = xegpu.create_nd_tdesc %arg0[0, 0] : memref<24x32xf32> -> !xegpu.tensor_desc<8x16xf32> diff --git a/mlir/test/Dialect/XeGPU/invalid.mlir 
b/mlir/test/Dialect/XeGPU/invalid.mlir index 201f72120cf2c..9162e0012f6d5 100644 --- a/mlir/test/Dialect/XeGPU/invalid.mlir +++ b/mlir/test/Dialect/XeGPU/invalid.mlir @@ -17,7 +17,7 @@ func.func @test_create_nd_tdesc_vc_2(%src: memref<24x32xf32>) { // ----- func.func @test_create_nd_tdesc_vc_3(%src: memref<2x24x32xf32, 3>) { - // expected-error@+1 {{SLM is not supported for 2D Block TensorDesc}} + // expected-error@+1 {{SLM is not supported for 2D block tensor}} %1 = xegpu.create_nd_tdesc %src[0, 0, 0] : memref<2x24x32xf32, 3> -> !xegpu.tensor_desc<8x16xf32, #xegpu.block_tdesc_attr> return } @@ -82,16 +82,33 @@ func.func @test_load_nd_vc_4(%src: memref<24x32xf32>) { %1 = xegpu.create_nd_tdesc %src[0, 0] : memref<24x32xf32> -> !xegpu.tensor_desc<8x16xf32, #xegpu.sg_map> // expected-error@+1 {{Result shape doesn't match TensorDesc shape.}} - %2 = xegpu.load_nd %1 <{l1_hint = #xegpu.cache_hint, l2_hint = #xegpu.cache_hint}> : !xegpu.tensor_desc<8x16xf32, #xegpu.sg_map> -> vector<8x2xf32> + %2 = xegpu.load_nd %1 <{l1_hint = #xegpu.cache_hint, + l2_hint = #xegpu.cache_hint}> + : !xegpu.tensor_desc<8x16xf32, #xegpu.sg_map> + -> vector<8x2xf32> return } // ----- func.func @test_load_nd_vc_5(%src: memref<24x32xf32>) { %1 = xegpu.create_nd_tdesc %src[0, 0] : memref<24x32xf32> -> - !xegpu.tensor_desc<16xf32, #xegpu.sg_map> + !xegpu.tensor_desc<16xf32, #xegpu.sg_map> // expected-error@+1 {{Result shape doesn't match TensorDesc shape.}} - %2 = xegpu.load_nd %1: !xegpu.tensor_desc<16xf32, #xegpu.sg_map> -> vector<16xf32> + %2 = xegpu.load_nd %1 <{l1_hint = #xegpu.cache_hint, + l2_hint = #xegpu.cache_hint}> + : !xegpu.tensor_desc<16xf32, #xegpu.sg_map> + -> vector<8xf32> + return +} + +// ----- +func.func @test_load_nd_vc_6(%src: memref<24x32xf32>) { + %1 = xegpu.create_nd_tdesc %src[0, 0] : memref<24x32xf32> -> + !xegpu.tensor_desc<8x16xf32> + // expected-error@+1 {{Result shape doesn't match TensorDesc shape.}} + %2 = xegpu.load_nd %1 <{l1_hint = #xegpu.cache_hint, + l2_hint = #xegpu.cache_hint}> + : !xegpu.tensor_desc<8x16xf32> -> vector<8x1xf32> return } @@ -116,6 +133,35 @@ func.func @test_store_nd_vc_2(%dst: memref<16xf16>) { return } +// ----- +func.func @test_store_nd_vc_3(%dst: memref<24x32xf32>, %data: vector<8x2xf32>) { + %1 = xegpu.create_nd_tdesc %dst[0, 0] : memref<24x32xf32> -> + !xegpu.tensor_desc<8x16xf32, #xegpu.sg_map> + // expected-error@+1 {{Result shape doesn't match TensorDesc shape.}} + xegpu.store_nd %data, %1 + : vector<8x2xf32>, !xegpu.tensor_desc<8x16xf32, #xegpu.sg_map> + return +} + +// ----- +func.func @test_store_nd_vc_4(%dst: memref<24x32xf32>, %data: vector<2xf32>) { + %1 = xegpu.create_nd_tdesc %dst[0, 0] : memref<24x32xf32> -> + !xegpu.tensor_desc<16xf32, #xegpu.sg_map> + // expected-error@+1 {{Result shape doesn't match TensorDesc shape.}} + xegpu.store_nd %data, %1 + : vector<2xf32>, !xegpu.tensor_desc<16xf32, #xegpu.sg_map> + return +} + +// ----- +func.func @test_store_nd_vc_5(%dst: memref<24x32xf32>, %data: vector<8x1xf32>) { + %1 = xegpu.create_nd_tdesc %dst[0, 0] : memref<24x32xf32> -> + !xegpu.tensor_desc<8x16xf32> + // expected-error@+1 {{Result shape doesn't match TensorDesc shape.}} + xegpu.store_nd %data, %1 : vector<8x1xf32>, !xegpu.tensor_desc<8x16xf32> + return +} + // ----- func.func @test_update_nd_offset_1(%dst: memref<16xf16>) { %0 = arith.constant dense<[0, 2, 4, 6, 8, 10, 12, 14]> : vector<8xindex> @@ -137,8 +183,8 @@ func.func @test_create_tdesc_vc_1(%src: ui64) { // ----- func.func @test_create_tdesc_vc_2(%src: ui64) { %0 = arith.constant 
dense<[0, 2, 4, 6, 8, 10, 12, 14]> : vector<8xindex> - // expected-error@+1 {{Incorrect TensorDesc shape}} %1 = xegpu.create_tdesc %src, %0 : ui64, vector<8xindex> + // expected-error@+1 {{expected chunk blocks for 2D tensor}} -> !xegpu.tensor_desc<8x4xf16, #xegpu.scatter_tdesc_attr<>> return } @@ -173,7 +219,7 @@ func.func @test_prefetch_vc_2(%src: ui64) { // ----- func.func @test_create_tdesc_sg_map_1(%src: ui64) { %cst = arith.constant dense<[0, 8, 16, 24]> : vector<4xindex> - // expected-error@+1 {{Detected a conflict between SG map's work-item layout and TensorDesc shape. Check the index of `subgroup_size` in WI layout map}} + // expected-error@+1 {{outer layout distribution and data mapping must be 1 for 1D tensor}} %1 = xegpu.create_tdesc %src, %cst : ui64, vector<4xindex> -> !xegpu.tensor_desc<4xf32, #xegpu.scatter_tdesc_attr<>, #xegpu.sg_map> return } @@ -181,7 +227,7 @@ func.func @test_create_tdesc_sg_map_1(%src: ui64) { // ----- func.func @test_create_tdesc_sg_map_2(%src: ui64) { %cst = arith.constant dense<[0, 8, 16, 24]> : vector<4xindex> - // expected-error@+1 {{TensorDesc's SG map only supports multiple elements contiguous along rows}} + // expected-error@+1 {{cannot map over non-contiguous scattered row elements}} %1 = xegpu.create_tdesc %src, %cst : ui64, vector<4xindex> -> !xegpu.tensor_desc<4x2xf32, #xegpu.scatter_tdesc_attr, #xegpu.sg_map> return } @@ -189,7 +235,7 @@ func.func @test_create_tdesc_sg_map_2(%src: ui64) { // ----- func.func @test_create_tdesc_sg_map_3(%src: ui64) { %cst = arith.constant dense<[0, 8, 16, 24]> : vector<4xindex> - // expected-error@+1 {{TensorDesc's chunkSize must match WI's data mapping}} + // expected-error@+1 {{work item data mapping must match the number of contiguous elements}} %1 = xegpu.create_tdesc %src, %cst : ui64, vector<4xindex> -> !xegpu.tensor_desc<4x3xf32, #xegpu.scatter_tdesc_attr, #xegpu.sg_map> return } @@ -315,4 +361,109 @@ func.func @test_atomic_rmw(%src: ui64, %value : vector<16x4xf32>, %mask : vector // expected-error@+1 {{failed to verify that all of {tensorDesc, value, result} have same shape}} xegpu.atomic_rmw addf %1, %mask, %value: !xegpu.tensor_desc<16x8xf32, #xegpu.scatter_tdesc_attr>, vector<16xi1>, vector<16x4xf32> -> vector<16x8xf32> return -} \ No newline at end of file +} + +// ----- +func.func @tensor_desc_invalid_rank(%src: memref<24x32xf32>) { + %0 = xegpu.create_nd_tdesc %src[0, 0] : memref<24x32xf32> -> + // expected-error@+1 {{expected 1D or 2D tensor}} + !xegpu.tensor_desc<16x2x2xf32> + return +} + +// ----- +func.func @tensor_desc_invalid_rank_1(%src: memref<24x32xf32>) { + %0 = xegpu.create_nd_tdesc %src[0, 0] : memref<24x32xf32> -> + // expected-error@+1 {{expected 1D or 2D tensor}} + !xegpu.tensor_desc + return +} + +// ----- +func.func @tensor_desc_1D_invalid_map_layout(%src: memref<24x32xf32>) { + %0 = xegpu.create_nd_tdesc %src[0, 0] : memref<24x32xf32> -> + // expected-error@+1 {{outer layout distribution and data mapping must be 1 for 1D tensor}} + !xegpu.tensor_desc<16xf32, #xegpu.sg_map> + return +} + +// ----- +func.func @tensor_desc_1D_invalid_map_data(%src: memref<24x32xf32>) { + %0 = xegpu.create_nd_tdesc %src[0, 0] : memref<24x32xf32> -> + // expected-error@+1 {{outer layout distribution and data mapping must be 1 for 1D tensor}} + !xegpu.tensor_desc<16xf32, #xegpu.sg_map> + return +} + +// ----- +func.func @tensor_desc_invalid_map_layout(%src: memref<24x32xf32>) { + %0 = xegpu.create_nd_tdesc %src[0, 0] : memref<24x32xf32> -> + // expected-error@+1 {{cannot distribute 8 over 16 work 
items with 1 elements each}} + !xegpu.tensor_desc<4x8xf32, #xegpu.sg_map> + return +} + +// ----- +func.func @tensor_desc_invalid_map_layout_1(%src: memref<24x32xf32>) { + %0 = xegpu.create_nd_tdesc %src[0, 0] : memref<24x32xf32> -> + // expected-error@+1 {{cannot distribute 4 over 8 work items with 1 elements each}} + !xegpu.tensor_desc<4x8xf32, #xegpu.sg_map> + return +} + +// ----- +func.func @tensor_desc_invalid_map_data(%src: memref<24x32xf32>) { + %0 = xegpu.create_nd_tdesc %src[0, 0] : memref<24x32xf32> -> + // expected-error@+1 {{cannot distribute 4 over 2 work items with 4 elements each}} + !xegpu.tensor_desc<4x8xf32, #xegpu.sg_map> + return +} + +// ----- +func.func @tensor_desc_invalid_map_data_1(%src: memref<24x32xf32>) { + %0 = xegpu.create_nd_tdesc %src[0, 0] : memref<24x32xf32> -> + // expected-error@+1 {{cannot distribute 4 over 8 work items with 1 elements each}} + !xegpu.tensor_desc<4x8xf32, #xegpu.sg_map> + return +} + +// ----- +func.func @tensor_desc_scatter_invalid_map_data(%src: ui64) { + %0 = arith.constant dense<[0, 8, 16, 24]> : vector<4xindex> + %1 = xegpu.create_tdesc %src, %0 : ui64, vector<4xindex> -> + // expected-error@+1 {{cannot map over non-contiguous scattered row elements}} + !xegpu.tensor_desc<4x2xf32, + #xegpu.scatter_tdesc_attr, + #xegpu.sg_map> + return +} + +// ----- +func.func @tensor_desc_scatter_invalid_map_data_1(%src: ui64, %offsets: vector<16xindex>) { + %1 = xegpu.create_tdesc %src, %offsets : ui64, vector<16xindex> -> + // expected-error@+1 {{work item data mapping must match the number of contiguous elements}} + !xegpu.tensor_desc<16xf32, + #xegpu.scatter_tdesc_attr, + #xegpu.sg_map> + return +} + +// ----- +func.func @tensor_desc_scatter_invalid_chunk_size_1D(%src: ui64, %offsets: vector<16xindex>) { + %1 = xegpu.create_tdesc %src, %offsets : ui64, vector<16xindex> -> + // expected-error@+1 {{expected non-contiguous elements for 1D tensor}} + !xegpu.tensor_desc<16xf32, + #xegpu.scatter_tdesc_attr, + #xegpu.sg_map> + return +} + +// ----- +func.func @tensor_desc_scatter_invalid_chunk_size_2D(%src: ui64, %offsets: vector<16xindex>) { + %1 = xegpu.create_tdesc %src, %offsets : ui64, vector<16xindex> -> + // expected-error@+1 {{expected chunk blocks for 2D tensor}} + !xegpu.tensor_desc<16x2xf32, + #xegpu.scatter_tdesc_attr, + #xegpu.sg_map> + return +} diff --git a/mlir/test/Target/LLVMIR/nvvm/tcgen05-barriers.mlir b/mlir/test/Target/LLVMIR/nvvm/tcgen05-barriers.mlir new file mode 100644 index 0000000000000..7536a4567e34e --- /dev/null +++ b/mlir/test/Target/LLVMIR/nvvm/tcgen05-barriers.mlir @@ -0,0 +1,56 @@ +// RUN: mlir-opt -split-input-file -verify-diagnostics %s +// RUN: mlir-translate -mlir-to-llvmir -split-input-file -verify-diagnostics %s | FileCheck %s --check-prefix=CHECK-LLVM + +// CHECK-LABEL: @llvm_nvvm_tcgen05_fence +llvm.func @llvm_nvvm_tcgen05_fence() { + // CHECK-LLVM: call void @llvm.nvvm.tcgen05.fence.before.thread.sync() + nvvm.tcgen05.fence #nvvm.tcgen05_fence + + // CHECK-LLVM: call void @llvm.nvvm.tcgen05.fence.after.thread.sync() + nvvm.tcgen05.fence #nvvm.tcgen05_fence + + llvm.return +} + +// CHECK-LABEL: @llvm_nvvm_tcgen05_wait +llvm.func @llvm_nvvm_tcgen05_wait() { + // CHECK-LLVM: call void @llvm.nvvm.tcgen05.wait.ld() + nvvm.tcgen05.wait #nvvm.tcgen05_wait + + // CHECK-LLVM: call void @llvm.nvvm.tcgen05.wait.st() + nvvm.tcgen05.wait #nvvm.tcgen05_wait + + llvm.return +} + +// CHECK-LABEL: @llvm_nvvm_tcgen05_commit_generic +llvm.func @llvm_nvvm_tcgen05_commit_generic(%barrier : !llvm.ptr, %cta_mask : i16) { + // 
CHECK-LLVM: call void @llvm.nvvm.tcgen05.commit.cg1(ptr %{{.*}}) + nvvm.tcgen05.commit %barrier : !llvm.ptr + + // CHECK-LLVM: call void @llvm.nvvm.tcgen05.commit.cg2(ptr %{{.*}}) + nvvm.tcgen05.commit %barrier {group = #nvvm.tcgen05_group} : !llvm.ptr + + // CHECK-LLVM: call void @llvm.nvvm.tcgen05.commit.mc.cg1(ptr %{{.*}}, i16 %{{.*}}) + nvvm.tcgen05.commit %barrier, multicast_mask = %cta_mask : !llvm.ptr, i16 + + // CHECK-LLVM: call void @llvm.nvvm.tcgen05.commit.mc.cg2(ptr %{{.*}}, i16 %{{.*}}) + nvvm.tcgen05.commit %barrier, multicast_mask = %cta_mask {group = #nvvm.tcgen05_group} : !llvm.ptr, i16 + llvm.return +} + +// CHECK-LABEL: @llvm_nvvm_tcgen05_commit_shared +llvm.func @llvm_nvvm_tcgen05_commit_shared(%barrier : !llvm.ptr<3>, %cta_mask : i16) { + // CHECK-LLVM: call void @llvm.nvvm.tcgen05.commit.shared.cg1(ptr addrspace(3) %{{.*}}) + nvvm.tcgen05.commit %barrier : !llvm.ptr<3> + + // CHECK-LLVM: call void @llvm.nvvm.tcgen05.commit.shared.cg2(ptr addrspace(3) %{{.*}}) + nvvm.tcgen05.commit %barrier {group = #nvvm.tcgen05_group} : !llvm.ptr<3> + + // CHECK-LLVM: call void @llvm.nvvm.tcgen05.commit.mc.shared.cg1(ptr addrspace(3) %{{.*}}, i16 %{{.*}}) + nvvm.tcgen05.commit %barrier, multicast_mask = %cta_mask : !llvm.ptr<3>, i16 + + // CHECK-LLVM: call void @llvm.nvvm.tcgen05.commit.mc.shared.cg2(ptr addrspace(3) %{{.*}}, i16 %{{.*}}) + nvvm.tcgen05.commit %barrier, multicast_mask = %cta_mask {group = #nvvm.tcgen05_group} : !llvm.ptr<3>, i16 + llvm.return +} diff --git a/mlir/test/Target/SPIRV/group-ops.mlir b/mlir/test/Target/SPIRV/group-ops.mlir index 32da4d9c26bd1..2ba7f23258e7f 100644 --- a/mlir/test/Target/SPIRV/group-ops.mlir +++ b/mlir/test/Target/SPIRV/group-ops.mlir @@ -103,5 +103,15 @@ spirv.module Logical GLSL450 requires #spirv.vce { %0 = spirv.KHR.GroupFMul %value : f32 spirv.ReturnValue %0: f32 } +} + +// ----- +spirv.module Logical GLSL450 requires #spirv.vce { + // CHECK-LABEL: @group_non_uniform_ballot_bit_count + spirv.func @group_non_uniform_ballot_bit_count(%value: vector<4xi32>) -> i32 "None" { + // CHECK: spirv.GroupNonUniformBallotBitCount {{%.*}} : vector<4xi32> -> i32 + %0 = spirv.GroupNonUniformBallotBitCount %value : vector<4xi32> -> i32 + spirv.ReturnValue %0 : i32 + } } diff --git a/mlir/test/Transforms/print-op-graph-back-edges.mlir b/mlir/test/Transforms/print-op-graph-back-edges.mlir index ed922dd7cb13b..7950125e2f735 100644 --- a/mlir/test/Transforms/print-op-graph-back-edges.mlir +++ b/mlir/test/Transforms/print-op-graph-back-edges.mlir @@ -1,21 +1,21 @@ // RUN: mlir-opt -view-op-graph %s -o %t 2>&1 | FileCheck -check-prefix=DFG %s // DFG-LABEL: digraph G { -// DFG: compound = true; -// DFG: subgraph cluster_1 { -// DFG: v2 [label = " ", shape = plain]; -// DFG: label = "builtin.module : ()\n"; -// DFG: subgraph cluster_3 { -// DFG: v4 [label = " ", shape = plain]; -// DFG: label = ""; -// DFG: v5 [fillcolor = "0.000000 1.0 1.0", label = "arith.addi : (index)\n\noverflowFlags: #arith.overflow v5 [label = "0", style = solid]; -// DFG: v7 -> v5 [label = "1", style = solid]; -// DFG: } +// DFG-NEXT: compound = true; +// DFG-NEXT: subgraph cluster_1 { +// DFG-NEXT: v2 [label = " ", shape = plain]; +// DFG-NEXT: label = "builtin.module : ()\l"; +// DFG-NEXT: subgraph cluster_3 { +// DFG-NEXT: v4 [label = " ", shape = plain]; +// DFG-NEXT: label = ""; +// DFG-NEXT: v5 [fillcolor = "0.000000 0.3 0.95", label = "{{\{\{}} %c0| %c1}|arith.addi\l\loverflowFlags: #arith.overflow\ %0 index}}", shape = Mrecord, style = filled]; +// DFG-NEXT: v6 [fillcolor = 
"0.333333 0.3 0.95", label = "{arith.constant\l\lvalue: 0 : index\l|{ %c0 index}}", shape = Mrecord, style = filled]; +// DFG-NEXT: v7 [fillcolor = "0.333333 0.3 0.95", label = "{arith.constant\l\lvalue: 1 : index\l|{ %c1 index}}", shape = Mrecord, style = filled]; +// DFG-NEXT: } +// DFG-NEXT: } +// DFG-NEXT: v6:res_c0:s -> v5:arg_c0:n[style = solid]; +// DFG-NEXT: v7:res_c1:s -> v5:arg_c1:n[style = solid]; +// DFG-NEXT: } module { %add = arith.addi %c0, %c1 : index diff --git a/mlir/test/Transforms/print-op-graph-cycles.mlir b/mlir/test/Transforms/print-op-graph-cycles.mlir index 7e4eb5616a28b..ba989544419f3 100644 --- a/mlir/test/Transforms/print-op-graph-cycles.mlir +++ b/mlir/test/Transforms/print-op-graph-cycles.mlir @@ -1,45 +1,45 @@ // RUN: mlir-opt -view-op-graph -allow-unregistered-dialect %s -o %t 2>&1 | FileCheck -check-prefix=DFG %s // DFG-LABEL: digraph G { -// DFG: compound = true; -// DFG: subgraph cluster_1 { -// DFG: v2 [label = " ", shape = plain]; -// DFG: label = "builtin.module : ()\n"; -// DFG: subgraph cluster_3 { -// DFG: v4 [label = " ", shape = plain]; -// DFG: label = ""; -// DFG: subgraph cluster_5 { -// DFG: v6 [label = " ", shape = plain]; -// DFG: label = "test.graph_region : ()\n"; -// DFG: subgraph cluster_7 { -// DFG: v8 [label = " ", shape = plain]; -// DFG: label = ""; -// DFG: v9 [fillcolor = "0.000000 1.0 1.0", label = "op1 : (i32)\n", shape = ellipse, style = filled]; -// DFG: subgraph cluster_10 { -// DFG: v11 [label = " ", shape = plain]; -// DFG: label = "test.ssacfg_region : (i32)\n"; -// DFG: subgraph cluster_12 { -// DFG: v13 [label = " ", shape = plain]; -// DFG: label = ""; -// DFG: v14 [fillcolor = "0.166667 1.0 1.0", label = "op2 : (i32)\n", shape = ellipse, style = filled]; -// DFG: } -// DFG: } -// DFG: v15 [fillcolor = "0.166667 1.0 1.0", label = "op2 : (i32)\n", shape = ellipse, style = filled]; -// DFG: v16 [fillcolor = "0.500000 1.0 1.0", label = "op3 : (i32)\n", shape = ellipse, style = filled]; -// DFG: } -// DFG: } -// DFG: } -// DFG: } -// DFG: v9 -> v9 [label = "0", style = solid]; -// DFG: v15 -> v9 [label = "1", style = solid]; -// DFG: v9 -> v14 [label = "0", style = solid]; -// DFG: v11 -> v14 [ltail = cluster_10, style = solid]; -// DFG: v15 -> v14 [label = "2", style = solid]; -// DFG: v16 -> v14 [label = "3", style = solid]; -// DFG: v9 -> v15 [label = "0", style = solid]; -// DFG: v16 -> v15 [label = "1", style = solid]; -// DFG: v9 -> v16 [label = "", style = solid]; -// DFG: } +// DFG-NEXT: compound = true; +// DFG-NEXT: subgraph cluster_1 { +// DFG-NEXT: v2 [label = " ", shape = plain]; +// DFG-NEXT: label = "builtin.module : ()\l"; +// DFG-NEXT: subgraph cluster_3 { +// DFG-NEXT: v4 [label = " ", shape = plain]; +// DFG-NEXT: label = ""; +// DFG-NEXT: subgraph cluster_5 { +// DFG-NEXT: v6 [label = " ", shape = plain]; +// DFG-NEXT: label = "test.graph_region : ()\l"; +// DFG-NEXT: subgraph cluster_7 { +// DFG-NEXT: v8 [label = " ", shape = plain]; +// DFG-NEXT: label = ""; +// DFG-NEXT: v9 [fillcolor = "0.000000 0.3 0.95", label = "{{\{\{}} %0| %2}|op1\l|{ %0 i32}}", shape = Mrecord, style = filled]; +// DFG-NEXT: subgraph cluster_10 { +// DFG-NEXT: v11 [label = " ", shape = plain]; +// DFG-NEXT: label = "test.ssacfg_region : (i32)\l"; +// DFG-NEXT: subgraph cluster_12 { +// DFG-NEXT: v13 [label = " ", shape = plain]; +// DFG-NEXT: label = ""; +// DFG-NEXT: v14 [fillcolor = "0.166667 0.3 0.95", label = "{{\{\{}} %0| %1| %2| %3}|op2\l|{ %4 i32}}", shape = Mrecord, style = filled]; +// DFG-NEXT: } +// DFG-NEXT: } +// 
DFG-NEXT: v15 [fillcolor = "0.166667 0.3 0.95", label = "{{\{\{}} %0| %3}|op2\l|{ %2 i32}}", shape = Mrecord, style = filled]; +// DFG-NEXT: v16 [fillcolor = "0.500000 0.3 0.95", label = "{{\{\{}} %0}|op3\l|{ %3 i32}}", shape = Mrecord, style = filled]; +// DFG-NEXT: } +// DFG-NEXT: } +// DFG-NEXT: } +// DFG-NEXT: } +// DFG-NEXT: v9:res_0:s -> v9:arg_0:n[style = solid]; +// DFG-NEXT: v15:res_2:s -> v9:arg_2:n[style = solid]; +// DFG-NEXT: v9:res_0:s -> v14:arg_0:n[style = solid]; +// DFG-NEXT: v11 -> v14:arg_1:n[ltail = cluster_10, style = solid]; +// DFG-NEXT: v15:res_2:s -> v14:arg_2:n[style = solid]; +// DFG-NEXT: v16:res_3:s -> v14:arg_3:n[style = solid]; +// DFG-NEXT: v9:res_0:s -> v15:arg_0:n[style = solid]; +// DFG-NEXT: v16:res_3:s -> v15:arg_3:n[style = solid]; +// DFG-NEXT: v9:res_0:s -> v16:arg_0:n[style = solid]; +// DFG-NEXT: } "test.graph_region"() ({ // A Graph region %1 = "op1"(%1, %3) : (i32, i32) -> (i32) // OK: %1, %3 allowed here diff --git a/mlir/test/Transforms/print-op-graph.mlir b/mlir/test/Transforms/print-op-graph.mlir index df03194a663d9..440b037d78092 100644 --- a/mlir/test/Transforms/print-op-graph.mlir +++ b/mlir/test/Transforms/print-op-graph.mlir @@ -6,49 +6,49 @@ // DFG: subgraph {{.*}} // DFG: label = "func.func{{.*}}merge_blocks // DFG: subgraph {{.*}} { -// DFG: v[[ARG0:.*]] [label = "arg0" +// DFG: v[[ARG0:.*]] [label = " %arg0 i32" // DFG: v[[CONST10:.*]] [{{.*}}label ={{.*}}10 : i32 // DFG: subgraph [[CLUSTER_MERGE_BLOCKS:.*]] { // DFG: v[[ANCHOR:.*]] [label = " ", shape = plain] // DFG: label = "test.merge_blocks // DFG: subgraph {{.*}} { -// DFG: v[[TEST_BR:.*]] [{{.*}}label = "test.br +// DFG: v[[TEST_BR:.*]] [{{.*}}label = "{{.*}}test.br // DFG: } // DFG: subgraph {{.*}} { // DFG: } // DFG: } -// DFG: v[[TEST_RET:.*]] [{{.*}}label = "test.return -// DFG: v[[ARG0]] -> v[[TEST_BR]] -// DFG: v[[CONST10]] -> v[[TEST_BR]] -// DFG: v[[ANCHOR]] -> v[[TEST_RET]] [ltail = [[CLUSTER_MERGE_BLOCKS]], style = solid]; -// DFG: v[[ANCHOR]] -> v[[TEST_RET]] [ltail = [[CLUSTER_MERGE_BLOCKS]], style = solid]; +// DFG: v[[TEST_RET:.*]] [{{.*}}label = "{{.*}}test.return +// DFG: v[[ARG0]]:res_arg0:s -> v[[TEST_BR]]:arg_arg0:n +// DFG: v[[CONST10]]:res_c10_i32:s -> v[[TEST_BR]] +// DFG: v[[ANCHOR]] -> v[[TEST_RET]]:arg_1_0:n[ltail = [[CLUSTER_MERGE_BLOCKS]], style = solid]; +// DFG: v[[ANCHOR]] -> v[[TEST_RET]]:arg_1_1:n[ltail = [[CLUSTER_MERGE_BLOCKS]], style = solid]; // CFG-LABEL: digraph G { // CFG: subgraph {{.*}} { // CFG: subgraph {{.*}} // CFG: label = "func.func{{.*}}merge_blocks // CFG: subgraph {{.*}} { -// CFG: v[[C1:.*]] [{{.*}}label = "arith.constant -// CFG: v[[C2:.*]] [{{.*}}label = "arith.constant -// CFG: v[[C3:.*]] [{{.*}}label = "arith.constant -// CFG: v[[C4:.*]] [{{.*}}label = "arith.constant -// CFG: v[[TEST_FUNC:.*]] [{{.*}}label = "test.func +// CFG: v[[C1:.*]] [{{.*}}label = "{arith.constant +// CFG: v[[C2:.*]] [{{.*}}label = "{arith.constant +// CFG: v[[C3:.*]] [{{.*}}label = "{arith.constant +// CFG: v[[C4:.*]] [{{.*}}label = "{arith.constant +// CFG: v[[TEST_FUNC:.*]] [{{.*}}label = "{test.func // CFG: subgraph [[CLUSTER_MERGE_BLOCKS:.*]] { // CFG: v[[ANCHOR:.*]] [label = " ", shape = plain] // CFG: label = "test.merge_blocks // CFG: subgraph {{.*}} { -// CFG: v[[TEST_BR:.*]] [{{.*}}label = "test.br +// CFG: v[[TEST_BR:.*]] [{{.*}}label = "{{.*}}test.br // CFG: } // CFG: subgraph {{.*}} { // CFG: } // CFG: } -// CFG: v[[TEST_RET:.*]] [{{.*}}label = "test.return +// CFG: v[[TEST_RET:.*]] [{{.*}}label = "{{.*}}test.return // CFG: v[[C1]] -> 
v[[C2]] // CFG: v[[C2]] -> v[[C3]] // CFG: v[[C3]] -> v[[C4]] // CFG: v[[C4]] -> v[[TEST_FUNC]] -// CFG: v[[TEST_FUNC]] -> v[[ANCHOR]] [lhead = [[CLUSTER_MERGE_BLOCKS]], style = dashed]; -// CFG: v[[ANCHOR]] -> v[[TEST_RET]] [ltail = [[CLUSTER_MERGE_BLOCKS]], style = dashed]; +// CFG: v[[TEST_FUNC]] -> v[[ANCHOR]][lhead = [[CLUSTER_MERGE_BLOCKS]], style = dashed]; +// CFG: v[[ANCHOR]] -> v[[TEST_RET]][ltail = [[CLUSTER_MERGE_BLOCKS]], style = dashed]; func.func @merge_blocks(%arg0: i32, %arg1 : i32) -> () { %0 = arith.constant dense<[[0, 1], [2, 3]]> : tensor<2x2xi32> diff --git a/offload/test/sanitizer/kernel_crash_many.c b/offload/test/sanitizer/kernel_crash_many.c index f1d17ca2b76e2..9fd8af48f51fe 100644 --- a/offload/test/sanitizer/kernel_crash_many.c +++ b/offload/test/sanitizer/kernel_crash_many.c @@ -37,36 +37,36 @@ int main(void) { // CHECK: Kernel 1: {{.*}} (__omp_offloading_{{.*}}_main_l22) // CHECK: launchKernel // NDEBG: main -// DEBUG: main {{.*}}kernel_crash_many.c: +// DEBUG: main {{.*}}kernel_crash_many.c // // CHECK: Kernel 2: {{.*}} (__omp_offloading_{{.*}}_main_l22) // CHECK: launchKernel // NDEBG: main -// DEBUG: main {{.*}}kernel_crash_many.c: +// DEBUG: main {{.*}}kernel_crash_many.c // // CHECK: Kernel 3: {{.*}} (__omp_offloading_{{.*}}_main_l22) // CHECK: launchKernel // NDEBG: main -// DEBUG: main {{.*}}kernel_crash_many.c: +// DEBUG: main {{.*}}kernel_crash_many.c // // CHECK: Kernel 4: {{.*}} (__omp_offloading_{{.*}}_main_l22) // CHECK: launchKernel // NDEBG: main -// DEBUG: main {{.*}}kernel_crash_many.c: +// DEBUG: main {{.*}}kernel_crash_many.c // // CHECK: Kernel 5: {{.*}} (__omp_offloading_{{.*}}_main_l22) // CHECK: launchKernel // NDEBG: main -// DEBUG: main {{.*}}kernel_crash_many.c: +// DEBUG: main {{.*}}kernel_crash_many.c // // CHECK: Kernel 6: {{.*}} (__omp_offloading_{{.*}}_main_l22) // CHECK: launchKernel // NDEBG: main -// DEBUG: main {{.*}}kernel_crash_many.c: +// DEBUG: main {{.*}}kernel_crash_many.c // // CHECK: Kernel 7: {{.*}} (__omp_offloading_{{.*}}_main_l22) // CHECK: launchKernel // NDEBG: main -// DEBUG: main {{.*}}kernel_crash_many.c: +// DEBUG: main {{.*}}kernel_crash_many.c // // CHECK-NOT: Kernel {{[[0-9]]+}}: diff --git a/offload/test/sanitizer/kernel_trap.c b/offload/test/sanitizer/kernel_trap.c index d1742dbd70c23..3a531bd74c980 100644 --- a/offload/test/sanitizer/kernel_trap.c +++ b/offload/test/sanitizer/kernel_trap.c @@ -39,5 +39,4 @@ int main(void) { // CHECK: OFFLOAD ERROR: Kernel 'omp target in main @ 30 (__omp_offloading_{{.*}}_main_l30)' // CHECK: OFFLOAD ERROR: execution interrupted by hardware trap instruction // TRACE: launchKernel -// CHECK: main // clang-format on diff --git a/offload/test/sanitizer/kernel_trap.cpp b/offload/test/sanitizer/kernel_trap.cpp index c67b3857fabba..44858be6cd3f6 100644 --- a/offload/test/sanitizer/kernel_trap.cpp +++ b/offload/test/sanitizer/kernel_trap.cpp @@ -47,6 +47,6 @@ int main(void) { // TRACE: launchKernel // NDEBG: cxx_function_name(int, S*) // NDEBG: main -// DEBUG: cxx_function_name(int, S*) {{.*}}kernel_trap.cpp: -// DEBUG: main {{.*}}kernel_trap.cpp: +// DEBUG: cxx_function_name(int, S*) {{.*}}kernel_trap.cpp +// DEBUG: main {{.*}}kernel_trap.cpp // clang-format on diff --git a/offload/test/sanitizer/kernel_trap_many.c b/offload/test/sanitizer/kernel_trap_many.c index f2e63794168b2..061c0fe225d4b 100644 --- a/offload/test/sanitizer/kernel_trap_many.c +++ b/offload/test/sanitizer/kernel_trap_many.c @@ -32,4 +32,4 @@ int main(void) { // TRACE: OFFLOAD ERROR: execution interrupted 
by hardware trap instruction // TRACE: launchKernel // NDEBG: main -// DEBUG: main {{.*}}kernel_trap_many.c: +// DEBUG: main {{.*}}kernel_trap_many.c diff --git a/utils/bazel/llvm-project-overlay/llvm/unittests/BUILD.bazel b/utils/bazel/llvm-project-overlay/llvm/unittests/BUILD.bazel index 561f2b8f408f0..61f4700b057ab 100644 --- a/utils/bazel/llvm-project-overlay/llvm/unittests/BUILD.bazel +++ b/utils/bazel/llvm-project-overlay/llvm/unittests/BUILD.bazel @@ -224,6 +224,8 @@ cc_test( # Skip a test that relies on reading files in a way that doesn't easily # work with Bazel. "--gtest_filter=-NativeSymbolReuseTest.*", + # TODO: this test is failing on some configs, investigate and re-enable it. + "--gtest_filter=-DebugLineBasicFixture.LookupAddressRangeWithStmtSequenceOffset", ], features = ["-layering_check"], # #include "../lib/CodeGen/AsmPrinter/DwarfStringPool.h" deps = [ diff --git a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel index 5f7aaf7f8f31b..e07891f004850 100644 --- a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel +++ b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel @@ -5224,6 +5224,7 @@ cc_library( ":Support", ":TensorDialect", ":TransformUtils", + ":UBDialect", ":VectorDialect", ":VectorEnumsIncGen", ":VectorInterfaces",