diff --git a/.github/workflows/libcxx-build-and-test.yaml b/.github/workflows/libcxx-build-and-test.yaml index ee77e83363d37..d0193a4ec2faa 100644 --- a/.github/workflows/libcxx-build-and-test.yaml +++ b/.github/workflows/libcxx-build-and-test.yaml @@ -37,7 +37,7 @@ jobs: stage1: if: github.repository_owner == 'llvm' runs-on: libcxx-self-hosted-linux - container: ghcr.io/llvm/libcxx-linux-builder:d8a0709b1090350a7fe3604d8ab78c7d62f10698 + container: ghcr.io/llvm/libcxx-linux-builder:b319dfef21f6c7b0bc6a356d6b9f41a3b3b98ae9 continue-on-error: false strategy: fail-fast: false @@ -48,8 +48,8 @@ jobs: 'generic-cxx26', 'generic-modules' ] - cc: [ 'clang-20' ] - cxx: [ 'clang++-20' ] + cc: [ 'clang-21' ] + cxx: [ 'clang++-21' ] include: - config: 'generic-gcc' cc: 'gcc-14' @@ -75,7 +75,7 @@ jobs: stage2: if: github.repository_owner == 'llvm' runs-on: libcxx-self-hosted-linux - container: ghcr.io/llvm/libcxx-linux-builder:d8a0709b1090350a7fe3604d8ab78c7d62f10698 + container: ghcr.io/llvm/libcxx-linux-builder:b319dfef21f6c7b0bc6a356d6b9f41a3b3b98ae9 needs: [ stage1 ] continue-on-error: false strategy: @@ -88,18 +88,22 @@ jobs: 'generic-cxx20', 'generic-cxx23' ] - cc: [ 'clang-20' ] - cxx: [ 'clang++-20' ] + cc: [ 'clang-21' ] + cxx: [ 'clang++-21' ] include: - config: 'generic-gcc-cxx11' cc: 'gcc-14' cxx: 'g++-14' - - config: 'generic-cxx23' - cc: 'clang-18' - cxx: 'clang++-18' + - config: 'generic-cxx26' + cc: 'clang-20' + cxx: 'clang++-20' - config: 'generic-cxx26' cc: 'clang-19' cxx: 'clang++-19' + # Release transition + - config: 'generic-cxx23' + cc: 'clang-18' + cxx: 'clang++-18' steps: - uses: actions/checkout@v4 - name: ${{ matrix.config }} @@ -163,14 +167,14 @@ jobs: - config: 'generic-msan' machine: libcxx-self-hosted-linux runs-on: ${{ matrix.machine }} - container: ghcr.io/llvm/libcxx-linux-builder:d8a0709b1090350a7fe3604d8ab78c7d62f10698 + container: ghcr.io/llvm/libcxx-linux-builder:b319dfef21f6c7b0bc6a356d6b9f41a3b3b98ae9 steps: - uses: actions/checkout@v4 - name: ${{ matrix.config }} run: libcxx/utils/ci/run-buildbot ${{ matrix.config }} env: - CC: clang-20 - CXX: clang++-20 + CC: clang-21 + CXX: clang++-21 - uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 # v4.3.0 if: always() with: diff --git a/bolt/CMakeLists.txt b/bolt/CMakeLists.txt index 04db160b64b05..f5ffa81227064 100644 --- a/bolt/CMakeLists.txt +++ b/bolt/CMakeLists.txt @@ -202,3 +202,11 @@ endif() configure_file(${CMAKE_CURRENT_SOURCE_DIR}/include/bolt/RuntimeLibs/RuntimeLibraryVariables.inc.in ${CMAKE_CURRENT_BINARY_DIR}/include/bolt/RuntimeLibs/RuntimeLibraryVariables.inc @ONLY) + +set(BOLT_ENUM_TARGETS "") +foreach(t ${BOLT_TARGETS_TO_BUILD}) + set(BOLT_ENUM_TARGETS "${BOLT_ENUM_TARGETS}BOLT_TARGET(${t})\n") +endforeach(t) + +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/include/bolt/Core/TargetConfig.def.in + ${CMAKE_CURRENT_BINARY_DIR}/include/bolt/Core/TargetConfig.def @ONLY) diff --git a/bolt/include/bolt/Core/TargetConfig.def.in b/bolt/include/bolt/Core/TargetConfig.def.in new file mode 100644 index 0000000000000..a52ebd92b56fd --- /dev/null +++ b/bolt/include/bolt/Core/TargetConfig.def.in @@ -0,0 +1,23 @@ +//===-- TargetConfig.def.in - Information about available targets ---------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is configured by the build system to define the available bolt +// targets. +// +// The variant of this file not ending with .in has been autogenerated by the +// LLVM build. Do not edit! +// +//===----------------------------------------------------------------------===// + +#ifndef BOLT_TARGET +# error Please define the macro BOLT_TARGET(TargetName) +#endif + +@BOLT_ENUM_TARGETS@ + +#undef BOLT_TARGET diff --git a/bolt/tools/binary-analysis/CMakeLists.txt b/bolt/tools/binary-analysis/CMakeLists.txt index 841fc5b371185..29f224e0f66ff 100644 --- a/bolt/tools/binary-analysis/CMakeLists.txt +++ b/bolt/tools/binary-analysis/CMakeLists.txt @@ -1,5 +1,5 @@ set(LLVM_LINK_COMPONENTS - ${LLVM_TARGETS_TO_BUILD} + ${BOLT_TARGETS_TO_BUILD} MC Object Support diff --git a/bolt/tools/binary-analysis/binary-analysis.cpp b/bolt/tools/binary-analysis/binary-analysis.cpp index b03fee3e025ae..0e3584eeedd18 100644 --- a/bolt/tools/binary-analysis/binary-analysis.cpp +++ b/bolt/tools/binary-analysis/binary-analysis.cpp @@ -88,13 +88,15 @@ int main(int argc, char **argv) { llvm_shutdown_obj Y; // Call llvm_shutdown() on exit. // Initialize targets and assembly printers/parsers. - llvm::InitializeAllTargetInfos(); - llvm::InitializeAllTargetMCs(); - llvm::InitializeAllAsmParsers(); - llvm::InitializeAllDisassemblers(); - - llvm::InitializeAllTargets(); - llvm::InitializeAllAsmPrinters(); +#define BOLT_TARGET(target) \ + LLVMInitialize##target##TargetInfo(); \ + LLVMInitialize##target##TargetMC(); \ + LLVMInitialize##target##AsmParser(); \ + LLVMInitialize##target##Disassembler(); \ + LLVMInitialize##target##Target(); \ + LLVMInitialize##target##AsmPrinter(); + +#include "bolt/Core/TargetConfig.def" ParseCommandLine(argc, argv); diff --git a/bolt/tools/driver/CMakeLists.txt b/bolt/tools/driver/CMakeLists.txt index 9bf9ff85edc7b..4b3c7416de974 100644 --- a/bolt/tools/driver/CMakeLists.txt +++ b/bolt/tools/driver/CMakeLists.txt @@ -1,5 +1,5 @@ set(LLVM_LINK_COMPONENTS - ${LLVM_TARGETS_TO_BUILD} + ${BOLT_TARGETS_TO_BUILD} MC Object Support diff --git a/bolt/tools/driver/llvm-bolt.cpp b/bolt/tools/driver/llvm-bolt.cpp index f151cf5f63fc5..6b6714723fa3b 100644 --- a/bolt/tools/driver/llvm-bolt.cpp +++ b/bolt/tools/driver/llvm-bolt.cpp @@ -183,13 +183,15 @@ int main(int argc, char **argv) { std::string ToolPath = llvm::sys::fs::getMainExecutable(argv[0], nullptr); // Initialize targets and assembly printers/parsers. 
- llvm::InitializeAllTargetInfos(); - llvm::InitializeAllTargetMCs(); - llvm::InitializeAllAsmParsers(); - llvm::InitializeAllDisassemblers(); - - llvm::InitializeAllTargets(); - llvm::InitializeAllAsmPrinters(); +#define BOLT_TARGET(target) \ + LLVMInitialize##target##TargetInfo(); \ + LLVMInitialize##target##TargetMC(); \ + LLVMInitialize##target##AsmParser(); \ + LLVMInitialize##target##Disassembler(); \ + LLVMInitialize##target##Target(); \ + LLVMInitialize##target##AsmPrinter(); + +#include "bolt/Core/TargetConfig.def" ToolName = argv[0]; diff --git a/bolt/tools/heatmap/CMakeLists.txt b/bolt/tools/heatmap/CMakeLists.txt index acddc7a50e8b1..c5d3f67413929 100644 --- a/bolt/tools/heatmap/CMakeLists.txt +++ b/bolt/tools/heatmap/CMakeLists.txt @@ -1,5 +1,5 @@ set(LLVM_LINK_COMPONENTS - ${LLVM_TARGETS_TO_BUILD} + ${BOLT_TARGETS_TO_BUILD} MC Object Support diff --git a/bolt/tools/heatmap/heatmap.cpp b/bolt/tools/heatmap/heatmap.cpp index 3bb9f2ce7491d..6add36cc6715f 100644 --- a/bolt/tools/heatmap/heatmap.cpp +++ b/bolt/tools/heatmap/heatmap.cpp @@ -76,13 +76,15 @@ int main(int argc, char **argv) { opts::OutputFilename = "-"; // Initialize targets and assembly printers/parsers. - llvm::InitializeAllTargetInfos(); - llvm::InitializeAllTargetMCs(); - llvm::InitializeAllAsmParsers(); - llvm::InitializeAllDisassemblers(); - - llvm::InitializeAllTargets(); - llvm::InitializeAllAsmPrinters(); +#define BOLT_TARGET(target) \ + LLVMInitialize##target##TargetInfo(); \ + LLVMInitialize##target##TargetMC(); \ + LLVMInitialize##target##AsmParser(); \ + LLVMInitialize##target##Disassembler(); \ + LLVMInitialize##target##Target(); \ + LLVMInitialize##target##AsmPrinter(); + +#include "bolt/Core/TargetConfig.def" ToolName = argv[0]; std::string ToolPath = GetExecutablePath(argv[0]); diff --git a/bolt/tools/llvm-bolt-fuzzer/CMakeLists.txt b/bolt/tools/llvm-bolt-fuzzer/CMakeLists.txt index f21285f634bad..7eaacb74a9da6 100644 --- a/bolt/tools/llvm-bolt-fuzzer/CMakeLists.txt +++ b/bolt/tools/llvm-bolt-fuzzer/CMakeLists.txt @@ -1,5 +1,5 @@ set(LLVM_LINK_COMPONENTS - ${LLVM_TARGETS_TO_BUILD} + ${BOLT_TARGETS_TO_BUILD} ) add_llvm_fuzzer(llvm-bolt-fuzzer diff --git a/bolt/tools/llvm-bolt-fuzzer/llvm-bolt-fuzzer.cpp b/bolt/tools/llvm-bolt-fuzzer/llvm-bolt-fuzzer.cpp index bdb5768a91da1..09049788aebec 100644 --- a/bolt/tools/llvm-bolt-fuzzer/llvm-bolt-fuzzer.cpp +++ b/bolt/tools/llvm-bolt-fuzzer/llvm-bolt-fuzzer.cpp @@ -58,13 +58,16 @@ extern "C" int LLVMFuzzerTestOneInput(const char *Data, size_t Size) { extern "C" LLVM_ATTRIBUTE_USED int LLVMFuzzerInitialize(int *argc, char ***argv) { - llvm::InitializeAllTargetInfos(); - llvm::InitializeAllTargetMCs(); - llvm::InitializeAllAsmParsers(); - llvm::InitializeAllDisassemblers(); + // Initialize targets and assembly printers/parsers. 
+#define BOLT_TARGET(target) \ + LLVMInitialize##target##TargetInfo(); \ + LLVMInitialize##target##TargetMC(); \ + LLVMInitialize##target##AsmParser(); \ + LLVMInitialize##target##Disassembler(); \ + LLVMInitialize##target##Target(); \ + LLVMInitialize##target##AsmPrinter(); - llvm::InitializeAllTargets(); - llvm::InitializeAllAsmPrinters(); +#include "bolt/Core/TargetConfig.def" return 0; } diff --git a/bolt/unittests/Core/BinaryContext.cpp b/bolt/unittests/Core/BinaryContext.cpp index 0fefa1b83c3c2..09d16966334da 100644 --- a/bolt/unittests/Core/BinaryContext.cpp +++ b/bolt/unittests/Core/BinaryContext.cpp @@ -27,12 +27,15 @@ struct BinaryContextTester : public testing::TestWithParam<Triple::ArchType> { protected: void initalizeLLVM() { - llvm::InitializeAllTargetInfos(); - llvm::InitializeAllTargetMCs(); - llvm::InitializeAllAsmParsers(); - llvm::InitializeAllDisassemblers(); - llvm::InitializeAllTargets(); - llvm::InitializeAllAsmPrinters(); +#define BOLT_TARGET(target) \ + LLVMInitialize##target##TargetInfo(); \ + LLVMInitialize##target##TargetMC(); \ + LLVMInitialize##target##AsmParser(); \ + LLVMInitialize##target##Disassembler(); \ + LLVMInitialize##target##Target(); \ + LLVMInitialize##target##AsmPrinter(); + +#include "bolt/Core/TargetConfig.def" } void prepareElf() { diff --git a/bolt/unittests/Core/CMakeLists.txt b/bolt/unittests/Core/CMakeLists.txt index 208cf6ced7358..8ac88b701ea05 100644 --- a/bolt/unittests/Core/CMakeLists.txt +++ b/bolt/unittests/Core/CMakeLists.txt @@ -2,7 +2,7 @@ set(LLVM_LINK_COMPONENTS DebugInfoDWARF Object MC - ${LLVM_TARGETS_TO_BUILD} + ${BOLT_TARGETS_TO_BUILD} ) add_bolt_unittest(CoreTests diff --git a/bolt/unittests/Core/MCPlusBuilder.cpp b/bolt/unittests/Core/MCPlusBuilder.cpp index 5488cae366284..d367eb07f7767 100644 --- a/bolt/unittests/Core/MCPlusBuilder.cpp +++ b/bolt/unittests/Core/MCPlusBuilder.cpp @@ -37,12 +37,15 @@ struct MCPlusBuilderTester : public testing::TestWithParam<Triple::ArchType> { protected: void initalizeLLVM() { - llvm::InitializeAllTargetInfos(); - llvm::InitializeAllTargetMCs(); - llvm::InitializeAllAsmParsers(); - llvm::InitializeAllDisassemblers(); - llvm::InitializeAllTargets(); - llvm::InitializeAllAsmPrinters(); +#define BOLT_TARGET(target) \ + LLVMInitialize##target##TargetInfo(); \ + LLVMInitialize##target##TargetMC(); \ + LLVMInitialize##target##AsmParser(); \ + LLVMInitialize##target##Disassembler(); \ + LLVMInitialize##target##Target(); \ + LLVMInitialize##target##AsmPrinter(); + +#include "bolt/Core/TargetConfig.def" } void prepareElf() { diff --git a/bolt/unittests/Core/MemoryMaps.cpp b/bolt/unittests/Core/MemoryMaps.cpp index 06073d0a82e14..2e1bc4d280aed 100644 --- a/bolt/unittests/Core/MemoryMaps.cpp +++ b/bolt/unittests/Core/MemoryMaps.cpp @@ -38,12 +38,15 @@ struct MemoryMapsTester : public testing::TestWithParam<Triple::ArchType> { protected: void initalizeLLVM() { - llvm::InitializeAllTargetInfos(); - llvm::InitializeAllTargetMCs(); - llvm::InitializeAllAsmParsers(); - llvm::InitializeAllDisassemblers(); - llvm::InitializeAllTargets(); - llvm::InitializeAllAsmPrinters(); +#define BOLT_TARGET(target) \ + LLVMInitialize##target##TargetInfo(); \ + LLVMInitialize##target##TargetMC(); \ + LLVMInitialize##target##AsmParser(); \ + LLVMInitialize##target##Disassembler(); \ + LLVMInitialize##target##Target(); \ + LLVMInitialize##target##AsmPrinter(); + +#include "bolt/Core/TargetConfig.def" } void prepareElf() { diff --git a/clang-tools-extra/clangd/ClangdServer.h b/clang-tools-extra/clangd/ClangdServer.h index e030bf04122d5..1e612e2ba618e 100644 ---
a/clang-tools-extra/clangd/ClangdServer.h +++ b/clang-tools-extra/clangd/ClangdServer.h @@ -184,7 +184,7 @@ class ClangdServer { bool UseDirtyHeaders = false; // If true, parse emplace-like functions in the preamble. - bool PreambleParseForwardingFunctions = false; + bool PreambleParseForwardingFunctions = true; /// Whether include fixer insertions for Objective-C code should use #import /// instead of #include. @@ -501,7 +501,7 @@ class ClangdServer { // Whether the client supports folding only complete lines. bool LineFoldingOnly = false; - bool PreambleParseForwardingFunctions = false; + bool PreambleParseForwardingFunctions = true; bool ImportInsertions = false; diff --git a/clang-tools-extra/clangd/Compiler.h b/clang-tools-extra/clangd/Compiler.h index 4e68da7610ca2..e513e4c40794a 100644 --- a/clang-tools-extra/clangd/Compiler.h +++ b/clang-tools-extra/clangd/Compiler.h @@ -40,7 +40,7 @@ class IgnoreDiagnostics : public DiagnosticConsumer { // Options to run clang e.g. when parsing AST. struct ParseOptions { - bool PreambleParseForwardingFunctions = false; + bool PreambleParseForwardingFunctions = true; bool ImportInsertions = false; }; diff --git a/clang/docs/LanguageExtensions.rst b/clang/docs/LanguageExtensions.rst index 2a956ad5b2909..5d9b68d4a7f2a 100644 --- a/clang/docs/LanguageExtensions.rst +++ b/clang/docs/LanguageExtensions.rst @@ -2629,7 +2629,7 @@ with the current table size. .. code-block:: c++ typedef void (*__funcref funcref_t)(); - static __funcref table[0]; + static funcref_t table[0]; size_t getSize() { return __builtin_wasm_table_size(table); @@ -2651,10 +2651,10 @@ or -1. It will return -1 if not enough space could be allocated. .. code-block:: c++ typedef void (*__funcref funcref_t)(); - static __funcref table[0]; + static funcref_t table[0]; // grow returns the new table size or -1 on error. - int grow(__funcref fn, int delta) { + int grow(funcref_t fn, int delta) { int prevSize = __builtin_wasm_table_grow(table, fn, delta); if (prevSize == -1) return -1; diff --git a/clang/docs/ReleaseNotes.rst b/clang/docs/ReleaseNotes.rst index 6272f32fa845a..a91c764860ccd 100644 --- a/clang/docs/ReleaseNotes.rst +++ b/clang/docs/ReleaseNotes.rst @@ -173,6 +173,7 @@ Bug Fixes to C++ Support Bug Fixes to AST Handling ^^^^^^^^^^^^^^^^^^^^^^^^^ +- Fixed type checking when a statement expression ends in an l-value of atomic type. (#GH106576) Miscellaneous Bug Fixes ^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/clang/include/clang/AST/DeclBase.h b/clang/include/clang/AST/DeclBase.h index 648dae2838e03..2fb9d5888bce4 100644 --- a/clang/include/clang/AST/DeclBase.h +++ b/clang/include/clang/AST/DeclBase.h @@ -492,7 +492,7 @@ class alignas(8) Decl { /// perform non-Decl specific checks based on the object's type and strict /// flex array level. static bool isFlexibleArrayMemberLike( - ASTContext &Context, const Decl *D, QualType Ty, + const ASTContext &Context, const Decl *D, QualType Ty, LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel, bool IgnoreTemplateOrMacroSubstitution); diff --git a/clang/include/clang/AST/DeclCXX.h b/clang/include/clang/AST/DeclCXX.h index 766821b4fb25c..266b93a64a390 100644 --- a/clang/include/clang/AST/DeclCXX.h +++ b/clang/include/clang/AST/DeclCXX.h @@ -4194,8 +4194,8 @@ class BindingDecl : public ValueDecl { /// decomposition declaration, and when the initializer is type-dependent. Expr *getBinding() const { return Binding; } - // Get the array of Exprs when the binding represents a pack. 
- llvm::ArrayRef<Expr *> getBindingPackExprs() const; + // Get the array of nested BindingDecls when the binding represents a pack. + llvm::ArrayRef<BindingDecl *> getBindingPackDecls() const; /// Get the decomposition declaration that this binding represents a /// decomposition of. @@ -4246,10 +4246,8 @@ class DecompositionDecl final for (auto *B : Bindings) { B->setDecomposedDecl(this); if (B->isParameterPack() && B->getBinding()) { - for (Expr *E : B->getBindingPackExprs()) { - auto *DRE = cast<DeclRefExpr>(E); - auto *NestedB = cast<BindingDecl>(DRE->getDecl()); - NestedB->setDecomposedDecl(this); + for (BindingDecl *NestedBD : B->getBindingPackDecls()) { + NestedBD->setDecomposedDecl(this); } } } @@ -4278,25 +4276,21 @@ class DecompositionDecl final // Provide a flattened range to visit each binding. auto flat_bindings() const { llvm::ArrayRef<BindingDecl *> Bindings = bindings(); - llvm::ArrayRef<Expr *> PackExprs; + llvm::ArrayRef<BindingDecl *> PackBindings; // Split the bindings into subranges split by the pack. - auto S1 = Bindings.take_until( + llvm::ArrayRef<BindingDecl *> BeforePackBindings = Bindings.take_until( [](BindingDecl *BD) { return BD->isParameterPack(); }); - Bindings = Bindings.drop_front(S1.size()); + Bindings = Bindings.drop_front(BeforePackBindings.size()); if (!Bindings.empty()) { - PackExprs = Bindings.front()->getBindingPackExprs(); + PackBindings = Bindings.front()->getBindingPackDecls(); Bindings = Bindings.drop_front(); } - auto S2 = llvm::map_range(PackExprs, [](Expr *E) { - auto *DRE = cast<DeclRefExpr>(E); - return cast<BindingDecl>(DRE->getDecl()); - }); - - return llvm::concat<BindingDecl *>(std::move(S1), std::move(S2), - std::move(Bindings)); + return llvm::concat<BindingDecl *>(std::move(BeforePackBindings), + std::move(PackBindings), + std::move(Bindings)); } void printName(raw_ostream &OS, const PrintingPolicy &Policy) const override; diff --git a/clang/include/clang/AST/Expr.h b/clang/include/clang/AST/Expr.h index cd584d9621a22..ff4f236c1fa88 100644 --- a/clang/include/clang/AST/Expr.h +++ b/clang/include/clang/AST/Expr.h @@ -542,7 +542,7 @@ class Expr : public ValueStmt { /// When IgnoreTemplateOrMacroSubstitution is set, it doesn't consider sizes /// resulting from the substitution of a macro or a template as special sizes. bool isFlexibleArrayMemberLike( - ASTContext &Context, + const ASTContext &Context, LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel, bool IgnoreTemplateOrMacroSubstitution = false) const; diff --git a/clang/include/clang/AST/ExprCXX.h b/clang/include/clang/AST/ExprCXX.h index 98ba2bb41bb54..abc65e77da021 100644 --- a/clang/include/clang/AST/ExprCXX.h +++ b/clang/include/clang/AST/ExprCXX.h @@ -4633,8 +4633,8 @@ class SubstNonTypeTemplateParmPackExpr : public Expr { } }; -/// Represents a reference to a function parameter pack or init-capture pack -/// that has been substituted but not yet expanded. +/// Represents a reference to a function parameter pack, init-capture pack, +/// or binding pack that has been substituted but not yet expanded. /// /// When a pack expansion contains multiple parameter packs at different levels, /// this node is used to represent a function parameter pack at an outer level /// \endcode class FunctionParmPackExpr final : public Expr, - private llvm::TrailingObjects<FunctionParmPackExpr, VarDecl *> { + private llvm::TrailingObjects<FunctionParmPackExpr, ValueDecl *> { friend class ASTReader; friend class ASTStmtReader; friend TrailingObjects; /// The function parameter pack which was referenced. - VarDecl *ParamPack; + ValueDecl *ParamPack; /// The location of the function parameter pack reference.
SourceLocation NameLoc; @@ -4663,35 +4663,34 @@ class FunctionParmPackExpr final /// The number of expansions of this pack. unsigned NumParameters; - FunctionParmPackExpr(QualType T, VarDecl *ParamPack, - SourceLocation NameLoc, unsigned NumParams, - VarDecl *const *Params); + FunctionParmPackExpr(QualType T, ValueDecl *ParamPack, SourceLocation NameLoc, + unsigned NumParams, ValueDecl *const *Params); public: static FunctionParmPackExpr *Create(const ASTContext &Context, QualType T, - VarDecl *ParamPack, + ValueDecl *ParamPack, SourceLocation NameLoc, - ArrayRef<VarDecl *> Params); + ArrayRef<ValueDecl *> Params); static FunctionParmPackExpr *CreateEmpty(const ASTContext &Context, unsigned NumParams); /// Get the parameter pack which this expression refers to. - VarDecl *getParameterPack() const { return ParamPack; } + ValueDecl *getParameterPack() const { return ParamPack; } /// Get the location of the parameter pack. SourceLocation getParameterPackLocation() const { return NameLoc; } /// Iterators over the parameters which the parameter pack expanded /// into. - using iterator = VarDecl * const *; - iterator begin() const { return getTrailingObjects<VarDecl *>(); } + using iterator = ValueDecl *const *; + iterator begin() const { return getTrailingObjects<ValueDecl *>(); } iterator end() const { return begin() + NumParameters; } /// Get the number of parameters in this parameter pack. unsigned getNumExpansions() const { return NumParameters; } /// Get an expansion of the parameter pack by index. - VarDecl *getExpansion(unsigned I) const { return begin()[I]; } + ValueDecl *getExpansion(unsigned I) const { return begin()[I]; } SourceLocation getBeginLoc() const LLVM_READONLY { return NameLoc; } SourceLocation getEndLoc() const LLVM_READONLY { return NameLoc; } @@ -5319,59 +5318,6 @@ class BuiltinBitCastExpr final } }; -// Represents an unexpanded pack where the list of expressions are -// known. These are used when structured bindings introduce a pack.
-class ResolvedUnexpandedPackExpr final - : public Expr, - private llvm::TrailingObjects<ResolvedUnexpandedPackExpr, Expr *> { - friend class ASTStmtReader; - friend class ASTStmtWriter; - friend TrailingObjects; - - SourceLocation BeginLoc; - unsigned NumExprs; - - ResolvedUnexpandedPackExpr(SourceLocation BL, QualType QT, unsigned NumExprs); - -public: - static ResolvedUnexpandedPackExpr *CreateDeserialized(ASTContext &C, - unsigned NumExprs); - static ResolvedUnexpandedPackExpr * - Create(ASTContext &C, SourceLocation BeginLoc, QualType T, unsigned NumExprs); - static ResolvedUnexpandedPackExpr *Create(ASTContext &C, - SourceLocation BeginLoc, QualType T, - llvm::ArrayRef<Expr *> Exprs); - - unsigned getNumExprs() const { return NumExprs; } - - llvm::MutableArrayRef<Expr *> getExprs() { - return {getTrailingObjects<Expr *>(), NumExprs}; - } - - llvm::ArrayRef<Expr *> getExprs() const { - return {getTrailingObjects<Expr *>(), NumExprs}; - } - - Expr *getExpansion(unsigned Idx) { return getExprs()[Idx]; } - Expr *getExpansion(unsigned Idx) const { return getExprs()[Idx]; } - - // Iterators - child_range children() { - return child_range((Stmt **)getTrailingObjects<Expr *>(), - (Stmt **)getTrailingObjects<Expr *>() + getNumExprs()); - } - - SourceLocation getBeginLoc() const LLVM_READONLY { return BeginLoc; } - SourceLocation getEndLoc() const LLVM_READONLY { return BeginLoc; } - - // Returns the resolved pack of a decl or nullptr - static ResolvedUnexpandedPackExpr *getFromDecl(Decl *); - - static bool classof(const Stmt *T) { - return T->getStmtClass() == ResolvedUnexpandedPackExprClass; - } -}; - } // namespace clang #endif // LLVM_CLANG_AST_EXPRCXX_H diff --git a/clang/include/clang/AST/RecursiveASTVisitor.h b/clang/include/clang/AST/RecursiveASTVisitor.h index 560de7da9913a..5964cbaec8e44 100644 --- a/clang/include/clang/AST/RecursiveASTVisitor.h +++ b/clang/include/clang/AST/RecursiveASTVisitor.h @@ -2950,7 +2950,6 @@ DEF_TRAVERSE_STMT(FunctionParmPackExpr, {}) DEF_TRAVERSE_STMT(CXXFoldExpr, {}) DEF_TRAVERSE_STMT(AtomicExpr, {}) DEF_TRAVERSE_STMT(CXXParenListInitExpr, {}) -DEF_TRAVERSE_STMT(ResolvedUnexpandedPackExpr, {}) DEF_TRAVERSE_STMT(MaterializeTemporaryExpr, { if (S->getLifetimeExtendedTemporaryDecl()) { diff --git a/clang/include/clang/Basic/DiagnosticSemaKinds.td b/clang/include/clang/Basic/DiagnosticSemaKinds.td index f10af8f5bd6b2..ee1ad214d81df 100644 --- a/clang/include/clang/Basic/DiagnosticSemaKinds.td +++ b/clang/include/clang/Basic/DiagnosticSemaKinds.td @@ -10606,6 +10606,9 @@ def warn_noreturn_function_has_return_expr : Warning< def warn_falloff_noreturn_function : Warning< "function declared 'noreturn' should not return">, InGroup<InvalidNoreturn>; +def warn_noreturn_coroutine : Warning< + "coroutine %0 cannot be declared 'noreturn' as it always returns a coroutine handle">, + InGroup<InvalidNoreturn>; def err_noreturn_block_has_return_expr : Error< "block declared 'noreturn' should not return">; def err_carries_dependency_missing_on_first_decl : Error< diff --git a/clang/include/clang/Basic/LangOptions.def b/clang/include/clang/Basic/LangOptions.def index bfab0baa089cf..383440ddbc0ea 100644 --- a/clang/include/clang/Basic/LangOptions.def +++ b/clang/include/clang/Basic/LangOptions.def @@ -365,6 +365,7 @@ LANGOPT(ObjCDisableDirectMethodsForTesting, 1, 0, LANGOPT(CFProtectionBranch , 1, 0, "Control-Flow Branch Protection enabled") ENUM_LANGOPT(CFBranchLabelScheme, CFBranchLabelSchemeKind, 2, CFBranchLabelSchemeKind::Default, "Control-Flow Branch Protection Label Scheme") +LANGOPT(CFProtectionReturn, 1, 0, "Control-Flow Return Protection enabled") LANGOPT(FakeAddressSpaceMap , 1, 0,
"OpenCL fake address space map") ENUM_LANGOPT(AddressSpaceMapMangling , AddrSpaceMapMangling, 2, ASMM_Target, "OpenCL address space map mangling mode") LANGOPT(IncludeDefaultHeader, 1, 0, "Include default header file for OpenCL") diff --git a/clang/include/clang/Basic/StmtNodes.td b/clang/include/clang/Basic/StmtNodes.td index 3533c5f50742e..ae49671058a01 100644 --- a/clang/include/clang/Basic/StmtNodes.td +++ b/clang/include/clang/Basic/StmtNodes.td @@ -163,7 +163,6 @@ def MaterializeTemporaryExpr : StmtNode; def LambdaExpr : StmtNode; def CXXFoldExpr : StmtNode; def CXXParenListInitExpr: StmtNode; -def ResolvedUnexpandedPackExpr : StmtNode; // C++ Coroutines expressions def CoroutineSuspendExpr : StmtNode; diff --git a/clang/include/clang/Sema/Sema.h b/clang/include/clang/Sema/Sema.h index a501b901862b6..c55b964650323 100644 --- a/clang/include/clang/Sema/Sema.h +++ b/clang/include/clang/Sema/Sema.h @@ -232,8 +232,7 @@ void threadSafetyCleanup(BeforeSet *Cache); // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. -typedef std::pair, +typedef std::pair, SourceLocation> UnexpandedParameterPack; diff --git a/clang/include/clang/Sema/Template.h b/clang/include/clang/Sema/Template.h index 4206bd50b13dd..647c4cfa341e1 100644 --- a/clang/include/clang/Sema/Template.h +++ b/clang/include/clang/Sema/Template.h @@ -365,7 +365,7 @@ enum class TemplateSubstitutionKind : char { class LocalInstantiationScope { public: /// A set of declarations. - using DeclArgumentPack = SmallVector; + using DeclArgumentPack = SmallVector; private: /// Reference to the semantic analysis that is performing diff --git a/clang/include/clang/Serialization/ASTBitCodes.h b/clang/include/clang/Serialization/ASTBitCodes.h index ad93d50f6a82b..37cdb0fc9faa8 100644 --- a/clang/include/clang/Serialization/ASTBitCodes.h +++ b/clang/include/clang/Serialization/ASTBitCodes.h @@ -1908,7 +1908,6 @@ enum StmtCode { EXPR_PACK_EXPANSION, // PackExpansionExpr EXPR_PACK_INDEXING, // PackIndexingExpr EXPR_SIZEOF_PACK, // SizeOfPackExpr - EXPR_RESOLVED_UNEXPANDED_PACK, // ResolvedUnexpandedPackExpr EXPR_SUBST_NON_TYPE_TEMPLATE_PARM, // SubstNonTypeTemplateParmExpr EXPR_SUBST_NON_TYPE_TEMPLATE_PARM_PACK, // SubstNonTypeTemplateParmPackExpr EXPR_FUNCTION_PARM_PACK, // FunctionParmPackExpr diff --git a/clang/lib/AST/ASTContext.cpp b/clang/lib/AST/ASTContext.cpp index 7c70534388b4c..4a791316a6269 100644 --- a/clang/lib/AST/ASTContext.cpp +++ b/clang/lib/AST/ASTContext.cpp @@ -3898,7 +3898,8 @@ QualType ASTContext::getArrayParameterType(QualType Ty) const { if (Ty->isArrayParameterType()) return Ty; assert(Ty->isConstantArrayType() && "Ty must be an array type."); - const auto *ATy = cast(Ty.getDesugaredType(*this)); + QualType DTy = Ty.getDesugaredType(*this); + const auto *ATy = cast(DTy); llvm::FoldingSetNodeID ID; ATy->Profile(ID, *this, ATy->getElementType(), ATy->getZExtSize(), ATy->getSizeExpr(), ATy->getSizeModifier(), @@ -3910,7 +3911,7 @@ QualType ASTContext::getArrayParameterType(QualType Ty) const { return QualType(AT, 0); QualType Canonical; - if (!Ty.isCanonical()) { + if (!DTy.isCanonical()) { Canonical = getArrayParameterType(getCanonicalType(Ty)); // Get the new insert position for the node we care about. 
diff --git a/clang/lib/AST/ByteCode/Compiler.cpp b/clang/lib/AST/ByteCode/Compiler.cpp index b3a81f8ff1516..503c58a67adeb 100644 --- a/clang/lib/AST/ByteCode/Compiler.cpp +++ b/clang/lib/AST/ByteCode/Compiler.cpp @@ -272,7 +272,8 @@ bool Compiler<Emitter>::VisitCastExpr(const CastExpr *CE) { CurType = B->getType(); } else { unsigned DerivedOffset = collectBaseOffset(B->getType(), CurType); - if (!this->emitGetPtrBasePop(DerivedOffset, CE)) + if (!this->emitGetPtrBasePop( + DerivedOffset, /*NullOK=*/CE->getType()->isPointerType(), CE)) return false; CurType = B->getType(); } @@ -288,7 +289,8 @@ bool Compiler<Emitter>::VisitCastExpr(const CastExpr *CE) { unsigned DerivedOffset = collectBaseOffset(SubExpr->getType(), CE->getType()); - return this->emitGetPtrDerivedPop(DerivedOffset, CE); + return this->emitGetPtrDerivedPop( + DerivedOffset, /*NullOK=*/CE->getType()->isPointerType(), CE); } case CK_FloatingCast: { diff --git a/clang/lib/AST/ByteCode/Descriptor.cpp b/clang/lib/AST/ByteCode/Descriptor.cpp index 319d1690c1cd0..6017f6dd61cb3 100644 --- a/clang/lib/AST/ByteCode/Descriptor.cpp +++ b/clang/lib/AST/ByteCode/Descriptor.cpp @@ -237,8 +237,7 @@ static void moveRecord(Block *B, std::byte *Src, std::byte *Dst, assert(D); assert(D->ElemRecord); - // FIXME: There might be cases where we need to move over the (v)bases as - // well. + // FIXME: Code duplication. for (const auto &F : D->ElemRecord->fields()) { auto FieldOffset = F.Offset; const auto *SrcDesc = @@ -250,6 +249,26 @@ static void moveRecord(Block *B, std::byte *Src, std::byte *Dst, if (auto Fn = F.Desc->MoveFn) Fn(B, Src + FieldOffset, Dst + FieldOffset, F.Desc); } + + for (const auto &Base : D->ElemRecord->bases()) { + auto BaseOffset = Base.Offset; + const auto *SrcDesc = + reinterpret_cast<const InlineDescriptor *>(Src + BaseOffset) - 1; + auto *DestDesc = reinterpret_cast<InlineDescriptor *>(Dst + BaseOffset) - 1; + std::memcpy(DestDesc, SrcDesc, sizeof(InlineDescriptor)); + + if (auto Fn = Base.Desc->MoveFn) + Fn(B, Src + BaseOffset, Dst + BaseOffset, Base.Desc); + } + + for (const auto &VBase : D->ElemRecord->virtual_bases()) { + auto VBaseOffset = VBase.Offset; + const auto *SrcDesc = + reinterpret_cast<const InlineDescriptor *>(Src + VBaseOffset) - 1; + auto *DestDesc = + reinterpret_cast<InlineDescriptor *>(Dst + VBaseOffset) - 1; + std::memcpy(DestDesc, SrcDesc, sizeof(InlineDescriptor)); + } } static BlockCtorFn getCtorPrim(PrimType Type) { diff --git a/clang/lib/AST/ByteCode/Interp.cpp b/clang/lib/AST/ByteCode/Interp.cpp index 0310870f7372e..c07690a3d941c 100644 --- a/clang/lib/AST/ByteCode/Interp.cpp +++ b/clang/lib/AST/ByteCode/Interp.cpp @@ -1433,7 +1433,7 @@ bool CallVirt(InterpState &S, CodePtr OpPC, const Function *Func, unsigned Offset = S.getContext().collectBaseOffset( InitialPointeeType->getAsRecordDecl(), OverriderPointeeType->getAsRecordDecl()); - return GetPtrBasePop(S, OpPC, Offset); + return GetPtrBasePop(S, OpPC, Offset, /*IsNullOK=*/true); } return true; diff --git a/clang/lib/AST/ByteCode/Interp.h b/clang/lib/AST/ByteCode/Interp.h index 10cf21e28437c..ca74046038072 100644 --- a/clang/lib/AST/ByteCode/Interp.h +++ b/clang/lib/AST/ByteCode/Interp.h @@ -1568,10 +1568,20 @@ inline bool GetPtrActiveThisField(InterpState &S, CodePtr OpPC, uint32_t Off) { return true; } -inline bool GetPtrDerivedPop(InterpState &S, CodePtr OpPC, uint32_t Off) { +inline bool GetPtrDerivedPop(InterpState &S, CodePtr OpPC, uint32_t Off, + bool NullOK) { const Pointer &Ptr = S.Stk.pop<Pointer>(); - if (!CheckNull(S, OpPC, Ptr, CSK_Derived)) + if (!NullOK && !CheckNull(S, OpPC, Ptr, CSK_Derived)) return false; + + if (!Ptr.isBlockPointer()) { + //
FIXME: We don't have the necessary information in integral pointers. + // The Descriptor only has a record, but that does of course not include + // the potential derived classes of said record. + S.Stk.push<Pointer>(Ptr); + return true; + } + if (!CheckSubobject(S, OpPC, Ptr, CSK_Derived)) return false; if (!CheckDowncast(S, OpPC, Ptr, Off)) @@ -1600,10 +1610,11 @@ inline bool GetPtrBase(InterpState &S, CodePtr OpPC, uint32_t Off) { return true; } -inline bool GetPtrBasePop(InterpState &S, CodePtr OpPC, uint32_t Off) { +inline bool GetPtrBasePop(InterpState &S, CodePtr OpPC, uint32_t Off, + bool NullOK) { const Pointer &Ptr = S.Stk.pop<Pointer>(); - if (!CheckNull(S, OpPC, Ptr, CSK_Base)) + if (!NullOK && !CheckNull(S, OpPC, Ptr, CSK_Base)) return false; if (!Ptr.isBlockPointer()) { diff --git a/clang/lib/AST/ByteCode/Opcodes.td b/clang/lib/AST/ByteCode/Opcodes.td index 088a3e40fe2a7..41e4bae65c195 100644 --- a/clang/lib/AST/ByteCode/Opcodes.td +++ b/clang/lib/AST/ByteCode/Opcodes.td @@ -312,7 +312,7 @@ def GetPtrThisField : OffsetOpcode; // [Pointer] -> [Pointer] def GetPtrBase : OffsetOpcode; // [Pointer] -> [Pointer] -def GetPtrBasePop : OffsetOpcode; +def GetPtrBasePop : OffsetOpcode { let Args = [ArgUint32, ArgBool]; } def GetMemberPtrBasePop : Opcode { // Offset of field, which is a base. let Args = [ArgSint32]; @@ -322,9 +322,7 @@ def GetMemberPtrBasePop : Opcode { def FinishInitPop : Opcode; def FinishInit : Opcode; -def GetPtrDerivedPop : Opcode { - let Args = [ArgUint32]; -} +def GetPtrDerivedPop : Opcode { let Args = [ArgUint32, ArgBool]; } // [Pointer] -> [Pointer] def GetPtrVirtBasePop : Opcode { diff --git a/clang/lib/AST/DeclBase.cpp b/clang/lib/AST/DeclBase.cpp index fc16448cf9e90..ab9d4869a74ee 100644 --- a/clang/lib/AST/DeclBase.cpp +++ b/clang/lib/AST/DeclBase.cpp @@ -438,7 +438,7 @@ bool Decl::isFileContextDecl() const { } bool Decl::isFlexibleArrayMemberLike( - ASTContext &Ctx, const Decl *D, QualType Ty, + const ASTContext &Ctx, const Decl *D, QualType Ty, LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel, bool IgnoreTemplateOrMacroSubstitution) { // For compatibility with existing code, we treat arrays of length 0 or diff --git a/clang/lib/AST/DeclCXX.cpp b/clang/lib/AST/DeclCXX.cpp index 1aa48f0026335..7eff776882629 100644 --- a/clang/lib/AST/DeclCXX.cpp +++ b/clang/lib/AST/DeclCXX.cpp @@ -3504,10 +3504,13 @@ VarDecl *BindingDecl::getHoldingVar() const { return VD; } -llvm::ArrayRef<Expr *> BindingDecl::getBindingPackExprs() const { +llvm::ArrayRef<BindingDecl *> BindingDecl::getBindingPackDecls() const { assert(Binding && "expecting a pack expr"); - auto *RP = cast<ResolvedUnexpandedPackExpr>(Binding); - return RP->getExprs(); + auto *FP = cast<FunctionParmPackExpr>(Binding); + ValueDecl *const *First = FP->getNumExpansions() > 0 ?
FP->begin() : nullptr; + assert((!First || isa<BindingDecl>(*First)) && "expecting a BindingDecl"); + return llvm::ArrayRef<BindingDecl *>( + reinterpret_cast<BindingDecl *const *>(First), FP->getNumExpansions()); } void DecompositionDecl::anchor() {} diff --git a/clang/lib/AST/Expr.cpp b/clang/lib/AST/Expr.cpp index 460167c1b9a3d..1f949d495f343 100644 --- a/clang/lib/AST/Expr.cpp +++ b/clang/lib/AST/Expr.cpp @@ -203,7 +203,7 @@ bool Expr::isKnownToHaveBooleanValue(bool Semantic) const { } bool Expr::isFlexibleArrayMemberLike( - ASTContext &Ctx, + const ASTContext &Ctx, LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel, bool IgnoreTemplateOrMacroSubstitution) const { const Expr *E = IgnoreParens(); @@ -3672,7 +3672,6 @@ bool Expr::HasSideEffects(const ASTContext &Ctx, case PackIndexingExprClass: case HLSLOutArgExprClass: case OpenACCAsteriskSizeExprClass: - case ResolvedUnexpandedPackExprClass: // These never have a side-effect. return false; diff --git a/clang/lib/AST/ExprCXX.cpp b/clang/lib/AST/ExprCXX.cpp index d900af895b42a..c8d61e2cf3f26 100644 --- a/clang/lib/AST/ExprCXX.cpp +++ b/clang/lib/AST/ExprCXX.cpp @@ -1779,31 +1779,31 @@ TemplateArgument SubstNonTypeTemplateParmPackExpr::getArgumentPack() const { return TemplateArgument(llvm::ArrayRef(Arguments, NumArguments)); } -FunctionParmPackExpr::FunctionParmPackExpr(QualType T, VarDecl *ParamPack, +FunctionParmPackExpr::FunctionParmPackExpr(QualType T, ValueDecl *ParamPack, SourceLocation NameLoc, unsigned NumParams, - VarDecl *const *Params) + ValueDecl *const *Params) : Expr(FunctionParmPackExprClass, T, VK_LValue, OK_Ordinary), ParamPack(ParamPack), NameLoc(NameLoc), NumParameters(NumParams) { if (Params) std::uninitialized_copy(Params, Params + NumParams, - getTrailingObjects<VarDecl *>()); + getTrailingObjects<ValueDecl *>()); setDependence(ExprDependence::TypeValueInstantiation | ExprDependence::UnexpandedPack); } FunctionParmPackExpr * FunctionParmPackExpr::Create(const ASTContext &Context, QualType T, - VarDecl *ParamPack, SourceLocation NameLoc, - ArrayRef<VarDecl *> Params) { - return new (Context.Allocate(totalSizeToAlloc<VarDecl *>(Params.size()))) + ValueDecl *ParamPack, SourceLocation NameLoc, + ArrayRef<ValueDecl *> Params) { + return new (Context.Allocate(totalSizeToAlloc<ValueDecl *>(Params.size()))) FunctionParmPackExpr(T, ParamPack, NameLoc, Params.size(), Params.data()); } FunctionParmPackExpr * FunctionParmPackExpr::CreateEmpty(const ASTContext &Context, unsigned NumParams) { - return new (Context.Allocate(totalSizeToAlloc<VarDecl *>(NumParams))) + return new (Context.Allocate(totalSizeToAlloc<ValueDecl *>(NumParams))) FunctionParmPackExpr(QualType(), nullptr, SourceLocation(), 0, nullptr); } @@ -1965,52 +1965,3 @@ CXXFoldExpr::CXXFoldExpr(QualType T, UnresolvedLookupExpr *Callee, SubExprs[SubExpr::RHS] = RHS; setDependence(computeDependence(this)); } - -ResolvedUnexpandedPackExpr::ResolvedUnexpandedPackExpr(SourceLocation BL, - QualType QT, - unsigned NumExprs) - : Expr(ResolvedUnexpandedPackExprClass, QT, VK_PRValue, OK_Ordinary), - BeginLoc(BL), NumExprs(NumExprs) { - // C++ [temp.dep.expr]p3 - // An id-expression is type-dependent if it is - // - associated by name lookup with a pack - setDependence(ExprDependence::TypeValueInstantiation | - ExprDependence::UnexpandedPack); -} - -ResolvedUnexpandedPackExpr * -ResolvedUnexpandedPackExpr::CreateDeserialized(ASTContext &Ctx, - unsigned NumExprs) { - void *Mem = Ctx.Allocate(totalSizeToAlloc<Expr *>(NumExprs), - alignof(ResolvedUnexpandedPackExpr)); - return new (Mem) - ResolvedUnexpandedPackExpr(SourceLocation(), QualType(), NumExprs); -} - -ResolvedUnexpandedPackExpr *
-ResolvedUnexpandedPackExpr::Create(ASTContext &Ctx, SourceLocation BL, - QualType T, unsigned NumExprs) { - void *Mem = Ctx.Allocate(totalSizeToAlloc<Expr *>(NumExprs), - alignof(ResolvedUnexpandedPackExpr)); - ResolvedUnexpandedPackExpr *New = - new (Mem) ResolvedUnexpandedPackExpr(BL, T, NumExprs); - - auto Exprs = New->getExprs(); - std::uninitialized_fill(Exprs.begin(), Exprs.end(), nullptr); - - return New; -} - -ResolvedUnexpandedPackExpr * -ResolvedUnexpandedPackExpr::Create(ASTContext &Ctx, SourceLocation BL, - QualType T, ArrayRef<Expr *> Exprs) { - auto *New = Create(Ctx, BL, T, Exprs.size()); - std::uninitialized_copy(Exprs.begin(), Exprs.end(), New->getExprs().begin()); - return New; -} - -ResolvedUnexpandedPackExpr *ResolvedUnexpandedPackExpr::getFromDecl(Decl *D) { - if (auto *BD = dyn_cast<BindingDecl>(D)) - return dyn_cast_if_present<ResolvedUnexpandedPackExpr>(BD->getBinding()); - return nullptr; -} diff --git a/clang/lib/AST/ExprClassification.cpp b/clang/lib/AST/ExprClassification.cpp index 5225c3ca773ad..3f37d06cc8f3a 100644 --- a/clang/lib/AST/ExprClassification.cpp +++ b/clang/lib/AST/ExprClassification.cpp @@ -451,13 +451,6 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) { case Expr::PackExpansionExprClass: return ClassifyInternal(Ctx, cast<PackExpansionExpr>(E)->getPattern()); - case Expr::ResolvedUnexpandedPackExprClass: { - if (cast<ResolvedUnexpandedPackExpr>(E)->getNumExprs() > 0) - return ClassifyInternal( - Ctx, cast<ResolvedUnexpandedPackExpr>(E)->getExpansion(0)); - return Cl::CL_LValue; - } - case Expr::MaterializeTemporaryExprClass: return cast<MaterializeTemporaryExpr>(E)->isBoundToLvalueReference() ? Cl::CL_LValue diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp index 043974fb41443..6ccb6e23f8d2f 100644 --- a/clang/lib/AST/ExprConstant.cpp +++ b/clang/lib/AST/ExprConstant.cpp @@ -17253,7 +17253,6 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) { case Expr::SYCLUniqueStableNameExprClass: case Expr::CXXParenListInitExprClass: case Expr::HLSLOutArgExprClass: - case Expr::ResolvedUnexpandedPackExprClass: return ICEDiag(IK_NotICE, E->getBeginLoc()); case Expr::InitListExprClass: { diff --git a/clang/lib/AST/ItaniumMangle.cpp b/clang/lib/AST/ItaniumMangle.cpp index e5eb22eae7dd1..4a090118c3d7b 100644 --- a/clang/lib/AST/ItaniumMangle.cpp +++ b/clang/lib/AST/ItaniumMangle.cpp @@ -4932,8 +4932,7 @@ void CXXNameMangler::mangleExpression(const Expr *E, unsigned Arity, case Expr::AtomicExprClass: case Expr::SourceLocExprClass: case Expr::EmbedExprClass: - case Expr::BuiltinBitCastExprClass: - case Expr::ResolvedUnexpandedPackExprClass: { + case Expr::BuiltinBitCastExprClass: { NotPrimaryExpr(); if (!NullOut) { // As bad as this diagnostic is, it's better than crashing.
diff --git a/clang/lib/AST/StmtPrinter.cpp b/clang/lib/AST/StmtPrinter.cpp index 4b45190fa33ef..c8ea7b52a6241 100644 --- a/clang/lib/AST/StmtPrinter.cpp +++ b/clang/lib/AST/StmtPrinter.cpp @@ -2609,15 +2609,6 @@ void StmtPrinter::VisitPackIndexingExpr(PackIndexingExpr *E) { OS << "]"; } -void StmtPrinter::VisitResolvedUnexpandedPackExpr( - ResolvedUnexpandedPackExpr *E) { - OS << "<<resolved pack("; - llvm::interleave( - E->getExprs().begin(), E->getExprs().end(), - [this](auto *X) { PrintExpr(X); }, [this] { OS << ", "; }); - OS << ")>>"; -} - void StmtPrinter::VisitSubstNonTypeTemplateParmPackExpr( SubstNonTypeTemplateParmPackExpr *Node) { OS << *Node->getParameterPack(); } diff --git a/clang/lib/AST/StmtProfile.cpp b/clang/lib/AST/StmtProfile.cpp index 77ee6611f623f..2603df25ba2a4 100644 --- a/clang/lib/AST/StmtProfile.cpp +++ b/clang/lib/AST/StmtProfile.cpp @@ -2287,10 +2287,6 @@ void StmtProfiler::VisitSizeOfPackExpr(const SizeOfPackExpr *S) { ID.AddInteger(0); } } -void StmtProfiler::VisitResolvedUnexpandedPackExpr( - const ResolvedUnexpandedPackExpr *S) { - VisitExpr(S); -} void StmtProfiler::VisitPackIndexingExpr(const PackIndexingExpr *E) { VisitExpr(E); diff --git a/clang/lib/AST/TextNodeDumper.cpp b/clang/lib/AST/TextNodeDumper.cpp index 08efcda46b8f9..26493caa5d06a 100644 --- a/clang/lib/AST/TextNodeDumper.cpp +++ b/clang/lib/AST/TextNodeDumper.cpp @@ -2189,6 +2189,11 @@ void TextNodeDumper::VisitEnumDecl(const EnumDecl *D) { OS << " __module_private__"; if (D->isFixed()) dumpType(D->getIntegerType()); + + if (const auto *Instance = D->getInstantiatedFromMemberEnum()) { + OS << " instantiated_from"; + dumpPointer(Instance); + } } void TextNodeDumper::VisitRecordDecl(const RecordDecl *D) { diff --git a/clang/lib/Basic/Targets/RISCV.cpp b/clang/lib/Basic/Targets/RISCV.cpp index b4aa3206fcfab..dff990d15dd62 100644 --- a/clang/lib/Basic/Targets/RISCV.cpp +++ b/clang/lib/Basic/Targets/RISCV.cpp @@ -238,6 +238,9 @@ void RISCVTargetInfo::getTargetDefines(const LangOptions &Opts, else Builder.defineMacro("__riscv_32e"); } + + if (Opts.CFProtectionReturn && ISAInfo->hasExtension("zicfiss")) + Builder.defineMacro("__riscv_shadow_stack"); } static constexpr int NumRVVBuiltins = diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp index d57f491a20c8e..348cb523b1718 100644 --- a/clang/lib/CodeGen/CGBuiltin.cpp +++ b/clang/lib/CodeGen/CGBuiltin.cpp @@ -20723,9 +20723,19 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID, case AMDGPU::BI__builtin_amdgcn_bitop3_b16: return emitBuiltinWithOneOverloadedType<4>(*this, E, Intrinsic::amdgcn_bitop3); - case AMDGPU::BI__builtin_amdgcn_make_buffer_rsrc: - return emitBuiltinWithOneOverloadedType<4>( - *this, E, Intrinsic::amdgcn_make_buffer_rsrc); + case AMDGPU::BI__builtin_amdgcn_make_buffer_rsrc: { + // TODO: LLVM has this overloaded to allow for fat pointers, but since + // those haven't been plumbed through to Clang yet, default to creating the + // resource type.
+ SmallVector<Value *, 4> Args; + for (unsigned I = 0; I < 4; ++I) + Args.push_back(EmitScalarExpr(E->getArg(I))); + llvm::PointerType *RetTy = llvm::PointerType::get( + Builder.getContext(), llvm::AMDGPUAS::BUFFER_RESOURCE); + Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_make_buffer_rsrc, + {RetTy, Args[0]->getType()}); + return Builder.CreateCall(F, Args); + } case AMDGPU::BI__builtin_amdgcn_raw_buffer_store_b8: case AMDGPU::BI__builtin_amdgcn_raw_buffer_store_b16: case AMDGPU::BI__builtin_amdgcn_raw_buffer_store_b32: diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.cpp b/clang/lib/CodeGen/CGOpenMPRuntime.cpp index 9f7db25a15bec..c2289985e9519 100644 --- a/clang/lib/CodeGen/CGOpenMPRuntime.cpp +++ b/clang/lib/CodeGen/CGOpenMPRuntime.cpp @@ -8879,7 +8879,7 @@ static void emitOffloadingArraysAndArgs( }; auto CustomMapperCB = [&](unsigned int I) { - llvm::Value *MFunc = nullptr; + llvm::Function *MFunc = nullptr; if (CombinedInfo.Mappers[I]) { Info.HasMapper = true; MFunc = CGM.getOpenMPRuntime().getOrCreateUserDefinedMapperFunc( cast<OMPDeclareMapperDecl>(CombinedInfo.Mappers[I])); } return MFunc; }; - OMPBuilder.emitOffloadingArraysAndArgs( - AllocaIP, CodeGenIP, Info, Info.RTArgs, CombinedInfo, IsNonContiguous, - ForEndCall, DeviceAddrCB, CustomMapperCB); + cantFail(OMPBuilder.emitOffloadingArraysAndArgs( + AllocaIP, CodeGenIP, Info, Info.RTArgs, CombinedInfo, CustomMapperCB, + IsNonContiguous, ForEndCall, DeviceAddrCB)); } /// Check for inner distribute directive. @@ -9082,15 +9082,15 @@ void CGOpenMPRuntime::emitUserDefinedMapper(const OMPDeclareMapperDecl *D, return CombinedInfo; }; - auto CustomMapperCB = [&](unsigned I, llvm::Function **MapperFunc) { + auto CustomMapperCB = [&](unsigned I) { + llvm::Function *MapperFunc = nullptr; if (CombinedInfo.Mappers[I]) { // Call the corresponding mapper function.
- *MapperFunc = getOrCreateUserDefinedMapperFunc( + MapperFunc = getOrCreateUserDefinedMapperFunc( cast<OMPDeclareMapperDecl>(CombinedInfo.Mappers[I])); - assert(*MapperFunc && "Expect a valid mapper function is available."); - return true; + assert(MapperFunc && "Expect a valid mapper function is available."); } - return false; + return MapperFunc; }; SmallString<64> TyStr; @@ -9098,8 +9098,8 @@ void CGOpenMPRuntime::emitUserDefinedMapper(const OMPDeclareMapperDecl *D, CGM.getCXXABI().getMangleContext().mangleCanonicalTypeName(Ty, Out); std::string Name = getName({"omp_mapper", TyStr, D->getName()}); - auto *NewFn = OMPBuilder.emitUserDefinedMapper(PrivatizeAndGenMapInfoCB, - ElemTy, Name, CustomMapperCB); + llvm::Function *NewFn = cantFail(OMPBuilder.emitUserDefinedMapper( + PrivatizeAndGenMapInfoCB, ElemTy, Name, CustomMapperCB)); UDMMap.try_emplace(D, NewFn); if (CGF) FunctionUDMMap[CGF->CurFn].push_back(D); @@ -10073,7 +10073,7 @@ void CGOpenMPRuntime::emitTargetDataCalls( }; auto CustomMapperCB = [&](unsigned int I) { - llvm::Value *MFunc = nullptr; + llvm::Function *MFunc = nullptr; if (CombinedInfo.Mappers[I]) { Info.HasMapper = true; MFunc = CGF.CGM.getOpenMPRuntime().getOrCreateUserDefinedMapperFunc( @@ -10093,7 +10093,8 @@ void CGOpenMPRuntime::emitTargetDataCalls( llvm::OpenMPIRBuilder::InsertPointTy AfterIP = cantFail(OMPBuilder.createTargetData( OmpLoc, AllocaIP, CodeGenIP, DeviceID, IfCondVal, Info, GenMapInfoCB, - /*MapperFunc=*/nullptr, BodyCB, DeviceAddrCB, CustomMapperCB, RTLoc)); + CustomMapperCB, + /*MapperFunc=*/nullptr, BodyCB, DeviceAddrCB, RTLoc)); CGF.Builder.restoreIP(AfterIP); } diff --git a/clang/lib/Format/TokenAnnotator.cpp b/clang/lib/Format/TokenAnnotator.cpp index 069fd40e2834c..e68daa422b7c4 100644 --- a/clang/lib/Format/TokenAnnotator.cpp +++ b/clang/lib/Format/TokenAnnotator.cpp @@ -2596,7 +2596,7 @@ class AnnotatingParser { (!NextNonComment && !Line.InMacroBody) || (NextNonComment && (NextNonComment->isPointerOrReference() || - NextNonComment->is(tok::string_literal) || + NextNonComment->isOneOf(TT_ClassHeadName, tok::string_literal) || (Line.InPragmaDirective && NextNonComment->is(tok::identifier))))) { return false; } @@ -6198,8 +6198,8 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line, FormatStyle::PAS_Right && (!Right.Next || Right.Next->isNot(TT_FunctionDeclarationName))); } - if (Right.isOneOf(TT_StartOfName, TT_FunctionDeclarationName) || - Right.is(tok::kw_operator)) { + if (Right.isOneOf(TT_StartOfName, TT_FunctionDeclarationName, + TT_ClassHeadName, tok::kw_operator)) { return true; } if (Left.is(TT_PointerOrReference)) diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp index 014e629c959e2..b9a5c0589ebc4 100644 --- a/clang/lib/Frontend/CompilerInvocation.cpp +++ b/clang/lib/Frontend/CompilerInvocation.cpp @@ -4048,8 +4048,13 @@ bool CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args, if (const Arg *A = Args.getLastArg(OPT_fcf_protection_EQ)) { StringRef Name = A->getValue(); - if (Name == "full" || Name == "branch") { + if (Name == "full") { + Opts.CFProtectionBranch = 1; + Opts.CFProtectionReturn = 1; + } else if (Name == "branch") { Opts.CFProtectionBranch = 1; + } else if (Name == "return") { + Opts.CFProtectionReturn = 1; } } diff --git a/clang/lib/Index/CMakeLists.txt b/clang/lib/Index/CMakeLists.txt index b4e294304f115..f0d2b579c8df6 100644 --- a/clang/lib/Index/CMakeLists.txt +++ b/clang/lib/Index/CMakeLists.txt @@ -23,6 +23,7 @@ add_clang_library(clangIndex clangFormat
clangFrontend clangLex + clangSema clangSerialization clangToolingCore diff --git a/clang/lib/Index/IndexBody.cpp b/clang/lib/Index/IndexBody.cpp index f1dc4d5831ce7..5e69987820730 100644 --- a/clang/lib/Index/IndexBody.cpp +++ b/clang/lib/Index/IndexBody.cpp @@ -13,6 +13,7 @@ #include "clang/AST/ExprConcepts.h" #include "clang/AST/RecursiveASTVisitor.h" #include "clang/AST/Type.h" +#include "clang/Sema/HeuristicResolver.h" using namespace clang; using namespace clang::index; @@ -168,51 +169,31 @@ class BodyIndexer : public RecursiveASTVisitor<BodyIndexer> { Parent, ParentDC, Roles, Relations, E); } - bool indexDependentReference( - const Expr *E, const Type *T, const DeclarationNameInfo &NameInfo, - llvm::function_ref<bool(const NamedDecl *)> Filter) { - if (!T) - return true; - const TemplateSpecializationType *TST = - T->getAs<TemplateSpecializationType>(); - if (!TST) - return true; - TemplateName TN = TST->getTemplateName(); - const ClassTemplateDecl *TD = - dyn_cast_or_null<ClassTemplateDecl>(TN.getAsTemplateDecl()); - if (!TD) - return true; - CXXRecordDecl *RD = TD->getTemplatedDecl(); - if (!RD->hasDefinition()) - return true; - RD = RD->getDefinition(); - std::vector<const NamedDecl *> Symbols = - RD->lookupDependentName(NameInfo.getName(), Filter); + bool indexDependentReference(const Expr *E, SourceLocation Loc, + std::vector<const NamedDecl *> TargetSymbols) { // FIXME: Improve overload handling. - if (Symbols.size() != 1) + if (TargetSymbols.size() != 1) return true; - SourceLocation Loc = NameInfo.getLoc(); if (Loc.isInvalid()) Loc = E->getBeginLoc(); SmallVector<SymbolRelation, 4> Relations; SymbolRoleSet Roles = getRolesForRef(E, Relations); - return IndexCtx.handleReference(Symbols[0], Loc, Parent, ParentDC, Roles, - Relations, E); + return IndexCtx.handleReference(TargetSymbols[0], Loc, Parent, ParentDC, + Roles, Relations, E); } bool VisitCXXDependentScopeMemberExpr(CXXDependentScopeMemberExpr *E) { - const DeclarationNameInfo &Info = E->getMemberNameInfo(); - return indexDependentReference( - E, E->getBaseType().getTypePtrOrNull(), Info, - [](const NamedDecl *D) { return D->isCXXInstanceMember(); }); + auto *Resolver = IndexCtx.getResolver(); + assert(Resolver); + return indexDependentReference(E, E->getMemberNameInfo().getLoc(), + Resolver->resolveMemberExpr(E)); } bool VisitDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *E) { - const DeclarationNameInfo &Info = E->getNameInfo(); - const NestedNameSpecifier *NNS = E->getQualifier(); - return indexDependentReference( - E, NNS->getAsType(), Info, - [](const NamedDecl *D) { return !D->isCXXInstanceMember(); }); + auto *Resolver = IndexCtx.getResolver(); + assert(Resolver); + return indexDependentReference(E, E->getNameInfo().getLoc(), + Resolver->resolveDeclRefExpr(E)); } bool VisitDesignatedInitExpr(DesignatedInitExpr *E) { diff --git a/clang/lib/Index/IndexingContext.cpp b/clang/lib/Index/IndexingContext.cpp index 2dd68dfcc5a70..bdd6c5acf1d34 100644 --- a/clang/lib/Index/IndexingContext.cpp +++ b/clang/lib/Index/IndexingContext.cpp @@ -14,6 +14,7 @@ #include "clang/Basic/SourceLocation.h" #include "clang/Basic/SourceManager.h" #include "clang/Index/IndexDataConsumer.h" +#include "clang/Sema/HeuristicResolver.h" using namespace clang; using namespace index; @@ -25,6 +26,17 @@ static bool isGeneratedDecl(const Decl *D) { return false; } +IndexingContext::IndexingContext(IndexingOptions IndexOpts, + IndexDataConsumer &DataConsumer) + : IndexOpts(IndexOpts), DataConsumer(DataConsumer) {} + +IndexingContext::~IndexingContext() = default; + +void IndexingContext::setASTContext(ASTContext &ctx) { + Ctx = &ctx; + Resolver = Ctx ?
std::make_unique<HeuristicResolver>(*Ctx) : nullptr; +} + bool IndexingContext::shouldIndex(const Decl *D) { return !isGeneratedDecl(D); } diff --git a/clang/lib/Index/IndexingContext.h b/clang/lib/Index/IndexingContext.h index 3020b33bea385..01bfcb9d578bc 100644 --- a/clang/lib/Index/IndexingContext.h +++ b/clang/lib/Index/IndexingContext.h @@ -21,6 +21,7 @@ namespace clang { class Decl; class DeclGroupRef; class ImportDecl; + class HeuristicResolver; class TagDecl; class TypeSourceInfo; class NamedDecl; @@ -39,15 +40,18 @@ class IndexingContext { IndexingOptions IndexOpts; IndexDataConsumer &DataConsumer; ASTContext *Ctx = nullptr; + std::unique_ptr<HeuristicResolver> Resolver; public: - IndexingContext(IndexingOptions IndexOpts, IndexDataConsumer &DataConsumer) - : IndexOpts(IndexOpts), DataConsumer(DataConsumer) {} + IndexingContext(IndexingOptions IndexOpts, IndexDataConsumer &DataConsumer); + ~IndexingContext(); const IndexingOptions &getIndexOpts() const { return IndexOpts; } IndexDataConsumer &getDataConsumer() { return DataConsumer; } - void setASTContext(ASTContext &ctx) { Ctx = &ctx; } + void setASTContext(ASTContext &ctx); + + HeuristicResolver *getResolver() const { return Resolver.get(); } bool shouldIndex(const Decl *D); diff --git a/clang/lib/Sema/AnalysisBasedWarnings.cpp b/clang/lib/Sema/AnalysisBasedWarnings.cpp index 589869d018657..ce7d9be8d2faa 100644 --- a/clang/lib/Sema/AnalysisBasedWarnings.cpp +++ b/clang/lib/Sema/AnalysisBasedWarnings.cpp @@ -697,10 +697,12 @@ static void CheckFallThroughForBody(Sema &S, const Decl *D, const Stmt *Body, return; SourceLocation LBrace = Body->getBeginLoc(), RBrace = Body->getEndLoc(); auto EmitDiag = [&](SourceLocation Loc, unsigned DiagID) { - if (IsCoroutine) - S.Diag(Loc, DiagID) << FSI->CoroutinePromise->getType(); - else + if (IsCoroutine) { + if (DiagID != 0) + S.Diag(Loc, DiagID) << FSI->CoroutinePromise->getType(); + } else { S.Diag(Loc, DiagID); + } }; // cpu_dispatch functions permit empty function bodies for ICC compatibility. diff --git a/clang/lib/Sema/SemaCoroutine.cpp b/clang/lib/Sema/SemaCoroutine.cpp index 3a22097152df5..0e4f3b20c78cd 100644 --- a/clang/lib/Sema/SemaCoroutine.cpp +++ b/clang/lib/Sema/SemaCoroutine.cpp @@ -1176,6 +1176,10 @@ void Sema::CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body) { for (AddrLabelExpr *ALE : Fn->AddrLabels) Diag(ALE->getBeginLoc(), diag::err_coro_invalid_addr_of_label); + // Coroutines always return a handle, so they can't be [[noreturn]]. + if (FD->isNoReturn()) + Diag(FD->getLocation(), diag::warn_noreturn_coroutine) << FD; + CoroutineStmtBuilder Builder(*this, *FD, *Fn, Body); if (Builder.isInvalid() || !Builder.buildStatements()) return FD->setInvalidDecl(); diff --git a/clang/lib/Sema/SemaDeclCXX.cpp b/clang/lib/Sema/SemaDeclCXX.cpp index 0cf02fe6407c2..664d48ccbc382 100644 --- a/clang/lib/Sema/SemaDeclCXX.cpp +++ b/clang/lib/Sema/SemaDeclCXX.cpp @@ -980,24 +980,24 @@ static bool CheckBindingsCount(Sema &S, DecompositionDecl *DD, if (IsValid && HasPack) { // Create the pack expr and assign it to the binding.
unsigned PackSize = MemberCount - Bindings.size() + 1; - QualType PackType = S.Context.getPackExpansionType( - S.Context.DependentTy, std::nullopt, /*ExpectsPackInType=*/false); - BindingDecl *BD = (*BindingWithPackItr); - auto *RP = ResolvedUnexpandedPackExpr::Create(S.Context, DD->getBeginLoc(), - DecompType, PackSize); - BD->setDecomposedDecl(DD); - BD->setBinding(PackType, RP); BindingDecl *BPack = *BindingWithPackItr; + BPack->setDecomposedDecl(DD); + SmallVector<BindingDecl *, 8> NestedBDs(PackSize); // Create the nested BindingDecls. - for (Expr *&E : RP->getExprs()) { - auto *NestedBD = BindingDecl::Create(S.Context, BPack->getDeclContext(), - BPack->getLocation(), - BPack->getIdentifier(), QualType()); + for (unsigned I = 0; I < PackSize; ++I) { + BindingDecl *NestedBD = BindingDecl::Create( + S.Context, BPack->getDeclContext(), BPack->getLocation(), + BPack->getIdentifier(), QualType()); NestedBD->setDecomposedDecl(DD); - E = S.BuildDeclRefExpr(NestedBD, S.Context.DependentTy, VK_LValue, - BPack->getLocation()); + NestedBDs[I] = NestedBD; } + + QualType PackType = S.Context.getPackExpansionType( + S.Context.DependentTy, PackSize, /*ExpectsPackInType=*/false); + auto *PackExpr = FunctionParmPackExpr::Create( + S.Context, PackType, BPack, BPack->getBeginLoc(), NestedBDs); + BPack->setBinding(PackType, PackExpr); } if (IsValid) diff --git a/clang/lib/Sema/SemaExceptionSpec.cpp b/clang/lib/Sema/SemaExceptionSpec.cpp index 8c8ba1da88ebf..a8eb24133a76d 100644 --- a/clang/lib/Sema/SemaExceptionSpec.cpp +++ b/clang/lib/Sema/SemaExceptionSpec.cpp @@ -1286,7 +1286,6 @@ CanThrowResult Sema::canThrow(const Stmt *S) { case Expr::ConvertVectorExprClass: case Expr::VAArgExprClass: case Expr::CXXParenListInitExprClass: - case Expr::ResolvedUnexpandedPackExprClass: return canSubStmtsThrow(*this, S); case Expr::CompoundLiteralExprClass: diff --git a/clang/lib/Sema/SemaExpr.cpp b/clang/lib/Sema/SemaExpr.cpp index 5817632b61dbd..fad15bf95c415 100644 --- a/clang/lib/Sema/SemaExpr.cpp +++ b/clang/lib/Sema/SemaExpr.cpp @@ -15949,7 +15949,7 @@ ExprResult Sema::ActOnStmtExprResult(ExprResult ER) { // FIXME: Provide a better location for the initialization. return PerformCopyInitialization( InitializedEntity::InitializeStmtExprResult( - E->getBeginLoc(), E->getType().getUnqualifiedType()), + E->getBeginLoc(), E->getType().getAtomicUnqualifiedType()), SourceLocation(), E); } @@ -19430,7 +19430,7 @@ static ExprResult rebuildPotentialResultsAsNonOdrUsed(Sema &S, Expr *E, auto *FPPE = cast<FunctionParmPackExpr>(E); // If any of the declarations in the pack is odr-used, then the expression // as a whole constitutes an odr-use.
- for (VarDecl *D : *FPPE) + for (ValueDecl *D : *FPPE) if (IsPotentialResultOdrUsed(D)) return ExprEmpty(); @@ -19705,7 +19705,7 @@ void Sema::CleanupVarDeclMarking() { MarkVarDeclODRUsed(cast<VarDecl>(ME->getMemberDecl()), ME->getMemberLoc(), *this); } else if (auto *FP = dyn_cast<FunctionParmPackExpr>(E)) { - for (VarDecl *VD : *FP) + for (ValueDecl *VD : *FP) MarkVarDeclODRUsed(VD, FP->getParameterPackLocation(), *this); } else { llvm_unreachable("Unexpected expression"); } @@ -20081,7 +20081,7 @@ void Sema::MarkMemberReferenced(MemberExpr *E) { } void Sema::MarkFunctionParmPackReferenced(FunctionParmPackExpr *E) { - for (VarDecl *VD : *E) + for (ValueDecl *VD : *E) MarkExprReferenced(*this, E->getParameterPackLocation(), VD, E, true, RefsMinusAssignments); } diff --git a/clang/lib/Sema/SemaStmt.cpp b/clang/lib/Sema/SemaStmt.cpp index ec38674a2c3e7..0394edb7889ba 100644 --- a/clang/lib/Sema/SemaStmt.cpp +++ b/clang/lib/Sema/SemaStmt.cpp @@ -3910,7 +3910,7 @@ StmtResult Sema::BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, FnRetType = FD->getReturnType(); if (FD->hasAttrs()) Attrs = &FD->getAttrs(); - if (FD->isNoReturn()) + if (FD->isNoReturn() && !getCurFunction()->isCoroutine()) Diag(ReturnLoc, diag::warn_noreturn_function_has_return_expr) << FD; if (FD->isMain() && RetValExp) if (isa<CXXBoolLiteralExpr>(RetValExp)) diff --git a/clang/lib/Sema/SemaTemplateInstantiate.cpp b/clang/lib/Sema/SemaTemplateInstantiate.cpp index d1a45af6ca58f..121da4916ed43 100644 --- a/clang/lib/Sema/SemaTemplateInstantiate.cpp +++ b/clang/lib/Sema/SemaTemplateInstantiate.cpp @@ -1585,20 +1585,16 @@ namespace { SubstNonTypeTemplateParmExpr *E); /// Rebuild a DeclRefExpr for a VarDecl reference. - ExprResult RebuildVarDeclRefExpr(VarDecl *PD, SourceLocation Loc); + ExprResult RebuildVarDeclRefExpr(ValueDecl *PD, SourceLocation Loc); /// Transform a reference to a function or init-capture parameter pack. - ExprResult TransformFunctionParmPackRefExpr(DeclRefExpr *E, VarDecl *PD); + ExprResult TransformFunctionParmPackRefExpr(DeclRefExpr *E, ValueDecl *PD); /// Transform a FunctionParmPackExpr which was built when we couldn't /// expand a function parameter pack reference which refers to an expanded /// pack. ExprResult TransformFunctionParmPackExpr(FunctionParmPackExpr *E); - // Transform a ResolvedUnexpandedPackExpr - ExprResult - TransformResolvedUnexpandedPackExpr(ResolvedUnexpandedPackExpr *E); - QualType TransformFunctionProtoType(TypeLocBuilder &TLB, FunctionProtoTypeLoc TL) { // Call the base version; it will forward to our overridden version below. @@ -2392,7 +2388,7 @@ TemplateInstantiator::TransformSubstNonTypeTemplateParmExpr( SugaredConverted, E->getPackIndex()); } -ExprResult TemplateInstantiator::RebuildVarDeclRefExpr(VarDecl *PD, +ExprResult TemplateInstantiator::RebuildVarDeclRefExpr(ValueDecl *PD, SourceLocation Loc) { DeclarationNameInfo NameInfo(PD->getDeclName(), Loc); return getSema().BuildDeclarationNameExpr(CXXScopeSpec(), NameInfo, PD); } @@ -2402,8 +2398,8 @@ ExprResult TemplateInstantiator::TransformFunctionParmPackExpr(FunctionParmPackExpr *E) { if (getSema().ArgumentPackSubstitutionIndex != -1) { // We can expand this parameter pack now.
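An aside before the hunk continues: when ArgumentPackSubstitutionIndex selects a concrete element, the stored pack collapses to a single declaration reference. Roughly the situation in source terms (a hedged sketch; 'each' is a hypothetical name):

template <typename... Ts>
void each(Ts... vals) {
  // During instantiation the fold expansion visits 'vals' once per element;
  // each visit rebuilds a reference to that element's instantiated decl.
  ((void)vals, ...);
}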
- VarDecl *D = E->getExpansion(getSema().ArgumentPackSubstitutionIndex); - VarDecl *VD = cast_or_null<VarDecl>(TransformDecl(E->getExprLoc(), D)); + ValueDecl *D = E->getExpansion(getSema().ArgumentPackSubstitutionIndex); + ValueDecl *VD = cast_or_null<ValueDecl>(TransformDecl(E->getExprLoc(), D)); if (!VD) return ExprError(); return RebuildVarDeclRefExpr(VD, E->getExprLoc()); @@ -2415,11 +2411,11 @@ TemplateInstantiator::TransformFunctionParmPackExpr(FunctionParmPackExpr *E) { // Transform each of the parameter expansions into the corresponding // parameters in the instantiation of the function decl. - SmallVector<VarDecl *> Vars; + SmallVector<ValueDecl *> Vars; Vars.reserve(E->getNumExpansions()); for (FunctionParmPackExpr::iterator I = E->begin(), End = E->end(); I != End; ++I) { - VarDecl *D = cast_or_null<VarDecl>(TransformDecl(E->getExprLoc(), *I)); + ValueDecl *D = cast_or_null<ValueDecl>(TransformDecl(E->getExprLoc(), *I)); if (!D) return ExprError(); Vars.push_back(D); } @@ -2434,7 +2430,7 @@ TemplateInstantiator::TransformFunctionParmPackExpr(FunctionParmPackExpr *E) { ExprResult TemplateInstantiator::TransformFunctionParmPackRefExpr(DeclRefExpr *E, - VarDecl *PD) { + ValueDecl *PD) { typedef LocalInstantiationScope::DeclArgumentPack DeclArgumentPack; llvm::PointerUnion<Decl *, DeclArgumentPack *> *Found = getSema().CurrentInstantiationScope->findInstantiationOf(PD); @@ -2460,7 +2456,8 @@ TemplateInstantiator::TransformFunctionParmPackRefExpr(DeclRefExpr *E, } // We have either an unexpanded pack or a specific expansion. - return RebuildVarDeclRefExpr(cast<VarDecl>(TransformedDecl), E->getExprLoc()); + return RebuildVarDeclRefExpr(cast<ValueDecl>(TransformedDecl), + E->getExprLoc()); } ExprResult @@ -2482,15 +2479,6 @@ TemplateInstantiator::TransformDeclRefExpr(DeclRefExpr *E) { if (PD->isParameterPack()) return TransformFunctionParmPackRefExpr(E, PD); - if (BindingDecl *BD = dyn_cast<BindingDecl>(D); BD && BD->isParameterPack()) { - BD = cast_or_null<BindingDecl>(TransformDecl(BD->getLocation(), BD)); - if (!BD) - return ExprError(); - if (auto *RP = - dyn_cast_if_present<ResolvedUnexpandedPackExpr>(BD->getBinding())) - return TransformResolvedUnexpandedPackExpr(RP); - } - return inherited::TransformDeclRefExpr(E); } @@ -2651,19 +2639,6 @@ TemplateInstantiator::TransformTemplateTypeParmType(TypeLocBuilder &TLB, return Result; } -ExprResult TemplateInstantiator::TransformResolvedUnexpandedPackExpr( - ResolvedUnexpandedPackExpr *E) { - if (getSema().ArgumentPackSubstitutionIndex != -1) { - assert(static_cast<unsigned>(getSema().ArgumentPackSubstitutionIndex) < - E->getNumExprs() && - "ArgumentPackSubstitutionIndex is out of range"); - return TransformExpr( - E->getExpansion(getSema().ArgumentPackSubstitutionIndex)); - } - - return inherited::TransformResolvedUnexpandedPackExpr(E); - } - QualType TemplateInstantiator::TransformSubstTemplateTypeParmPackType( TypeLocBuilder &TLB, SubstTemplateTypeParmPackTypeLoc TL, bool SuppressObjCLifetime) { @@ -4680,7 +4655,7 @@ void LocalInstantiationScope::InstantiatedLocal(const Decl *D, Decl *Inst) { #endif Stored = Inst; } else if (DeclArgumentPack *Pack = dyn_cast<DeclArgumentPack *>(Stored)) { - Pack->push_back(cast<VarDecl>(Inst)); + Pack->push_back(cast<ValueDecl>(Inst)); } else { assert(cast<Decl *>(Stored) == Inst && "Already instantiated this local"); } diff --git a/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp b/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp index 1f42f9500959e..1cdf80898bfca 100644 --- a/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp +++ b/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp @@ -1179,13 +1179,13 @@ Decl *TemplateDeclInstantiator::VisitDecompositionDecl(DecompositionDecl *D) { // Transform the bindings first.
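For orientation before the VisitDecompositionDecl hunk continues, this is the shape of code whose instantiation it handles (illustrative sketch; S3, h, and call are hypothetical names):

struct S3 { int a, b, c; };

template <typename T>
void h(T t) {
  // 'tail' plays the role of the binding pack (OldBindingPack in the hunk).
  auto [first, ...tail] = t;
  (void)sizeof...(tail);
}

// Instantiating h<S3> creates concrete nested BindingDecls for 'tail' and
// maps the template's pack decls onto them via InstantiatedLocal.
void call() { h(S3{1, 2, 3}); }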
// The transformed DD will have all of the concrete BindingDecls. SmallVector<BindingDecl *> NewBindings; - ResolvedUnexpandedPackExpr *OldResolvedPack = nullptr; + BindingDecl *OldBindingPack = nullptr; for (auto *OldBD : D->bindings()) { Expr *BindingExpr = OldBD->getBinding(); - if (auto *RP = - dyn_cast_if_present<ResolvedUnexpandedPackExpr>(BindingExpr)) { - assert(!OldResolvedPack && "no more than one pack is allowed"); - OldResolvedPack = RP; + if (isa_and_present<FunctionParmPackExpr>(BindingExpr)) { + // We have a resolved pack. + assert(!OldBindingPack && "no more than one pack is allowed"); + OldBindingPack = OldBD; } NewBindings.push_back(cast<BindingDecl>(VisitBindingDecl(OldBD))); } @@ -1198,25 +1198,20 @@ Decl *TemplateDeclInstantiator::VisitDecompositionDecl(DecompositionDecl *D) { for (auto *NewBD : NewBindings) NewBD->setInvalidDecl(); - if (OldResolvedPack) { - // Mark the holding vars (if any) in the pack as instantiated since - // they are created implicitly. + if (OldBindingPack) { + // Mark the bindings in the pack as instantiated. auto Bindings = NewDD->bindings(); - auto BPack = llvm::find_if( + BindingDecl *NewBindingPack = *llvm::find_if( Bindings, [](BindingDecl *D) -> bool { return D->isParameterPack(); }); - auto *NewResolvedPack = - cast<ResolvedUnexpandedPackExpr>((*BPack)->getBinding()); - auto OldExprs = OldResolvedPack->getExprs(); - auto NewExprs = NewResolvedPack->getExprs(); - assert(OldExprs.size() == NewExprs.size()); - for (unsigned I = 0; I < OldResolvedPack->getNumExprs(); I++) { - DeclRefExpr *OldDRE = cast<DeclRefExpr>(OldExprs[I]); - BindingDecl *OldNestedBD = cast<BindingDecl>(OldDRE->getDecl()); - DeclRefExpr *NewDRE = cast<DeclRefExpr>(NewExprs[I]); - BindingDecl *NewNestedBD = cast<BindingDecl>(NewDRE->getDecl()); - SemaRef.CurrentInstantiationScope->InstantiatedLocal(OldNestedBD, - NewNestedBD); - } + assert(NewBindingPack != nullptr && "new bindings should also have a pack"); + llvm::ArrayRef<BindingDecl *> OldDecls = + OldBindingPack->getBindingPackDecls(); + llvm::ArrayRef<BindingDecl *> NewDecls = + NewBindingPack->getBindingPackDecls(); + assert(OldDecls.size() == NewDecls.size()); + for (unsigned I = 0; I < OldDecls.size(); I++) + SemaRef.CurrentInstantiationScope->InstantiatedLocal(OldDecls[I], + NewDecls[I]); } return NewDD; @@ -6280,9 +6275,7 @@ NamedDecl *Sema::FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, if (auto *BD = dyn_cast<BindingDecl>(FD); BD && BD->isParameterPack() && ArgumentPackSubstitutionIndex != -1) { - auto *DRE = cast<DeclRefExpr>( - BD->getBindingPackExprs()[ArgumentPackSubstitutionIndex]); - return cast<BindingDecl>(DRE->getDecl()); + return BD->getBindingPackDecls()[ArgumentPackSubstitutionIndex]; } return cast<NamedDecl>(FD); } diff --git a/clang/lib/Sema/SemaTemplateVariadic.cpp b/clang/lib/Sema/SemaTemplateVariadic.cpp index 3c56794722dcc..fad00f7648848 100644 --- a/clang/lib/Sema/SemaTemplateVariadic.cpp +++ b/clang/lib/Sema/SemaTemplateVariadic.cpp @@ -50,13 +50,8 @@ class CollectUnexpandedParameterPacksVisitor auto *FTD = FD ?
FD->getDescribedFunctionTemplate() : nullptr; if (FTD && FTD->getTemplateParameters()->getDepth() >= DepthLimit) return; - } else if (auto *BD = dyn_cast<BindingDecl>(ND)) { - Expr *E = BD->getBinding(); - if (auto *RP = cast_if_present<ResolvedUnexpandedPackExpr>(E)) { - addUnexpanded(RP); - return; - } - } else if (getDepthAndIndex(ND).first >= DepthLimit) { + } else if (ND->isTemplateParameterPack() && + getDepthAndIndex(ND).first >= DepthLimit) { return; } @@ -69,10 +64,6 @@ class CollectUnexpandedParameterPacksVisitor Unexpanded.push_back({T, Loc}); } - void addUnexpanded(ResolvedUnexpandedPackExpr *E) { - Unexpanded.push_back({E, E->getBeginLoc()}); - } - public: explicit CollectUnexpandedParameterPacksVisitor( SmallVectorImpl<UnexpandedParameterPack> &Unexpanded) @@ -115,12 +106,6 @@ class CollectUnexpandedParameterPacksVisitor return true; } - bool - VisitResolvedUnexpandedPackExpr(ResolvedUnexpandedPackExpr *E) override { - addUnexpanded(E); - return true; - } - /// Record occurrences of template template parameter packs. bool TraverseTemplateName(TemplateName Template) override { if (auto *TTP = dyn_cast_or_null<TemplateTemplateParmDecl>( @@ -782,16 +767,13 @@ bool Sema::CheckParameterPacksForExpansion( unsigned Depth = 0, Index = 0; IdentifierInfo *Name; bool IsVarDeclPack = false; - ResolvedUnexpandedPackExpr *ResolvedPack = nullptr; + FunctionParmPackExpr *BindingPack = nullptr; if (const TemplateTypeParmType *TTP = ParmPack.first.dyn_cast<const TemplateTypeParmType *>()) { Depth = TTP->getDepth(); Index = TTP->getIndex(); Name = TTP->getIdentifier(); - } else if (auto *RP = - ParmPack.first.dyn_cast<ResolvedUnexpandedPackExpr *>()) { - ResolvedPack = RP; } else { NamedDecl *ND = cast<NamedDecl *>(ParmPack.first); if (isa<VarDecl>(ND)) IsVarDeclPack = true; else if (isa<BindingDecl>(ND)) { // Find the instantiated binding. llvm::PointerUnion<Decl *, LocalInstantiationScope::DeclArgumentPack *> *Instantiation = CurrentInstantiationScope->findInstantiationOf(ND); Decl *B = cast<Decl *>(*Instantiation); Expr *BindingExpr = cast<BindingDecl>(B)->getBinding(); - ResolvedPack = cast_if_present<ResolvedUnexpandedPackExpr>(BindingExpr); - if (!ResolvedPack) { + BindingPack = cast_if_present<FunctionParmPackExpr>(BindingExpr); + if (!BindingPack) { ShouldExpand = false; continue; } @@ -829,8 +811,8 @@ bool Sema::CheckParameterPacksForExpansion( ShouldExpand = false; continue; } - } else if (ResolvedPack) { - NewPackSize = ResolvedPack->getNumExprs(); + } else if (BindingPack) { + NewPackSize = BindingPack->getNumExpansions(); } else { // If we don't have a template argument at this depth/index, then we // cannot expand the pack expansion. Make a note of this, but we still @@ -867,7 +849,7 @@ bool Sema::CheckParameterPacksForExpansion( // Template argument deduction can extend the sequence of template // arguments corresponding to a template parameter pack, even when the // sequence contains explicitly specified template arguments.
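A short example of the situation the comment above describes, where deduction extends an explicitly specified pack (editorial sketch; k and use are placeholder names):

template <typename... Ts> void k(Ts...);

void use() {
  // Ts is explicitly given one element (<int>) and deduction extends it to
  // <int, double> from the arguments; this is the partially substituted
  // pack case guarded in the hunk below.
  k<int>(1, 2.0);
}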
- if (!IsVarDeclPack && !ResolvedPack && CurrentInstantiationScope) { + if (!IsVarDeclPack && CurrentInstantiationScope) { if (NamedDecl *PartialPack = CurrentInstantiationScope->getPartiallySubstitutedPack()) { unsigned PartialDepth, PartialIndex; @@ -973,12 +955,6 @@ std::optional<unsigned> Sema::getNumArgumentsInExpansionFromUnexpanded( Unexpanded[I].first.dyn_cast<const TemplateTypeParmType *>()) { Depth = TTP->getDepth(); Index = TTP->getIndex(); - } else if (auto *PE = Unexpanded[I] - .first.dyn_cast<ResolvedUnexpandedPackExpr *>()) { - unsigned Size = PE->getNumExprs(); - assert((!Result || *Result == Size) && "inconsistent pack sizes"); - Result = Size; - continue; } else { NamedDecl *ND = cast<NamedDecl *>(Unexpanded[I].first); if (isa<VarDecl>(ND)) { @@ -1207,12 +1183,8 @@ ExprResult Sema::ActOnSizeofParameterPackExpr(Scope *S, MarkAnyDeclReferenced(OpLoc, ParameterPack, true); - std::optional<unsigned> Length; - if (auto *RP = ResolvedUnexpandedPackExpr::getFromDecl(ParameterPack)) - Length = RP->getNumExprs(); - return SizeOfPackExpr::Create(Context, OpLoc, ParameterPack, NameLoc, - RParenLoc, Length); + return SizeOfPackExpr::Create(Context, OpLoc, ParameterPack, NameLoc, + RParenLoc); } static bool isParameterPack(Expr *PackExpression) { @@ -1360,7 +1332,7 @@ std::optional<unsigned> Sema::getFullyPackExpandedSize(TemplateArgument Arg) { dyn_cast<SubstNonTypeTemplateParmPackExpr>(Arg.getAsExpr())) Pack = Subst->getArgumentPack(); else if (auto *Subst = dyn_cast<FunctionParmPackExpr>(Arg.getAsExpr())) { - for (VarDecl *PD : *Subst) + for (ValueDecl *PD : *Subst) if (PD->isParameterPack()) return std::nullopt; return Subst->getNumExpansions(); diff --git a/clang/lib/Sema/TreeTransform.h b/clang/lib/Sema/TreeTransform.h index 73e979927b4f3..05cac8db3c42c 100644 --- a/clang/lib/Sema/TreeTransform.h +++ b/clang/lib/Sema/TreeTransform.h @@ -3680,13 +3680,6 @@ class TreeTransform { FullySubstituted); } - ExprResult RebuildResolvedUnexpandedPackExpr(SourceLocation BeginLoc, - QualType T, - ArrayRef<Expr *> Exprs) { - return ResolvedUnexpandedPackExpr::Create(SemaRef.Context, BeginLoc, T, - Exprs); - } - /// Build a new expression representing a call to a source location /// builtin. /// @@ -16183,24 +16176,6 @@ TreeTransform<Derived>::TransformFunctionParmPackExpr(FunctionParmPackExpr *E) { return E; } -template<typename Derived> -ExprResult TreeTransform<Derived>::TransformResolvedUnexpandedPackExpr( - ResolvedUnexpandedPackExpr *E) { - bool ArgumentChanged = false; - SmallVector<Expr *> NewExprs; - if (TransformExprs(E->getExprs().begin(), E->getNumExprs(), - /*IsCall=*/false, NewExprs, &ArgumentChanged)) - return ExprError(); - - if (!AlwaysRebuild() && !ArgumentChanged) - return E; - - // NOTE: The type is just a superficial PackExpansionType - // that needs no substitution.
- return RebuildResolvedUnexpandedPackExpr(E->getBeginLoc(), E->getType(), - NewExprs); -} - template<typename Derived> ExprResult TreeTransform<Derived>::TransformMaterializeTemporaryExpr( diff --git a/clang/lib/Serialization/ASTReaderStmt.cpp b/clang/lib/Serialization/ASTReaderStmt.cpp index a89eee601e437..fba54023a6bb2 100644 --- a/clang/lib/Serialization/ASTReaderStmt.cpp +++ b/clang/lib/Serialization/ASTReaderStmt.cpp @@ -2208,16 +2208,6 @@ void ASTStmtReader::VisitPackIndexingExpr(PackIndexingExpr *E) { Exprs[I] = Record.readExpr(); } -void ASTStmtReader::VisitResolvedUnexpandedPackExpr( - ResolvedUnexpandedPackExpr *E) { - VisitExpr(E); - E->NumExprs = Record.readInt(); - E->BeginLoc = readSourceLocation(); - auto **Exprs = E->getTrailingObjects<Expr *>(); - for (unsigned I = 0; I < E->NumExprs; ++I) - Exprs[I] = Record.readExpr(); -} - void ASTStmtReader::VisitSubstNonTypeTemplateParmExpr( SubstNonTypeTemplateParmExpr *E) { VisitExpr(E); @@ -2249,11 +2239,11 @@ void ASTStmtReader::VisitSubstNonTypeTemplateParmPackExpr( void ASTStmtReader::VisitFunctionParmPackExpr(FunctionParmPackExpr *E) { VisitExpr(E); E->NumParameters = Record.readInt(); - E->ParamPack = readDeclAs<VarDecl>(); + E->ParamPack = readDeclAs<ValueDecl>(); E->NameLoc = readSourceLocation(); - auto **Parms = E->getTrailingObjects<VarDecl *>(); + auto **Parms = E->getTrailingObjects<ValueDecl *>(); for (unsigned i = 0, n = E->NumParameters; i != n; ++i) - Parms[i] = readDeclAs<VarDecl>(); + Parms[i] = readDeclAs<ValueDecl>(); } void ASTStmtReader::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E) { @@ -4321,12 +4311,6 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) { /*TransformedExprs=*/Record[ASTStmtReader::NumExprFields]); break; - case EXPR_RESOLVED_UNEXPANDED_PACK: - S = ResolvedUnexpandedPackExpr::CreateDeserialized( - Context, - /*NumExprs=*/Record[ASTStmtReader::NumExprFields]); - break; - case EXPR_SUBST_NON_TYPE_TEMPLATE_PARM: S = new (Context) SubstNonTypeTemplateParmExpr(Empty); break; diff --git a/clang/lib/Serialization/ASTWriter.cpp b/clang/lib/Serialization/ASTWriter.cpp index 64791300fe722..79b777cddd0b0 100644 --- a/clang/lib/Serialization/ASTWriter.cpp +++ b/clang/lib/Serialization/ASTWriter.cpp @@ -874,7 +874,6 @@ static void AddStmtsExprs(llvm::BitstreamWriter &Stream, RECORD(EXPR_PACK_EXPANSION); RECORD(EXPR_SIZEOF_PACK); RECORD(EXPR_PACK_INDEXING); - RECORD(EXPR_RESOLVED_UNEXPANDED_PACK); RECORD(EXPR_SUBST_NON_TYPE_TEMPLATE_PARM); RECORD(EXPR_SUBST_NON_TYPE_TEMPLATE_PARM_PACK); RECORD(EXPR_FUNCTION_PARM_PACK); diff --git a/clang/lib/Serialization/ASTWriterStmt.cpp b/clang/lib/Serialization/ASTWriterStmt.cpp index 6a779f1618287..2687231d7820f 100644 --- a/clang/lib/Serialization/ASTWriterStmt.cpp +++ b/clang/lib/Serialization/ASTWriterStmt.cpp @@ -2210,16 +2210,6 @@ void ASTStmtWriter::VisitPackIndexingExpr(PackIndexingExpr *E) { Code = serialization::EXPR_PACK_INDEXING; } -void ASTStmtWriter::VisitResolvedUnexpandedPackExpr( - ResolvedUnexpandedPackExpr *E) { - VisitExpr(E); - Record.push_back(E->getNumExprs()); - Record.AddSourceLocation(E->getBeginLoc()); - for (Expr *Sub : E->getExprs()) - Record.AddStmt(Sub); - Code = serialization::EXPR_RESOLVED_UNEXPANDED_PACK; -} - void ASTStmtWriter::VisitSubstNonTypeTemplateParmExpr( SubstNonTypeTemplateParmExpr *E) { VisitExpr(E); diff --git a/clang/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp index f56e9192d1d66..954b4763034e7 100644 --- a/clang/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp +++ b/clang/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp @@ -34,24 +34,37
@@ using namespace taint; using llvm::formatv; namespace { -/// If `E` is a "clean" array subscript expression, return the type of the -/// accessed element. If the base of the subscript expression is modified by -/// pointer arithmetic (and not the beginning of a "full" memory region), this -/// always returns nullopt because that's the right (or the least bad) thing to -/// do for the diagnostic output that's relying on this. -static std::optional<QualType> determineElementType(const Expr *E, - const CheckerContext &C) { +/// If `E` is an array subscript expression with a base that is "clean" (= not +/// modified by pointer arithmetic = the beginning of a memory region), return +/// it as a pointer to ArraySubscriptExpr; otherwise return nullptr. +/// This helper function is used by two separate heuristics that are only valid +/// in these "clean" cases. +static const ArraySubscriptExpr * +getAsCleanArraySubscriptExpr(const Expr *E, const CheckerContext &C) { const auto *ASE = dyn_cast<ArraySubscriptExpr>(E); if (!ASE) - return std::nullopt; + return nullptr; const MemRegion *SubscriptBaseReg = C.getSVal(ASE->getBase()).getAsRegion(); if (!SubscriptBaseReg) - return std::nullopt; + return nullptr; // The base of the subscript expression is affected by pointer arithmetics, - // so we want to report byte offsets instead of indices. + // so we want to report byte offsets instead of indices and we don't want to + // activate the "index is unsigned -> cannot be negative" shortcut. if (isa<ElementRegion>(SubscriptBaseReg->StripCasts())) + return nullptr; + + return ASE; +} + +/// If `E` is a "clean" array subscript expression, return the type of the +/// accessed element; otherwise return std::nullopt because that's the best (or +/// least bad) option for the diagnostic generation that relies on this. +static std::optional<QualType> determineElementType(const Expr *E, + const CheckerContext &C) { + const auto *ASE = getAsCleanArraySubscriptExpr(E, C); + if (!ASE) return std::nullopt; return ASE->getType(); @@ -140,7 +153,9 @@ class ArrayBoundChecker : public Checker, ProgramStateRef ErrorState, NonLoc Val, bool MarkTaint); - static bool isFromCtypeMacro(const Stmt *S, ASTContext &AC); + static bool isFromCtypeMacro(const Expr *E, ASTContext &AC); + + static bool isOffsetObviouslyNonnegative(const Expr *E, CheckerContext &C); static bool isIdiomaticPastTheEndPtr(const Expr *E, ProgramStateRef State, NonLoc Offset, NonLoc Limit, @@ -587,20 +602,48 @@ void ArrayBoundChecker::performCheck(const Expr *E, CheckerContext &C) const { State, ByteOffset, SVB.makeZeroArrayIndex(), SVB); if (PrecedesLowerBound) { - // The offset may be invalid (negative)... - if (!WithinLowerBound) { - // ...and it cannot be valid (>= 0), so report an error. - Messages Msgs = getPrecedesMsgs(Reg, ByteOffset); - reportOOB(C, PrecedesLowerBound, Msgs, ByteOffset, std::nullopt); - return; + // The analyzer thinks that the offset may be invalid (negative)... + + if (isOffsetObviouslyNonnegative(E, C)) { + // ...but the offset is obviously non-negative (clear array subscript + // with an unsigned index), so we're in a buggy situation. + + // TODO: Currently the analyzer ignores many casts (e.g. signed -> + // unsigned casts), so it can easily reach states where it will load a + // signed (and negative) value from an unsigned variable. This sanity + // check is a duct tape "solution" that silences most of the ugly false + // positives that are caused by this buggy behavior.
Note that this is + // not a complete solution: this cannot silence reports where pointer + // arithmetic complicates the picture and cannot ensure modeling of the + // "unsigned index is positive with highest bit set" cases which are + // "usurped" by the nonsense "unsigned index is negative" case. + // For more information about this topic, see the umbrella ticket + // https://github.com/llvm/llvm-project/issues/39492 + // TODO: Remove this hack once 'SymbolCast's are modeled properly. + + if (!WithinLowerBound) { + // The state is completely nonsense -- let's just sink it! + C.addSink(); + return; + } + // Otherwise continue on the 'WithinLowerBound' branch where the + // unsigned index _is_ non-negative. Don't mention this assumption as a + // note tag, because it would just confuse the users! + } else { + if (!WithinLowerBound) { + // ...and it cannot be valid (>= 0), so report an error. + Messages Msgs = getPrecedesMsgs(Reg, ByteOffset); + reportOOB(C, PrecedesLowerBound, Msgs, ByteOffset, std::nullopt); + return; + } + // ...but it can be valid as well, so the checker will (optimistically) + // assume that it's valid and mention this in the note tag. + SUR.recordNonNegativeAssumption(); } - // ...but it can be valid as well, so the checker will (optimistically) - // assume that it's valid and mention this in the note tag. - SUR.recordNonNegativeAssumption(); } // Actually update the state. The "if" only fails in the extremely unlikely - // case when compareValueToThreshold returns {nullptr, nullptr} becasue + // case when compareValueToThreshold returns {nullptr, nullptr} because // evalBinOpNN fails to evaluate the less-than operator. if (WithinLowerBound) State = WithinLowerBound; @@ -660,7 +703,7 @@ void ArrayBoundChecker::performCheck(const Expr *E, CheckerContext &C) const { } // Actually update the state. The "if" only fails in the extremely unlikely - // case when compareValueToThreshold returns {nullptr, nullptr} becasue + // case when compareValueToThreshold returns {nullptr, nullptr} because // evalBinOpNN fails to evaluate the less-than operator. 
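An aside before the hunk continues, showing the kind of subscript that now takes the sink path instead of producing a report (editorial sketch; table and get are hypothetical names):

extern int table[10];

int get(unsigned idx) {
  // 'idx' is unsigned, so a "negative index" state can only be a modeling
  // artifact of the ignored casts described above; the checker now sinks
  // that state rather than emitting a spurious underflow report.
  return table[idx];
}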
if (WithinUpperBound) State = WithinUpperBound; @@ -725,8 +768,8 @@ void ArrayBoundChecker::reportOOB(CheckerContext &C, ProgramStateRef ErrorState, C.emitReport(std::move(BR)); } -bool ArrayBoundChecker::isFromCtypeMacro(const Stmt *S, ASTContext &ACtx) { - SourceLocation Loc = S->getBeginLoc(); +bool ArrayBoundChecker::isFromCtypeMacro(const Expr *E, ASTContext &ACtx) { + SourceLocation Loc = E->getBeginLoc(); if (!Loc.isMacroID()) return false; @@ -744,6 +787,14 @@ bool ArrayBoundChecker::isFromCtypeMacro(const Stmt *S, ASTContext &ACtx) { (MacroName == "isupper") || (MacroName == "isxdigit")); } +bool ArrayBoundChecker::isOffsetObviouslyNonnegative(const Expr *E, + CheckerContext &C) { + const ArraySubscriptExpr *ASE = getAsCleanArraySubscriptExpr(E, C); + if (!ASE) + return false; + return ASE->getIdx()->getType()->isUnsignedIntegerOrEnumerationType(); +} + bool ArrayBoundChecker::isInAddressOf(const Stmt *S, ASTContext &ACtx) { ParentMapContext &ParentCtx = ACtx.getParentMapContext(); do { diff --git a/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp b/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp index d93952264a606..c3dcdc985a935 100644 --- a/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp +++ b/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp @@ -1745,7 +1745,6 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred, case Stmt::DependentCoawaitExprClass: case Stmt::CoreturnStmtClass: case Stmt::CoyieldExprClass: - case Stmt::ResolvedUnexpandedPackExprClass: case Stmt::SEHTryStmtClass: case Stmt::SEHExceptStmtClass: case Stmt::SEHLeaveStmtClass: diff --git a/clang/test/AST/ByteCode/records.cpp b/clang/test/AST/ByteCode/records.cpp index 9470e7d8e3dcb..a13f30cd23119 100644 --- a/clang/test/AST/ByteCode/records.cpp +++ b/clang/test/AST/ByteCode/records.cpp @@ -1656,12 +1656,28 @@ namespace ExprWithCleanups { static_assert(F == 1i, ""); } -namespace NullptrUpcast { +namespace NullptrCast { struct A {}; struct B : A { int n; }; + constexpr A *na = nullptr; constexpr B *nb = nullptr; constexpr A &ra = *nb; // both-error {{constant expression}} \ // both-note {{cannot access base class of null pointer}} + constexpr B &rb = (B&)*na; // both-error {{constant expression}} \ + // both-note {{cannot access derived class of null pointer}} + constexpr bool test() { + auto a = (A*)(B*)nullptr; + + return a == nullptr; + } + static_assert(test(), ""); + + constexpr bool test2() { + auto a = (B*)(A*)nullptr; + + return a == nullptr; + } + static_assert(test2(), ""); } namespace NonConst { @@ -1699,3 +1715,22 @@ namespace IgnoredMemberExpr { }; static_assert(B{}.foo() == 0, ""); } + +#if __cplusplus >= 202002L +namespace DeadUpcast { + struct A {}; + struct B : A{}; + constexpr bool foo() { + + B *pb; + { + B b; + pb = &b; + } + A *pa = pb; + + return true; + } + static_assert(foo(), ""); +} +#endif diff --git a/clang/test/AST/HLSL/TypdefArrayParam.hlsl b/clang/test/AST/HLSL/TypdefArrayParam.hlsl index c6ae168f84064..37f7a66de23a1 100644 --- a/clang/test/AST/HLSL/TypdefArrayParam.hlsl +++ b/clang/test/AST/HLSL/TypdefArrayParam.hlsl @@ -55,3 +55,14 @@ void call2() { uint4 C[2] = {A,A}; uint32_t D = Accumulate(C); } + +typedef int Foo[2]; + +// CHECK-LABEL: call3 +// CHECK: ArraySubscriptExpr {{.*}} 'int' lvalue +// CHECK-NEXT: ImplicitCastExpr {{.*}} 'int *' +// CHECK-NEXT: DeclRefExpr {{.*}} 'int[2]' lvalue ParmVar {{.*}} 'F' 'int[2]' +// CHECK-NEXT: IntegerLiteral {{.*}} 'int' 0 +int call3(Foo F) { + return F[0]; +} diff --git a/clang/test/AST/ast-dump-binding-pack.cpp 
b/clang/test/AST/ast-dump-binding-pack.cpp index 81c75a1268730..c4a353ae72a1b 100644 --- a/clang/test/AST/ast-dump-binding-pack.cpp +++ b/clang/test/AST/ast-dump-binding-pack.cpp @@ -22,10 +22,7 @@ void foo() { // CHECK-NEXT: IntegerLiteral {{.*}} 'int' 0 // CHECK-NOT: BindingDecl // CHECK-LABEL: BindingDecl {{.*}} binding_rest -// CHECK-NEXT: ResolvedUnexpandedPackExpr -// CHECK-NEXT: DeclRefExpr {{.*}} lvalue Binding {{.*}} 'binding_rest' -// CHECK-NEXT: DeclRefExpr {{.*}} lvalue Binding {{.*}} 'binding_rest' -// CHECK-NOT: BindingDecl +// CHECK-NEXT: FunctionParmPackExpr // CHECK-LABEL: BindingDecl {{.*}} binding_4 // CHECK-NEXT: ArraySubscriptExpr // CHECK-NEXT: ImplicitCastExpr {{.*}} @@ -47,9 +44,7 @@ void bar() { // CHECK-LABEL: FunctionTemplateDecl {{.*}} bar // CHECK-NOT: BindingDecl // CHECK-LABEL: BindingDecl {{.*}} empty_binding_pack -// CHECK-NEXT: ResolvedUnexpandedPackExpr -// CHECK-NOT: DeclRefExpr {{.*}} 'empty_binding_pack' -// CHECK-NOT: BindingDecl +// CHECK-NEXT: FunctionParmPackExpr // CHECK: DeclStmt struct int_pair { int x; int y; }; @@ -67,8 +62,6 @@ void(*f)() = baz; // CHECK: BindingDecl {{.*}} binding_2 // CHECK-NOT: BindingDecl // CHECK-LABEL: BindingDecl {{.*}} empty_binding_pack -// CHECK-NEXT: ResolvedUnexpandedPackExpr -// CHECK-NOT: DeclRefExpr {{.*}} 'empty_binding_pack' -// CHECK-NOT: BindingDecl +// CHECK-NEXT: FunctionParmPackExpr // CHECK: DeclStmt #endif diff --git a/clang/test/AST/ast-dump-decl.cpp b/clang/test/AST/ast-dump-decl.cpp index e84241cee922f..d79051fb6efaa 100644 --- a/clang/test/AST/ast-dump-decl.cpp +++ b/clang/test/AST/ast-dump-decl.cpp @@ -15,7 +15,9 @@ class testEnumDecl { enum TestEnumDeclFixed : int; }; // CHECK: EnumDecl{{.*}} class TestEnumDeclScoped 'int' +// CHECK-NOT: instantiated_from // CHECK: EnumDecl{{.*}} TestEnumDeclFixed 'int' +// CHECK-NOT: instantiated_from class testFieldDecl { int TestFieldDeclInit = 0; @@ -328,9 +330,9 @@ namespace testClassTemplateDecl { // CHECK-NEXT: | | `-Destructor irrelevant non_trivial user_declared{{$}} // CHECK-NEXT: | |-CXXRecordDecl 0x{{.+}} col:30 implicit referenced class TestClassTemplate{{$}} // CHECK-NEXT: | |-AccessSpecDecl 0x{{.+}} col:3 public{{$}} -// CHECK-NEXT: | |-CXXConstructorDecl 0x{{.+}} col:5 TestClassTemplate 'void ()'{{$}} -// CHECK-NEXT: | |-CXXDestructorDecl 0x{{.+}} col:5 ~TestClassTemplate 'void ()' not_selected{{$}} -// CHECK-NEXT: | |-CXXMethodDecl 0x{{.+}} col:9 j 'int ()'{{$}} +// CHECK-NEXT: | |-CXXConstructorDecl 0x[[#%x,TEMPLATE_CONSTRUCTOR_DECL:]] col:5 TestClassTemplate 'void ()'{{$}} +// CHECK-NEXT: | |-CXXDestructorDecl 0x[[#%x,TEMPLATE_DESTRUCTOR_DECL:]] col:5 ~TestClassTemplate 'void ()' not_selected{{$}} +// CHECK-NEXT: | |-CXXMethodDecl 0x[[#%x,TEMPLATE_METHOD_DECL:]] col:9 j 'int ()'{{$}} // CHECK-NEXT: | `-FieldDecl 0x{{.+}} col:9 i 'int'{{$}} // CHECK-NEXT: |-ClassTemplateSpecializationDecl 0x{{.+}} line:[[@LINE-56]]:30 class TestClassTemplate definition implicit_instantiation{{$}} // CHECK-NEXT: | |-DefinitionData standard_layout has_user_declared_ctor can_const_default_init{{$}} @@ -345,9 +347,9 @@ namespace testClassTemplateDecl { // CHECK-NEXT: | | `-CXXRecord 0x{{.+}} 'A'{{$}} // CHECK-NEXT: | |-CXXRecordDecl 0x{{.+}} col:30 implicit class TestClassTemplate{{$}} // CHECK-NEXT: | |-AccessSpecDecl 0x{{.+}} col:3 public{{$}} -// CHECK-NEXT: | |-CXXConstructorDecl 0x{{.+}} col:5 used TestClassTemplate 'void ()' implicit_instantiation instantiated_from {{0x[^ ]+}}{{$}} -// CHECK-NEXT: | |-CXXDestructorDecl 0x{{.+}} col:5 used ~TestClassTemplate 
'void () noexcept' implicit_instantiation instantiated_from {{0x[^ ]+}}{{$}} -// CHECK-NEXT: | |-CXXMethodDecl 0x{{.+}} col:9 j 'int ()' implicit_instantiation instantiated_from {{0x[^ ]+}}{{$}} +// CHECK-NEXT: | |-CXXConstructorDecl 0x{{.+}} col:5 used TestClassTemplate 'void ()' implicit_instantiation instantiated_from 0x[[#TEMPLATE_CONSTRUCTOR_DECL]]{{$}} +// CHECK-NEXT: | |-CXXDestructorDecl 0x{{.+}} col:5 used ~TestClassTemplate 'void () noexcept' implicit_instantiation instantiated_from 0x[[#TEMPLATE_DESTRUCTOR_DECL]]{{$}} +// CHECK-NEXT: | |-CXXMethodDecl 0x{{.+}} col:9 j 'int ()' implicit_instantiation instantiated_from 0x[[#TEMPLATE_METHOD_DECL]]{{$}} // CHECK-NEXT: | |-FieldDecl 0x{{.+}} col:9 i 'int'{{$}} // CHECK-NEXT: | `-CXXConstructorDecl 0x{{.+}} col:30 implicit constexpr TestClassTemplate 'void (const TestClassTemplate &)' inline default trivial noexcept-unevaluated 0x{{.+}}{{$}} // CHECK-NEXT: | `-ParmVarDecl 0x{{.+}} col:30 'const TestClassTemplate &'{{$}} @@ -487,6 +489,109 @@ namespace testClassTemplateDecl { // CHECK-NEXT: `-CXXRecordDecl 0x{{.+}} col:48 implicit struct TestTemplateTemplateDefaultType{{$}} +namespace testClassTemplateDecl { + template<typename T> struct TestClassTemplateWithScopedMemberEnum { + enum class E1 : T { A, B, C, D }; + enum class E2 : int { A, B, C, D }; + enum class E3 : T; + enum class E4 : int; + }; + + template struct TestClassTemplateWithScopedMemberEnum<unsigned>; + + TestClassTemplateWithScopedMemberEnum<int> TestClassTemplateWithScopedMemberEnumObject; +} + +// CHECK: ClassTemplateDecl 0x{{.+}} <{{.+}}:[[@LINE-12]]:3, line:[[@LINE-7]]:3> line:[[@LINE-12]]:31 TestClassTemplateWithScopedMemberEnum +// CHECK-NEXT: |-TemplateTypeParmDecl 0x{{.+}} col:21 referenced typename depth 0 index 0 T +// CHECK-NEXT: |-CXXRecordDecl 0x{{.+}} line:[[@LINE-14]]:31 struct TestClassTemplateWithScopedMemberEnum definition +// CHECK: | |-EnumDecl 0x[[#%x,SCOPED_MEMBER_ENUM_E1:]] col:16 class E1 'T' +// CHECK-NEXT: | | |-EnumConstantDecl 0x{{.+}} col:25 A 'testClassTemplateDecl::TestClassTemplateWithScopedMemberEnum::E1' +// CHECK-NEXT: | | |-EnumConstantDecl 0x{{.+}} col:28 B 'testClassTemplateDecl::TestClassTemplateWithScopedMemberEnum::E1' +// CHECK-NEXT: | | |-EnumConstantDecl 0x{{.+}} col:31 C 'testClassTemplateDecl::TestClassTemplateWithScopedMemberEnum::E1' +// CHECK-NEXT: | | `-EnumConstantDecl 0x{{.+}} col:34 D 'testClassTemplateDecl::TestClassTemplateWithScopedMemberEnum::E1' +// CHECK-NEXT: | |-EnumDecl 0x[[#%x,SCOPED_MEMBER_ENUM_E2:]] col:16 class E2 'int' +// CHECK-NEXT: | | |-EnumConstantDecl 0x{{.+}} col:27 A 'testClassTemplateDecl::TestClassTemplateWithScopedMemberEnum::E2' +// CHECK-NEXT: | | |-EnumConstantDecl 0x{{.+}} col:30 B 'testClassTemplateDecl::TestClassTemplateWithScopedMemberEnum::E2' +// CHECK-NEXT: | | |-EnumConstantDecl 0x{{.+}} col:33 C 'testClassTemplateDecl::TestClassTemplateWithScopedMemberEnum::E2' +// CHECK-NEXT: | | `-EnumConstantDecl 0x{{.+}} col:36 D 'testClassTemplateDecl::TestClassTemplateWithScopedMemberEnum::E2' +// CHECK-NEXT: | |-EnumDecl 0x[[#%x,SCOPED_MEMBER_ENUM_E3:]] col:16 class E3 'T' +// CHECK-NEXT: | `-EnumDecl 0x[[#%x,SCOPED_MEMBER_ENUM_E4:]] col:16 class E4 'int' +// CHECK-NEXT: |-ClassTemplateSpecialization 0x{{.+}} 'TestClassTemplateWithScopedMemberEnum' +// CHECK-NEXT: `-ClassTemplateSpecializationDecl 0x{{.+}} line:[[@LINE-28]]:31 struct TestClassTemplateWithScopedMemberEnum definition implicit_instantiation +// CHECK: |-TemplateArgument type 'int' +// CHECK-NEXT: | `-BuiltinType 0x{{.+}} 'int' +// CHECK: |-EnumDecl
0x{{.+}} col:16 class E1 'int' instantiated_from 0x[[#SCOPED_MEMBER_ENUM_E1]]{{$}} +// CHECK-NEXT: |-EnumDecl 0x{{.+}} col:16 class E2 'int' instantiated_from 0x[[#SCOPED_MEMBER_ENUM_E2]]{{$}} +// CHECK-NEXT: |-EnumDecl 0x{{.+}} col:16 class E3 'int' instantiated_from 0x[[#SCOPED_MEMBER_ENUM_E3]]{{$}} +// CHECK-NEXT: |-EnumDecl 0x{{.+}} col:16 class E4 'int' instantiated_from 0x[[#SCOPED_MEMBER_ENUM_E4]]{{$}} + +// CHECK: ClassTemplateSpecializationDecl 0x{{.+}} <{{.+}}:[[@LINE-29]]:3, col:65> col:19 struct TestClassTemplateWithScopedMemberEnum definition explicit_instantiation_definition +// CHECK: |-TemplateArgument type 'unsigned int' +// CHECK-NEXT: | `-BuiltinType 0x{{.+}} 'unsigned int' +// CHECK: |-EnumDecl 0x{{.+}} col:16 class E1 'unsigned int' instantiated_from 0x[[#SCOPED_MEMBER_ENUM_E1]]{{$}} +// CHECK-NEXT: | |-EnumConstantDecl 0x{{.+}} col:25 A 'testClassTemplateDecl::TestClassTemplateWithScopedMemberEnum::E1' +// CHECK-NEXT: | |-EnumConstantDecl 0x{{.+}} col:28 B 'testClassTemplateDecl::TestClassTemplateWithScopedMemberEnum::E1' +// CHECK-NEXT: | |-EnumConstantDecl 0x{{.+}} col:31 C 'testClassTemplateDecl::TestClassTemplateWithScopedMemberEnum::E1' +// CHECK-NEXT: | `-EnumConstantDecl 0x{{.+}} col:34 D 'testClassTemplateDecl::TestClassTemplateWithScopedMemberEnum::E1' +// CHECK-NEXT: |-EnumDecl 0x{{.+}} col:16 class E2 'int' instantiated_from 0x[[#SCOPED_MEMBER_ENUM_E2]]{{$}} +// CHECK-NEXT: | |-EnumConstantDecl 0x{{.+}} col:27 A 'testClassTemplateDecl::TestClassTemplateWithScopedMemberEnum::E2' +// CHECK-NEXT: | |-EnumConstantDecl 0x{{.+}} col:30 B 'testClassTemplateDecl::TestClassTemplateWithScopedMemberEnum::E2' +// CHECK-NEXT: | |-EnumConstantDecl 0x{{.+}} col:33 C 'testClassTemplateDecl::TestClassTemplateWithScopedMemberEnum::E2' +// CHECK-NEXT: | `-EnumConstantDecl 0x{{.+}} col:36 D 'testClassTemplateDecl::TestClassTemplateWithScopedMemberEnum::E2' +// CHECK-NEXT: |-EnumDecl 0x{{.+}} col:16 class E3 'unsigned int' instantiated_from 0x[[#SCOPED_MEMBER_ENUM_E3]]{{$}} +// CHECK-NEXT: `-EnumDecl 0x{{.+}} col:16 class E4 'int' instantiated_from 0x[[#SCOPED_MEMBER_ENUM_E4]]{{$}} + + + + +namespace testClassTemplateDecl { + template<typename T> struct TestClassTemplateWithUnscopedMemberEnum { + enum E1 : T { E1_A, E1_B, E1_C, E1_D }; + enum E2 : int { E2_A, E2_B, E2_C, E2_D }; + enum E3 : T; + enum E4 : int; + }; + + template struct TestClassTemplateWithUnscopedMemberEnum<unsigned>; + + TestClassTemplateWithUnscopedMemberEnum<int> TestClassTemplateWithUnscopedMemberEnumObject; +} + +// CHECK: ClassTemplateDecl 0x{{.+}} <{{.+}}:[[@LINE-12]]:3, line:[[@LINE-7]]:3> line:[[@LINE-12]]:31 TestClassTemplateWithUnscopedMemberEnum +// CHECK-NEXT: |-TemplateTypeParmDecl 0x{{.+}} col:21 referenced typename depth 0 index 0 T +// CHECK-NEXT: |-CXXRecordDecl 0x{{.+}} line:[[@LINE-14]]:31 struct TestClassTemplateWithUnscopedMemberEnum definition +// CHECK: | |-EnumDecl 0x[[#%x,UNSCOPED_MEMBER_ENUM_E1:]] col:10 E1 'T' +// CHECK-NEXT: | | |-EnumConstantDecl 0x{{.+}} col:19 E1_A 'testClassTemplateDecl::TestClassTemplateWithUnscopedMemberEnum::E1' +// CHECK-NEXT: | | |-EnumConstantDecl 0x{{.+}} col:25 E1_B 'testClassTemplateDecl::TestClassTemplateWithUnscopedMemberEnum::E1' +// CHECK-NEXT: | | |-EnumConstantDecl 0x{{.+}} col:31 E1_C 'testClassTemplateDecl::TestClassTemplateWithUnscopedMemberEnum::E1' +// CHECK-NEXT: | | `-EnumConstantDecl 0x{{.+}} col:37 E1_D 'testClassTemplateDecl::TestClassTemplateWithUnscopedMemberEnum::E1' +// CHECK-NEXT: | |-EnumDecl 0x[[#%x,UNSCOPED_MEMBER_ENUM_E2:]] col:10 E2 'int' +// CHECK-NEXT:
| | |-EnumConstantDecl 0x{{.+}} col:21 E2_A 'testClassTemplateDecl::TestClassTemplateWithUnscopedMemberEnum::E2' +// CHECK-NEXT: | | |-EnumConstantDecl 0x{{.+}} col:27 E2_B 'testClassTemplateDecl::TestClassTemplateWithUnscopedMemberEnum::E2' +// CHECK-NEXT: | | |-EnumConstantDecl 0x{{.+}} col:33 E2_C 'testClassTemplateDecl::TestClassTemplateWithUnscopedMemberEnum::E2' +// CHECK-NEXT: | | `-EnumConstantDecl 0x{{.+}} col:39 E2_D 'testClassTemplateDecl::TestClassTemplateWithUnscopedMemberEnum::E2' +// CHECK-NEXT: | |-EnumDecl 0x[[#%x,UNSCOPED_MEMBER_ENUM_E3:]] col:10 E3 'T' +// CHECK-NEXT: | `-EnumDecl 0x[[#%x,UNSCOPED_MEMBER_ENUM_E4:]] col:10 E4 'int' +// CHECK-NEXT: `-ClassTemplateSpecialization {{.+}} 'TestClassTemplateWithUnscopedMemberEnum' + +// CHECK: ClassTemplateSpecializationDecl 0x{{.+}} <{{.+}}:[[@LINE-22]]:3, col:67> col:19 struct TestClassTemplateWithUnscopedMemberEnum definition explicit_instantiation_definition +// CHECK: |-TemplateArgument type 'unsigned int' +// CHECK-NEXT: | `-BuiltinType 0x{{.+}} 'unsigned int' +// CHECK: |-EnumDecl 0x{{.+}} col:10 E1 'unsigned int' instantiated_from 0x[[#UNSCOPED_MEMBER_ENUM_E1]]{{$}} +// CHECK-NEXT: | |-EnumConstantDecl 0x{{.+}} col:19 E1_A 'testClassTemplateDecl::TestClassTemplateWithUnscopedMemberEnum::E1' +// CHECK-NEXT: | |-EnumConstantDecl 0x{{.+}} col:25 E1_B 'testClassTemplateDecl::TestClassTemplateWithUnscopedMemberEnum::E1' +// CHECK-NEXT: | |-EnumConstantDecl 0x{{.+}} col:31 E1_C 'testClassTemplateDecl::TestClassTemplateWithUnscopedMemberEnum::E1' +// CHECK-NEXT: | `-EnumConstantDecl 0x{{.+}} col:37 E1_D 'testClassTemplateDecl::TestClassTemplateWithUnscopedMemberEnum::E1' +// CHECK-NEXT: |-EnumDecl 0x{{.+}} col:10 E2 'int' instantiated_from 0x[[#UNSCOPED_MEMBER_ENUM_E2]]{{$}} +// CHECK-NEXT: | |-EnumConstantDecl 0x{{.+}} col:21 E2_A 'testClassTemplateDecl::TestClassTemplateWithUnscopedMemberEnum::E2' +// CHECK-NEXT: | |-EnumConstantDecl 0x{{.+}} col:27 E2_B 'testClassTemplateDecl::TestClassTemplateWithUnscopedMemberEnum::E2' +// CHECK-NEXT: | |-EnumConstantDecl 0x{{.+}} col:33 E2_C 'testClassTemplateDecl::TestClassTemplateWithUnscopedMemberEnum::E2' +// CHECK-NEXT: | `-EnumConstantDecl 0x{{.+}} col:39 E2_D 'testClassTemplateDecl::TestClassTemplateWithUnscopedMemberEnum::E2' +// CHECK-NEXT: |-EnumDecl 0x{{.+}} col:10 E3 'unsigned int' instantiated_from 0x[[#UNSCOPED_MEMBER_ENUM_E3]]{{$}} +// CHECK-NEXT: |-EnumDecl 0x{{.+}} col:10 E4 'int' instantiated_from 0x[[#UNSCOPED_MEMBER_ENUM_E4]]{{$}} + + // PR15220 dump instantiation only once namespace testCanonicalTemplate { class A {}; diff --git a/clang/test/Analysis/out-of-bounds.c b/clang/test/Analysis/out-of-bounds.c index 7a094b8fdc840..7d6cb4ecf1b24 100644 --- a/clang/test/Analysis/out-of-bounds.c +++ b/clang/test/Analysis/out-of-bounds.c @@ -188,29 +188,31 @@ int test_cast_to_unsigned(signed char x) { if (x >= 0) return x; // FIXME: Here the analyzer ignores the signed -> unsigned cast, and manages to - // load a negative value from an unsigned variable. This causes an underflow - // report, which is an ugly false positive. + // load a negative value from an unsigned variable. // The underlying issue is tracked by Github ticket #39492. clang_analyzer_value(y); // expected-warning {{8s:{ [-128, -1] } }} - return table[y]; // expected-warning {{Out of bound access to memory preceding}} + // However, a hack in the ArrayBound checker suppresses the false positive + // underflow report that would be generated here. 
+ return table[y]; // no-warning } int test_cast_to_unsigned_overflow(signed char x) { unsigned char y = x; if (x >= 0) return x; - // A variant of 'test_cast_to_unsigned' where the correct behavior would be - // an overflow report (because the negative values are cast to `unsigned - // char` values that are too large). - // FIXME: See comment in 'test_cast_to_unsigned'. + // FIXME: As in 'test_cast_to_unsigned', the analyzer thinks that this + // unsigned variable contains a negative value. clang_analyzer_value(y); // expected-warning {{8s:{ [-128, -1] } }} - return small_table[y]; // expected-warning {{Out of bound access to memory preceding}} + // FIXME: The following subscript expression should produce an overflow + // report (because negative signed char corresponds to unsigned char >= 128); + // but the hack in ArrayBound just silences reports and cannot "restore" the + // real execution paths. + return small_table[y]; // no-warning } int test_negative_offset_with_unsigned_idx(void) { // An example where the subscript operator uses an unsigned index, but the - // underflow report is still justified. (We should try to keep this if we - // silence false positives like the one in 'test_cast_to_unsigned'.) + // underflow report is still justified. int *p = table - 10; unsigned idx = 2u; return p[idx]; // expected-warning {{Out of bound access to memory preceding}} diff --git a/clang/test/CodeGenHIP/builtins-make-buffer-rsrc.hip b/clang/test/CodeGenHIP/builtins-make-buffer-rsrc.hip index c1a30633f3d0a..2342fcefb5f89 100644 --- a/clang/test/CodeGenHIP/builtins-make-buffer-rsrc.hip +++ b/clang/test/CodeGenHIP/builtins-make-buffer-rsrc.hip @@ -25,7 +25,7 @@ // CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr [[STRIDE_ADDR_ASCAST]], align 2 // CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[NUM_ADDR_ASCAST]], align 4 // CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[FLAGS_ADDR_ASCAST]], align 4 -// CHECK-NEXT: [[TMP4:%.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p0(ptr [[TMP0]], i16 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]]) +// CHECK-NEXT: [[TMP4:%.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr [[TMP0]], i16 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]]) // CHECK-NEXT: ret ptr addrspace(8) [[TMP4]] // __device__ __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p0(void *p, short stride, int num, int flags) { @@ -49,7 +49,7 @@ __device__ __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p0(void *p, short // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[P_ADDR_ASCAST]], align 8 // CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[NUM_ADDR_ASCAST]], align 4 // CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[FLAGS_ADDR_ASCAST]], align 4 -// CHECK-NEXT: [[TMP3:%.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p0(ptr [[TMP0]], i16 4, i32 [[TMP1]], i32 [[TMP2]]) +// CHECK-NEXT: [[TMP3:%.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr [[TMP0]], i16 4, i32 [[TMP1]], i32 [[TMP2]]) // CHECK-NEXT: ret ptr addrspace(8) [[TMP3]] // __device__ __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p0_stride_constant(void *p, int num, int flags) { @@ -73,7 +73,7 @@ __device__ __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p0_stride_constan // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[P_ADDR_ASCAST]], align 8 // CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr [[STRIDE_ADDR_ASCAST]], align 2 // CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[FLAGS_ADDR_ASCAST]], align 4 -// CHECK-NEXT: [[TMP3:%.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p0(ptr [[TMP0]], i16 [[TMP1]], i32 1234, i32 
[[TMP2]]) +// CHECK-NEXT: [[TMP3:%.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr [[TMP0]], i16 [[TMP1]], i32 1234, i32 [[TMP2]]) // CHECK-NEXT: ret ptr addrspace(8) [[TMP3]] // __device__ __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p0_num_constant(void *p, short stride, int flags) { @@ -97,7 +97,7 @@ __device__ __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p0_num_constant(v // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[P_ADDR_ASCAST]], align 8 // CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr [[STRIDE_ADDR_ASCAST]], align 2 // CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[NUM_ADDR_ASCAST]], align 4 -// CHECK-NEXT: [[TMP3:%.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p0(ptr [[TMP0]], i16 [[TMP1]], i32 [[TMP2]], i32 5678) +// CHECK-NEXT: [[TMP3:%.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr [[TMP0]], i16 [[TMP1]], i32 [[TMP2]], i32 5678) // CHECK-NEXT: ret ptr addrspace(8) [[TMP3]] // __device__ __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p0_flags_constant(void *p, short stride, int num) { diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn-make-buffer-rsrc.cl b/clang/test/CodeGenOpenCL/builtins-amdgcn-make-buffer-rsrc.cl index 2c7bc10fb609c..29093c09c39d0 100644 --- a/clang/test/CodeGenOpenCL/builtins-amdgcn-make-buffer-rsrc.cl +++ b/clang/test/CodeGenOpenCL/builtins-amdgcn-make-buffer-rsrc.cl @@ -4,7 +4,7 @@ // CHECK-LABEL: @test_amdgcn_make_buffer_rsrc_p0( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = tail call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p0(ptr [[P:%.*]], i16 [[STRIDE:%.*]], i32 [[NUM:%.*]], i32 [[FLAGS:%.*]]) +// CHECK-NEXT: [[TMP0:%.*]] = tail call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr [[P:%.*]], i16 [[STRIDE:%.*]], i32 [[NUM:%.*]], i32 [[FLAGS:%.*]]) // CHECK-NEXT: ret ptr addrspace(8) [[TMP0]] // __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p0(void *p, short stride, int num, int flags) { @@ -13,7 +13,7 @@ __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p0(void *p, short stride, in // CHECK-LABEL: @test_amdgcn_make_buffer_rsrc_p0_stride_constant( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = tail call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p0(ptr [[P:%.*]], i16 4, i32 [[NUM:%.*]], i32 [[FLAGS:%.*]]) +// CHECK-NEXT: [[TMP0:%.*]] = tail call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr [[P:%.*]], i16 4, i32 [[NUM:%.*]], i32 [[FLAGS:%.*]]) // CHECK-NEXT: ret ptr addrspace(8) [[TMP0]] // __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p0_stride_constant(void *p, int num, int flags) { @@ -22,7 +22,7 @@ __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p0_stride_constant(void *p, // CHECK-LABEL: @test_amdgcn_make_buffer_rsrc_p0_num_constant( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = tail call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p0(ptr [[P:%.*]], i16 [[STRIDE:%.*]], i32 1234, i32 [[FLAGS:%.*]]) +// CHECK-NEXT: [[TMP0:%.*]] = tail call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr [[P:%.*]], i16 [[STRIDE:%.*]], i32 1234, i32 [[FLAGS:%.*]]) // CHECK-NEXT: ret ptr addrspace(8) [[TMP0]] // __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p0_num_constant(void *p, short stride, int flags) { @@ -31,7 +31,7 @@ __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p0_num_constant(void *p, sho // CHECK-LABEL: @test_amdgcn_make_buffer_rsrc_p0_flags_constant( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = tail call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p0(ptr [[P:%.*]], i16 [[STRIDE:%.*]], i32 
[[NUM:%.*]], i32 5678) +// CHECK-NEXT: [[TMP0:%.*]] = tail call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr [[P:%.*]], i16 [[STRIDE:%.*]], i32 [[NUM:%.*]], i32 5678) // CHECK-NEXT: ret ptr addrspace(8) [[TMP0]] // __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p0_flags_constant(void *p, short stride, int num) { @@ -40,7 +40,7 @@ __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p0_flags_constant(void *p, s // CHECK-LABEL: @test_amdgcn_make_buffer_rsrc_p1( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = tail call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p1(ptr addrspace(1) [[P:%.*]], i16 [[STRIDE:%.*]], i32 [[NUM:%.*]], i32 [[FLAGS:%.*]]) +// CHECK-NEXT: [[TMP0:%.*]] = tail call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) [[P:%.*]], i16 [[STRIDE:%.*]], i32 [[NUM:%.*]], i32 [[FLAGS:%.*]]) // CHECK-NEXT: ret ptr addrspace(8) [[TMP0]] // __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p1(global void *p, short stride, int num, int flags) { @@ -49,7 +49,7 @@ __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p1(global void *p, short str // CHECK-LABEL: @test_amdgcn_make_buffer_rsrc_p1_stride_constant( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = tail call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p1(ptr addrspace(1) [[P:%.*]], i16 4, i32 [[NUM:%.*]], i32 [[FLAGS:%.*]]) +// CHECK-NEXT: [[TMP0:%.*]] = tail call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) [[P:%.*]], i16 4, i32 [[NUM:%.*]], i32 [[FLAGS:%.*]]) // CHECK-NEXT: ret ptr addrspace(8) [[TMP0]] // __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p1_stride_constant(global void *p, int num, int flags) { @@ -58,7 +58,7 @@ __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p1_stride_constant(global vo // CHECK-LABEL: @test_amdgcn_make_buffer_rsrc_p1_num_constant( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = tail call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p1(ptr addrspace(1) [[P:%.*]], i16 [[STRIDE:%.*]], i32 1234, i32 [[FLAGS:%.*]]) +// CHECK-NEXT: [[TMP0:%.*]] = tail call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) [[P:%.*]], i16 [[STRIDE:%.*]], i32 1234, i32 [[FLAGS:%.*]]) // CHECK-NEXT: ret ptr addrspace(8) [[TMP0]] // __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p1_num_constant(global void *p, short stride, int flags) { @@ -67,7 +67,7 @@ __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p1_num_constant(global void // CHECK-LABEL: @test_amdgcn_make_buffer_rsrc_p1_flags_constant( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = tail call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p1(ptr addrspace(1) [[P:%.*]], i16 [[STRIDE:%.*]], i32 [[NUM:%.*]], i32 5678) +// CHECK-NEXT: [[TMP0:%.*]] = tail call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) [[P:%.*]], i16 [[STRIDE:%.*]], i32 [[NUM:%.*]], i32 5678) // CHECK-NEXT: ret ptr addrspace(8) [[TMP0]] // __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p1_flags_constant(global void *p, short stride, int num) { @@ -76,7 +76,7 @@ __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p1_flags_constant(global voi // CHECK-LABEL: @test_amdgcn_make_buffer_p0_nullptr( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = tail call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p0(ptr null, i16 [[STRIDE:%.*]], i32 [[NUM:%.*]], i32 [[FLAGS:%.*]]) +// CHECK-NEXT: [[TMP0:%.*]] = tail call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr null, i16 [[STRIDE:%.*]], i32 [[NUM:%.*]], i32 [[FLAGS:%.*]]) // CHECK-NEXT: ret 
ptr addrspace(8) [[TMP0]] // __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_p0_nullptr(short stride, int num, int flags) { @@ -85,7 +85,7 @@ __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_p0_nullptr(short stride, int num, // CHECK-LABEL: @test_amdgcn_make_buffer_p1_nullptr( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = tail call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p1(ptr addrspace(1) null, i16 [[STRIDE:%.*]], i32 [[NUM:%.*]], i32 [[FLAGS:%.*]]) +// CHECK-NEXT: [[TMP0:%.*]] = tail call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) null, i16 [[STRIDE:%.*]], i32 [[NUM:%.*]], i32 [[FLAGS:%.*]]) // CHECK-NEXT: ret ptr addrspace(8) [[TMP0]] // __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_p1_nullptr(short stride, int num, int flags) { diff --git a/clang/test/Index/Core/index-dependent-source.cpp b/clang/test/Index/Core/index-dependent-source.cpp index 8fec9abd1e926..ef414c8fdf7a0 100644 --- a/clang/test/Index/Core/index-dependent-source.cpp +++ b/clang/test/Index/Core/index-dependent-source.cpp @@ -3,7 +3,7 @@ int invalid; class Base { - void baseFunction(); + void baseFunction() const; int baseField; @@ -13,7 +13,7 @@ template<typename T> class BaseTemplate { public: - T baseTemplateFunction(); + T baseTemplateFunction() const; T baseTemplateField; @@ -25,7 +25,7 @@ class TemplateClass: public Base , public BaseTemplate<T> { public: ~TemplateClass(); - T function() { } + T function() const { } static void staticFunction() { } @@ -48,27 +48,27 @@ template<typename T> void indexSimpleDependentDeclarations(const TemplateClass<T> &object) { // Valid instance members: object.function(); -// CHECK: [[@LINE-1]]:10 | instance-method/C++ | function | c:@ST>2#T#T@TemplateClass@F@function# | | Ref,Call,RelCall,RelCont | rel: 1 +// CHECK: [[@LINE-1]]:10 | instance-method/C++ | function | c:@ST>2#T#T@TemplateClass@F@function#1 | | Ref,Call,RelCall,RelCont | rel: 1 object.field; // CHECK: [[@LINE-1]]:10 | field/C++ | field | c:@ST>2#T#T@TemplateClass@FI@field | | Ref,RelCont | rel: 1 object.baseFunction(); -// CHECK: [[@LINE-1]]:10 | instance-method/C++ | baseFunction | c:@S@Base@F@baseFunction# | __ZN4Base12baseFunctionEv | Ref,Call,RelCall,RelCont | rel: 1 +// CHECK: [[@LINE-1]]:10 | instance-method/C++ | baseFunction | c:@S@Base@F@baseFunction#1 | __ZNK4Base12baseFunctionEv | Ref,Call,RelCall,RelCont | rel: 1 object.baseField; // CHECK: [[@LINE-1]]:10 | field/C++ | baseField | c:@S@Base@FI@baseField | | Ref,RelCont | rel: 1 object.baseTemplateFunction(); -// CHECK: [[@LINE-1]]:10 | instance-method/C++ | baseTemplateFunction | c:@ST>1#T@BaseTemplate@F@baseTemplateFunction# | | Ref,Call,RelCall,RelCont | rel: 1 +// CHECK: [[@LINE-1]]:10 | instance-method/C++ | baseTemplateFunction | c:@ST>1#T@BaseTemplate@F@baseTemplateFunction#1 | | Ref,Call,RelCall,RelCont | rel: 1 object.baseTemplateField; // CHECK: [[@LINE-1]]:10 | field/C++ | baseTemplateField | c:@ST>1#T@BaseTemplate@FI@baseTemplateField | | Ref,RelCont | rel: 1 - // Invalid instance members: + // Static members (these are still valid to access via an instance): object.variable; -// CHECK-NOT: [[@LINE-1]]:10 +// CHECK: [[@LINE-1]]:10 | static-property/C++ | variable | c:@ST>2#T#T@TemplateClass@variable | __ZN13TemplateClass8variableE | Ref,RelCont | rel: 1 object.staticFunction(); -// CHECK-NOT: [[@LINE-1]]:10 +// CHECK: [[@LINE-1]]:10 | static-method/C++ | staticFunction | c:@ST>2#T#T@TemplateClass@F@staticFunction#S | | Ref,Call,RelCall,RelCont | rel: 1 object.Struct; -// CHECK-NOT: [[@LINE-1]]:10 +// CHECK: [[@LINE-1]]:10
| struct/C | Struct | c:@ST>2#T#T@TemplateClass@S@Struct | | Ref,RelCont | rel: 1 object.EnumValue; -// CHECK-NOT: [[@LINE-1]]:10 +// CHECK: [[@LINE-1]]:10 | enumerator/C | EnumValue | c:@ST>2#T#T@TemplateClass@E@Enum@EnumValue | | Ref,RelCont | rel: 1 // Valid static members: TemplateClass::staticFunction(); diff --git a/clang/test/Preprocessor/riscv-cf-protection-return.c b/clang/test/Preprocessor/riscv-cf-protection-return.c new file mode 100644 index 0000000000000..3a93a88fa6839 --- /dev/null +++ b/clang/test/Preprocessor/riscv-cf-protection-return.c @@ -0,0 +1,44 @@ +// RUN: %clang --target=riscv32 -E -dM %s -o - | \ +// RUN: FileCheck --check-prefixes=NO-MACRO %s + +// RUN: %clang --target=riscv32 -fcf-protection=return -E -dM %s -o - | \ +// RUN: FileCheck --check-prefixes=NO-MACRO %s + +// RUN: %clang --target=riscv32 -fcf-protection=full -E -dM %s -o - | \ +// RUN: FileCheck --check-prefixes=NO-MACRO %s + +// RUN: %clang --target=riscv32 -march=rv32i_zicfiss1p0 \ +// RUN: -menable-experimental-extensions -E -dM %s -o - | \ +// RUN: FileCheck --check-prefixes=NO-MACRO %s + +// RUN: %clang --target=riscv32 -march=rv32i_zicfiss1p0 \ +// RUN: -menable-experimental-extensions -fcf-protection=return -E -dM %s \ +// RUN: -o - | FileCheck --check-prefixes=SHSTK-MACRO %s + +// RUN: %clang --target=riscv32 -march=rv32i_zicfiss1p0 \ +// RUN: -menable-experimental-extensions -fcf-protection=full -E -dM %s -o - \ +// RUN: | FileCheck --check-prefixes=SHSTK-MACRO %s + +// RUN: %clang --target=riscv64 -E -dM %s -o - | \ +// RUN: FileCheck --check-prefixes=NO-MACRO %s + +// RUN: %clang --target=riscv64 -fcf-protection=return -E -dM %s -o - | \ +// RUN: FileCheck --check-prefixes=NO-MACRO %s + +// RUN: %clang --target=riscv64 -fcf-protection=full -E -dM %s -o - | \ +// RUN: FileCheck --check-prefixes=NO-MACRO %s + +// RUN: %clang --target=riscv64 -march=rv64i_zicfiss1p0 \ +// RUN: -menable-experimental-extensions -E -dM %s -o - | \ +// RUN: FileCheck --check-prefixes=NO-MACRO %s + +// RUN: %clang --target=riscv64 -march=rv64i_zicfiss1p0 \ +// RUN: -menable-experimental-extensions -fcf-protection=return -E -dM %s \ +// RUN: -o - | FileCheck --check-prefixes=SHSTK-MACRO %s + +// RUN: %clang --target=riscv64 -march=rv64i_zicfiss1p0 \ +// RUN: -menable-experimental-extensions -fcf-protection=full -E -dM %s -o - \ +// RUN: | FileCheck --check-prefixes=SHSTK-MACRO %s + +// SHSTK-MACRO: __riscv_shadow_stack 1{{$}} +// NO-MACRO-NOT: __riscv_shadow_stack diff --git a/clang/test/Sema/gh106576.c b/clang/test/Sema/gh106576.c new file mode 100644 index 0000000000000..a72592aac0129 --- /dev/null +++ b/clang/test/Sema/gh106576.c @@ -0,0 +1,14 @@ +// RUN: %clang_cc1 -fsyntax-only -verify %s + +typedef _Atomic char atomic_char; + +atomic_char counter; + +char load_plus_one(void) { + return ({counter;}) + 1; // no crash +} + +char type_of_stmt_expr(void) { + typeof(({counter;})) y = ""; // expected-error-re {{incompatible pointer to integer conversion initializing 'typeof (({{{.*}}}))' (aka 'char') with an expression of type 'char[1]'}} + return y; +} diff --git a/clang/test/SemaCXX/coroutine-noreturn.cpp b/clang/test/SemaCXX/coroutine-noreturn.cpp new file mode 100644 index 0000000000000..4516b4e720ec0 --- /dev/null +++ b/clang/test/SemaCXX/coroutine-noreturn.cpp @@ -0,0 +1,30 @@ +// RUN: %clang_cc1 %s -std=c++20 -fsyntax-only -Winvalid-noreturn -verify + +#include "Inputs/std-coroutine.h" + +struct Promise; + +struct Awaitable { + bool await_ready(); + void await_suspend(std::coroutine_handle<>); + void 
await_resume(); +}; + +struct Coro : std::coroutine_handle<> { + using promise_type = Promise; +}; + +struct Promise { + Coro get_return_object(); + std::suspend_always initial_suspend() noexcept; + std::suspend_always final_suspend() noexcept; + void return_void(); + void unhandled_exception(); +}; + +[[noreturn]] Coro test() { // expected-warning {{coroutine 'test' cannot be declared 'noreturn' as it always returns a coroutine handle}} + co_await Awaitable{}; +} + +// NO warning here. This could be a regular function returning a `Coro` object. +[[noreturn]] Coro test2(); diff --git a/clang/test/SemaCXX/cxx2c-binding-pack.cpp b/clang/test/SemaCXX/cxx2c-binding-pack.cpp index 5ca249f52b3d8..62e1da565f2b5 100644 --- a/clang/test/SemaCXX/cxx2c-binding-pack.cpp +++ b/clang/test/SemaCXX/cxx2c-binding-pack.cpp @@ -59,6 +59,7 @@ template void decompose_struct() { T obj{1, 2, 3, 6}; auto [x, ...rest, y] = obj; + static_assert(sizeof...(rest) == 2); auto [...empty] = type_{}; static_assert(sizeof...(empty) == 0); @@ -124,6 +125,14 @@ void lambda_capture() { [&x...] { (void)sum(x...); }(); } +struct S2 { + int a, b, c; +}; + +auto X = [] () { + auto [...pack] = S2{}; +}; + int main() { decompose_array(); decompose_tuple(); @@ -133,6 +142,8 @@ int main() { lambda_capture(); lambda_capture(); lambda_capture(); + X(); + } // P1061R10 Stuff @@ -188,3 +199,22 @@ void other_main() { static_assert(f() == 2); } } // namespace + +namespace { +struct S { + int a,b,c; +}; + +clsss S2 { // expected-error{{{unknown type name 'clsss'}}} +public: + int a,b,c; +}; + +// Should not crash. +auto X = [] () { + auto [...pack,a,b,c] = S{}; + auto [x,y,z,...pack2] = S{}; + auto [...pack3] = S2{}; + static_assert(sizeof...(pack3) == 5); +}; +} // namespace diff --git a/clang/tools/clang-scan-deps/ClangScanDeps.cpp b/clang/tools/clang-scan-deps/ClangScanDeps.cpp index 9cdb1eae56187..3bdeb461e4bfa 100644 --- a/clang/tools/clang-scan-deps/ClangScanDeps.cpp +++ b/clang/tools/clang-scan-deps/ClangScanDeps.cpp @@ -298,12 +298,14 @@ class ResourceDirectoryCache { }; if (llvm::sys::ExecuteAndWait(ClangBinaryPath, PrintResourceDirArgs, {}, Redirects)) { - auto ErrorBuf = llvm::MemoryBuffer::getFile(ErrorFile.c_str()); + auto ErrorBuf = + llvm::MemoryBuffer::getFile(ErrorFile.c_str(), /*IsText=*/true); llvm::errs() << ErrorBuf.get()->getBuffer(); return ""; } - auto OutputBuf = llvm::MemoryBuffer::getFile(OutputFile.c_str()); + auto OutputBuf = + llvm::MemoryBuffer::getFile(OutputFile.c_str(), /*IsText=*/true); if (!OutputBuf) return ""; StringRef Output = OutputBuf.get()->getBuffer().rtrim('\n'); @@ -1032,7 +1034,8 @@ int clang_scan_deps_main(int argc, char **argv, const llvm::ToolContext &) { std::unique_ptr TU; std::optional TUBuffer; if (!TranslationUnitFile.empty()) { - auto MaybeTU = llvm::MemoryBuffer::getFile(TranslationUnitFile); + auto MaybeTU = + llvm::MemoryBuffer::getFile(TranslationUnitFile, /*IsText=*/true); if (!MaybeTU) { llvm::errs() << "cannot open input translation unit: " << MaybeTU.getError().message() << "\n"; diff --git a/clang/tools/libclang/CXCursor.cpp b/clang/tools/libclang/CXCursor.cpp index 9ca0ce36bb7f2..0810c38bb751b 100644 --- a/clang/tools/libclang/CXCursor.cpp +++ b/clang/tools/libclang/CXCursor.cpp @@ -338,7 +338,6 @@ CXCursor cxcursor::MakeCXCursor(const Stmt *S, const Decl *Parent, case Stmt::EmbedExprClass: case Stmt::HLSLOutArgExprClass: case Stmt::OpenACCAsteriskSizeExprClass: - case Stmt::ResolvedUnexpandedPackExprClass: K = CXCursor_UnexposedExpr; break; diff --git 
a/clang/unittests/Format/FormatTest.cpp b/clang/unittests/Format/FormatTest.cpp index 2365a7c40bf76..d6d028436d39c 100644 --- a/clang/unittests/Format/FormatTest.cpp +++ b/clang/unittests/Format/FormatTest.cpp @@ -29028,6 +29028,11 @@ TEST_F(FormatTest, WrapNamespaceBodyWithEmptyLinesAlways) { Style); } +TEST_F(FormatTest, BreakBeforeClassName) { + verifyFormat("class ABSL_ATTRIBUTE_TRIVIAL_ABI ABSL_NULLABILITY_COMPATIBLE\n" + " ArenaSafeUniquePtr {};"); +} + } // namespace } // namespace test } // namespace format diff --git a/clang/unittests/Format/TokenAnnotatorTest.cpp b/clang/unittests/Format/TokenAnnotatorTest.cpp index 1d0870c818acc..8ada6c3daeaf6 100644 --- a/clang/unittests/Format/TokenAnnotatorTest.cpp +++ b/clang/unittests/Format/TokenAnnotatorTest.cpp @@ -3250,6 +3250,10 @@ TEST_F(TokenAnnotatorTest, StartOfName) { EXPECT_TOKEN(Tokens[0], tok::at, TT_ObjCDecl); EXPECT_TOKEN(Tokens[2], tok::identifier, TT_StartOfName); + Tokens = annotate("class FOO BAR C {};"); + ASSERT_EQ(Tokens.size(), 8u) << Tokens; + EXPECT_TOKEN(Tokens[2], tok::identifier, TT_Unknown); // Not StartOfName + auto Style = getLLVMStyle(); Style.StatementAttributeLikeMacros.push_back("emit"); Tokens = annotate("emit foo = 0;", Style); diff --git a/compiler-rt/test/orc/TestCases/Generic/Inputs/bar-ret-void-weak.ll b/compiler-rt/test/orc/TestCases/Generic/Inputs/bar-ret-void-weak.ll new file mode 100644 index 0000000000000..7301b43e7c92d --- /dev/null +++ b/compiler-rt/test/orc/TestCases/Generic/Inputs/bar-ret-void-weak.ll @@ -0,0 +1,4 @@ +define weak void @bar() { +entry: + ret void +} diff --git a/compiler-rt/test/orc/TestCases/Generic/Inputs/baz-ret-void-hidden.ll b/compiler-rt/test/orc/TestCases/Generic/Inputs/baz-ret-void-hidden.ll new file mode 100644 index 0000000000000..27e19deea6ebd --- /dev/null +++ b/compiler-rt/test/orc/TestCases/Generic/Inputs/baz-ret-void-hidden.ll @@ -0,0 +1,4 @@ +define hidden void @baz() { +entry: + ret void +} diff --git a/compiler-rt/test/orc/TestCases/Generic/lazy-link.ll b/compiler-rt/test/orc/TestCases/Generic/lazy-link.ll index 5a8dbfc532b0f..1c375bcf1e62f 100644 --- a/compiler-rt/test/orc/TestCases/Generic/lazy-link.ll +++ b/compiler-rt/test/orc/TestCases/Generic/lazy-link.ll @@ -6,9 +6,11 @@ ; RUN: rm -rf %t && mkdir -p %t ; RUN: %clang -c -o %t/foo.o %S/Inputs/foo-ret-42.ll ; RUN: %clang -c -o %t/x.o %S/Inputs/var-x-42.ll +; RUN: %clang -c -o %t/bar.o %S/Inputs/bar-ret-void-weak.ll +; RUN: %clang -c -o %t/baz.o %S/Inputs/baz-ret-void-hidden.ll ; RUN: %clang -c -o %t/main.o %s ; RUN: %llvm_jitlink -noexec -show-linked-files %t/main.o -lazy %t/foo.o \ -; RUN: -lazy %t/x.o | FileCheck %s +; RUN: -lazy %t/x.o -lazy %t/bar.o -lazy %t/baz.o | FileCheck %s ; ; UNSUPPORTED: system-windows ; REQUIRES: target={{(arm|aarch|x86_)64.*}} @@ -21,9 +23,15 @@ declare i32 @foo() @x = external global i32 +declare void @bar() +declare hidden void @baz() + + define i32 @main(i32 %argc, ptr %argv) { entry: %foo_result = call i32 @foo() + call void @bar() + call void @baz() %x_val = load i32, ptr @x %result = add nsw i32 %foo_result, %x_val ret i32 %result diff --git a/flang-rt/include/flang-rt/runtime/io-stmt.h b/flang-rt/include/flang-rt/runtime/io-stmt.h index a364ddfd9b3c7..33cc91271ab12 100644 --- a/flang-rt/include/flang-rt/runtime/io-stmt.h +++ b/flang-rt/include/flang-rt/runtime/io-stmt.h @@ -627,7 +627,7 @@ class OpenStatementState : public ExternalIoStatementBase { Fortran::common::optional action_; Convert convert_{Convert::Unknown}; OwningPtr path_; - std::size_t pathLength_; + 
std::size_t pathLength_{}; Fortran::common::optional isUnformatted_; Fortran::common::optional access_; }; diff --git a/flang/include/flang/Lower/AbstractConverter.h b/flang/include/flang/Lower/AbstractConverter.h index 3d2b805da6f47..1d1323642bf9c 100644 --- a/flang/include/flang/Lower/AbstractConverter.h +++ b/flang/include/flang/Lower/AbstractConverter.h @@ -314,6 +314,8 @@ class AbstractConverter { mangleName(const Fortran::semantics::DerivedTypeSpec &) = 0; /// Unique a compiler generated name (add a containing scope specific prefix) virtual std::string mangleName(std::string &) = 0; + /// Unique a compiler generated name (add a provided scope specific prefix) + virtual std::string mangleName(std::string &, const semantics::Scope &) = 0; /// Return the field name for a derived type component inside a fir.record /// type. virtual std::string diff --git a/flang/lib/Lower/Bridge.cpp b/flang/lib/Lower/Bridge.cpp index 36e58e456dea3..7c217ce2f404c 100644 --- a/flang/lib/Lower/Bridge.cpp +++ b/flang/lib/Lower/Bridge.cpp @@ -1049,6 +1049,11 @@ class FirConverter : public Fortran::lower::AbstractConverter { return Fortran::lower::mangle::mangleName(name, getCurrentScope(), scopeBlockIdMap); } + std::string + mangleName(std::string &name, + const Fortran::semantics::Scope &myScope) override final { + return Fortran::lower::mangle::mangleName(name, myScope, scopeBlockIdMap); + } std::string getRecordTypeFieldName( const Fortran::semantics::Symbol &component) override final { return Fortran::lower::mangle::getRecordTypeFieldName(component, diff --git a/flang/lib/Lower/OpenMP/ClauseProcessor.cpp b/flang/lib/Lower/OpenMP/ClauseProcessor.cpp index febc6adcf9d6f..e21d299570b86 100644 --- a/flang/lib/Lower/OpenMP/ClauseProcessor.cpp +++ b/flang/lib/Lower/OpenMP/ClauseProcessor.cpp @@ -969,8 +969,11 @@ void ClauseProcessor::processMapObjects( llvm::omp::OpenMPOffloadMappingFlags mapTypeBits, std::map &parentMemberIndices, llvm::SmallVectorImpl &mapVars, - llvm::SmallVectorImpl &mapSyms) const { + llvm::SmallVectorImpl &mapSyms, + llvm::StringRef mapperIdNameRef) const { fir::FirOpBuilder &firOpBuilder = converter.getFirOpBuilder(); + mlir::FlatSymbolRefAttr mapperId; + std::string mapperIdName = mapperIdNameRef.str(); for (const omp::Object &object : objects) { llvm::SmallVector bounds; @@ -1003,6 +1006,20 @@ void ClauseProcessor::processMapObjects( } } + if (!mapperIdName.empty()) { + if (mapperIdName == "default") { + auto &typeSpec = object.sym()->owner().IsDerivedType() + ? 
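// Note on the lookup below: an implicit "default" mapper is keyed as
// "<derived-type-name>.default" and mangled with the scope of the derived
// type itself, not the scope of the map clause. For a hypothetical
// "type(my_type)" declared inside "subroutine foo", the key mangles to
// "_QQFfoomy_type.default", matching the omp.declare_mapper symbols in the
// tests further down.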
*object.sym()->owner().derivedTypeSpec()
+                             : object.sym()->GetType()->derivedTypeSpec();
+        mapperIdName = typeSpec.name().ToString() + ".default";
+        mapperIdName = converter.mangleName(mapperIdName, *typeSpec.GetScope());
+      }
+      assert(converter.getModuleOp().lookupSymbol(mapperIdName) &&
+             "mapper not found");
+      mapperId = mlir::FlatSymbolRefAttr::get(&converter.getMLIRContext(),
+                                              mapperIdName);
+      mapperIdName.clear();
+    }
     // Explicit map captures are captured ByRef by default,
     // optimisation passes may alter this to ByCopy or other capture
     // types to optimise
@@ -1016,7 +1033,8 @@ void ClauseProcessor::processMapObjects(
             static_cast<
                 std::underlying_type_t<llvm::omp::OpenMPOffloadMappingFlags>>(
                 mapTypeBits),
-            mlir::omp::VariableCaptureKind::ByRef, baseOp.getType());
+            mlir::omp::VariableCaptureKind::ByRef, baseOp.getType(),
+            /*partialMap=*/false, mapperId);
 
     if (parentObj.has_value()) {
       parentMemberIndices[parentObj.value()].addChildIndexAndMapToParent(
@@ -1047,6 +1065,7 @@ bool ClauseProcessor::processMap(
     const auto &[mapType, typeMods, mappers, iterator, objects] = clause.t;
     llvm::omp::OpenMPOffloadMappingFlags mapTypeBits =
         llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_NONE;
+    std::string mapperIdName;
     // If the map type is specified, then process it else Tofrom is the
     // default.
     Map::MapType type = mapType.value_or(Map::MapType::Tofrom);
@@ -1090,13 +1109,17 @@
           "Support for iterator modifiers is not implemented yet");
     }
     if (mappers) {
-      TODO(currentLocation,
-           "Support for mapper modifiers is not implemented yet");
+      assert(mappers->size() == 1 && "more than one mapper");
+      mapperIdName = mappers->front().v.id().symbol->name().ToString();
+      if (mapperIdName != "default")
+        mapperIdName = converter.mangleName(
+            mapperIdName, mappers->front().v.id().symbol->owner());
     }
 
     processMapObjects(stmtCtx, clauseLocation,
                       std::get<omp::ObjectList>(clause.t), mapTypeBits,
-                      parentMemberIndices, result.mapVars, *ptrMapSyms);
+                      parentMemberIndices, result.mapVars, *ptrMapSyms,
+                      mapperIdName);
   };
 
   bool clauseFound = findRepeatableClause<omp::clause::Map>(process);
diff --git a/flang/lib/Lower/OpenMP/ClauseProcessor.h b/flang/lib/Lower/OpenMP/ClauseProcessor.h
index e05f66c766684..889a09a8f0cd8 100644
--- a/flang/lib/Lower/OpenMP/ClauseProcessor.h
+++ b/flang/lib/Lower/OpenMP/ClauseProcessor.h
@@ -175,7 +175,8 @@ class ClauseProcessor {
       llvm::omp::OpenMPOffloadMappingFlags mapTypeBits,
       std::map<Object, OmpMapParentAndMemberData> &parentMemberIndices,
       llvm::SmallVectorImpl<mlir::Value> &mapVars,
-      llvm::SmallVectorImpl<const semantics::Symbol *> &mapSyms) const;
+      llvm::SmallVectorImpl<const semantics::Symbol *> &mapSyms,
+      llvm::StringRef mapperIdNameRef = "") const;
 
   lower::AbstractConverter &converter;
   semantics::SemanticsContext &semaCtx;
diff --git a/flang/lib/Lower/OpenMP/OpenMP.cpp b/flang/lib/Lower/OpenMP/OpenMP.cpp
index bd794033cdf11..e0d23fc53eeca 100644
--- a/flang/lib/Lower/OpenMP/OpenMP.cpp
+++ b/flang/lib/Lower/OpenMP/OpenMP.cpp
@@ -3119,7 +3119,51 @@ static void
 genOMP(lower::AbstractConverter &converter, lower::SymMap &symTable,
        semantics::SemanticsContext &semaCtx, lower::pft::Evaluation &eval,
        const parser::OpenMPDeclareMapperConstruct &declareMapperConstruct) {
-  TODO(converter.getCurrentLocation(), "OpenMPDeclareMapperConstruct");
+  mlir::Location loc = converter.genLocation(declareMapperConstruct.source);
+  fir::FirOpBuilder &firOpBuilder = converter.getFirOpBuilder();
+  lower::StatementContext stmtCtx;
+  const auto &spec =
+      std::get<parser::OmpDeclareMapperSpecifier>(declareMapperConstruct.t);
+  const auto &mapperName{std::get<std::optional<parser::Name>>(spec.t)};
+  const auto &varType{std::get<parser::TypeSpec>(spec.t)};
+  const auto &varName{std::get<parser::Name>(spec.t)};
+  assert(varType.declTypeSpec->category() ==
+             semantics::DeclTypeSpec::Category::TypeDerived &&
+         "Expected derived type");
+
+  std::string mapperNameStr;
+  if (mapperName.has_value()) {
+    mapperNameStr = mapperName->ToString();
+    mapperNameStr =
+        converter.mangleName(mapperNameStr, mapperName->symbol->owner());
+  } else {
+    mapperNameStr =
+        varType.declTypeSpec->derivedTypeSpec().name().ToString() + ".default";
+    mapperNameStr = converter.mangleName(
+        mapperNameStr, *varType.declTypeSpec->derivedTypeSpec().GetScope());
+  }
+
+  // Save current insertion point before moving to the module scope to create
+  // the DeclareMapperOp
+  mlir::OpBuilder::InsertionGuard guard(firOpBuilder);
+
+  firOpBuilder.setInsertionPointToStart(converter.getModuleOp().getBody());
+  auto mlirType = converter.genType(varType.declTypeSpec->derivedTypeSpec());
+  auto declMapperOp = firOpBuilder.create<mlir::omp::DeclareMapperOp>(
+      loc, mapperNameStr, mlirType);
+  auto &region = declMapperOp.getRegion();
+  firOpBuilder.createBlock(&region);
+  auto varVal = region.addArgument(firOpBuilder.getRefType(mlirType), loc);
+  converter.bindSymbol(*varName.symbol, varVal);
+
+  // Populate the declareMapper region with the map information.
+  mlir::omp::DeclareMapperInfoOperands clauseOps;
+  const auto *clauseList{
+      parser::Unwrap<parser::OmpClauseList>(declareMapperConstruct.t)};
+  List<Clause> clauses = makeClauses(*clauseList, semaCtx);
+  ClauseProcessor cp(converter, semaCtx, clauses);
+  cp.processMap(loc, stmtCtx, clauseOps);
+  firOpBuilder.create<mlir::omp::DeclareMapperInfoOp>(loc, clauseOps.mapVars);
 }
 
 static void
diff --git a/flang/lib/Lower/OpenMP/Utils.cpp b/flang/lib/Lower/OpenMP/Utils.cpp
index 35722fa7d1b12..fa1975dac789b 100644
--- a/flang/lib/Lower/OpenMP/Utils.cpp
+++ b/flang/lib/Lower/OpenMP/Utils.cpp
@@ -125,7 +125,7 @@ createMapInfoOp(fir::FirOpBuilder &builder, mlir::Location loc,
                 llvm::ArrayRef<mlir::Value> members, mlir::ArrayAttr membersIndex,
                 uint64_t mapType, mlir::omp::VariableCaptureKind mapCaptureType,
                 mlir::Type retTy,
-                bool partialMap) {
+                bool partialMap, mlir::FlatSymbolRefAttr mapperId) {
   if (auto boxTy = llvm::dyn_cast<fir::BaseBoxType>(baseAddr.getType())) {
     baseAddr = builder.create<fir::BoxAddrOp>(loc, baseAddr);
     retTy = baseAddr.getType();
@@ -144,6 +144,7 @@ createMapInfoOp(fir::FirOpBuilder &builder, mlir::Location loc,
   mlir::omp::MapInfoOp op = builder.create<mlir::omp::MapInfoOp>(
       loc, retTy, baseAddr, varType, varPtrPtr, members, membersIndex, bounds,
       builder.getIntegerAttr(builder.getIntegerType(64, false), mapType),
+      mapperId,
       builder.getAttr<mlir::omp::VariableCaptureKindAttr>(mapCaptureType),
       builder.getStringAttr(name), builder.getBoolAttr(partialMap));
   return op;
diff --git a/flang/lib/Lower/OpenMP/Utils.h b/flang/lib/Lower/OpenMP/Utils.h
index f2e378443e5f2..3943eb633b04e 100644
--- a/flang/lib/Lower/OpenMP/Utils.h
+++ b/flang/lib/Lower/OpenMP/Utils.h
@@ -116,7 +116,8 @@ createMapInfoOp(fir::FirOpBuilder &builder, mlir::Location loc,
                 llvm::ArrayRef<mlir::Value> members, mlir::ArrayAttr membersIndex,
                 uint64_t mapType, mlir::omp::VariableCaptureKind mapCaptureType,
                 mlir::Type retTy,
-                bool partialMap = false);
+                bool partialMap = false,
+                mlir::FlatSymbolRefAttr mapperId = mlir::FlatSymbolRefAttr());
 
 void insertChildMapInfoIntoParent(
     Fortran::lower::AbstractConverter &converter,
diff --git a/flang/lib/Optimizer/OpenMP/MapInfoFinalization.cpp b/flang/lib/Optimizer/OpenMP/MapInfoFinalization.cpp
index 98e325c307d97..beea7543e54b3 100644
--- a/flang/lib/Optimizer/OpenMP/MapInfoFinalization.cpp
+++ b/flang/lib/Optimizer/OpenMP/MapInfoFinalization.cpp
@@ -184,6 +184,7 @@ class MapInfoFinalizationPass
         /*members=*/mlir::SmallVector<mlir::Value>{},
         /*membersIndex=*/mlir::ArrayAttr{}, bounds,
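// The new mapper operand is threaded through every omp.map.info creation
// site; rewrites with no user-defined mapper pass a null
// mlir::FlatSymbolRefAttr, as in the lines that follow. A sketch of the
// opposite case, assuming a module-level mapper symbol "_QQFfoomy_mapper"
// already exists:
//   mlir::FlatSymbolRefAttr mapperId =
//       mlir::FlatSymbolRefAttr::get(op.getContext(), "_QQFfoomy_mapper");
// and mapperId would then be passed where mlir::FlatSymbolRefAttr() appears.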
builder.getIntegerAttr(builder.getIntegerType(64, false), mapType), + /*mapperId*/ mlir::FlatSymbolRefAttr(), builder.getAttr( mlir::omp::VariableCaptureKind::ByRef), /*name=*/builder.getStringAttr(""), @@ -329,7 +330,8 @@ class MapInfoFinalizationPass builder.getIntegerAttr( builder.getIntegerType(64, false), getDescriptorMapType(op.getMapType().value_or(0), target)), - op.getMapCaptureTypeAttr(), op.getNameAttr(), + /*mapperId*/ mlir::FlatSymbolRefAttr(), op.getMapCaptureTypeAttr(), + op.getNameAttr(), /*partial_map=*/builder.getBoolAttr(false)); op.replaceAllUsesWith(newDescParentMapOp.getResult()); op->erase(); @@ -464,7 +466,8 @@ class MapInfoFinalizationPass for (auto *user : mapOp->getUsers()) { if (llvm::isa(user)) + mlir::omp::TargetEnterDataOp, + mlir::omp::DeclareMapperInfoOp>(user)) return user; if (auto mapUser = llvm::dyn_cast(user)) @@ -497,7 +500,9 @@ class MapInfoFinalizationPass // ourselves to the possibility of race conditions while this pass // undergoes frequent re-iteration for the near future. So we loop // over function in the module and then map.info inside of those. - getOperation()->walk([&](mlir::func::FuncOp func) { + getOperation()->walk([&](mlir::Operation *func) { + if (!mlir::isa(func)) + return; // clear all local allocations we made for any boxes in any prior // iterations from previous function scopes. localBoxAllocas.clear(); @@ -620,6 +625,7 @@ class MapInfoFinalizationPass /*members=*/mlir::ValueRange{}, /*members_index=*/mlir::ArrayAttr{}, /*bounds=*/bounds, op.getMapTypeAttr(), + /*mapperId*/ mlir::FlatSymbolRefAttr(), builder.getAttr( mlir::omp::VariableCaptureKind::ByRef), builder.getStringAttr(op.getNameAttr().strref() + "." + diff --git a/flang/lib/Optimizer/OpenMP/MapsForPrivatizedSymbols.cpp b/flang/lib/Optimizer/OpenMP/MapsForPrivatizedSymbols.cpp index 963ae863c1fc5..97ea463a3c495 100644 --- a/flang/lib/Optimizer/OpenMP/MapsForPrivatizedSymbols.cpp +++ b/flang/lib/Optimizer/OpenMP/MapsForPrivatizedSymbols.cpp @@ -91,6 +91,7 @@ class MapsForPrivatizedSymbolsPass /*bounds=*/ValueRange{}, builder.getIntegerAttr(builder.getIntegerType(64, /*isSigned=*/false), mapTypeTo), + /*mapperId*/ mlir::FlatSymbolRefAttr(), builder.getAttr( omp::VariableCaptureKind::ByRef), StringAttr(), builder.getBoolAttr(false)); diff --git a/flang/test/Fir/convert-to-llvm-openmp-and-fir.fir b/flang/test/Fir/convert-to-llvm-openmp-and-fir.fir index 8e4e1fe824d9f..7cdcd2a10e975 100644 --- a/flang/test/Fir/convert-to-llvm-openmp-and-fir.fir +++ b/flang/test/Fir/convert-to-llvm-openmp-and-fir.fir @@ -936,9 +936,9 @@ func.func @omp_map_info_descriptor_type_conversion(%arg0 : !fir.ref>, i32) map_clauses(tofrom) capture(ByRef) -> !fir.llvm_ptr> {name = ""} // CHECK: %[[DESC_MAP:.*]] = omp.map.info var_ptr(%[[ARG_0]] : !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)>) map_clauses(always, delete) capture(ByRef) members(%[[MEMBER_MAP]] : [0] : !llvm.ptr) -> !llvm.ptr {name = ""} %2 = omp.map.info var_ptr(%arg0 : !fir.ref>>, !fir.box>) map_clauses(always, delete) capture(ByRef) members(%1 : [0] : !fir.llvm_ptr>) -> !fir.ref>> {name = ""} - // CHECK: omp.target_exit_data map_entries(%[[DESC_MAP]] : !llvm.ptr) + // CHECK: omp.target_exit_data map_entries(%[[DESC_MAP]] : !llvm.ptr) omp.target_exit_data map_entries(%2 : !fir.ref>>) - return + return } // ----- @@ -956,8 +956,8 @@ func.func @omp_map_info_derived_type_explicit_member_conversion(%arg0 : !fir.ref %3 = fir.field_index real, !fir.type<_QFderived_type{real:f32,array:!fir.array<10xi32>,int:i32}> %4 = 
fir.coordinate_of %arg0, %3 : (!fir.ref,int:i32}>>, !fir.field) -> !fir.ref // CHECK: %[[MAP_MEMBER_2:.*]] = omp.map.info var_ptr(%[[GEP_2]] : !llvm.ptr, f32) map_clauses(tofrom) capture(ByRef) -> !llvm.ptr {name = "dtype%real"} - %5 = omp.map.info var_ptr(%4 : !fir.ref, f32) map_clauses(tofrom) capture(ByRef) -> !fir.ref {name = "dtype%real"} - // CHECK: %[[MAP_PARENT:.*]] = omp.map.info var_ptr(%[[ARG_0]] : !llvm.ptr, !llvm.struct<"_QFderived_type", (f32, array<10 x i32>, i32)>) map_clauses(tofrom) capture(ByRef) members(%[[MAP_MEMBER_1]], %[[MAP_MEMBER_2]] : [2], [0] : !llvm.ptr, !llvm.ptr) -> !llvm.ptr {name = "dtype", partial_map = true} + %5 = omp.map.info var_ptr(%4 : !fir.ref, f32) map_clauses(tofrom) capture(ByRef) -> !fir.ref {name = "dtype%real"} + // CHECK: %[[MAP_PARENT:.*]] = omp.map.info var_ptr(%[[ARG_0]] : !llvm.ptr, !llvm.struct<"_QFderived_type", (f32, array<10 x i32>, i32)>) map_clauses(tofrom) capture(ByRef) members(%[[MAP_MEMBER_1]], %[[MAP_MEMBER_2]] : [2], [0] : !llvm.ptr, !llvm.ptr) -> !llvm.ptr {name = "dtype", partial_map = true} %6 = omp.map.info var_ptr(%arg0 : !fir.ref,int:i32}>>, !fir.type<_QFderived_type{real:f32,array:!fir.array<10xi32>,int:i32}>) map_clauses(tofrom) capture(ByRef) members(%2, %5 : [2], [0] : !fir.ref, !fir.ref) -> !fir.ref,int:i32}>> {name = "dtype", partial_map = true} // CHECK: omp.target map_entries(%[[MAP_MEMBER_1]] -> %[[ARG_1:.*]], %[[MAP_MEMBER_2]] -> %[[ARG_2:.*]], %[[MAP_PARENT]] -> %[[ARG_3:.*]] : !llvm.ptr, !llvm.ptr, !llvm.ptr) { omp.target map_entries(%2 -> %arg1, %5 -> %arg2, %6 -> %arg3 : !fir.ref, !fir.ref, !fir.ref,int:i32}>>) { @@ -1275,3 +1275,22 @@ func.func @map_nested_dtype_alloca_mem2(%arg0 : !fir.ref { +omp.declare_mapper @my_mapper : !fir.type<_QFdeclare_mapperTmy_type{data:i32}> { +// CHECK: ^bb0(%[[VAL_0:.*]]: !llvm.ptr): +^bb0(%0: !fir.ref>): +// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32 + %1 = fir.field_index data, !fir.type<_QFdeclare_mapperTmy_type{data:i32}> +// CHECK: %[[VAL_2:.*]] = llvm.getelementptr %[[VAL_0]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"_QFdeclare_mapperTmy_type", (i32)> + %2 = fir.coordinate_of %0, %1 : (!fir.ref>, !fir.field) -> !fir.ref +// CHECK: %[[VAL_3:.*]] = omp.map.info var_ptr(%[[VAL_2]] : !llvm.ptr, i32) map_clauses(tofrom) capture(ByRef) -> !llvm.ptr {name = "var%[[VAL_4:.*]]"} + %3 = omp.map.info var_ptr(%2 : !fir.ref, i32) map_clauses(tofrom) capture(ByRef) -> !fir.ref {name = "var%data"} +// CHECK: %[[VAL_5:.*]] = omp.map.info var_ptr(%[[VAL_0]] : !llvm.ptr, !llvm.struct<"_QFdeclare_mapperTmy_type", (i32)>) map_clauses(tofrom) capture(ByRef) members(%[[VAL_3]] : [0] : !llvm.ptr) -> !llvm.ptr {name = "var", partial_map = true} + %4 = omp.map.info var_ptr(%0 : !fir.ref>, !fir.type<_QFdeclare_mapperTmy_type{data:i32}>) map_clauses(tofrom) capture(ByRef) members(%3 : [0] : !fir.ref) -> !fir.ref> {name = "var", partial_map = true} +// CHECK: omp.declare_mapper.info map_entries(%[[VAL_5]], %[[VAL_3]] : !llvm.ptr, !llvm.ptr) + omp.declare_mapper.info map_entries(%4, %3 : !fir.ref>, !fir.ref) +// CHECK: } +} diff --git a/flang/test/Lower/OpenMP/Todo/map-mapper.f90 b/flang/test/Lower/OpenMP/Todo/map-mapper.f90 deleted file mode 100644 index 9554ffd5fda7b..0000000000000 --- a/flang/test/Lower/OpenMP/Todo/map-mapper.f90 +++ /dev/null @@ -1,16 +0,0 @@ -! RUN: not %flang_fc1 -emit-fir -fopenmp -fopenmp-version=50 %s 2>&1 | FileCheck %s -program p - integer, parameter :: n = 256 - real(8) :: a(256) - !! 
TODO: Add declare mapper, when it works to lower this construct
-  !!type t1
-  !!  integer :: x
-  !!end type t1
-  !!!$omp declare mapper(xx : t1 :: nn) map(nn, nn%x)
-  !$omp target map(mapper(xx), from:a)
-!CHECK: not yet implemented: Support for mapper modifiers is not implemented yet
-  do i=1,n
-    a(i) = 4.2
-  end do
-  !$omp end target
-end program p
diff --git a/flang/test/Lower/OpenMP/Todo/omp-declare-mapper.f90 b/flang/test/Lower/OpenMP/Todo/omp-declare-mapper.f90
deleted file mode 100644
index 5ae48ff736048..0000000000000
--- a/flang/test/Lower/OpenMP/Todo/omp-declare-mapper.f90
+++ /dev/null
@@ -1,47 +0,0 @@
-! This test checks lowering of OpenMP declare mapper Directive.
-
-! RUN: split-file %s %t
-! RUN: not %flang_fc1 -emit-fir -fopenmp -fopenmp-version=50 %t/omp-declare-mapper-1.f90 2>&1 | FileCheck %t/omp-declare-mapper-1.f90
-! RUN: not %flang_fc1 -emit-fir -fopenmp -fopenmp-version=50 %t/omp-declare-mapper-2.f90 2>&1 | FileCheck %t/omp-declare-mapper-2.f90
-
-!--- omp-declare-mapper-1.f90
-subroutine declare_mapper_1
-  integer,parameter :: nvals = 250
-  type my_type
-    integer :: num_vals
-    integer, allocatable :: values(:)
-  end type
-
-  type my_type2
-    type (my_type) :: my_type_var
-    type (my_type) :: temp
-    real,dimension(nvals) :: unmapped
-    real,dimension(nvals) :: arr
-  end type
-  type (my_type2) :: t
-  real :: x, y(nvals)
-  !$omp declare mapper (my_type :: var) map (var, var%values (1:var%num_vals))
-!CHECK: not yet implemented: OpenMPDeclareMapperConstruct
-end subroutine declare_mapper_1
-
-
-!--- omp-declare-mapper-2.f90
-subroutine declare_mapper_2
-  integer,parameter :: nvals = 250
-  type my_type
-    integer :: num_vals
-    integer, allocatable :: values(:)
-  end type
-
-  type my_type2
-    type (my_type) :: my_type_var
-    type (my_type) :: temp
-    real,dimension(nvals) :: unmapped
-    real,dimension(nvals) :: arr
-  end type
-  type (my_type2) :: t
-  real :: x, y(nvals)
-  !$omp declare mapper (my_mapper : my_type2 :: v) map (v%arr, x, y(:)) &
-  !$omp& map (alloc : v%temp)
-!CHECK: not yet implemented: OpenMPDeclareMapperConstruct
-end subroutine declare_mapper_2
diff --git a/flang/test/Lower/OpenMP/declare-mapper.f90 b/flang/test/Lower/OpenMP/declare-mapper.f90
new file mode 100644
index 0000000000000..fa7f23c182a68
--- /dev/null
+++ b/flang/test/Lower/OpenMP/declare-mapper.f90
@@ -0,0 +1,145 @@
+! This test checks lowering of OpenMP declare mapper Directive.
+
+! RUN: split-file %s %t
+! RUN: %flang_fc1 -emit-hlfir -fopenmp -fopenmp-version=50 %t/omp-declare-mapper-1.f90 -o - | FileCheck %t/omp-declare-mapper-1.f90
+! RUN: %flang_fc1 -emit-hlfir -fopenmp -fopenmp-version=50 %t/omp-declare-mapper-2.f90 -o - | FileCheck %t/omp-declare-mapper-2.f90
+!
RUN: %flang_fc1 -emit-hlfir -fopenmp -fopenmp-version=50 %t/omp-declare-mapper-3.f90 -o - | FileCheck %t/omp-declare-mapper-3.f90 + +!--- omp-declare-mapper-1.f90 +subroutine declare_mapper_1 + integer, parameter :: nvals = 250 + type my_type + integer :: num_vals + integer, allocatable :: values(:) + end type + + type my_type2 + type(my_type) :: my_type_var + type(my_type) :: temp + real, dimension(nvals) :: unmapped + real, dimension(nvals) :: arr + end type + type(my_type2) :: t + real :: x, y(nvals) + !CHECK:omp.declare_mapper @[[MY_TYPE_MAPPER:_QQFdeclare_mapper_1my_type\.default]] : [[MY_TYPE:!fir\.type<_QFdeclare_mapper_1Tmy_type\{num_vals:i32,values:!fir\.box>>\}>]] { + !CHECK: ^bb0(%[[VAL_0:.*]]: !fir.ref<[[MY_TYPE]]>): + !CHECK: %[[VAL_1:.*]]:2 = hlfir.declare %[[VAL_0]] {uniq_name = "_QFdeclare_mapper_1Evar"} : (!fir.ref<[[MY_TYPE]]>) -> (!fir.ref<[[MY_TYPE]]>, !fir.ref<[[MY_TYPE]]>) + !CHECK: %[[VAL_2:.*]] = hlfir.designate %[[VAL_1]]#0{"values"} {fortran_attrs = #fir.var_attrs} : (!fir.ref<[[MY_TYPE]]>) -> !fir.ref>>> + !CHECK: %[[VAL_3:.*]] = fir.load %[[VAL_2]] : !fir.ref>>> + !CHECK: %[[VAL_4:.*]] = fir.box_addr %[[VAL_3]] : (!fir.box>>) -> !fir.heap> + !CHECK: %[[VAL_5:.*]] = arith.constant 0 : index + !CHECK: %[[VAL_6:.*]]:3 = fir.box_dims %[[VAL_3]], %[[VAL_5]] : (!fir.box>>, index) -> (index, index, index) + !CHECK: %[[VAL_7:.*]] = arith.constant 0 : index + !CHECK: %[[VAL_8:.*]] = arith.constant 1 : index + !CHECK: %[[VAL_9:.*]] = arith.constant 1 : index + !CHECK: %[[VAL_10:.*]] = arith.subi %[[VAL_9]], %[[VAL_6]]#0 : index + !CHECK: %[[VAL_11:.*]] = hlfir.designate %[[VAL_1]]#0{"num_vals"} : (!fir.ref<[[MY_TYPE]]>) -> !fir.ref + !CHECK: %[[VAL_12:.*]] = fir.load %[[VAL_11]] : !fir.ref + !CHECK: %[[VAL_13:.*]] = fir.convert %[[VAL_12]] : (i32) -> i64 + !CHECK: %[[VAL_14:.*]] = fir.convert %[[VAL_13]] : (i64) -> index + !CHECK: %[[VAL_15:.*]] = arith.subi %[[VAL_14]], %[[VAL_6]]#0 : index + !CHECK: %[[VAL_16:.*]] = omp.map.bounds lower_bound(%[[VAL_10]] : index) upper_bound(%[[VAL_15]] : index) extent(%[[VAL_6]]#1 : index) stride(%[[VAL_8]] : index) start_idx(%[[VAL_6]]#0 : index) + !CHECK: %[[VAL_17:.*]] = arith.constant 1 : index + !CHECK: %[[VAL_18:.*]] = fir.coordinate_of %[[VAL_1]]#0, %[[VAL_17]] : (!fir.ref<[[MY_TYPE]]>, index) -> !fir.ref>>> + !CHECK: %[[VAL_19:.*]] = fir.box_offset %[[VAL_18]] base_addr : (!fir.ref>>>) -> !fir.llvm_ptr>> + !CHECK: %[[VAL_20:.*]] = omp.map.info var_ptr(%[[VAL_18]] : !fir.ref>>>, i32) var_ptr_ptr(%[[VAL_19]] : !fir.llvm_ptr>>) map_clauses(tofrom) capture(ByRef) bounds(%[[VAL_16]]) -> !fir.llvm_ptr>> {name = ""} + !CHECK: %[[VAL_21:.*]] = omp.map.info var_ptr(%[[VAL_18]] : !fir.ref>>>, !fir.box>>) map_clauses(to) capture(ByRef) -> !fir.ref>>> {name = "var%[[VAL_22:.*]](1:var%[[VAL_23:.*]])"} + !CHECK: %[[VAL_24:.*]] = omp.map.info var_ptr(%[[VAL_1]]#1 : !fir.ref<[[MY_TYPE]]>, [[MY_TYPE]]) map_clauses(tofrom) capture(ByRef) members(%[[VAL_21]], %[[VAL_20]] : [1], [1, 0] : !fir.ref>>>, !fir.llvm_ptr>>) -> !fir.ref<[[MY_TYPE]]> {name = "var"} + !CHECK: omp.declare_mapper.info map_entries(%[[VAL_24]], %[[VAL_21]], %[[VAL_20]] : !fir.ref<[[MY_TYPE]]>, !fir.ref>>>, !fir.llvm_ptr>>) + !CHECK: } + !$omp declare mapper (my_type :: var) map (var, var%values (1:var%num_vals)) +end subroutine declare_mapper_1 + +!--- omp-declare-mapper-2.f90 +subroutine declare_mapper_2 + integer, parameter :: nvals = 250 + type my_type + integer :: num_vals + integer, allocatable :: values(:) + end type + + type my_type2 + type(my_type) :: my_type_var + 
type(my_type) :: temp + real, dimension(nvals) :: unmapped + real, dimension(nvals) :: arr + end type + type(my_type2) :: t + real :: x, y(nvals) + !CHECK:omp.declare_mapper @[[MY_TYPE_MAPPER:_QQFdeclare_mapper_2my_mapper]] : [[MY_TYPE:!fir\.type<_QFdeclare_mapper_2Tmy_type2\{my_type_var:!fir\.type<_QFdeclare_mapper_2Tmy_type\{num_vals:i32,values:!fir\.box>>\}>,temp:!fir\.type<_QFdeclare_mapper_2Tmy_type\{num_vals:i32,values:!fir\.box>>\}>,unmapped:!fir\.array<250xf32>,arr:!fir\.array<250xf32>\}>]] { + !CHECK: ^bb0(%[[VAL_0:.*]]: !fir.ref<[[MY_TYPE]]>): + !CHECK: %[[VAL_1:.*]]:2 = hlfir.declare %[[VAL_0]] {uniq_name = "_QFdeclare_mapper_2Ev"} : (!fir.ref<[[MY_TYPE]]>) -> (!fir.ref<[[MY_TYPE]]>, !fir.ref<[[MY_TYPE]]>) + !CHECK: %[[VAL_2:.*]] = arith.constant 250 : index + !CHECK: %[[VAL_3:.*]] = fir.shape %[[VAL_2]] : (index) -> !fir.shape<1> + !CHECK: %[[VAL_4:.*]] = hlfir.designate %[[VAL_1]]#0{"arr"} shape %[[VAL_3]] : (!fir.ref<[[MY_TYPE]]>, !fir.shape<1>) -> !fir.ref> + !CHECK: %[[VAL_5:.*]] = arith.constant 1 : index + !CHECK: %[[VAL_6:.*]] = arith.constant 0 : index + !CHECK: %[[VAL_7:.*]] = arith.subi %[[VAL_2]], %[[VAL_5]] : index + !CHECK: %[[VAL_8:.*]] = omp.map.bounds lower_bound(%[[VAL_6]] : index) upper_bound(%[[VAL_7]] : index) extent(%[[VAL_2]] : index) stride(%[[VAL_5]] : index) start_idx(%[[VAL_5]] : index) + !CHECK: %[[VAL_9:.*]] = omp.map.info var_ptr(%[[VAL_4]] : !fir.ref>, !fir.array<250xf32>) map_clauses(tofrom) capture(ByRef) bounds(%[[VAL_8]]) -> !fir.ref> {name = "v%[[VAL_10:.*]]"} + !CHECK: %[[VAL_11:.*]] = hlfir.designate %[[VAL_1]]#0{"temp"} : (!fir.ref<[[MY_TYPE]]>) -> !fir.ref>>}>> + !CHECK: %[[VAL_12:.*]] = omp.map.info var_ptr(%[[VAL_11]] : !fir.ref>>}>>, !fir.type<_QFdeclare_mapper_2Tmy_type{num_vals:i32,values:!fir.box>>}>) map_clauses(exit_release_or_enter_alloc) capture(ByRef) -> !fir.ref>>}>> {name = "v%[[VAL_13:.*]]"} + !CHECK: %[[VAL_14:.*]] = omp.map.info var_ptr(%[[VAL_1]]#1 : !fir.ref<[[MY_TYPE]]>, [[MY_TYPE]]) map_clauses(tofrom) capture(ByRef) members(%[[VAL_9]], %[[VAL_12]] : [3], [1] : !fir.ref>, !fir.ref>>}>>) -> !fir.ref<[[MY_TYPE]]> {name = "v", partial_map = true} + !CHECK: omp.declare_mapper.info map_entries(%[[VAL_14]], %[[VAL_9]], %[[VAL_12]] : !fir.ref<[[MY_TYPE]]>, !fir.ref>, !fir.ref>>}>>) + !CHECK: } + !$omp declare mapper (my_mapper : my_type2 :: v) map (v%arr) map (alloc : v%temp) +end subroutine declare_mapper_2 + +!--- omp-declare-mapper-3.f90 +subroutine declare_mapper_3 + type my_type + integer :: num_vals + integer, allocatable :: values(:) + end type + + type my_type2 + type(my_type) :: my_type_var + real, dimension(250) :: arr + end type + + !CHECK: omp.declare_mapper @[[MY_TYPE_MAPPER2:_QQFdeclare_mapper_3my_mapper2]] : [[MY_TYPE2:!fir\.type<_QFdeclare_mapper_3Tmy_type2\{my_type_var:!fir\.type<_QFdeclare_mapper_3Tmy_type\{num_vals:i32,values:!fir\.box>>}>,arr:!fir\.array<250xf32>}>]] { + !CHECK: ^bb0(%[[VAL_0:.*]]: !fir.ref<[[MY_TYPE2]]>): + !CHECK: %[[VAL_1:.*]]:2 = hlfir.declare %[[VAL_0]] {uniq_name = "_QFdeclare_mapper_3Ev"} : (!fir.ref<[[MY_TYPE2]]>) -> (!fir.ref<[[MY_TYPE2]]>, !fir.ref<[[MY_TYPE2]]>) + !CHECK: %[[VAL_2:.*]] = hlfir.designate %[[VAL_1]]#0{"my_type_var"} : (!fir.ref<[[MY_TYPE2]]>) -> !fir.ref<[[MY_TYPE:!fir\.type<_QFdeclare_mapper_3Tmy_type\{num_vals:i32,values:!fir\.box>>}>]]> + !CHECK: %[[VAL_3:.*]] = omp.map.info var_ptr(%[[VAL_2]] : !fir.ref<[[MY_TYPE]]>, [[MY_TYPE]]) mapper(@[[MY_TYPE_MAPPER:_QQFdeclare_mapper_3my_mapper]]) map_clauses(tofrom) capture(ByRef) -> !fir.ref<[[MY_TYPE]]> {name = 
"v%[[VAL_4:.*]]"} + !CHECK: %[[VAL_5:.*]] = arith.constant 250 : index + !CHECK: %[[VAL_6:.*]] = fir.shape %[[VAL_5]] : (index) -> !fir.shape<1> + !CHECK: %[[VAL_7:.*]] = hlfir.designate %[[VAL_1]]#0{"arr"} shape %[[VAL_6]] : (!fir.ref<[[MY_TYPE2]]>, !fir.shape<1>) -> !fir.ref> + !CHECK: %[[VAL_8:.*]] = arith.constant 1 : index + !CHECK: %[[VAL_9:.*]] = arith.constant 0 : index + !CHECK: %[[VAL_10:.*]] = arith.subi %[[VAL_5]], %[[VAL_8]] : index + !CHECK: %[[VAL_11:.*]] = omp.map.bounds lower_bound(%[[VAL_9]] : index) upper_bound(%[[VAL_10]] : index) extent(%[[VAL_5]] : index) stride(%[[VAL_8]] : index) start_idx(%[[VAL_8]] : index) + !CHECK: %[[VAL_12:.*]] = omp.map.info var_ptr(%[[VAL_7]] : !fir.ref>, !fir.array<250xf32>) map_clauses(tofrom) capture(ByRef) bounds(%[[VAL_11]]) -> !fir.ref> {name = "v%[[VAL_13:.*]]"} + !CHECK: %[[VAL_14:.*]] = omp.map.info var_ptr(%[[VAL_1]]#1 : !fir.ref<[[MY_TYPE2]]>, [[MY_TYPE2]]) map_clauses(tofrom) capture(ByRef) members(%[[VAL_3]], %[[VAL_12]] : [0], [1] : !fir.ref<[[MY_TYPE]]>, !fir.ref>) -> !fir.ref<[[MY_TYPE2]]> {name = "v", partial_map = true} + !CHECK: omp.declare_mapper.info map_entries(%[[VAL_14]], %[[VAL_3]], %[[VAL_12]] : !fir.ref<[[MY_TYPE2]]>, !fir.ref<[[MY_TYPE]]>, !fir.ref>) + !CHECK: } + + !CHECK: omp.declare_mapper @[[MY_TYPE_MAPPER]] : [[MY_TYPE]] { + !CHECK: ^bb0(%[[VAL_0:.*]]: !fir.ref<[[MY_TYPE]]>): + !CHECK: %[[VAL_1:.*]]:2 = hlfir.declare %[[VAL_0]] {uniq_name = "_QFdeclare_mapper_3Evar"} : (!fir.ref<[[MY_TYPE]]>) -> (!fir.ref<[[MY_TYPE]]>, !fir.ref<[[MY_TYPE]]>) + !CHECK: %[[VAL_2:.*]] = hlfir.designate %[[VAL_1]]#0{"values"} {fortran_attrs = #fir.var_attrs} : (!fir.ref<[[MY_TYPE]]>) -> !fir.ref>>> + !CHECK: %[[VAL_3:.*]] = fir.load %[[VAL_2]] : !fir.ref>>> + !CHECK: %[[VAL_4:.*]] = fir.box_addr %[[VAL_3]] : (!fir.box>>) -> !fir.heap> + !CHECK: %[[VAL_5:.*]] = arith.constant 0 : index + !CHECK: %[[VAL_6:.*]]:3 = fir.box_dims %[[VAL_3]], %[[VAL_5]] : (!fir.box>>, index) -> (index, index, index) + !CHECK: %[[VAL_7:.*]] = arith.constant 0 : index + !CHECK: %[[VAL_8:.*]] = arith.constant 1 : index + !CHECK: %[[VAL_9:.*]] = arith.constant 1 : index + !CHECK: %[[VAL_10:.*]] = arith.subi %[[VAL_9]], %[[VAL_6]]#0 : index + !CHECK: %[[VAL_11:.*]] = hlfir.designate %[[VAL_1]]#0{"num_vals"} : (!fir.ref<[[MY_TYPE]]>) -> !fir.ref + !CHECK: %[[VAL_12:.*]] = fir.load %[[VAL_11]] : !fir.ref + !CHECK: %[[VAL_13:.*]] = fir.convert %[[VAL_12]] : (i32) -> i64 + !CHECK: %[[VAL_14:.*]] = fir.convert %[[VAL_13]] : (i64) -> index + !CHECK: %[[VAL_15:.*]] = arith.subi %[[VAL_14]], %[[VAL_6]]#0 : index + !CHECK: %[[VAL_16:.*]] = omp.map.bounds lower_bound(%[[VAL_10]] : index) upper_bound(%[[VAL_15]] : index) extent(%[[VAL_6]]#1 : index) stride(%[[VAL_8]] : index) start_idx(%[[VAL_6]]#0 : index) + !CHECK: %[[VAL_17:.*]] = arith.constant 1 : index + !CHECK: %[[VAL_18:.*]] = fir.coordinate_of %[[VAL_1]]#0, %[[VAL_17]] : (!fir.ref<[[MY_TYPE]]>, index) -> !fir.ref>>> + !CHECK: %[[VAL_19:.*]] = fir.box_offset %[[VAL_18]] base_addr : (!fir.ref>>>) -> !fir.llvm_ptr>> + !CHECK: %[[VAL_20:.*]] = omp.map.info var_ptr(%[[VAL_18]] : !fir.ref>>>, i32) var_ptr_ptr(%[[VAL_19]] : !fir.llvm_ptr>>) map_clauses(tofrom) capture(ByRef) bounds(%[[VAL_16]]) -> !fir.llvm_ptr>> {name = ""} + !CHECK: %[[VAL_21:.*]] = omp.map.info var_ptr(%[[VAL_18]] : !fir.ref>>>, !fir.box>>) map_clauses(to) capture(ByRef) -> !fir.ref>>> {name = "var%[[VAL_22:.*]](1:var%[[VAL_23:.*]])"} + !CHECK: %[[VAL_24:.*]] = omp.map.info var_ptr(%[[VAL_1]]#1 : !fir.ref<[[MY_TYPE]]>, [[MY_TYPE]]) 
map_clauses(tofrom) capture(ByRef) members(%[[VAL_21]], %[[VAL_20]] : [1], [1, 0] : !fir.ref>>>, !fir.llvm_ptr>>) -> !fir.ref<[[MY_TYPE]]> {name = "var"} + !CHECK: omp.declare_mapper.info map_entries(%[[VAL_24]], %[[VAL_21]], %[[VAL_20]] : !fir.ref<[[MY_TYPE]]>, !fir.ref>>>, !fir.llvm_ptr>>) + !CHECK: } + !$omp declare mapper (my_mapper : my_type :: var) map (var, var%values (1:var%num_vals)) + !$omp declare mapper (my_mapper2 : my_type2 :: v) map (mapper(my_mapper) : v%my_type_var) map (tofrom : v%arr) +end subroutine declare_mapper_3 diff --git a/flang/test/Lower/OpenMP/map-mapper.f90 b/flang/test/Lower/OpenMP/map-mapper.f90 new file mode 100644 index 0000000000000..0d8fe7344bfab --- /dev/null +++ b/flang/test/Lower/OpenMP/map-mapper.f90 @@ -0,0 +1,30 @@ +! RUN: %flang_fc1 -emit-hlfir -fopenmp -fopenmp-version=50 %s -o - | FileCheck %s +program p + integer, parameter :: n = 256 + type t1 + integer :: x(256) + end type t1 + + !$omp declare mapper(xx : t1 :: nn) map(to: nn, nn%x) + !$omp declare mapper(t1 :: nn) map(from: nn) + + !CHECK-LABEL: omp.declare_mapper @_QQFt1.default : !fir.type<_QFTt1{x:!fir.array<256xi32>}> + !CHECK-LABEL: omp.declare_mapper @_QQFxx : !fir.type<_QFTt1{x:!fir.array<256xi32>}> + + type(t1) :: a, b + !CHECK: %[[MAP_A:.*]] = omp.map.info var_ptr(%{{.*}} : {{.*}}, {{.*}}) mapper(@_QQFxx) map_clauses(tofrom) capture(ByRef) -> {{.*}} {name = "a"} + !CHECK: omp.target map_entries(%[[MAP_A]] -> %{{.*}}, %{{.*}} -> %{{.*}} : {{.*}}, {{.*}}) { + !$omp target map(mapper(xx) : a) + do i = 1, n + a%x(i) = i + end do + !$omp end target + + !CHECK: %[[MAP_B:.*]] = omp.map.info var_ptr(%{{.*}} : {{.*}}, {{.*}}) mapper(@_QQFt1.default) map_clauses(tofrom) capture(ByRef) -> {{.*}} {name = "b"} + !CHECK: omp.target map_entries(%[[MAP_B]] -> %{{.*}}, %{{.*}} -> %{{.*}} : {{.*}}, {{.*}}) { + !$omp target map(mapper(default) : b) + do i = 1, n + b%x(i) = i + end do + !$omp end target +end program p diff --git a/libc/config/linux/x86_64/entrypoints.txt b/libc/config/linux/x86_64/entrypoints.txt index a4f6671a59789..b38fe7520e293 100644 --- a/libc/config/linux/x86_64/entrypoints.txt +++ b/libc/config/linux/x86_64/entrypoints.txt @@ -648,6 +648,7 @@ if(LIBC_TYPES_HAS_FLOAT16) list(APPEND TARGET_LIBM_ENTRYPOINTS # math.h C23 _Float16 entrypoints libc.src.math.asinf16 + libc.src.math.acosf16 libc.src.math.canonicalizef16 libc.src.math.ceilf16 libc.src.math.copysignf16 diff --git a/libc/docs/headers/math/index.rst b/libc/docs/headers/math/index.rst index 3e45e3e618abb..3010f9457ae11 100644 --- a/libc/docs/headers/math/index.rst +++ b/libc/docs/headers/math/index.rst @@ -250,7 +250,7 @@ Higher Math Functions +-----------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+ | | (float) | (double) | (long double) | (float16) | (float128) | C23 Definition Section | C23 Error Handling Section | +===========+==================+=================+========================+======================+========================+========================+============================+ -| acos | |check| | | | | | 7.12.4.1 | F.10.1.1 | +| acos | |check| | | | |check| | | 7.12.4.1 | F.10.1.1 | +-----------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+ | acosh | |check| | | | | | 7.12.5.1 | F.10.2.1 | 
+-----------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+ diff --git a/libc/include/math.yaml b/libc/include/math.yaml index b98bc55f6cc53..a66f981030864 100644 --- a/libc/include/math.yaml +++ b/libc/include/math.yaml @@ -14,6 +14,13 @@ functions: return_type: float arguments: - type: float + - name: acosf16 + standards: + - stdc + return_type: _Float16 + arguments: + - type: _Float16 + guard: LIBC_TYPES_HAS_FLOAT16 - name: acoshf standards: - stdc diff --git a/libc/src/math/CMakeLists.txt b/libc/src/math/CMakeLists.txt index 82551a4b57f45..f18a73d46f9aa 100644 --- a/libc/src/math/CMakeLists.txt +++ b/libc/src/math/CMakeLists.txt @@ -42,6 +42,8 @@ endfunction() add_math_entrypoint_object(acos) add_math_entrypoint_object(acosf) +add_math_entrypoint_object(acosf16) + add_math_entrypoint_object(acosh) add_math_entrypoint_object(acoshf) diff --git a/libc/src/math/acosf16.h b/libc/src/math/acosf16.h new file mode 100644 index 0000000000000..df30be2c537c9 --- /dev/null +++ b/libc/src/math/acosf16.h @@ -0,0 +1,21 @@ +//===-- Implementation header for acosf16 -----------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIBC_SRC_MATH_ACOSF16_H +#define LLVM_LIBC_SRC_MATH_ACOSF16_H + +#include "src/__support/macros/config.h" +#include "src/__support/macros/properties/types.h" + +namespace LIBC_NAMESPACE_DECL { + +float16 acosf16(float16 x); + +} // namespace LIBC_NAMESPACE_DECL + +#endif // LLVM_LIBC_SRC_MATH_ACOSF16_H diff --git a/libc/src/math/generic/CMakeLists.txt b/libc/src/math/generic/CMakeLists.txt index 537d5b5ad94ed..079e03d953d1c 100644 --- a/libc/src/math/generic/CMakeLists.txt +++ b/libc/src/math/generic/CMakeLists.txt @@ -4034,6 +4034,26 @@ add_entrypoint_object( .inv_trigf_utils ) +add_entrypoint_object( + acosf16 + SRCS + acosf16.cpp + HDRS + ../acosf16.h + DEPENDS + libc.hdr.errno_macros + libc.hdr.fenv_macros + libc.src.__support.FPUtil.cast + libc.src.__support.FPUtil.fenv_impl + libc.src.__support.FPUtil.fp_bits + libc.src.__support.FPUtil.except_value_utils + libc.src.__support.FPUtil.multiply_add + libc.src.__support.FPUtil.polyeval + libc.src.__support.FPUtil.sqrt + libc.src.__support.macros.optimization + libc.src.__support.macros.properties.types +) + add_entrypoint_object( atanf SRCS diff --git a/libc/src/math/generic/acosf16.cpp b/libc/src/math/generic/acosf16.cpp new file mode 100644 index 0000000000000..94f6dfddce20d --- /dev/null +++ b/libc/src/math/generic/acosf16.cpp @@ -0,0 +1,146 @@ +//===-- Half-precision acosf16(x) function --------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception. 
+//
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/acosf16.h"
+#include "hdr/errno_macros.h"
+#include "hdr/fenv_macros.h"
+#include "src/__support/FPUtil/FEnvImpl.h"
+#include "src/__support/FPUtil/FPBits.h"
+#include "src/__support/FPUtil/PolyEval.h"
+#include "src/__support/FPUtil/cast.h"
+#include "src/__support/FPUtil/except_value_utils.h"
+#include "src/__support/FPUtil/multiply_add.h"
+#include "src/__support/FPUtil/sqrt.h"
+#include "src/__support/macros/optimization.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+// Generated by Sollya using the following command:
+// > round(pi/2, D, RN);
+// > round(pi, D, RN);
+static constexpr float PI_2 = 0x1.921fb54442d18p0f;
+static constexpr float PI = 0x1.921fb54442d18p1f;
+
+static constexpr size_t N_EXCEPTS = 2;
+
+static constexpr fputil::ExceptValues<float16, N_EXCEPTS> ACOSF16_EXCEPTS{{
+    // (input, RZ output, RU offset, RD offset, RN offset)
+    {0xacaf, 0x3e93, 1, 0, 0},
+    {0xb874, 0x4052, 1, 0, 1},
+}};
+
+LLVM_LIBC_FUNCTION(float16, acosf16, (float16 x)) {
+  using FPBits = fputil::FPBits<float16>;
+  FPBits xbits(x);
+
+  uint16_t x_u = xbits.uintval();
+  uint16_t x_abs = x_u & 0x7fff;
+  uint16_t x_sign = x_u >> 15;
+  float xf = x;
+
+  // Handle exceptional values
+  if (auto r = ACOSF16_EXCEPTS.lookup(x_u); LIBC_UNLIKELY(r.has_value()))
+    return r.value();
+
+  // |x| == 0x1p0, x is 1 or -1
+  // if x is (-)1, return pi, else
+  // if x is (+)1, return 0
+  if (LIBC_UNLIKELY(x_abs == 0x3c00))
+    return fputil::cast<float16>(x_sign ? 0x1.921fb54442d18p1 : 0.0f);
+
+  // |x| > 0x1p0, |x| > 1, or x is NaN.
+  if (LIBC_UNLIKELY(x_abs > 0x3c00)) {
+    // acosf16(NaN) = NaN
+    if (xbits.is_nan()) {
+      if (xbits.is_signaling_nan()) {
+        fputil::raise_except_if_required(FE_INVALID);
+        return FPBits::quiet_nan().get_val();
+      }
+
+      return x;
+    }
+
+    // 1 < |x| <= +/-inf
+    fputil::raise_except_if_required(FE_INVALID);
+    fputil::set_errno_if_required(EDOM);
+
+    return FPBits::quiet_nan().get_val();
+  }
+
+  float xsq = xf * xf;
+
+  // |x| <= 0x1p-1, |x| <= 0.5
+  if (x_abs <= 0x3800) {
+    // if x is 0, return pi/2
+    if (LIBC_UNLIKELY(x_abs == 0))
+      return fputil::cast<float16>(PI_2);
+
+    // Note that: acos(x) = pi/2 + asin(-x) = pi/2 - asin(x)
+    // Degree-6 minimax polynomial of asin(x) generated by Sollya with:
+    // > P = fpminimax(asin(x)/x, [|0, 2, 4, 6, 8|], [|SG...|], [0, 0.5]);
+    float interm =
+        fputil::polyeval(xsq, 0x1.000002p0f, 0x1.554c2ap-3f, 0x1.3541ccp-4f,
+                         0x1.43b2d6p-5f, 0x1.a0d73ep-5f);
+    return fputil::cast<float16>(fputil::multiply_add(-xf, interm, PI_2));
+  }
+
+  // When |x| > 0.5, assume that 0.5 < |x| <= 1
+  //
+  // Step-by-step range-reduction proof:
+  //  1: Let y = asin(x), such that, x = sin(y)
+  //  2: From complementary angle identity:
+  //       x = sin(y) = cos(pi/2 - y)
+  //  3: Let z = pi/2 - y, such that x = cos(z)
+  //  4: From double angle formula; cos(2A) = 1 - 2 * sin^2(A):
+  //       z = 2A, z/2 = A
+  //       cos(z) = 1 - 2 * sin^2(z/2)
+  //  5: Make sin(z/2) subject of the formula:
+  //       sin(z/2) = sqrt((1 - cos(z))/2)
+  //  6: Recall [3]; x = cos(z). Therefore:
+  //       sin(z/2) = sqrt((1 - x)/2)
+  //  7: Let u = (1 - x)/2
+  //  8: Therefore:
+  //       asin(sqrt(u)) = z/2
+  //       2 * asin(sqrt(u)) = z
+  //  9: Recall [3]; z = pi/2 - y. Therefore:
+  //       y = pi/2 - z
+  //       y = pi/2 - 2 * asin(sqrt(u))
+  // 10: Recall [1], y = asin(x).
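+  //     (An illustrative numeric check of steps [7]-[9], with assumed input
+  //      x = 0.75: u = (1 - 0.75)/2 = 0.125, sqrt(u) ~= 0.353553,
+  //      asin(0.353553) ~= 0.361367, and pi/2 - 2 * 0.361367 ~= 0.848062,
+  //      which is indeed asin(0.75).)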
Therefore: + // asin(x) = pi/2 - 2 * asin(sqrt(u)) + // 11: Recall that: acos(x) = pi/2 + asin(-x) = pi/2 - asin(x) + // Therefore: + // acos(x) = pi/2 - (pi/2 - 2 * asin(sqrt(u))) + // acos(x) = 2 * asin(sqrt(u)) + // + // THE RANGE REDUCTION, HOW? + // 12: Recall [7], u = (1 - x)/2 + // 13: Since 0.5 < x <= 1, therefore: + // 0 <= u <= 0.25 and 0 <= sqrt(u) <= 0.5 + // + // Hence, we can reuse the same [0, 0.5] domain polynomial approximation for + // Step [11] as `sqrt(u)` is in range. + // When -1 < x <= -0.5, the identity: + // acos(x) = pi - acos(-x) + // allows us to compute for the negative x value (lhs) + // with a positive x value instead (rhs). + + float xf_abs = (xf < 0 ? -xf : xf); + float u = fputil::multiply_add(-0.5f, xf_abs, 0.5f); + float sqrt_u = fputil::sqrt(u); + + // Degree-6 minimax polynomial of asin(x) generated by Sollya with: + // > P = fpminimax(asin(x)/x, [|0, 2, 4, 6, 8|], [|SG...|], [0, 0.5]); + float asin_sqrt_u = + sqrt_u * fputil::polyeval(u, 0x1.000002p0f, 0x1.554c2ap-3f, + 0x1.3541ccp-4f, 0x1.43b2d6p-5f, 0x1.a0d73ep-5f); + + return fputil::cast( + x_sign ? fputil::multiply_add(-2.0f, asin_sqrt_u, PI) : 2 * asin_sqrt_u); +} +} // namespace LIBC_NAMESPACE_DECL diff --git a/libc/src/math/generic/tanf16.cpp b/libc/src/math/generic/tanf16.cpp index 48aa51e456a8a..08eaee2d06f86 100644 --- a/libc/src/math/generic/tanf16.cpp +++ b/libc/src/math/generic/tanf16.cpp @@ -19,9 +19,9 @@ namespace LIBC_NAMESPACE_DECL { -constexpr size_t N_EXCEPTS = 9; +static constexpr size_t N_EXCEPTS = 9; -constexpr fputil::ExceptValues TANF16_EXCEPTS{{ +static constexpr fputil::ExceptValues TANF16_EXCEPTS{{ // (input, RZ output, RU offset, RD offset, RN offset) {0x2894, 0x2894, 1, 0, 1}, {0x3091, 0x3099, 1, 0, 0}, diff --git a/libc/test/src/math/CMakeLists.txt b/libc/test/src/math/CMakeLists.txt index 6a3dd8c9deff0..53ddd301900c0 100644 --- a/libc/test/src/math/CMakeLists.txt +++ b/libc/test/src/math/CMakeLists.txt @@ -2210,6 +2210,17 @@ add_fp_unittest( libc.src.__support.FPUtil.fp_bits ) +add_fp_unittest( + acosf16_test + NEED_MPFR + SUITE + libc-math-unittests + SRCS + acosf16_test.cpp + DEPENDS + libc.src.math.acosf16 +) + add_fp_unittest( atanf_test NEED_MPFR diff --git a/libc/test/src/math/acosf16_test.cpp b/libc/test/src/math/acosf16_test.cpp new file mode 100644 index 0000000000000..9624cd6d01f1a --- /dev/null +++ b/libc/test/src/math/acosf16_test.cpp @@ -0,0 +1,42 @@ +//===-- Exhaustive test for asinf16 ---------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "src/math/acosf16.h" +#include "test/UnitTest/FPMatcher.h" +#include "test/UnitTest/Test.h" +#include "utils/MPFRWrapper/MPFRUtils.h" + +using LlvmLibcAcosf16Test = LIBC_NAMESPACE::testing::FPTest; + +namespace mpfr = LIBC_NAMESPACE::testing::mpfr; + +// Range: [0, Inf] +static constexpr uint16_t POS_START = 0x0000U; +static constexpr uint16_t POS_STOP = 0x7c00U; + +// Range: [-Inf, 0] +static constexpr uint16_t NEG_START = 0x8000U; +static constexpr uint16_t NEG_STOP = 0xfc00U; + +TEST_F(LlvmLibcAcosf16Test, PositiveRange) { + for (uint16_t v = POS_START; v <= POS_STOP; ++v) { + float16 x = FPBits(v).get_val(); + + EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Acos, x, + LIBC_NAMESPACE::acosf16(x), 0.5); + } +} + +TEST_F(LlvmLibcAcosf16Test, NegativeRange) { + for (uint16_t v = NEG_START; v <= NEG_STOP; ++v) { + float16 x = FPBits(v).get_val(); + + EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Acos, x, + LIBC_NAMESPACE::acosf16(x), 0.5); + } +} diff --git a/libc/test/src/math/smoke/CMakeLists.txt b/libc/test/src/math/smoke/CMakeLists.txt index 14447728fb18a..6f94440d826d9 100644 --- a/libc/test/src/math/smoke/CMakeLists.txt +++ b/libc/test/src/math/smoke/CMakeLists.txt @@ -3980,6 +3980,17 @@ add_fp_unittest( libc.src.__support.FPUtil.fp_bits ) +add_fp_unittest( + acosf16_test + SUITE + libc-math-smoke-tests + SRCS + acosf16_test.cpp + DEPENDS + libc.src.errno.errno + libc.src.math.acosf16 +) + add_fp_unittest( atanf_test SUITE diff --git a/libc/test/src/math/smoke/acosf16_test.cpp b/libc/test/src/math/smoke/acosf16_test.cpp new file mode 100644 index 0000000000000..5518a0bace1d4 --- /dev/null +++ b/libc/test/src/math/smoke/acosf16_test.cpp @@ -0,0 +1,39 @@ +//===-- Unittests for acosf16 ---------------------------------------------===// +// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception. 
+// +//===----------------------------------------------------------------------===// + +#include "src/errno/libc_errno.h" +#include "src/math/acosf16.h" +#include "test/UnitTest/FPMatcher.h" +#include "test/UnitTest/Test.h" + +using LlvmLibcAcosf16Test = LIBC_NAMESPACE::testing::FPTest; + +TEST_F(LlvmLibcAcosf16Test, SpecialNumbers) { + LIBC_NAMESPACE::libc_errno = 0; + EXPECT_FP_EQ(aNaN, LIBC_NAMESPACE::acosf16(aNaN)); + EXPECT_MATH_ERRNO(0); + + EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, LIBC_NAMESPACE::acosf16(sNaN), FE_INVALID); + EXPECT_MATH_ERRNO(0); + + EXPECT_FP_EQ(aNaN, LIBC_NAMESPACE::acosf16(inf)); + EXPECT_MATH_ERRNO(EDOM); + + EXPECT_FP_EQ(aNaN, LIBC_NAMESPACE::acosf16(neg_inf)); + EXPECT_MATH_ERRNO(EDOM); + + EXPECT_FP_EQ(zero, LIBC_NAMESPACE::acosf16(1.0f)); + EXPECT_MATH_ERRNO(0); + + EXPECT_FP_EQ(aNaN, LIBC_NAMESPACE::acosf16(2.0f)); + EXPECT_MATH_ERRNO(EDOM); + + EXPECT_FP_EQ(aNaN, LIBC_NAMESPACE::acosf16(-2.0f)); + EXPECT_MATH_ERRNO(EDOM); +} diff --git a/libclc/amdgcn/lib/cl_khr_int64_extended_atomics/minmax_helpers.ll b/libclc/amdgcn/lib/cl_khr_int64_extended_atomics/minmax_helpers.ll index 98f1f54718a1f..7f12556c0abbc 100644 --- a/libclc/amdgcn/lib/cl_khr_int64_extended_atomics/minmax_helpers.ll +++ b/libclc/amdgcn/lib/cl_khr_int64_extended_atomics/minmax_helpers.ll @@ -1,9 +1,3 @@ -#if __clang_major__ >= 7 -target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5" -#else -target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64" -#endif - define i64 @__clc__sync_fetch_and_min_global_8(i64 addrspace(1)* nocapture %ptr, i64 %value) nounwind alwaysinline { entry: %0 = atomicrmw volatile min i64 addrspace(1)* %ptr, i64 %value seq_cst diff --git a/libclc/clc/include/clc/clc_convert.h b/libclc/clc/include/clc/clc_convert.h index 20bbd57540b30..12cd988d59c54 100644 --- a/libclc/clc/include/clc/clc_convert.h +++ b/libclc/clc/include/clc/clc_convert.h @@ -1,6 +1,8 @@ #ifndef __CLC_CLC_CONVERT_H__ #define __CLC_CLC_CONVERT_H__ +#include + #define _CLC_CONVERT_DECL(FROM_TYPE, TO_TYPE, SUFFIX) \ _CLC_OVERLOAD _CLC_DECL TO_TYPE __clc_convert_##TO_TYPE##SUFFIX(FROM_TYPE x); diff --git a/libclc/clc/lib/generic/integer/clc_mad_sat.cl b/libclc/clc/lib/generic/integer/clc_mad_sat.cl index 4e559dba2b2f5..530e9c84b10a0 100644 --- a/libclc/clc/lib/generic/integer/clc_mad_sat.cl +++ b/libclc/clc/lib/generic/integer/clc_mad_sat.cl @@ -1,3 +1,4 @@ +#include #include #include #include @@ -8,34 +9,23 @@ #include #include -#define __CLC_CONVERT_TY(X, TY) __builtin_convertvector(X, TY) - -// Macro for defining mad_sat variants for char/uchar/short/ushort -// FIXME: Once using __clc_convert_ty, can easily unify scalar and vector defs #define __CLC_DEFINE_SIMPLE_MAD_SAT(TYPE, UP_TYPE, LIT_PREFIX) \ _CLC_OVERLOAD _CLC_DEF TYPE __clc_mad_sat(TYPE x, TYPE y, TYPE z) { \ - return __clc_clamp( \ - (UP_TYPE)__clc_mad24((UP_TYPE)x, (UP_TYPE)y, (UP_TYPE)z), \ - (UP_TYPE)LIT_PREFIX##_MIN, (UP_TYPE)LIT_PREFIX##_MAX); \ - } - -#define __CLC_DEFINE_SIMPLE_MAD_SAT_VEC(TYPE, UP_TYPE, LIT_PREFIX) \ - _CLC_OVERLOAD _CLC_DEF TYPE __clc_mad_sat(TYPE x, TYPE y, TYPE z) { \ - UP_TYPE upscaled_mad = __clc_mad24(__CLC_CONVERT_TY(x, UP_TYPE), \ - __CLC_CONVERT_TY(y, UP_TYPE), \ - __CLC_CONVERT_TY(z, UP_TYPE)); \ + UP_TYPE upscaled_mad = \ + __clc_mad24(__clc_convert_##UP_TYPE(x), 
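/* A worked instance of this macro, assuming TYPE=char2, UP_TYPE=int2,      \
   LIT_PREFIX=CHAR: the char2 operands are widened via __clc_convert_int2,  \
   combined with __clc_mad24, clamped to [CHAR_MIN, CHAR_MAX], and the      \
   result narrowed back with __clc_convert_char2.                        */ \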
__clc_convert_##UP_TYPE(y), \ + __clc_convert_##UP_TYPE(z)); \ UP_TYPE clamped_mad = __clc_clamp(upscaled_mad, (UP_TYPE)LIT_PREFIX##_MIN, \ (UP_TYPE)LIT_PREFIX##_MAX); \ - return __CLC_CONVERT_TY(clamped_mad, TYPE); \ + return __clc_convert_##TYPE(clamped_mad); \ } #define __CLC_DEFINE_SIMPLE_MAD_SAT_ALL_TYS(TYPE, UP_TYPE, LIT_PREFIX) \ __CLC_DEFINE_SIMPLE_MAD_SAT(TYPE, UP_TYPE, LIT_PREFIX) \ - __CLC_DEFINE_SIMPLE_MAD_SAT_VEC(TYPE##2, UP_TYPE##2, LIT_PREFIX) \ - __CLC_DEFINE_SIMPLE_MAD_SAT_VEC(TYPE##3, UP_TYPE##3, LIT_PREFIX) \ - __CLC_DEFINE_SIMPLE_MAD_SAT_VEC(TYPE##4, UP_TYPE##4, LIT_PREFIX) \ - __CLC_DEFINE_SIMPLE_MAD_SAT_VEC(TYPE##8, UP_TYPE##8, LIT_PREFIX) \ - __CLC_DEFINE_SIMPLE_MAD_SAT_VEC(TYPE##16, UP_TYPE##16, LIT_PREFIX) + __CLC_DEFINE_SIMPLE_MAD_SAT(TYPE##2, UP_TYPE##2, LIT_PREFIX) \ + __CLC_DEFINE_SIMPLE_MAD_SAT(TYPE##3, UP_TYPE##3, LIT_PREFIX) \ + __CLC_DEFINE_SIMPLE_MAD_SAT(TYPE##4, UP_TYPE##4, LIT_PREFIX) \ + __CLC_DEFINE_SIMPLE_MAD_SAT(TYPE##8, UP_TYPE##8, LIT_PREFIX) \ + __CLC_DEFINE_SIMPLE_MAD_SAT(TYPE##16, UP_TYPE##16, LIT_PREFIX) __CLC_DEFINE_SIMPLE_MAD_SAT_ALL_TYS(char, int, CHAR) __CLC_DEFINE_SIMPLE_MAD_SAT_ALL_TYS(uchar, uint, UCHAR) @@ -67,20 +57,13 @@ __CLC_DEFINE_UINTLONG_MAD_SAT_ALL_TYS(ulong, long, ULONG) INTTY mhi = __clc_mul_hi(x, y); \ UINTTY mlo = __clc_as_##UINTTY(x * y); \ SLONGTY m = __clc_upsample(mhi, mlo); \ - m += __CLC_CONVERT_TY(z, SLONGTY); \ + m += __clc_convert_##SLONGTY(z); \ m = __clc_clamp(m, (SLONGTY)INT_MIN, (SLONGTY)INT_MAX); \ - return __CLC_CONVERT_TY(m, INTTY); \ + return __clc_convert_##INTTY(m); \ } -// FIXME: Once using __clc_convert_ty, can easily unify scalar and vector defs #define __CLC_DEFINE_SINT_MAD_SAT_ALL_TYS(INTTY, UINTTY, SLONGTY) \ - _CLC_OVERLOAD _CLC_DEF INTTY __clc_mad_sat(INTTY x, INTTY y, INTTY z) { \ - INTTY mhi = __clc_mul_hi(x, y); \ - UINTTY mlo = __clc_as_##UINTTY(x * y); \ - SLONGTY m = __clc_upsample(mhi, mlo); \ - m += z; \ - return __clc_clamp(m, (SLONGTY)INT_MIN, (SLONGTY)INT_MAX); \ - } \ + __CLC_DEFINE_SINT_MAD_SAT(INTTY, UINTTY, SLONGTY) \ __CLC_DEFINE_SINT_MAD_SAT(INTTY##2, UINTTY##2, SLONGTY##2) \ __CLC_DEFINE_SINT_MAD_SAT(INTTY##3, UINTTY##3, SLONGTY##3) \ __CLC_DEFINE_SINT_MAD_SAT(INTTY##4, UINTTY##4, SLONGTY##4) \ diff --git a/libclc/clc/lib/generic/integer/clc_mul_hi.cl b/libclc/clc/lib/generic/integer/clc_mul_hi.cl index cf4acc5429cb4..28457ac6126dd 100644 --- a/libclc/clc/lib/generic/integer/clc_mul_hi.cl +++ b/libclc/clc/lib/generic/integer/clc_mul_hi.cl @@ -1,31 +1,24 @@ +#include #include #include #include -// TODO: Replace with __clc_convert_ when available -#define __CLC_CONVERT_TY(X, TY) __builtin_convertvector(X, TY) - -#define __CLC_MUL_HI_VEC_IMPL(BGENTYPE, GENTYPE, GENSIZE) \ - _CLC_OVERLOAD _CLC_DEF GENTYPE __clc_mul_hi(GENTYPE x, GENTYPE y) { \ - BGENTYPE large_x = __CLC_CONVERT_TY(x, BGENTYPE); \ - BGENTYPE large_y = __CLC_CONVERT_TY(y, BGENTYPE); \ - BGENTYPE large_mul_hi = (large_x * large_y) >> (BGENTYPE)GENSIZE; \ - return __CLC_CONVERT_TY(large_mul_hi, GENTYPE); \ - } - // For all types EXCEPT long, which is implemented separately #define __CLC_MUL_HI_IMPL(BGENTYPE, GENTYPE, GENSIZE) \ _CLC_OVERLOAD _CLC_DEF GENTYPE __clc_mul_hi(GENTYPE x, GENTYPE y) { \ - return (GENTYPE)(((BGENTYPE)x * (BGENTYPE)y) >> GENSIZE); \ + BGENTYPE large_x = __clc_convert_##BGENTYPE(x); \ + BGENTYPE large_y = __clc_convert_##BGENTYPE(y); \ + BGENTYPE large_mul_hi = (large_x * large_y) >> (BGENTYPE)GENSIZE; \ + return __clc_convert_##GENTYPE(large_mul_hi); \ } #define __CLC_MUL_HI_DEC_IMPL(BTYPE, TYPE, 
BITS) \ __CLC_MUL_HI_IMPL(BTYPE, TYPE, BITS) \ - __CLC_MUL_HI_VEC_IMPL(BTYPE##2, TYPE##2, BITS) \ - __CLC_MUL_HI_VEC_IMPL(BTYPE##3, TYPE##3, BITS) \ - __CLC_MUL_HI_VEC_IMPL(BTYPE##4, TYPE##4, BITS) \ - __CLC_MUL_HI_VEC_IMPL(BTYPE##8, TYPE##8, BITS) \ - __CLC_MUL_HI_VEC_IMPL(BTYPE##16, TYPE##16, BITS) + __CLC_MUL_HI_IMPL(BTYPE##2, TYPE##2, BITS) \ + __CLC_MUL_HI_IMPL(BTYPE##3, TYPE##3, BITS) \ + __CLC_MUL_HI_IMPL(BTYPE##4, TYPE##4, BITS) \ + __CLC_MUL_HI_IMPL(BTYPE##8, TYPE##8, BITS) \ + __CLC_MUL_HI_IMPL(BTYPE##16, TYPE##16, BITS) _CLC_OVERLOAD _CLC_DEF long __clc_mul_hi(long x, long y) { long f, o, i; @@ -98,8 +91,8 @@ _CLC_OVERLOAD _CLC_DEF ulong __clc_mul_hi(ulong x, ulong y) { f = x_hi * y_hi; \ o = x_hi * y_lo; \ i = x_lo * y_hi; \ - l = __CLC_CONVERT_TY(x_lo * y_lo, UTY); \ - i += __CLC_CONVERT_TY(l >> (UTY)32, TY); \ + l = __clc_convert_##UTY(x_lo * y_lo); \ + i += __clc_convert_##TY(l >> (UTY)32); \ \ return f + (__clc_hadd(o, i) >> (TY)31); \ } @@ -128,5 +121,3 @@ __CLC_MUL_HI_TYPES() #undef __CLC_MUL_HI_LONG_VEC_IMPL #undef __CLC_MUL_HI_DEC_IMPL #undef __CLC_MUL_HI_IMPL -#undef __CLC_MUL_HI_VEC_IMPL -#undef __CLC_CONVERT_TY diff --git a/libclc/clc/lib/generic/integer/clc_upsample.cl b/libclc/clc/lib/generic/integer/clc_upsample.cl index d53ef7240bfc2..b8f884dc9f63c 100644 --- a/libclc/clc/lib/generic/integer/clc_upsample.cl +++ b/libclc/clc/lib/generic/integer/clc_upsample.cl @@ -1,35 +1,31 @@ +#include #include -// TODO: Replace with __clc_convert_ when available -#define __CLC_CONVERT_TY(X, TY) __builtin_convertvector(X, TY) - -#define __CLC_UPSAMPLE_VEC_IMPL(BGENTYPE, GENTYPE, UGENTYPE, GENSIZE) \ +#define __CLC_UPSAMPLE_IMPL(BGENTYPE, GENTYPE, UGENTYPE, GENSIZE) \ _CLC_OVERLOAD _CLC_DEF BGENTYPE __clc_upsample(GENTYPE hi, UGENTYPE lo) { \ - BGENTYPE large_hi = __CLC_CONVERT_TY(hi, BGENTYPE); \ - BGENTYPE large_lo = __CLC_CONVERT_TY(lo, BGENTYPE); \ + BGENTYPE large_hi = __clc_convert_##BGENTYPE(hi); \ + BGENTYPE large_lo = __clc_convert_##BGENTYPE(lo); \ return (large_hi << (BGENTYPE)GENSIZE) | large_lo; \ } -#define __CLC_UPSAMPLE_IMPL(BGENTYPE, GENTYPE, UGENTYPE, GENSIZE) \ - _CLC_OVERLOAD _CLC_DEF BGENTYPE __clc_upsample(GENTYPE hi, UGENTYPE lo) { \ - return ((BGENTYPE)hi << GENSIZE) | lo; \ - } \ - __CLC_UPSAMPLE_VEC_IMPL(BGENTYPE##2, GENTYPE##2, UGENTYPE##2, GENSIZE) \ - __CLC_UPSAMPLE_VEC_IMPL(BGENTYPE##3, GENTYPE##3, UGENTYPE##3, GENSIZE) \ - __CLC_UPSAMPLE_VEC_IMPL(BGENTYPE##4, GENTYPE##4, UGENTYPE##4, GENSIZE) \ - __CLC_UPSAMPLE_VEC_IMPL(BGENTYPE##8, GENTYPE##8, UGENTYPE##8, GENSIZE) \ - __CLC_UPSAMPLE_VEC_IMPL(BGENTYPE##16, GENTYPE##16, UGENTYPE##16, GENSIZE) +#define __CLC_UPSAMPLE_IMPL_ALL_TYS(BGENTYPE, GENTYPE, UGENTYPE, GENSIZE) \ + __CLC_UPSAMPLE_IMPL(BGENTYPE, GENTYPE, UGENTYPE, GENSIZE) \ + __CLC_UPSAMPLE_IMPL(BGENTYPE##2, GENTYPE##2, UGENTYPE##2, GENSIZE) \ + __CLC_UPSAMPLE_IMPL(BGENTYPE##3, GENTYPE##3, UGENTYPE##3, GENSIZE) \ + __CLC_UPSAMPLE_IMPL(BGENTYPE##4, GENTYPE##4, UGENTYPE##4, GENSIZE) \ + __CLC_UPSAMPLE_IMPL(BGENTYPE##8, GENTYPE##8, UGENTYPE##8, GENSIZE) \ + __CLC_UPSAMPLE_IMPL(BGENTYPE##16, GENTYPE##16, UGENTYPE##16, GENSIZE) #define __CLC_UPSAMPLE_TYPES() \ - __CLC_UPSAMPLE_IMPL(short, char, uchar, 8) \ - __CLC_UPSAMPLE_IMPL(ushort, uchar, uchar, 8) \ - __CLC_UPSAMPLE_IMPL(int, short, ushort, 16) \ - __CLC_UPSAMPLE_IMPL(uint, ushort, ushort, 16) \ - __CLC_UPSAMPLE_IMPL(long, int, uint, 32) \ - __CLC_UPSAMPLE_IMPL(ulong, uint, uint, 32) + __CLC_UPSAMPLE_IMPL_ALL_TYS(short, char, uchar, 8) \ + __CLC_UPSAMPLE_IMPL_ALL_TYS(ushort, uchar, uchar, 8) \ 
+ __CLC_UPSAMPLE_IMPL_ALL_TYS(int, short, ushort, 16) \ + __CLC_UPSAMPLE_IMPL_ALL_TYS(uint, ushort, ushort, 16) \ + __CLC_UPSAMPLE_IMPL_ALL_TYS(long, int, uint, 32) \ + __CLC_UPSAMPLE_IMPL_ALL_TYS(ulong, uint, uint, 32) __CLC_UPSAMPLE_TYPES() #undef __CLC_UPSAMPLE_TYPES +#undef __CLC_UPSAMPLE_IMPL_ALL_TYS #undef __CLC_UPSAMPLE_IMPL -#undef __CLC_CONVERT_TY diff --git a/libclc/cmake/modules/AddLibclc.cmake b/libclc/cmake/modules/AddLibclc.cmake index 5347b0822477b..40e31e0ba4f45 100644 --- a/libclc/cmake/modules/AddLibclc.cmake +++ b/libclc/cmake/modules/AddLibclc.cmake @@ -225,7 +225,8 @@ function(add_libclc_builtin_set) message( FATAL_ERROR "Must provide ARCH, ARCH_SUFFIX, and TRIPLE" ) endif() - set( bytecode_files "" ) + set( bytecode_files ) + set( bytecode_ir_files ) foreach( file IN LISTS ARG_GEN_FILES ARG_LIB_FILES ) # We need to take each file and produce an absolute input file, as well # as a unique architecture-specific output file. We deal with a mix of @@ -263,9 +264,23 @@ function(add_libclc_builtin_set) "${ARG_COMPILE_FLAGS}" -I${CMAKE_CURRENT_SOURCE_DIR}/${file_dir} DEPENDENCIES ${input_file_dep} ) - list( APPEND bytecode_files ${output_file} ) + + # Collect all files originating in LLVM IR separately + get_filename_component( file_ext ${file} EXT ) + if( ${file_ext} STREQUAL ".ll" ) + list( APPEND bytecode_ir_files ${output_file} ) + else() + list( APPEND bytecode_files ${output_file} ) + endif() endforeach() + # Prepend all LLVM IR files to the list so they are linked into the final + # bytecode modules first. This helps to suppress unnecessary warnings + # regarding different data layouts while linking. Any LLVM IR files without a + # data layout will (silently) be given the first data layout the linking + # process comes across. + list( PREPEND bytecode_files ${bytecode_ir_files} ) + set( builtins_comp_lib_tgt builtins.comp.${ARG_ARCH_SUFFIX} ) add_custom_target( ${builtins_comp_lib_tgt} DEPENDS ${bytecode_files} @@ -351,8 +366,9 @@ function(add_libclc_builtin_set) add_custom_target( prepare-${obj_suffix} ALL DEPENDS ${obj_suffix} ) set_target_properties( "prepare-${obj_suffix}" PROPERTIES FOLDER "libclc/Device IR/Prepare" ) - # nvptx-- targets don't include workitem builtins - if( NOT ARG_TRIPLE MATCHES ".*ptx.*--$" ) + # nvptx-- targets don't include workitem builtins, and clspv targets don't + # include all OpenCL builtins + if( NOT ARG_ARCH MATCHES "^(nvptx|clspv)(64)?$" ) add_test( NAME external-calls-${obj_suffix} COMMAND ./check_external_calls.sh ${CMAKE_CURRENT_BINARY_DIR}/${obj_suffix} ${LLVM_TOOLS_BINARY_DIR} WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} ) diff --git a/libclc/generic/lib/math/clc_exp10.cl b/libclc/generic/lib/math/clc_exp10.cl index 0eb53d013a85a..4f839a9815ac0 100644 --- a/libclc/generic/lib/math/clc_exp10.cl +++ b/libclc/generic/lib/math/clc_exp10.cl @@ -21,6 +21,7 @@ */ #include +#include #include #include #include @@ -70,7 +71,7 @@ _CLC_DEF _CLC_OVERLOAD float __clc_exp10(float x) { int return_inf = x > X_MAX; int return_zero = x < X_MIN; - int n = convert_int(x * R_64_BY_LOG10_2); + int n = __clc_convert_int(x * R_64_BY_LOG10_2); float fn = (float)n; int j = n & 0x3f; @@ -89,11 +90,11 @@ _CLC_DEF _CLC_OVERLOAD float __clc_exp10(float x) { float two_to_jby64 = USE_TABLE(exp_tbl, j); z2 = __clc_mad(two_to_jby64, z2, two_to_jby64); - float z2s = z2 * as_float(0x1 << (m + 149)); - float z2n = as_float(as_int(z2) + m2); + float z2s = z2 * __clc_as_float(0x1 << (m + 149)); + float z2n = __clc_as_float(__clc_as_int(z2) + m2); z2 = m <= -126 ? 
z2s : z2n; - z2 = return_inf ? as_float(PINFBITPATT_SP32) : z2; + z2 = return_inf ? __clc_as_float(PINFBITPATT_SP32) : z2; z2 = return_zero ? 0.0f : z2; z2 = return_nan ? x : z2; return z2; @@ -115,7 +116,7 @@ _CLC_DEF _CLC_OVERLOAD double __clc_exp10(double x) { // ln(10) const double R_LN10 = 0x1.26bb1bbb55516p+1; - int n = convert_int(x * R_64_BY_LOG10_2); + int n = __clc_convert_int(x * R_64_BY_LOG10_2); double dn = (double)n; @@ -144,15 +145,15 @@ _CLC_DEF _CLC_OVERLOAD double __clc_exp10(double x) { int n1 = m >> 2; int n2 = m - n1; - double z3 = z2 * as_double(((long)n1 + 1023) << 52); - z3 *= as_double(((long)n2 + 1023) << 52); + double z3 = z2 * __clc_as_double(((long)n1 + 1023) << 52); + z3 *= __clc_as_double(((long)n2 + 1023) << 52); z2 = ldexp(z2, m); z2 = small_value ? z3 : z2; z2 = __clc_isnan(x) ? x : z2; - z2 = x > X_MAX ? as_double(PINFBITPATT_DP64) : z2; + z2 = x > X_MAX ? __clc_as_double(PINFBITPATT_DP64) : z2; z2 = x < X_MIN ? 0.0 : z2; return z2; diff --git a/libclc/generic/lib/math/clc_fmod.cl b/libclc/generic/lib/math/clc_fmod.cl index a4a2ab791df68..31a5d4dc05c03 100644 --- a/libclc/generic/lib/math/clc_fmod.cl +++ b/libclc/generic/lib/math/clc_fmod.cl @@ -21,6 +21,7 @@ */ #include +#include #include #include #include @@ -31,19 +32,19 @@ #include _CLC_DEF _CLC_OVERLOAD float __clc_fmod(float x, float y) { - int ux = as_int(x); + int ux = __clc_as_int(x); int ax = ux & EXSIGNBIT_SP32; - float xa = as_float(ax); + float xa = __clc_as_float(ax); int sx = ux ^ ax; int ex = ax >> EXPSHIFTBITS_SP32; - int uy = as_int(y); + int uy = __clc_as_int(y); int ay = uy & EXSIGNBIT_SP32; - float ya = as_float(ay); + float ya = __clc_as_float(ay); int ey = ay >> EXPSHIFTBITS_SP32; - float xr = as_float(0x3f800000 | (ax & 0x007fffff)); - float yr = as_float(0x3f800000 | (ay & 0x007fffff)); + float xr = __clc_as_float(0x3f800000 | (ax & 0x007fffff)); + float yr = __clc_as_float(0x3f800000 | (ay & 0x007fffff)); int c; int k = ex - ey; @@ -62,17 +63,17 @@ _CLC_DEF _CLC_OVERLOAD float __clc_fmod(float x, float y) { xr = lt ? xa : xr; yr = lt ? ya : yr; - float s = as_float(ey << EXPSHIFTBITS_SP32); + float s = __clc_as_float(ey << EXPSHIFTBITS_SP32); xr *= lt ? 1.0f : s; c = ax == ay; xr = c ? 0.0f : xr; - xr = as_float(sx ^ as_int(xr)); + xr = __clc_as_float(sx ^ __clc_as_int(xr)); c = ax > PINFBITPATT_SP32 | ay > PINFBITPATT_SP32 | ax == PINFBITPATT_SP32 | ay == 0; - xr = c ? as_float(QNANBITPATT_SP32) : xr; + xr = c ? __clc_as_float(QNANBITPATT_SP32) : xr; return xr; } @@ -80,18 +81,18 @@ _CLC_BINARY_VECTORIZE(_CLC_DEF _CLC_OVERLOAD, float, __clc_fmod, float, float); #ifdef cl_khr_fp64 _CLC_DEF _CLC_OVERLOAD double __clc_fmod(double x, double y) { - ulong ux = as_ulong(x); + ulong ux = __clc_as_ulong(x); ulong ax = ux & ~SIGNBIT_DP64; ulong xsgn = ux ^ ax; - double dx = as_double(ax); - int xexp = convert_int(ax >> EXPSHIFTBITS_DP64); + double dx = __clc_as_double(ax); + int xexp = __clc_convert_int(ax >> EXPSHIFTBITS_DP64); int xexp1 = 11 - (int)__clc_clz(ax & MANTBITS_DP64); xexp1 = xexp < 1 ? xexp1 : xexp; - ulong uy = as_ulong(y); + ulong uy = __clc_as_ulong(y); ulong ay = uy & ~SIGNBIT_DP64; - double dy = as_double(ay); - int yexp = convert_int(ay >> EXPSHIFTBITS_DP64); + double dy = __clc_as_double(ay); + int yexp = __clc_convert_int(ay >> EXPSHIFTBITS_DP64); int yexp1 = 11 - (int)__clc_clz(ay & MANTBITS_DP64); yexp1 = yexp < 1 ? yexp1 : yexp; @@ -151,12 +152,12 @@ _CLC_DEF _CLC_OVERLOAD double __clc_fmod(double x, double y) { dx += i ? 
w : 0.0; // At this point, dx lies in the range [0,dy) - double ret = as_double(xsgn ^ as_ulong(dx)); - dx = as_double(ax); + double ret = __clc_as_double(xsgn ^ __clc_as_ulong(dx)); + dx = __clc_as_double(ax); // Now handle |x| == |y| int c = dx == dy; - t = as_double(xsgn); + t = __clc_as_double(xsgn); ret = c ? t : ret; // Next, handle |x| < |y| @@ -167,7 +168,7 @@ _CLC_DEF _CLC_OVERLOAD double __clc_fmod(double x, double y) { // |y| is 0 c = dy == 0.0; - ret = c ? as_double(QNANBITPATT_DP64) : ret; + ret = c ? __clc_as_double(QNANBITPATT_DP64) : ret; // y is +-Inf, NaN c = yexp > BIASEDEMAX_DP64; @@ -176,7 +177,7 @@ _CLC_DEF _CLC_OVERLOAD double __clc_fmod(double x, double y) { // x is +=Inf, NaN c = xexp > BIASEDEMAX_DP64; - ret = c ? as_double(QNANBITPATT_DP64) : ret; + ret = c ? __clc_as_double(QNANBITPATT_DP64) : ret; return ret; } diff --git a/libclc/generic/lib/math/clc_pow.cl b/libclc/generic/lib/math/clc_pow.cl index 5dcd392c0f7ed..fce9573c39bac 100644 --- a/libclc/generic/lib/math/clc_pow.cl +++ b/libclc/generic/lib/math/clc_pow.cl @@ -21,6 +21,7 @@ */ #include +#include #include #include #include @@ -68,18 +69,18 @@ _CLC_DEF _CLC_OVERLOAD float __clc_pow(float x, float y) { - int ix = as_int(x); + int ix = __clc_as_int(x); int ax = ix & EXSIGNBIT_SP32; int xpos = ix == ax; - int iy = as_int(y); + int iy = __clc_as_int(y); int ay = iy & EXSIGNBIT_SP32; int ypos = iy == ay; /* Extra precise log calculation * First handle case that x is close to 1 */ - float r = 1.0f - as_float(ax); + float r = 1.0f - __clc_as_float(ax); int near1 = __clc_fabs(r) < 0x1.0p-4f; float r2 = r * r; @@ -103,7 +104,7 @@ _CLC_DEF _CLC_OVERLOAD float __clc_pow(float x, float y) { /* Computations for x not near 1 */ int m = (int)(ax >> EXPSHIFTBITS_SP32) - EXPBIAS_SP32; float mf = (float)m; - int ixs = as_int(as_float(ax | 0x3f800000) - 1.0f); + int ixs = __clc_as_int(__clc_as_float(ax | 0x3f800000) - 1.0f); float mfs = (float)((ixs >> EXPSHIFTBITS_SP32) - 253); int c = m == -127; int ixn = c ? ixs : ax; @@ -112,8 +113,8 @@ _CLC_DEF _CLC_OVERLOAD float __clc_pow(float x, float y) { int indx = (ixn & 0x007f0000) + ((ixn & 0x00008000) << 1); /* F - Y */ - float f = as_float(0x3f000000 | indx) - - as_float(0x3f000000 | (ixn & MANTBITS_SP32)); + float f = __clc_as_float(0x3f000000 | indx) - + __clc_as_float(0x3f000000 | (ixn & MANTBITS_SP32)); indx = indx >> 16; float2 tv = USE_TABLE(log_inv_tbl_ep, indx); @@ -141,10 +142,10 @@ _CLC_DEF _CLC_OVERLOAD float __clc_pow(float x, float y) { lh = near1 ? lh_near1 : lh; l = near1 ? 
l_near1 : l; - float gh = as_float(as_int(l) & 0xfffff000); + float gh = __clc_as_float(__clc_as_int(l) & 0xfffff000); float gt = ((ltt - (lt - lth)) + ((lh - l) + lt)) + (l - gh); - float yh = as_float(iy & 0xfffff000); + float yh = __clc_as_float(iy & 0xfffff000); float yt = y - yh; @@ -155,7 +156,7 @@ _CLC_DEF _CLC_OVERLOAD float __clc_pow(float x, float y) { /* Extra precise exp of ylogx */ /* 64/log2 : 92.332482616893657 */ const float R_64_BY_LOG2 = 0x1.715476p+6f; - int n = convert_int(ylogx * R_64_BY_LOG2); + int n = __clc_convert_int(ylogx * R_64_BY_LOG2); float nf = (float)n; int j = n & 0x3f; @@ -178,14 +179,14 @@ _CLC_DEF _CLC_OVERLOAD float __clc_pow(float x, float y) { float expylogx = __clc_mad(tv.s0, poly, __clc_mad(tv.s1, poly, tv.s1)) + tv.s0; - float sexpylogx = expylogx * as_float(0x1 << (m + 149)); - float texpylogx = as_float(as_int(expylogx) + m2); + float sexpylogx = expylogx * __clc_as_float(0x1 << (m + 149)); + float texpylogx = __clc_as_float(__clc_as_int(expylogx) + m2); expylogx = m < -125 ? sexpylogx : texpylogx; /* Result is +-Inf if (ylogx + ylogx_t) > 128*log2 */ expylogx = (ylogx > 0x1.62e430p+6f) | (ylogx == 0x1.62e430p+6f & ylogx_t > -0x1.05c610p-22f) - ? as_float(PINFBITPATT_SP32) + ? __clc_as_float(PINFBITPATT_SP32) : expylogx; /* Result is 0 if ylogx < -149*log2 */ @@ -205,9 +206,9 @@ _CLC_DEF _CLC_OVERLOAD float __clc_pow(float x, float y) { inty = yexp < 1 ? 0 : inty; inty = yexp > 24 ? 2 : inty; - float signval = as_float((as_uint(expylogx) ^ SIGNBIT_SP32)); + float signval = __clc_as_float((__clc_as_uint(expylogx) ^ SIGNBIT_SP32)); expylogx = ((inty == 1) & !xpos) ? signval : expylogx; - int ret = as_int(expylogx); + int ret = __clc_as_int(expylogx); /* Corner case handling */ ret = (!xpos & (inty == 0)) ? QNANBITPATT_SP32 : ret; @@ -236,7 +237,7 @@ _CLC_DEF _CLC_OVERLOAD float __clc_pow(float x, float y) { ret = ay == 0 ? 0x3f800000 : ret; ret = ix == 0x3f800000 ? 
0x3f800000 : ret; - return as_float(ret); + return __clc_as_float(ret); } _CLC_BINARY_VECTORIZE(_CLC_DEF _CLC_OVERLOAD, float, __clc_pow, float, float) @@ -245,11 +246,11 @@ _CLC_DEF _CLC_OVERLOAD double __clc_pow(double x, double y) { const double real_log2_tail = 5.76999904754328540596e-08; const double real_log2_lead = 6.93147122859954833984e-01; - long ux = as_long(x); + long ux = __clc_as_long(x); long ax = ux & (~SIGNBIT_DP64); int xpos = ax == ux; - long uy = as_long(y); + long uy = __clc_as_long(y); long ay = uy & (~SIGNBIT_DP64); int ypos = ay == uy; @@ -261,7 +262,8 @@ _CLC_DEF _CLC_OVERLOAD double __clc_pow(double x, double y) { double xexp = (double)exp; long mantissa = ax & 0x000FFFFFFFFFFFFFL; - long temp_ux = as_long(as_double(0x3ff0000000000000L | mantissa) - 1.0); + long temp_ux = + __clc_as_long(__clc_as_double(0x3ff0000000000000L | mantissa) - 1.0); exp = ((temp_ux & 0x7FF0000000000000L) >> 52) - 2045; double xexp1 = (double)exp; long mantissa1 = temp_ux & 0x000FFFFFFFFFFFFFL; @@ -273,14 +275,14 @@ _CLC_DEF _CLC_OVERLOAD double __clc_pow(double x, double y) { ((mantissa & 0x0000080000000000) << 1); int index = rax >> 44; - double F = as_double(rax | 0x3FE0000000000000L); - double Y = as_double(mantissa | 0x3FE0000000000000L); + double F = __clc_as_double(rax | 0x3FE0000000000000L); + double Y = __clc_as_double(mantissa | 0x3FE0000000000000L); double f = F - Y; double2 tv = USE_TABLE(log_f_inv_tbl, index); double log_h = tv.s0; double log_t = tv.s1; double f_inv = (log_h + log_t) * f; - double r1 = as_double(as_long(f_inv) & 0xfffffffff8000000L); + double r1 = __clc_as_double(__clc_as_long(f_inv) & 0xfffffffff8000000L); double r2 = fma(-F, r1, f) * (log_h + log_t); double r = r1 + r2; @@ -304,11 +306,11 @@ _CLC_DEF _CLC_OVERLOAD double __clc_pow(double x, double y) { double resT_h = poly0h; double H = resT + resH; - double H_h = as_double(as_long(H) & 0xfffffffff8000000L); + double H_h = __clc_as_double(__clc_as_long(H) & 0xfffffffff8000000L); double T = (resH - H + resT) + (resT_t - (resT + resT_h)) + (H - H_h); H = H_h; - double y_head = as_double(uy & 0xfffffffff8000000L); + double y_head = __clc_as_double(uy & 0xfffffffff8000000L); double y_tail = y - y_head; double temp = fma(y_tail, H, fma(y_head, T, y_tail * T)); @@ -354,7 +356,7 @@ _CLC_DEF _CLC_OVERLOAD double __clc_pow(double x, double y) { expv = fma(f, q, f2) + f1; expv = ldexp(expv, m); - expv = v > max_exp_arg ? as_double(0x7FF0000000000000L) : expv; + expv = v > max_exp_arg ? __clc_as_double(0x7FF0000000000000L) : expv; expv = v < min_exp_arg ? 0.0 : expv; } @@ -376,7 +378,7 @@ _CLC_DEF _CLC_OVERLOAD double __clc_pow(double x, double y) { expv *= (inty == 1) & !xpos ? -1.0 : 1.0; - long ret = as_long(expv); + long ret = __clc_as_long(expv); // Now all the edge cases ret = !xpos & (inty == 0) ? QNANBITPATT_DP64 : ret; @@ -410,7 +412,7 @@ _CLC_DEF _CLC_OVERLOAD double __clc_pow(double x, double y) { ret = ay == 0L ? 0x3ff0000000000000L : ret; ret = ux == 0x3ff0000000000000L ? 
0x3ff0000000000000L : ret; - return as_double(ret); + return __clc_as_double(ret); } _CLC_BINARY_VECTORIZE(_CLC_DEF _CLC_OVERLOAD, double, __clc_pow, double, double) #endif diff --git a/libclc/generic/lib/math/clc_pown.cl b/libclc/generic/lib/math/clc_pown.cl index a0f968c238e99..a613b2998c3f6 100644 --- a/libclc/generic/lib/math/clc_pown.cl +++ b/libclc/generic/lib/math/clc_pown.cl @@ -21,6 +21,7 @@ */ #include +#include #include #include #include @@ -67,17 +68,17 @@ _CLC_DEF _CLC_OVERLOAD float __clc_pown(float x, int ny) { float y = (float)ny; - int ix = as_int(x); + int ix = __clc_as_int(x); int ax = ix & EXSIGNBIT_SP32; int xpos = ix == ax; - int iy = as_int(y); + int iy = __clc_as_int(y); int ay = iy & EXSIGNBIT_SP32; int ypos = iy == ay; // Extra precise log calculation // First handle case that x is close to 1 - float r = 1.0f - as_float(ax); + float r = 1.0f - __clc_as_float(ax); int near1 = __clc_fabs(r) < 0x1.0p-4f; float r2 = r * r; @@ -101,7 +102,7 @@ _CLC_DEF _CLC_OVERLOAD float __clc_pown(float x, int ny) { // Computations for x not near 1 int m = (int)(ax >> EXPSHIFTBITS_SP32) - EXPBIAS_SP32; float mf = (float)m; - int ixs = as_int(as_float(ax | 0x3f800000) - 1.0f); + int ixs = __clc_as_int(__clc_as_float(ax | 0x3f800000) - 1.0f); float mfs = (float)((ixs >> EXPSHIFTBITS_SP32) - 253); int c = m == -127; int ixn = c ? ixs : ax; @@ -110,8 +111,8 @@ _CLC_DEF _CLC_OVERLOAD float __clc_pown(float x, int ny) { int indx = (ixn & 0x007f0000) + ((ixn & 0x00008000) << 1); // F - Y - float f = as_float(0x3f000000 | indx) - - as_float(0x3f000000 | (ixn & MANTBITS_SP32)); + float f = __clc_as_float(0x3f000000 | indx) - + __clc_as_float(0x3f000000 | (ixn & MANTBITS_SP32)); indx = indx >> 16; float2 tv = USE_TABLE(log_inv_tbl_ep, indx); @@ -139,10 +140,10 @@ _CLC_DEF _CLC_OVERLOAD float __clc_pown(float x, int ny) { lh = near1 ? lh_near1 : lh; l = near1 ? l_near1 : l; - float gh = as_float(as_int(l) & 0xfffff000); + float gh = __clc_as_float(__clc_as_int(l) & 0xfffff000); float gt = ((ltt - (lt - lth)) + ((lh - l) + lt)) + (l - gh); - float yh = as_float(iy & 0xfffff000); + float yh = __clc_as_float(iy & 0xfffff000); float yt = (float)(ny - (int)yh); @@ -153,7 +154,7 @@ _CLC_DEF _CLC_OVERLOAD float __clc_pown(float x, int ny) { // Extra precise exp of ylogx // 64/log2 : 92.332482616893657 const float R_64_BY_LOG2 = 0x1.715476p+6f; - int n = convert_int(ylogx * R_64_BY_LOG2); + int n = __clc_convert_int(ylogx * R_64_BY_LOG2); float nf = (float)n; int j = n & 0x3f; @@ -176,14 +177,14 @@ _CLC_DEF _CLC_OVERLOAD float __clc_pown(float x, int ny) { float expylogx = __clc_mad(tv.s0, poly, __clc_mad(tv.s1, poly, tv.s1)) + tv.s0; - float sexpylogx = expylogx * as_float(0x1 << (m + 149)); - float texpylogx = as_float(as_int(expylogx) + m2); + float sexpylogx = expylogx * __clc_as_float(0x1 << (m + 149)); + float texpylogx = __clc_as_float(__clc_as_int(expylogx) + m2); expylogx = m < -125 ? sexpylogx : texpylogx; // Result is +-Inf if (ylogx + ylogx_t) > 128*log2 expylogx = ((ylogx > 0x1.62e430p+6f) | (ylogx == 0x1.62e430p+6f & ylogx_t > -0x1.05c610p-22f)) - ? as_float(PINFBITPATT_SP32) + ? __clc_as_float(PINFBITPATT_SP32) : expylogx; // Result is 0 if ylogx < -149*log2 @@ -196,9 +197,9 @@ _CLC_DEF _CLC_OVERLOAD float __clc_pown(float x, int ny) { int inty = 2 - (ny & 1); - float signval = as_float((as_uint(expylogx) ^ SIGNBIT_SP32)); + float signval = __clc_as_float((__clc_as_uint(expylogx) ^ SIGNBIT_SP32)); expylogx = ((inty == 1) & !xpos) ? 
signval : expylogx; - int ret = as_int(expylogx); + int ret = __clc_as_int(expylogx); // Corner case handling int xinf = xpos ? PINFBITPATT_SP32 : NINFBITPATT_SP32; @@ -218,7 +219,7 @@ _CLC_DEF _CLC_OVERLOAD float __clc_pown(float x, int ny) { ret = ax > PINFBITPATT_SP32 ? ix : ret; ret = ny == 0 ? 0x3f800000 : ret; - return as_float(ret); + return __clc_as_float(ret); } _CLC_BINARY_VECTORIZE(_CLC_DEF _CLC_OVERLOAD, float, __clc_pown, float, int) @@ -229,11 +230,11 @@ _CLC_DEF _CLC_OVERLOAD double __clc_pown(double x, int ny) { double y = (double)ny; - long ux = as_long(x); + long ux = __clc_as_long(x); long ax = ux & (~SIGNBIT_DP64); int xpos = ax == ux; - long uy = as_long(y); + long uy = __clc_as_long(y); long ay = uy & (~SIGNBIT_DP64); int ypos = ay == uy; @@ -245,7 +246,8 @@ _CLC_DEF _CLC_OVERLOAD double __clc_pown(double x, int ny) { double xexp = (double)exp; long mantissa = ax & 0x000FFFFFFFFFFFFFL; - long temp_ux = as_long(as_double(0x3ff0000000000000L | mantissa) - 1.0); + long temp_ux = + __clc_as_long(__clc_as_double(0x3ff0000000000000L | mantissa) - 1.0); exp = ((temp_ux & 0x7FF0000000000000L) >> 52) - 2045; double xexp1 = (double)exp; long mantissa1 = temp_ux & 0x000FFFFFFFFFFFFFL; @@ -257,14 +259,14 @@ _CLC_DEF _CLC_OVERLOAD double __clc_pown(double x, int ny) { ((mantissa & 0x0000080000000000) << 1); int index = rax >> 44; - double F = as_double(rax | 0x3FE0000000000000L); - double Y = as_double(mantissa | 0x3FE0000000000000L); + double F = __clc_as_double(rax | 0x3FE0000000000000L); + double Y = __clc_as_double(mantissa | 0x3FE0000000000000L); double f = F - Y; double2 tv = USE_TABLE(log_f_inv_tbl, index); double log_h = tv.s0; double log_t = tv.s1; double f_inv = (log_h + log_t) * f; - double r1 = as_double(as_long(f_inv) & 0xfffffffff8000000L); + double r1 = __clc_as_double(__clc_as_long(f_inv) & 0xfffffffff8000000L); double r2 = fma(-F, r1, f) * (log_h + log_t); double r = r1 + r2; @@ -288,15 +290,15 @@ _CLC_DEF _CLC_OVERLOAD double __clc_pown(double x, int ny) { double resT_h = poly0h; double H = resT + resH; - double H_h = as_double(as_long(H) & 0xfffffffff8000000L); + double H_h = __clc_as_double(__clc_as_long(H) & 0xfffffffff8000000L); double T = (resH - H + resT) + (resT_t - (resT + resT_h)) + (H - H_h); H = H_h; - double y_head = as_double(uy & 0xfffffffff8000000L); + double y_head = __clc_as_double(uy & 0xfffffffff8000000L); double y_tail = y - y_head; int mask_2_24 = ay > 0x4170000000000000; // 2^24 - int nyh = convert_int(y_head); + int nyh = __clc_convert_int(y_head); int nyt = ny - nyh; double y_tail1 = (double)nyt; y_tail = mask_2_24 ? y_tail1 : y_tail; @@ -344,7 +346,7 @@ _CLC_DEF _CLC_OVERLOAD double __clc_pown(double x, int ny) { expv = fma(f, q, f2) + f1; expv = ldexp(expv, m); - expv = v > max_exp_arg ? as_double(0x7FF0000000000000L) : expv; + expv = v > max_exp_arg ? __clc_as_double(0x7FF0000000000000L) : expv; expv = v < min_exp_arg ? 0.0 : expv; } @@ -357,7 +359,7 @@ _CLC_DEF _CLC_OVERLOAD double __clc_pown(double x, int ny) { expv *= ((inty == 1) & !xpos) ? -1.0 : 1.0; - long ret = as_long(expv); + long ret = __clc_as_long(expv); // Now all the edge cases long xinf = xpos ? PINFBITPATT_DP64 : NINFBITPATT_DP64; @@ -378,7 +380,7 @@ _CLC_DEF _CLC_OVERLOAD double __clc_pown(double x, int ny) { ret = ax > PINFBITPATT_DP64 ? ux : ret; ret = ny == 0 ? 
0x3ff0000000000000L : ret; - return as_double(ret); + return __clc_as_double(ret); } _CLC_BINARY_VECTORIZE(_CLC_DEF _CLC_OVERLOAD, double, __clc_pown, double, int) #endif @@ -388,7 +390,7 @@ _CLC_BINARY_VECTORIZE(_CLC_DEF _CLC_OVERLOAD, double, __clc_pown, double, int) #pragma OPENCL EXTENSION cl_khr_fp16 : enable _CLC_OVERLOAD _CLC_DEF half __clc_pown(half x, int y) { - return (half)__clc_pown((float)x, y); + return (half)__clc_pown((float)x, y); } _CLC_BINARY_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, half, __clc_pown, half, int); diff --git a/libclc/generic/lib/math/clc_powr.cl b/libclc/generic/lib/math/clc_powr.cl index 7e1a6f2a02e7a..7876acaee89a6 100644 --- a/libclc/generic/lib/math/clc_powr.cl +++ b/libclc/generic/lib/math/clc_powr.cl @@ -21,6 +21,7 @@ */ #include +#include #include #include #include @@ -65,17 +66,17 @@ // ((((expT * poly) + expT) + expH*poly) + expH) _CLC_DEF _CLC_OVERLOAD float __clc_powr(float x, float y) { - int ix = as_int(x); + int ix = __clc_as_int(x); int ax = ix & EXSIGNBIT_SP32; int xpos = ix == ax; - int iy = as_int(y); + int iy = __clc_as_int(y); int ay = iy & EXSIGNBIT_SP32; int ypos = iy == ay; // Extra precise log calculation // First handle case that x is close to 1 - float r = 1.0f - as_float(ax); + float r = 1.0f - __clc_as_float(ax); int near1 = __clc_fabs(r) < 0x1.0p-4f; float r2 = r * r; @@ -99,7 +100,7 @@ _CLC_DEF _CLC_OVERLOAD float __clc_powr(float x, float y) { // Computations for x not near 1 int m = (int)(ax >> EXPSHIFTBITS_SP32) - EXPBIAS_SP32; float mf = (float)m; - int ixs = as_int(as_float(ax | 0x3f800000) - 1.0f); + int ixs = __clc_as_int(__clc_as_float(ax | 0x3f800000) - 1.0f); float mfs = (float)((ixs >> EXPSHIFTBITS_SP32) - 253); int c = m == -127; int ixn = c ? ixs : ax; @@ -108,8 +109,8 @@ _CLC_DEF _CLC_OVERLOAD float __clc_powr(float x, float y) { int indx = (ixn & 0x007f0000) + ((ixn & 0x00008000) << 1); // F - Y - float f = as_float(0x3f000000 | indx) - - as_float(0x3f000000 | (ixn & MANTBITS_SP32)); + float f = __clc_as_float(0x3f000000 | indx) - + __clc_as_float(0x3f000000 | (ixn & MANTBITS_SP32)); indx = indx >> 16; float2 tv = USE_TABLE(log_inv_tbl_ep, indx); @@ -137,10 +138,10 @@ _CLC_DEF _CLC_OVERLOAD float __clc_powr(float x, float y) { lh = near1 ? lh_near1 : lh; l = near1 ? l_near1 : l; - float gh = as_float(as_int(l) & 0xfffff000); + float gh = __clc_as_float(__clc_as_int(l) & 0xfffff000); float gt = ((ltt - (lt - lth)) + ((lh - l) + lt)) + (l - gh); - float yh = as_float(iy & 0xfffff000); + float yh = __clc_as_float(iy & 0xfffff000); float yt = y - yh; @@ -151,7 +152,7 @@ _CLC_DEF _CLC_OVERLOAD float __clc_powr(float x, float y) { // Extra precise exp of ylogx // 64/log2 : 92.332482616893657 const float R_64_BY_LOG2 = 0x1.715476p+6f; - int n = convert_int(ylogx * R_64_BY_LOG2); + int n = __clc_convert_int(ylogx * R_64_BY_LOG2); float nf = (float)n; int j = n & 0x3f; @@ -173,14 +174,14 @@ _CLC_DEF _CLC_OVERLOAD float __clc_powr(float x, float y) { float expylogx = __clc_mad(tv.s0, poly, __clc_mad(tv.s1, poly, tv.s1)) + tv.s0; - float sexpylogx = expylogx * as_float(0x1 << (m + 149)); - float texpylogx = as_float(as_int(expylogx) + m2); + float sexpylogx = expylogx * __clc_as_float(0x1 << (m + 149)); + float texpylogx = __clc_as_float(__clc_as_int(expylogx) + m2); expylogx = m < -125 ? sexpylogx : texpylogx; // Result is +-Inf if (ylogx + ylogx_t) > 128*log2 expylogx = ((ylogx > 0x1.62e430p+6f) | (ylogx == 0x1.62e430p+6f & ylogx_t > -0x1.05c610p-22f)) - ? as_float(PINFBITPATT_SP32) + ? 
__clc_as_float(PINFBITPATT_SP32) : expylogx; // Result is 0 if ylogx < -149*log2 @@ -199,9 +200,9 @@ _CLC_DEF _CLC_OVERLOAD float __clc_powr(float x, float y) { inty = yexp < 1 ? 0 : inty; inty = yexp > 24 ? 2 : inty; - float signval = as_float((as_uint(expylogx) ^ SIGNBIT_SP32)); + float signval = __clc_as_float((__clc_as_uint(expylogx) ^ SIGNBIT_SP32)); expylogx = ((inty == 1) & !xpos) ? signval : expylogx; - int ret = as_int(expylogx); + int ret = __clc_as_int(expylogx); // Corner case handling ret = ax < 0x3f800000 & iy == NINFBITPATT_SP32 ? PINFBITPATT_SP32 : ret; @@ -223,7 +224,7 @@ _CLC_DEF _CLC_OVERLOAD float __clc_powr(float x, float y) { ret = ax > PINFBITPATT_SP32 ? ix : ret; ret = ay > PINFBITPATT_SP32 ? iy : ret; - return as_float(ret); + return __clc_as_float(ret); } _CLC_BINARY_VECTORIZE(_CLC_DEF _CLC_OVERLOAD, float, __clc_powr, float, float) @@ -232,11 +233,11 @@ _CLC_DEF _CLC_OVERLOAD double __clc_powr(double x, double y) { const double real_log2_tail = 5.76999904754328540596e-08; const double real_log2_lead = 6.93147122859954833984e-01; - long ux = as_long(x); + long ux = __clc_as_long(x); long ax = ux & (~SIGNBIT_DP64); int xpos = ax == ux; - long uy = as_long(y); + long uy = __clc_as_long(y); long ay = uy & (~SIGNBIT_DP64); int ypos = ay == uy; @@ -248,7 +249,8 @@ _CLC_DEF _CLC_OVERLOAD double __clc_powr(double x, double y) { double xexp = (double)exp; long mantissa = ax & 0x000FFFFFFFFFFFFFL; - long temp_ux = as_long(as_double(0x3ff0000000000000L | mantissa) - 1.0); + long temp_ux = + __clc_as_long(__clc_as_double(0x3ff0000000000000L | mantissa) - 1.0); exp = ((temp_ux & 0x7FF0000000000000L) >> 52) - 2045; double xexp1 = (double)exp; long mantissa1 = temp_ux & 0x000FFFFFFFFFFFFFL; @@ -260,14 +262,14 @@ _CLC_DEF _CLC_OVERLOAD double __clc_powr(double x, double y) { ((mantissa & 0x0000080000000000) << 1); int index = rax >> 44; - double F = as_double(rax | 0x3FE0000000000000L); - double Y = as_double(mantissa | 0x3FE0000000000000L); + double F = __clc_as_double(rax | 0x3FE0000000000000L); + double Y = __clc_as_double(mantissa | 0x3FE0000000000000L); double f = F - Y; double2 tv = USE_TABLE(log_f_inv_tbl, index); double log_h = tv.s0; double log_t = tv.s1; double f_inv = (log_h + log_t) * f; - double r1 = as_double(as_long(f_inv) & 0xfffffffff8000000L); + double r1 = __clc_as_double(__clc_as_long(f_inv) & 0xfffffffff8000000L); double r2 = fma(-F, r1, f) * (log_h + log_t); double r = r1 + r2; @@ -291,11 +293,11 @@ _CLC_DEF _CLC_OVERLOAD double __clc_powr(double x, double y) { double resT_h = poly0h; double H = resT + resH; - double H_h = as_double(as_long(H) & 0xfffffffff8000000L); + double H_h = __clc_as_double(__clc_as_long(H) & 0xfffffffff8000000L); double T = (resH - H + resT) + (resT_t - (resT + resT_h)) + (H - H_h); H = H_h; - double y_head = as_double(uy & 0xfffffffff8000000L); + double y_head = __clc_as_double(uy & 0xfffffffff8000000L); double y_tail = y - y_head; double temp = fma(y_tail, H, fma(y_head, T, y_tail * T)); @@ -341,7 +343,7 @@ _CLC_DEF _CLC_OVERLOAD double __clc_powr(double x, double y) { expv = fma(f, q, f2) + f1; expv = ldexp(expv, m); - expv = v > max_exp_arg ? as_double(0x7FF0000000000000L) : expv; + expv = v > max_exp_arg ? __clc_as_double(0x7FF0000000000000L) : expv; expv = v < min_exp_arg ? 0.0 : expv; } @@ -363,7 +365,7 @@ _CLC_DEF _CLC_OVERLOAD double __clc_powr(double x, double y) { expv *= ((inty == 1) & !xpos) ? 
-1.0 : 1.0; - long ret = as_long(expv); + long ret = __clc_as_long(expv); // Now all the edge cases ret = ax < 0x3ff0000000000000L & uy == NINFBITPATT_DP64 ? PINFBITPATT_DP64 @@ -389,7 +391,8 @@ _CLC_DEF _CLC_OVERLOAD double __clc_powr(double x, double y) { ret = ax > PINFBITPATT_DP64 ? ux : ret; ret = ay > PINFBITPATT_DP64 ? uy : ret; - return as_double(ret); + return __clc_as_double(ret); } -_CLC_BINARY_VECTORIZE(_CLC_DEF _CLC_OVERLOAD, double, __clc_powr, double, double) +_CLC_BINARY_VECTORIZE(_CLC_DEF _CLC_OVERLOAD, double, __clc_powr, double, + double) #endif diff --git a/libclc/generic/lib/math/clc_remainder.cl b/libclc/generic/lib/math/clc_remainder.cl index 31d17d5aaf6b6..6302b9776782f 100644 --- a/libclc/generic/lib/math/clc_remainder.cl +++ b/libclc/generic/lib/math/clc_remainder.cl @@ -21,6 +21,7 @@ */ #include +#include #include #include #include @@ -31,19 +32,19 @@ #include _CLC_DEF _CLC_OVERLOAD float __clc_remainder(float x, float y) { - int ux = as_int(x); + int ux = __clc_as_int(x); int ax = ux & EXSIGNBIT_SP32; - float xa = as_float(ax); + float xa = __clc_as_float(ax); int sx = ux ^ ax; int ex = ax >> EXPSHIFTBITS_SP32; - int uy = as_int(y); + int uy = __clc_as_int(y); int ay = uy & EXSIGNBIT_SP32; - float ya = as_float(ay); + float ya = __clc_as_float(ay); int ey = ay >> EXPSHIFTBITS_SP32; - float xr = as_float(0x3f800000 | (ax & 0x007fffff)); - float yr = as_float(0x3f800000 | (ay & 0x007fffff)); + float xr = __clc_as_float(0x3f800000 | (ax & 0x007fffff)); + float yr = __clc_as_float(0x3f800000 | (ay & 0x007fffff)); int c; int k = ex - ey; @@ -71,17 +72,17 @@ _CLC_DEF _CLC_OVERLOAD float __clc_remainder(float x, float y) { xr -= c ? yr : 0.0f; q += c; - float s = as_float(ey << EXPSHIFTBITS_SP32); + float s = __clc_as_float(ey << EXPSHIFTBITS_SP32); xr *= lt ? 1.0f : s; c = ax == ay; xr = c ? 0.0f : xr; - xr = as_float(sx ^ as_int(xr)); + xr = __clc_as_float(sx ^ __clc_as_int(xr)); c = ax > PINFBITPATT_SP32 | ay > PINFBITPATT_SP32 | ax == PINFBITPATT_SP32 | ay == 0; - xr = c ? as_float(QNANBITPATT_SP32) : xr; + xr = c ? __clc_as_float(QNANBITPATT_SP32) : xr; return xr; } @@ -90,18 +91,18 @@ _CLC_BINARY_VECTORIZE(_CLC_DEF _CLC_OVERLOAD, float, __clc_remainder, float, #ifdef cl_khr_fp64 _CLC_DEF _CLC_OVERLOAD double __clc_remainder(double x, double y) { - ulong ux = as_ulong(x); + ulong ux = __clc_as_ulong(x); ulong ax = ux & ~SIGNBIT_DP64; ulong xsgn = ux ^ ax; - double dx = as_double(ax); - int xexp = convert_int(ax >> EXPSHIFTBITS_DP64); + double dx = __clc_as_double(ax); + int xexp = __clc_convert_int(ax >> EXPSHIFTBITS_DP64); int xexp1 = 11 - (int)__clc_clz(ax & MANTBITS_DP64); xexp1 = xexp < 1 ? xexp1 : xexp; - ulong uy = as_ulong(y); + ulong uy = __clc_as_ulong(y); ulong ay = uy & ~SIGNBIT_DP64; - double dy = as_double(ay); - int yexp = convert_int(ay >> EXPSHIFTBITS_DP64); + double dy = __clc_as_double(ay); + int yexp = __clc_convert_int(ay >> EXPSHIFTBITS_DP64); int yexp1 = 11 - (int)__clc_clz(ay & MANTBITS_DP64); yexp1 = yexp < 1 ? yexp1 : yexp; @@ -181,12 +182,12 @@ _CLC_DEF _CLC_OVERLOAD double __clc_remainder(double x, double y) { dx = dy < 0x1.0p+1022 ? dxl : dxg; - double ret = as_double(xsgn ^ as_ulong(dx)); - dx = as_double(ax); + double ret = __clc_as_double(xsgn ^ __clc_as_ulong(dx)); + dx = __clc_as_double(ax); // Now handle |x| == |y| int c = dx == dy; - t = as_double(xsgn); + t = __clc_as_double(xsgn); ret = c ? 
t : ret; // Next, handle |x| < |y| @@ -203,7 +204,7 @@ _CLC_DEF _CLC_OVERLOAD double __clc_remainder(double x, double y) { // |y| is 0 c = dy == 0.0; - ret = c ? as_double(QNANBITPATT_DP64) : ret; + ret = c ? __clc_as_double(QNANBITPATT_DP64) : ret; // y is +-Inf, NaN c = yexp > BIASEDEMAX_DP64; @@ -212,7 +213,7 @@ _CLC_DEF _CLC_OVERLOAD double __clc_remainder(double x, double y) { // x is +=Inf, NaN c = xexp > BIASEDEMAX_DP64; - ret = c ? as_double(QNANBITPATT_DP64) : ret; + ret = c ? __clc_as_double(QNANBITPATT_DP64) : ret; return ret; } diff --git a/libclc/generic/lib/math/clc_remquo.cl b/libclc/generic/lib/math/clc_remquo.cl index af3e7a2b07500..699517e180708 100644 --- a/libclc/generic/lib/math/clc_remquo.cl +++ b/libclc/generic/lib/math/clc_remquo.cl @@ -21,6 +21,7 @@ */ #include +#include #include #include #include @@ -34,20 +35,20 @@ _CLC_DEF _CLC_OVERLOAD float __clc_remquo(float x, float y, __private int *quo) { x = __clc_flush_denormal_if_not_supported(x); y = __clc_flush_denormal_if_not_supported(y); - int ux = as_int(x); + int ux = __clc_as_int(x); int ax = ux & EXSIGNBIT_SP32; - float xa = as_float(ax); + float xa = __clc_as_float(ax); int sx = ux ^ ax; int ex = ax >> EXPSHIFTBITS_SP32; - int uy = as_int(y); + int uy = __clc_as_int(y); int ay = uy & EXSIGNBIT_SP32; - float ya = as_float(ay); + float ya = __clc_as_float(ay); int sy = uy ^ ay; int ey = ay >> EXPSHIFTBITS_SP32; - float xr = as_float(0x3f800000 | (ax & 0x007fffff)); - float yr = as_float(0x3f800000 | (ay & 0x007fffff)); + float xr = __clc_as_float(0x3f800000 | (ax & 0x007fffff)); + float yr = __clc_as_float(0x3f800000 | (ay & 0x007fffff)); int c; int k = ex - ey; @@ -75,7 +76,7 @@ _CLC_DEF _CLC_OVERLOAD float __clc_remquo(float x, float y, xr -= c ? yr : 0.0f; q += c; - float s = as_float(ey << EXPSHIFTBITS_SP32); + float s = __clc_as_float(ey << EXPSHIFTBITS_SP32); xr *= lt ? 1.0f : s; int qsgn = sx == sy ? 1 : -1; @@ -85,12 +86,12 @@ _CLC_DEF _CLC_OVERLOAD float __clc_remquo(float x, float y, quot = c ? qsgn : quot; xr = c ? 0.0f : xr; - xr = as_float(sx ^ as_int(xr)); + xr = __clc_as_float(sx ^ __clc_as_int(xr)); c = ax > PINFBITPATT_SP32 | ay > PINFBITPATT_SP32 | ax == PINFBITPATT_SP32 | ay == 0; quot = c ? 0 : quot; - xr = c ? as_float(QNANBITPATT_SP32) : xr; + xr = c ? __clc_as_float(QNANBITPATT_SP32) : xr; *quo = quot; @@ -130,18 +131,18 @@ __VEC_REMQUO(float, 16, 8) #ifdef cl_khr_fp64 _CLC_DEF _CLC_OVERLOAD double __clc_remquo(double x, double y, __private int *pquo) { - ulong ux = as_ulong(x); + ulong ux = __clc_as_ulong(x); ulong ax = ux & ~SIGNBIT_DP64; ulong xsgn = ux ^ ax; - double dx = as_double(ax); - int xexp = convert_int(ax >> EXPSHIFTBITS_DP64); + double dx = __clc_as_double(ax); + int xexp = __clc_convert_int(ax >> EXPSHIFTBITS_DP64); int xexp1 = 11 - (int)__clc_clz(ax & MANTBITS_DP64); xexp1 = xexp < 1 ? xexp1 : xexp; - ulong uy = as_ulong(y); + ulong uy = __clc_as_ulong(y); ulong ay = uy & ~SIGNBIT_DP64; - double dy = as_double(ay); - int yexp = convert_int(ay >> EXPSHIFTBITS_DP64); + double dy = __clc_as_double(ay); + int yexp = __clc_convert_int(ay >> EXPSHIFTBITS_DP64); int yexp1 = 11 - (int)__clc_clz(ay & MANTBITS_DP64); yexp1 = yexp < 1 ? yexp1 : yexp; @@ -223,12 +224,12 @@ _CLC_DEF _CLC_OVERLOAD double __clc_remquo(double x, double y, lt += dy < 0x1.0p+1022 ? 
al : ag; int quo = ((int)lt & 0x7f) * qsgn; - double ret = as_double(xsgn ^ as_ulong(dx)); - dx = as_double(ax); + double ret = __clc_as_double(xsgn ^ __clc_as_ulong(dx)); + dx = __clc_as_double(ax); // Now handle |x| == |y| int c = dx == dy; - t = as_double(xsgn); + t = __clc_as_double(xsgn); quo = c ? qsgn : quo; ret = c ? t : ret; @@ -249,7 +250,7 @@ _CLC_DEF _CLC_OVERLOAD double __clc_remquo(double x, double y, // |y| is 0 c = dy == 0.0; quo = c ? 0 : quo; - ret = c ? as_double(QNANBITPATT_DP64) : ret; + ret = c ? __clc_as_double(QNANBITPATT_DP64) : ret; // y is +-Inf, NaN c = yexp > BIASEDEMAX_DP64; @@ -260,7 +261,7 @@ _CLC_DEF _CLC_OVERLOAD double __clc_remquo(double x, double y, // x is +=Inf, NaN c = xexp > BIASEDEMAX_DP64; quo = c ? 0 : quo; - ret = c ? as_double(QNANBITPATT_DP64) : ret; + ret = c ? __clc_as_double(QNANBITPATT_DP64) : ret; *pquo = quo; return ret; diff --git a/libclc/generic/lib/math/clc_rootn.cl b/libclc/generic/lib/math/clc_rootn.cl index 42b983784c14d..dabaa2a4f3f2a 100644 --- a/libclc/generic/lib/math/clc_rootn.cl +++ b/libclc/generic/lib/math/clc_rootn.cl @@ -21,6 +21,7 @@ */ #include +#include #include #include #include @@ -67,17 +68,17 @@ _CLC_DEF _CLC_OVERLOAD float __clc_rootn(float x, int ny) { float y = MATH_RECIP((float)ny); - int ix = as_int(x); + int ix = __clc_as_int(x); int ax = ix & EXSIGNBIT_SP32; int xpos = ix == ax; - int iy = as_int(y); + int iy = __clc_as_int(y); int ay = iy & EXSIGNBIT_SP32; int ypos = iy == ay; // Extra precise log calculation // First handle case that x is close to 1 - float r = 1.0f - as_float(ax); + float r = 1.0f - __clc_as_float(ax); int near1 = __clc_fabs(r) < 0x1.0p-4f; float r2 = r * r; @@ -101,7 +102,7 @@ _CLC_DEF _CLC_OVERLOAD float __clc_rootn(float x, int ny) { // Computations for x not near 1 int m = (int)(ax >> EXPSHIFTBITS_SP32) - EXPBIAS_SP32; float mf = (float)m; - int ixs = as_int(as_float(ax | 0x3f800000) - 1.0f); + int ixs = __clc_as_int(__clc_as_float(ax | 0x3f800000) - 1.0f); float mfs = (float)((ixs >> EXPSHIFTBITS_SP32) - 253); int c = m == -127; int ixn = c ? ixs : ax; @@ -110,8 +111,8 @@ _CLC_DEF _CLC_OVERLOAD float __clc_rootn(float x, int ny) { int indx = (ixn & 0x007f0000) + ((ixn & 0x00008000) << 1); // F - Y - float f = as_float(0x3f000000 | indx) - - as_float(0x3f000000 | (ixn & MANTBITS_SP32)); + float f = __clc_as_float(0x3f000000 | indx) - + __clc_as_float(0x3f000000 | (ixn & MANTBITS_SP32)); indx = indx >> 16; float2 tv = USE_TABLE(log_inv_tbl_ep, indx); @@ -139,13 +140,13 @@ _CLC_DEF _CLC_OVERLOAD float __clc_rootn(float x, int ny) { lh = near1 ? lh_near1 : lh; l = near1 ? 
l_near1 : l; - float gh = as_float(as_int(l) & 0xfffff000); + float gh = __clc_as_float(__clc_as_int(l) & 0xfffff000); float gt = ((ltt - (lt - lth)) + ((lh - l) + lt)) + (l - gh); - float yh = as_float(iy & 0xfffff000); + float yh = __clc_as_float(iy & 0xfffff000); float fny = (float)ny; - float fnyh = as_float(as_int(fny) & 0xfffff000); + float fnyh = __clc_as_float(__clc_as_int(fny) & 0xfffff000); float fnyt = (float)(ny - (int)fnyh); float yt = MATH_DIVIDE(__clc_mad(-fnyt, yh, __clc_mad(-fnyh, yh, 1.0f)), fny); @@ -155,7 +156,7 @@ _CLC_DEF _CLC_OVERLOAD float __clc_rootn(float x, int ny) { // Extra precise exp of ylogx const float R_64_BY_LOG2 = 0x1.715476p+6f; // 64/log2 : 92.332482616893657 - int n = convert_int(ylogx * R_64_BY_LOG2); + int n = __clc_convert_int(ylogx * R_64_BY_LOG2); float nf = (float)n; int j = n & 0x3f; @@ -179,16 +180,16 @@ _CLC_DEF _CLC_OVERLOAD float __clc_rootn(float x, int ny) { float expylogx = __clc_mad(tv.s0, poly, __clc_mad(tv.s1, poly, tv.s1)) + tv.s0; float sexpylogx = __clc_fp32_subnormals_supported() - ? expylogx * as_float(0x1 << (m + 149)) + ? expylogx * __clc_as_float(0x1 << (m + 149)) : 0.0f; - float texpylogx = as_float(as_int(expylogx) + m2); + float texpylogx = __clc_as_float(__clc_as_int(expylogx) + m2); expylogx = m < -125 ? sexpylogx : texpylogx; // Result is +-Inf if (ylogx + ylogx_t) > 128*log2 expylogx = ((ylogx > 0x1.62e430p+6f) | (ylogx == 0x1.62e430p+6f & ylogx_t > -0x1.05c610p-22f)) - ? as_float(PINFBITPATT_SP32) + ? __clc_as_float(PINFBITPATT_SP32) : expylogx; // Result is 0 if ylogx < -149*log2 @@ -201,9 +202,9 @@ _CLC_DEF _CLC_OVERLOAD float __clc_rootn(float x, int ny) { int inty = 2 - (ny & 1); - float signval = as_float((as_uint(expylogx) ^ SIGNBIT_SP32)); + float signval = __clc_as_float((__clc_as_uint(expylogx) ^ SIGNBIT_SP32)); expylogx = ((inty == 1) & !xpos) ? signval : expylogx; - int ret = as_int(expylogx); + int ret = __clc_as_int(expylogx); // Corner case handling ret = (!xpos & (inty == 2)) ? QNANBITPATT_SP32 : ret; @@ -221,7 +222,7 @@ _CLC_DEF _CLC_OVERLOAD float __clc_rootn(float x, int ny) { ret = ax > PINFBITPATT_SP32 ? ix : ret; ret = ny == 0 ? 
QNANBITPATT_SP32 : ret; - return as_float(ret); + return __clc_as_float(ret); } _CLC_BINARY_VECTORIZE(_CLC_DEF _CLC_OVERLOAD, float, __clc_rootn, float, int) @@ -233,11 +234,11 @@ _CLC_DEF _CLC_OVERLOAD double __clc_rootn(double x, int ny) { double dny = (double)ny; double y = 1.0 / dny; - long ux = as_long(x); + long ux = __clc_as_long(x); long ax = ux & (~SIGNBIT_DP64); int xpos = ax == ux; - long uy = as_long(y); + long uy = __clc_as_long(y); long ay = uy & (~SIGNBIT_DP64); int ypos = ay == uy; @@ -249,7 +250,8 @@ _CLC_DEF _CLC_OVERLOAD double __clc_rootn(double x, int ny) { double xexp = (double)exp; long mantissa = ax & 0x000FFFFFFFFFFFFFL; - long temp_ux = as_long(as_double(0x3ff0000000000000L | mantissa) - 1.0); + long temp_ux = + __clc_as_long(__clc_as_double(0x3ff0000000000000L | mantissa) - 1.0); exp = ((temp_ux & 0x7FF0000000000000L) >> 52) - 2045; double xexp1 = (double)exp; long mantissa1 = temp_ux & 0x000FFFFFFFFFFFFFL; @@ -261,14 +263,14 @@ _CLC_DEF _CLC_OVERLOAD double __clc_rootn(double x, int ny) { ((mantissa & 0x0000080000000000) << 1); int index = rax >> 44; - double F = as_double(rax | 0x3FE0000000000000L); - double Y = as_double(mantissa | 0x3FE0000000000000L); + double F = __clc_as_double(rax | 0x3FE0000000000000L); + double Y = __clc_as_double(mantissa | 0x3FE0000000000000L); double f = F - Y; double2 tv = USE_TABLE(log_f_inv_tbl, index); double log_h = tv.s0; double log_t = tv.s1; double f_inv = (log_h + log_t) * f; - double r1 = as_double(as_long(f_inv) & 0xfffffffff8000000L); + double r1 = __clc_as_double(__clc_as_long(f_inv) & 0xfffffffff8000000L); double r2 = fma(-F, r1, f) * (log_h + log_t); double r = r1 + r2; @@ -292,14 +294,14 @@ _CLC_DEF _CLC_OVERLOAD double __clc_rootn(double x, int ny) { double resT_h = poly0h; double H = resT + resH; - double H_h = as_double(as_long(H) & 0xfffffffff8000000L); + double H_h = __clc_as_double(__clc_as_long(H) & 0xfffffffff8000000L); double T = (resH - H + resT) + (resT_t - (resT + resT_h)) + (H - H_h); H = H_h; - double y_head = as_double(uy & 0xfffffffff8000000L); + double y_head = __clc_as_double(uy & 0xfffffffff8000000L); double y_tail = y - y_head; - double fnyh = as_double(as_long(dny) & 0xfffffffffff00000); + double fnyh = __clc_as_double(__clc_as_long(dny) & 0xfffffffffff00000); double fnyt = (double)(ny - (int)fnyh); y_tail = fma(-fnyt, y_head, fma(-fnyh, y_head, 1.0)) / dny; @@ -346,7 +348,7 @@ _CLC_DEF _CLC_OVERLOAD double __clc_rootn(double x, int ny) { expv = fma(f, q, f2) + f1; expv = ldexp(expv, m); - expv = v > max_exp_arg ? as_double(0x7FF0000000000000L) : expv; + expv = v > max_exp_arg ? __clc_as_double(0x7FF0000000000000L) : expv; expv = v < min_exp_arg ? 0.0 : expv; } @@ -359,7 +361,7 @@ _CLC_DEF _CLC_OVERLOAD double __clc_rootn(double x, int ny) { expv *= ((inty == 1) & !xpos) ? -1.0 : 1.0; - long ret = as_long(expv); + long ret = __clc_as_long(expv); // Now all the edge cases ret = (!xpos & (inty == 2)) ? QNANBITPATT_DP64 : ret; @@ -377,7 +379,7 @@ _CLC_DEF _CLC_OVERLOAD double __clc_rootn(double x, int ny) { ret = ((ux == PINFBITPATT_DP64) & ypos) ? PINFBITPATT_DP64 : ret; ret = ax > PINFBITPATT_DP64 ? ux : ret; ret = ny == 0 ? 
QNANBITPATT_DP64 : ret; - return as_double(ret); + return __clc_as_double(ret); } _CLC_BINARY_VECTORIZE(_CLC_DEF _CLC_OVERLOAD, double, __clc_rootn, double, int) #endif @@ -387,7 +389,7 @@ _CLC_BINARY_VECTORIZE(_CLC_DEF _CLC_OVERLOAD, double, __clc_rootn, double, int) #pragma OPENCL EXTENSION cl_khr_fp16 : enable _CLC_OVERLOAD _CLC_DEF half __clc_rootn(half x, int y) { - return (half)__clc_rootn((float)x, y); + return (half)__clc_rootn((float)x, y); } _CLC_BINARY_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, half, __clc_rootn, half, int); diff --git a/libclc/r600/lib/image/get_image_attributes_impl.ll b/libclc/r600/lib/image/get_image_attributes_impl.ll index f867ab6603591..7f1965de7602c 100644 --- a/libclc/r600/lib/image/get_image_attributes_impl.ll +++ b/libclc/r600/lib/image/get_image_attributes_impl.ll @@ -1,5 +1,3 @@ -target datalayout = "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64" - %opencl.image2d_t = type opaque %opencl.image3d_t = type opaque diff --git a/libclc/r600/lib/image/read_image_impl.ll b/libclc/r600/lib/image/read_image_impl.ll index ca2e465b4b5b8..229a2526c3743 100644 --- a/libclc/r600/lib/image/read_image_impl.ll +++ b/libclc/r600/lib/image/read_image_impl.ll @@ -1,5 +1,3 @@ -target datalayout = "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64" - %opencl.image2d_t = type opaque declare <4 x float> @llvm.R600.tex(<4 x float>, i32, i32, i32, i32, i32, i32, diff --git a/libclc/r600/lib/image/write_image_impl.ll b/libclc/r600/lib/image/write_image_impl.ll index 03595ba1db737..265f5d6045e42 100644 --- a/libclc/r600/lib/image/write_image_impl.ll +++ b/libclc/r600/lib/image/write_image_impl.ll @@ -1,5 +1,3 @@ -target datalayout = "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64" - %opencl.image2d_t = type opaque %opencl.image3d_t = type opaque diff --git a/libcxx/docs/DesignDocs/ExperimentalFeatures.rst b/libcxx/docs/DesignDocs/ExperimentalFeatures.rst index 0dbbd5f869e36..f9b23493b2356 100644 --- a/libcxx/docs/DesignDocs/ExperimentalFeatures.rst +++ b/libcxx/docs/DesignDocs/ExperimentalFeatures.rst @@ -161,7 +161,9 @@ has been removed in LLVM 17.0. `Networking TS `__ ------------------------------------------- The Networking TS is not yet part of a shipping standard, and there is discussion around removing it. -Libc++ never shipped an implementation of the Networking TS and does not plan to do so in the future. +Libc++ never shipped an implementation of the Networking TS and does not plan to do so in the future, +unless the C++ Standards Committee expresses a desire to merge the Networking TS into the IS (which is +unlikely at this point). 
`Ranges TS `__
---------------------------------------

diff --git a/libcxx/docs/Status/Cxx2cIssues.csv b/libcxx/docs/Status/Cxx2cIssues.csv
index 45faea0568b2e..1ec23dfabd5ea 100644
--- a/libcxx/docs/Status/Cxx2cIssues.csv
+++ b/libcxx/docs/Status/Cxx2cIssues.csv
@@ -111,6 +111,16 @@
 "`LWG4169 `__","``std::atomic``'s default constructor should be constrained","2024-11 (Wrocław)","","",""
 "`LWG4170 `__","``contiguous_iterator`` should require ``to_address(I{})``","2024-11 (Wrocław)","","",""
 "","","","","",""
+"`LWG3578 `__","Iterator SCARYness in the context of associative container merging","2025-02 (Hagenberg)","","",""
+"`LWG3956 `__","``chrono::parse`` uses ``from_stream`` as a customization point","2025-02 (Hagenberg)","","",""
+"`LWG4172 `__","``unique_lock`` self-move-assignment is broken","2025-02 (Hagenberg)","","",""
+"`LWG4175 `__","``get_env()`` specified in terms of ``as_const()`` but this doesn't work with rvalue senders","2025-02 (Hagenberg)","","",""
+"`LWG4179 `__","Wrong range in ``[alg.search]``","2025-02 (Hagenberg)","","",""
+"`LWG4186 `__","``regex_traits::transform_primary`` mistakenly detects ``typeid`` of a function","2025-02 (Hagenberg)","","",""
+"`LWG4189 `__","``cache_latest_view`` should be freestanding","2025-02 (Hagenberg)","","",""
+"`LWG4191 `__","P1467 changed the return type of ``pow(complex, int)``","2025-02 (Hagenberg)","","",""
+"`LWG4196 `__","Complexity of ``inplace_merge()`` is incorrect","2025-02 (Hagenberg)","","",""
+"","","","","",""
 "`LWG3343 `__","Ordering of calls to ``unlock()`` and ``notify_all()`` in Effects element of ``notify_all_at_thread_exit()`` should be reversed","Not Adopted Yet","|Complete|","16",""
 "`LWG4139 `__","§[time.zone.leap] recursive constraint in <=>","Not Adopted Yet","|Complete|","20",""
 "`LWG3456 `__","Pattern used by std::from_chars is underspecified (option B)","Not Adopted Yet","|Complete|","20",""
diff --git a/libcxx/docs/Status/Cxx2cPapers.csv b/libcxx/docs/Status/Cxx2cPapers.csv
index b2bb1d6e9d6c3..1436db6cf2b45 100644
--- a/libcxx/docs/Status/Cxx2cPapers.csv
+++ b/libcxx/docs/Status/Cxx2cPapers.csv
@@ -79,7 +79,6 @@
 "`P3136R1 `__","Retiring niebloids","2024-11 (Wrocław)","","",""
 "`P3138R5 `__","``views::cache_latest``","2024-11 (Wrocław)","","",""
 "`P3379R0 `__","Constrain ``std::expected`` equality operators","2024-11 (Wrocław)","","",""
-"`P0472R2 `__","Put ``std::monostate`` in ``<utility>``","2024-11 (Wrocław)","","",""
 "`P2862R1 `__","``text_encoding::name()`` should never return null values","2024-11 (Wrocław)","","",""
 "`P2897R7 `__","``aligned_accessor``: An ``mdspan`` accessor expressing pointer over-alignment","2024-11 (Wrocław)","","",""
 "`P3355R1 `__","Fix ``submdspan`` for C++26","2024-11 (Wrocław)","","",""
@@ -92,9 +91,29 @@
 "`P3369R0 `__","constexpr for ``uninitialized_default_construct``","2024-11 (Wrocław)","","",""
 "`P3370R1 `__","Add new library headers from C23","2024-11 (Wrocław)","","",""
 "`P3309R3 `__","constexpr ``atomic`` and ``atomic_ref``","2024-11 (Wrocław)","","",""
-"`P3019R11 `__","``indirect`` and ``polymorphic``: Vocabulary Types for Composite Class Design","2024-11 (Wrocław)","","",""
 "`P1928R15 `__","``std::simd`` — merge data-parallel types from the Parallelism TS 2","2024-11 (Wrocław)","","",""
 "`P3325R5 `__","A Utility for Creating Execution Environments","2024-11 (Wrocław)","","",""
 "`P3068R6 `__","Allowing exception throwing in constant-evaluation","2024-11 (Wrocław)","","",""
 "`P3247R2 `__","Deprecate the notion of trivial types","2024-11 (Wrocław)","","",""
 "","","","","",""
+"`P3074R7 `__","trivial unions (was ``std::uninitialized<T>``)","2025-02 (Hagenberg)","","",""
+"`P1494R5 `__","Partial program correctness","2025-02 (Hagenberg)","","",""
+"`P2900R14 `__","Contracts for C++","2025-02 (Hagenberg)","","",""
+"`P3475R2 `__","Defang and deprecate ``memory_order::consume``","2025-02 (Hagenberg)","","",""
+"`P2786R13 `__","Trivial Relocatability For C++26","2025-02 (Hagenberg)","","",""
+"`P3137R3 `__","``views::to_input``","2025-02 (Hagenberg)","","",""
+"`P0472R3 `__","Put ``std::monostate`` in ``<utility>``","2025-02 (Hagenberg)","","",""
+"`P3349R1 `__","Converting contiguous iterators to pointers","2025-02 (Hagenberg)","","",""
+"`P3372R3 `__","constexpr containers and adaptors","2025-02 (Hagenberg)","","",""
+"`P3378R2 `__","constexpr exception types","2025-02 (Hagenberg)","","",""
+"`P3441R2 `__","Rename ``simd_split`` to ``simd_chunk``","2025-02 (Hagenberg)","","",""
+"`P3287R3 `__","Exploration of namespaces for ``std::simd``","2025-02 (Hagenberg)","","",""
+"`P2976R1 `__","Freestanding Library: ``algorithm``, ``numeric``, and ``random``","2025-02 (Hagenberg)","","",""
+"`P3430R3 `__","simd issues: explicit, unsequenced, identity-element position, and members of disabled simd","2025-02 (Hagenberg)","","",""
+"`P2663R7 `__","Interleaved complex values support in ``std::simd``","2025-02 (Hagenberg)","","",""
+"`P2933R4 `__","Extend ``<bit>`` header function with overloads for ``std::simd``","2025-02 (Hagenberg)","","",""
+"`P2846R6 `__","``reserve_hint``: Eagerly reserving memory for not-quite-sized lazy ranges","2025-02 (Hagenberg)","","",""
+"`P3471R4 `__","Standard Library Hardening","2025-02 (Hagenberg)","","",""
+"`P0447R28 `__","Introduction of ``std::hive`` to the standard library","2025-02 (Hagenberg)","","",""
+"`P3019R14 `__","``indirect`` and ``polymorphic``: Vocabulary Types for Composite Class Design","2025-02 (Hagenberg)","","",""
+"","","","","",""
diff --git a/libcxx/docs/index.rst b/libcxx/docs/index.rst
index 53c6b84c22ea7..2c4b865050306 100644
--- a/libcxx/docs/index.rst
+++ b/libcxx/docs/index.rst
@@ -128,14 +128,14 @@ Libc++ aims to support common compilers that implement the C++11 Standard. In or
 good balance between stability for users and maintenance cost, testing coverage and development
 velocity, libc++ drops support for older compilers as newer ones are released.
-============ =============== ========================== ===================== -Compiler Versions Restrictions Support policy -============ =============== ========================== ===================== -Clang 17, 18, 19-git latest two stable releases per `LLVM's release page `_ and the development version -AppleClang 15 latest stable release per `Xcode's release page `_ -Open XL 17.1 (AIX) latest stable release per `Open XL's documentation page `_ -GCC 14 In C++11 or later only latest stable release per `GCC's release page `_ -============ =============== ========================== ===================== +============ =================== ========================== ===================== +Compiler Versions Restrictions Support policy +============ =================== ========================== ===================== +Clang 18, 19, 20, 21-git latest two stable releases per `LLVM's release page `_ and the development version +AppleClang 15 latest stable release per `Xcode's release page `_ +Open XL 17.1 (AIX) latest stable release per `Open XL's documentation page `_ +GCC 14 In C++11 or later only latest stable release per `GCC's release page `_ +============ =================== ========================== ===================== Libc++ also supports common platforms and architectures: diff --git a/libcxx/include/__mbstate_t.h b/libcxx/include/__mbstate_t.h index e013384454b41..c23ea7113ca70 100644 --- a/libcxx/include/__mbstate_t.h +++ b/libcxx/include/__mbstate_t.h @@ -43,12 +43,12 @@ # include // works on most Unixes #elif __has_include() # include // works on Darwin -#elif _LIBCPP_HAS_WIDE_CHARACTERS && __has_include_next() -# include_next // fall back to the C standard provider of mbstate_t +#elif __has_include_next() +# include_next // use the C standard provider of mbstate_t if present #elif __has_include_next() -# include_next // is also required to make mbstate_t visible +# include_next // Try in absence of for mbstate_t #else -# error "We don't know how to get the definition of mbstate_t without on your platform." +# error "We don't know how to get the definition of mbstate_t on your platform." #endif #endif // _LIBCPP___MBSTATE_T_H diff --git a/libcxx/src/experimental/tzdb.cpp b/libcxx/src/experimental/tzdb.cpp index f38f495c2d0bb..1f18226636fd5 100644 --- a/libcxx/src/experimental/tzdb.cpp +++ b/libcxx/src/experimental/tzdb.cpp @@ -708,6 +708,39 @@ void __init_tzdb(tzdb& __tzdb, __tz::__rules_storage_type& __rules) { std::__throw_runtime_error("unknown time zone"); } #else // ifdef _WIN32 + +[[nodiscard]] static string __current_zone_environment() { + if (const char* __tz = std::getenv("TZ")) + return __tz; + + return {}; +} + +[[nodiscard]] static string __current_zone_etc_localtime() { + filesystem::path __path = "/etc/localtime"; + if (!filesystem::exists(__path) || !filesystem::is_symlink(__path)) + return {}; + + filesystem::path __tz = filesystem::read_symlink(__path); + // The path may be a relative path, in that case convert it to an absolute + // path based on the proper initial directory. 
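+  // A worked example of the resolution below (the zone name is hypothetical):
+  // for a symlink /etc/localtime -> ../usr/share/zoneinfo/Europe/Vienna,
+  //   canonical("/etc" / "../usr/share/zoneinfo/Europe/Vienna")
+  //       == "/usr/share/zoneinfo/Europe/Vienna",
+  // and relative() against "/usr/share/zoneinfo/" then yields the zone name
+  // "Europe/Vienna".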
+ if (__tz.is_relative()) + __tz = filesystem::canonical("/etc" / __tz); + + return filesystem::relative(__tz, "/usr/share/zoneinfo/"); +} + +[[nodiscard]] static string __current_zone_etc_timezone() { + filesystem::path __path = "/etc/timezone"; + if (!filesystem::exists(__path)) + return {}; + + ifstream __f(__path); + string __name; + std::getline(__f, __name); + return __name; +} + [[nodiscard]] static const time_zone* __current_zone_posix(const tzdb& tzdb) { // On POSIX systems there are several ways to configure the time zone. // In order of priority they are: @@ -726,30 +759,28 @@ void __init_tzdb(tzdb& __tzdb, __tz::__rules_storage_type& __rules) { // // - The time zone name is the target of the symlink /etc/localtime // relative to /usr/share/zoneinfo/ + // + // - The file /etc/timezone. This text file contains the name of the time + // zone. + // + // On Linux systems it seems /etc/timezone is deprecated and being phased + // out. This file is used when /etc/localtime does not exist, or when it exists but is not a symlink. For more information and links see + // https://github.com/llvm/llvm-project/issues/105634 - // The algorithm is like this: - // - If the environment variable TZ is set and points to a valid - // record use this value. - // - Else use the name based on the `/etc/localtime` symlink. + string __name = chrono::__current_zone_environment(); - if (const char* __tz = getenv("TZ")) - if (const time_zone* __result = tzdb.__locate_zone(__tz)) + // Ignore invalid names in the environment. + if (!__name.empty()) + if (const time_zone* __result = tzdb.__locate_zone(__name)) return __result; - filesystem::path __path = "/etc/localtime"; - if (!filesystem::exists(__path)) - std::__throw_runtime_error("tzdb: the symlink '/etc/localtime' does not exist"); - - if (!filesystem::is_symlink(__path)) - std::__throw_runtime_error("tzdb: the path '/etc/localtime' is not a symlink"); + __name = chrono::__current_zone_etc_localtime(); + if (__name.empty()) + __name = chrono::__current_zone_etc_timezone(); - filesystem::path __tz = filesystem::read_symlink(__path); - // The path may be a relative path, in that case convert it to an absolute - // path based on the proper initial directory. - if (__tz.is_relative()) - __tz = filesystem::canonical("/etc" / __tz); + if (__name.empty()) + std::__throw_runtime_error("tzdb: unable to determine the name of the current time zone"); - string __name = filesystem::relative(__tz, "/usr/share/zoneinfo/"); if (const time_zone* __result = tzdb.__locate_zone(__name)) return __result; diff --git a/libcxx/test/libcxx/gdb/gdb_pretty_printer_test.sh.cpp b/libcxx/test/libcxx/gdb/gdb_pretty_printer_test.sh.cpp index 6509bb58140ab..dcdce261298c1 100644 --- a/libcxx/test/libcxx/gdb/gdb_pretty_printer_test.sh.cpp +++ b/libcxx/test/libcxx/gdb/gdb_pretty_printer_test.sh.cpp @@ -12,7 +12,7 @@ // UNSUPPORTED: c++03 // TODO: Investigate these failures which break the CI. -// UNSUPPORTED: clang-18, clang-19, clang-20 +// UNSUPPORTED: clang-18, clang-19, clang-20, clang-21 // The Android libc++ tests are run on a non-Android host, connected to an // Android device over adb. gdb needs special support to make this work (e.g. diff --git a/lld/test/ELF/lto/arm-rtlibcall.ll b/lld/test/ELF/lto/arm-rtlibcall.ll new file mode 100644 index 0000000000000..b254fa0c034e8 --- /dev/null +++ b/lld/test/ELF/lto/arm-rtlibcall.ll @@ -0,0 +1,126 @@ +; REQUIRES: arm +;; Test for LTO optimizing out references to symbols that are pulled in by +;; compiler-generated libcalls (post LTO). 
+;; Lazy files extracted post LTO compilation might reference other lazy files. +;; Referenced relocatable files are extracted and everything works as intended. +;; However, if the referenced lazy file is a bitcode file, no further LTO +;; compilation occurs. lld currently treats any symbols from that bitcode file +;; as absolute, which leads to a "refer to absolute symbol" error in PIC links +;; and to silently broken output otherwise. For example, lazy aeabi_ldivmod.o post +;; LTO extraction might call __divmoddi4 defined in an unextracted lazy bitcode +;; file (https://github.com/llvm/llvm-project/issues/127284). +; RUN: rm -rf %t && split-file %s %t && cd %t +; RUN: llvm-as divmoddi4.ll -o divmoddi4.bc +; RUN: llvm-mc -filetype=obj -triple=armv7-none-unknown-eabi aeabi_ldivmod.s -o aeabi_ldivmod.o +;; With an explicit __aeabi_ldivmod call in the input IR this works as expected: +; RUN: llvm-as main-explicit.ll -o main-explicit-ldivmod.bc +; RUN: ld.lld main-explicit-ldivmod.bc --start-lib aeabi_ldivmod.o divmoddi4.bc --end-lib -o out.explicit +; RUN: llvm-objdump -d -r -t out.explicit | FileCheck %s --check-prefix=GOOD-DUMP +; GOOD-DUMP-LABEL: SYMBOL TABLE: +; GOOD-DUMP: [[#]] g F .text 00000024 __aeabi_ldivmod +; GOOD-DUMP: [[#]] g F .text [[#]] __divmoddi4 +; GOOD-DUMP-LABEL: <__aeabi_ldivmod>: +; GOOD-DUMP: bl 0x20140 <__divmoddi4> @ imm = #0x28 + +;; But if the call is generated by ISel, we end up with an invalid reference: +; RUN: llvm-as main-implicit.ll -o main-implicit-ldivmod.bc +; RUN: ld.lld main-implicit-ldivmod.bc --start-lib aeabi_ldivmod.o divmoddi4.bc --end-lib -o out.implicit +; RUN: llvm-objdump -d -r -t out.implicit | FileCheck %s --check-prefix=BAD-DUMP +;; We jump to address zero here and __divmoddi4 ends up being an absolute symbol: +; BAD-DUMP-LABEL: SYMBOL TABLE: +; BAD-DUMP: [[#]] g F .text 00000024 __aeabi_ldivmod +; BAD-DUMP: [[#]] g *ABS* 00000000 __divmoddi4 +; BAD-DUMP-LABEL: <__aeabi_ldivmod>: +; BAD-DUMP: bl 0x0 <__divmoddi4> +;; Linking with -pie complains about the invalid relocation (and even points back to the source files): +; RUN: not ld.lld main-implicit-ldivmod.bc --start-lib aeabi_ldivmod.o divmoddi4.bc --end-lib -o out.pie --no-undefined -pie --no-dynamic-linker 2>&1 | FileCheck %s --check-prefix=PIE-ERROR +; PIE-ERROR: ld.lld: error: relocation R_ARM_CALL cannot refer to absolute symbol: __divmoddi4 +; PIE-ERROR-NEXT: >>> defined in divmoddi4.bc +; PIE-ERROR-NEXT: >>> referenced by aeabi_ldivmod.o:(__aeabi_ldivmod) +;; Removing --start-lib/--end-lib also ensures that the reference is retained +; RUN: ld.lld main-implicit-ldivmod.bc aeabi_ldivmod.o divmoddi4.bc -o out.nolib +; RUN: llvm-objdump -d -r -t out.nolib | FileCheck %s --check-prefix=GOOD-DUMP + +;; Interestingly, just declaring __aeabi_ldivmod is sufficient to avoid this issue. +; RUN: llvm-as main-declared.ll -o main-declared-ldivmod.bc +; RUN: ld.lld main-declared-ldivmod.bc --start-lib aeabi_ldivmod.o divmoddi4.bc --end-lib -o out.declared +; RUN: llvm-objdump -d -r -t out.declared | FileCheck %s --check-prefix=GOOD-DUMP + +;--- divmoddi4.ll +target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64" +target triple = "armv7-none-unknown-eabi" + +; Adding it to llvm.used (see the commented-out line below) does not appear to have any effect!
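+;; For reference (an illustration, not one of the test inputs): the "implicit"
+;; case exercised above is what plain C++ produces on this target. A function
+;; such as `long long quotient(long long n, long long d) { return n / d; }`
+;; contains no call in the input IR, but ARMv7 has no 64-bit divide
+;; instruction, so ISel lowers the sdiv to an __aeabi_ldivmod libcall, whose
+;; implementation in turn calls __divmoddi4 (stubbed in this test).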
+;; @llvm.used = appending global [1 x ptr] [ptr @__divmoddi4], section "llvm.metadata" + +; Stub version of the real __divmoddi4 +define i64 @__divmoddi4(i64 %a, i64 %b, ptr writeonly %rem) #0 align 32 { +entry: + %sub = sub i64 %a, %b + store i64 0, ptr %rem, align 8 + ret i64 %sub +} + +attributes #0 = { mustprogress nofree noinline norecurse nosync nounwind willreturn memory(argmem: write) "frame-pointer"="non-leaf" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="cortex-a15" } + +;--- aeabi_ldivmod.s +.syntax unified +.p2align 2 +.arm +.globl __aeabi_ldivmod +.type __aeabi_ldivmod,%function +__aeabi_ldivmod: + push {r6, lr} + sub sp, sp, #16 + add r6, sp, #8 + str r6, [sp] + bl __divmoddi4 + ldr r2, [sp, #8] + ldr r3, [sp, #12] + add sp, sp, #16 + pop {r6, pc} +.size __aeabi_ldivmod, . - __aeabi_ldivmod + +;--- main-implicit.ll +target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64" +target triple = "armv7-none-unknown-eabi" + +define dso_local i64 @_start(i64 %num, i64 %denom) local_unnamed_addr #0 { +entry: + %div = sdiv i64 %num, %denom + %ret = add i64 %div, 2 + ret i64 %ret +} + +attributes #0 = { mustprogress nofree norecurse nosync nounwind willreturn memory(none) "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="cortex-a15" } + +;--- main-explicit.ll +target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64" +target triple = "armv7-none-unknown-eabi" + +declare { i64, i64 } @__aeabi_ldivmod(i64, i64) + +define dso_local noundef i64 @_start(i64 noundef %num, i64 noundef %denom) local_unnamed_addr #0 { +entry: + %quotrem = call { i64, i64 } @__aeabi_ldivmod(i64 %num, i64 %denom) + %div = extractvalue { i64, i64 } %quotrem, 0 + %ret = add i64 %div, 2 + ret i64 %ret +} + +attributes #0 = { mustprogress nofree norecurse nosync nounwind willreturn memory(none) "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="cortex-a15" } + +;--- main-declared.ll +target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64" +target triple = "armv7-none-unknown-eabi" + +declare { i64, i64 } @__aeabi_ldivmod(i64, i64) + +define dso_local i64 @_start(i64 %num, i64 %denom) local_unnamed_addr #0 { +entry: + %div = sdiv i64 %num, %denom + %ret = add i64 %div, 2 + ret i64 %ret +} + +attributes #0 = { mustprogress nofree norecurse nosync nounwind willreturn memory(none) "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="cortex-a15" } diff --git a/lld/test/MachO/export-options.s b/lld/test/MachO/export-options.s index 1a5d409e83383..df2f90e3a4f3f 100644 --- a/lld/test/MachO/export-options.s +++ b/lld/test/MachO/export-options.s @@ -117,13 +117,15 @@ ## Check that only string-literal patterns match ## Check that comments and blank lines are stripped from symbol list # RUN: %lld -dylib %t/symdefs.o -o %t/literal \ -# RUN: -exported_symbols_list %t/literals.txt +# RUN: -exported_symbols_list %t/literals.txt \ +# RUN: -exported_symbol singleton # RUN: llvm-objdump --macho --exports-trie %t/literal | \ # RUN: FileCheck --check-prefix=LITERAL %s # LITERAL-DAG: literal_only # LITERAL-DAG: literal_also # LITERAL-DAG: globby_also +# LITERAL-DAG: singleton # LITERAL-NOT: globby_only ## Check that only glob patterns match @@ -245,7 +247,7 @@ _keep_lazy: #--- symdefs.s -.globl literal_only, literal_also, globby_only, globby_also +.globl literal_only, literal_also, globby_only, globby_also, singleton literal_only: retq literal_also: @@ -254,6 +256,8 @@ globby_only: retq 
globby_also: retq +singleton: + retq #--- literals.txt diff --git a/lldb/include/lldb/Breakpoint/BreakpointList.h b/lldb/include/lldb/Breakpoint/BreakpointList.h index a7399d385f6f0..4a921fadfc066 100644 --- a/lldb/include/lldb/Breakpoint/BreakpointList.h +++ b/lldb/include/lldb/Breakpoint/BreakpointList.h @@ -163,8 +163,7 @@ class BreakpointList { bool m_is_internal; public: - typedef LockingAdaptedIterable + typedef LockingAdaptedIterable BreakpointIterable; BreakpointIterable Breakpoints() { return BreakpointIterable(m_breakpoints, GetMutex()); diff --git a/lldb/include/lldb/Breakpoint/BreakpointLocationCollection.h b/lldb/include/lldb/Breakpoint/BreakpointLocationCollection.h index 34bd309864871..3aef1d658c0e5 100644 --- a/lldb/include/lldb/Breakpoint/BreakpointLocationCollection.h +++ b/lldb/include/lldb/Breakpoint/BreakpointLocationCollection.h @@ -165,8 +165,7 @@ class BreakpointLocationCollection { mutable std::mutex m_collection_mutex; public: - typedef AdaptedIterable + typedef llvm::iterator_range BreakpointLocationCollectionIterable; BreakpointLocationCollectionIterable BreakpointLocations() { return BreakpointLocationCollectionIterable(m_break_loc_collection); diff --git a/lldb/include/lldb/Breakpoint/BreakpointLocationList.h b/lldb/include/lldb/Breakpoint/BreakpointLocationList.h index f76a8fcfdd7e7..17dc0bfe03148 100644 --- a/lldb/include/lldb/Breakpoint/BreakpointLocationList.h +++ b/lldb/include/lldb/Breakpoint/BreakpointLocationList.h @@ -204,8 +204,7 @@ class BreakpointLocationList { BreakpointLocationCollection *m_new_location_recorder; public: - typedef AdaptedIterable + typedef llvm::iterator_range BreakpointLocationIterable; BreakpointLocationIterable BreakpointLocations() { diff --git a/lldb/include/lldb/Breakpoint/WatchpointList.h b/lldb/include/lldb/Breakpoint/WatchpointList.h index bf87495d79dba..d037d36e64290 100644 --- a/lldb/include/lldb/Breakpoint/WatchpointList.h +++ b/lldb/include/lldb/Breakpoint/WatchpointList.h @@ -39,8 +39,7 @@ class WatchpointList { ~WatchpointList(); typedef std::list wp_collection; - typedef LockingAdaptedIterable + typedef LockingAdaptedIterable WatchpointIterable; /// Add a Watchpoint to the list. 
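All of the typedef changes in these lldb headers follow one mechanical pattern: the hand-rolled AdaptedConstIterator/AdaptedIterable machinery is dropped in favor of llvm::iterator_range, with locking kept behind as a thin RAII wrapper. A minimal sketch of that wrapper (simplified and renamed; the real class is the reworked LockingAdaptedIterable in lldb/include/lldb/Utility/Iterable.h, shown later in this patch):

#include "llvm/ADT/iterator_range.h"
#include <mutex>

// LockingRange: expose the container's own const_iterators and hold the
// given mutex for the lifetime of the range object.
template <typename MutexType, typename C>
class LockingRange : public llvm::iterator_range<typename C::const_iterator> {
public:
  LockingRange(const C &container, MutexType &mutex)
      : llvm::iterator_range<typename C::const_iterator>(container.begin(),
                                                         container.end()),
        m_mutex(&mutex) {
    m_mutex->lock(); // released when the range is destroyed
  }
  LockingRange(LockingRange &&rhs)
      : llvm::iterator_range<typename C::const_iterator>(rhs),
        m_mutex(rhs.m_mutex) {
    rhs.m_mutex = nullptr; // the moved-from object must not unlock
  }
  ~LockingRange() {
    if (m_mutex)
      m_mutex->unlock();
  }

private:
  MutexType *m_mutex;
};

Callers are unchanged: a range-based for over e.g. Watchpoints() reads exactly as before, and the lock now lives precisely as long as the iteration.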
diff --git a/lldb/include/lldb/Breakpoint/WatchpointResource.h b/lldb/include/lldb/Breakpoint/WatchpointResource.h index 070d84cff8f26..c1a81fc486eb6 100644 --- a/lldb/include/lldb/Breakpoint/WatchpointResource.h +++ b/lldb/include/lldb/Breakpoint/WatchpointResource.h @@ -39,8 +39,7 @@ class WatchpointResource void SetType(bool read, bool write); typedef std::vector WatchpointCollection; - typedef LockingAdaptedIterable + typedef LockingAdaptedIterable WatchpointIterable; /// Iterate over the watchpoint constituents for this resource diff --git a/lldb/include/lldb/Core/ModuleList.h b/lldb/include/lldb/Core/ModuleList.h index 43d931a844740..29b87de88520d 100644 --- a/lldb/include/lldb/Core/ModuleList.h +++ b/lldb/include/lldb/Core/ModuleList.h @@ -521,14 +521,13 @@ class ModuleList { Notifier *m_notifier = nullptr; public: - typedef LockingAdaptedIterable + typedef LockingAdaptedIterable ModuleIterable; ModuleIterable Modules() const { return ModuleIterable(m_modules, GetMutex()); } - typedef AdaptedIterable + typedef llvm::iterator_range ModuleIterableNoLocking; ModuleIterableNoLocking ModulesNoLocking() const { return ModuleIterableNoLocking(m_modules); diff --git a/lldb/include/lldb/Core/ModuleSpec.h b/lldb/include/lldb/Core/ModuleSpec.h index 4cbbbfa8a26e1..86be0383f8b47 100644 --- a/lldb/include/lldb/Core/ModuleSpec.h +++ b/lldb/include/lldb/Core/ModuleSpec.h @@ -389,8 +389,7 @@ class ModuleSpecList { } typedef std::vector collection; - typedef LockingAdaptedIterable + typedef LockingAdaptedIterable ModuleSpecIterable; ModuleSpecIterable ModuleSpecs() { diff --git a/lldb/include/lldb/Core/Telemetry.h b/lldb/include/lldb/Core/Telemetry.h index 60a7097de5eee..b72556ecaf3c9 100644 --- a/lldb/include/lldb/Core/Telemetry.h +++ b/lldb/include/lldb/Core/Telemetry.h @@ -56,17 +56,24 @@ struct LLDBBaseTelemetryInfo : public llvm::telemetry::TelemetryInfo { void serialize(llvm::telemetry::Serializer &serializer) const override; }; -/// The base Telemetry manager instance in LLDB +/// The base Telemetry manager instance in LLDB. /// This class declares additional instrumentation points /// applicable to LLDB. 
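Instrumentation points are expected to use the manager roughly as follows (a sketch; the entry type, the fields touched, and the error handling here are illustrative rather than prescribed by this patch):

// TelemetryManager::getInstance() may return nullptr when no vendor plugin
// has installed an instance via setInstance().
if (auto *manager = lldb_private::telemetry::TelemetryManager::getInstance()) {
  lldb_private::telemetry::LLDBBaseTelemetryInfo entry;
  entry.start_time = std::chrono::steady_clock::now();
  // dispatch() runs preDispatch() and forwards the entry to all registered
  // llvm::telemetry::Destination instances.
  if (llvm::Error err = manager->dispatch(&entry))
    llvm::consumeError(std::move(err)); // telemetry must never disturb the debugger
}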
class TelemetryManager : public llvm::telemetry::Manager { public: + llvm::Error preDispatch(llvm::telemetry::TelemetryInfo *entry) override; + + virtual llvm::StringRef GetInstanceName() const = 0; + + static TelemetryManager *getInstance(); + +protected: TelemetryManager(std::unique_ptr<llvm::telemetry::Config> config); - llvm::Error preDispatch(llvm::telemetry::TelemetryInfo *entry) override; + static void setInstance(std::unique_ptr<TelemetryManager> manager); private: std::unique_ptr<llvm::telemetry::Config> m_config; + static std::unique_ptr<TelemetryManager> g_instance; }; } // namespace telemetry diff --git a/lldb/include/lldb/Host/common/NativeProcessProtocol.h b/lldb/include/lldb/Host/common/NativeProcessProtocol.h index 744699210d4b5..1d5fecfcd5c27 100644 --- a/lldb/include/lldb/Host/common/NativeProcessProtocol.h +++ b/lldb/include/lldb/Host/common/NativeProcessProtocol.h @@ -51,13 +51,9 @@ class NativeProcessProtocol { virtual ~NativeProcessProtocol() = default; typedef std::vector<std::unique_ptr<NativeThreadProtocol>> thread_collection; - template <typename I> - static NativeThreadProtocol &thread_list_adapter(I &iter) { - assert(*iter); - return **iter; - } - typedef LockingAdaptedIterable + typedef LockingAdaptedIterable< + std::recursive_mutex, thread_collection, + llvm::pointee_iterator<thread_collection::const_iterator>> ThreadIterable; virtual Status Resume(const ResumeActionList &resume_actions) = 0; diff --git a/lldb/include/lldb/Symbol/ObjectFile.h b/lldb/include/lldb/Symbol/ObjectFile.h index d89314d44bf67..874926da2ceb7 100644 --- a/lldb/include/lldb/Symbol/ObjectFile.h +++ b/lldb/include/lldb/Symbol/ObjectFile.h @@ -81,9 +81,14 @@ class ObjectFile : public std::enable_shared_from_this<ObjectFile>, enum BinaryType { eBinaryTypeInvalid = 0, eBinaryTypeUnknown, - eBinaryTypeKernel, /// kernel binary - eBinaryTypeUser, /// user process binary - eBinaryTypeStandalone /// standalone binary / firmware + /// kernel binary + eBinaryTypeKernel, + /// user process binary, dyld addr + eBinaryTypeUser, + /// user process binary, dyld_all_image_infos addr + eBinaryTypeUserAllImageInfos, + /// standalone binary / firmware + eBinaryTypeStandalone }; struct LoadableData { diff --git a/lldb/include/lldb/Symbol/SymbolContext.h b/lldb/include/lldb/Symbol/SymbolContext.h index 69fbe544c73cd..8b6317c6f33c2 100644 --- a/lldb/include/lldb/Symbol/SymbolContext.h +++ b/lldb/include/lldb/Symbol/SymbolContext.h @@ -467,7 +467,7 @@ class SymbolContextList { const_iterator begin() const { return m_symbol_contexts.begin(); } const_iterator end() const { return m_symbol_contexts.end(); } - typedef AdaptedIterable + typedef llvm::iterator_range<collection::const_iterator> SymbolContextIterable; SymbolContextIterable SymbolContexts() { return SymbolContextIterable(m_symbol_contexts); diff --git a/lldb/include/lldb/Symbol/TypeList.h b/lldb/include/lldb/Symbol/TypeList.h index d58772ad5b62e..6a38babd942ab 100644 --- a/lldb/include/lldb/Symbol/TypeList.h +++ b/lldb/include/lldb/Symbol/TypeList.h @@ -39,8 +39,7 @@ class TypeList { lldb::TypeSP GetTypeAtIndex(uint32_t idx); typedef std::vector<lldb::TypeSP> collection; - typedef AdaptedIterable - TypeIterable; + typedef llvm::iterator_range<collection::const_iterator> TypeIterable; TypeIterable Types() { return TypeIterable(m_types); } diff --git a/lldb/include/lldb/Symbol/TypeMap.h b/lldb/include/lldb/Symbol/TypeMap.h index 89011efab5c31..6c36ff9369fa5 100644 --- a/lldb/include/lldb/Symbol/TypeMap.h +++ b/lldb/include/lldb/Symbol/TypeMap.h @@ -44,7 +44,8 @@ class TypeMap { lldb::TypeSP FirstType() const; typedef std::multimap collection; - typedef AdaptedIterable TypeIterable; + typedef llvm::iterator_range<ValueMapIterator<collection::const_iterator>> + TypeIterable; TypeIterable Types() const { return TypeIterable(m_types); } diff --git
a/lldb/include/lldb/Target/QueueList.h b/lldb/include/lldb/Target/QueueList.h index 7c74a6a99ac18..3f177c90d3989 100644 --- a/lldb/include/lldb/Target/QueueList.h +++ b/lldb/include/lldb/Target/QueueList.h @@ -48,9 +48,7 @@ class QueueList { lldb::QueueSP GetQueueAtIndex(uint32_t idx); typedef std::vector collection; - typedef LockingAdaptedIterable - QueueIterable; + typedef LockingAdaptedIterable QueueIterable; /// Iterate over the list of queues /// diff --git a/lldb/include/lldb/Target/TargetList.h b/lldb/include/lldb/Target/TargetList.h index a0cddc6b2966f..080a6039c7ff8 100644 --- a/lldb/include/lldb/Target/TargetList.h +++ b/lldb/include/lldb/Target/TargetList.h @@ -44,8 +44,7 @@ class TargetList : public Broadcaster { } typedef std::vector collection; - typedef LockingAdaptedIterable + typedef LockingAdaptedIterable TargetIterable; /// Create a new Target. diff --git a/lldb/include/lldb/Target/ThreadCollection.h b/lldb/include/lldb/Target/ThreadCollection.h index 29f5103e7eec7..3fe62787649f4 100644 --- a/lldb/include/lldb/Target/ThreadCollection.h +++ b/lldb/include/lldb/Target/ThreadCollection.h @@ -20,8 +20,7 @@ namespace lldb_private { class ThreadCollection { public: typedef std::vector collection; - typedef LockingAdaptedIterable + typedef LockingAdaptedIterable ThreadIterable; ThreadCollection(); diff --git a/lldb/include/lldb/Utility/Iterable.h b/lldb/include/lldb/Utility/Iterable.h index 5c38e46feb925..db1f0e65ef6f1 100644 --- a/lldb/include/lldb/Utility/Iterable.h +++ b/lldb/include/lldb/Utility/Iterable.h @@ -11,172 +11,37 @@ #include +#include namespace lldb_private { -template E map_adapter(I &iter) { - return iter->second; -} - -template E vector_adapter(I &iter) { return *iter; } - -template E list_adapter(I &iter) { return *iter; } - -template -class AdaptedConstIterator { -public: - typedef typename C::const_iterator BackingIterator; - - // Wrapping constructor - AdaptedConstIterator(BackingIterator backing_iterator) - : m_iter(backing_iterator) {} - - // Default-constructible - AdaptedConstIterator() : m_iter() {} - - // Copy-constructible - AdaptedConstIterator(const AdaptedConstIterator &rhs) : m_iter(rhs.m_iter) {} - - // Copy-assignable - AdaptedConstIterator &operator=(const AdaptedConstIterator &rhs) { - m_iter = rhs.m_iter; - return *this; - } - - // Destructible - ~AdaptedConstIterator() = default; - - // Comparable - bool operator==(const AdaptedConstIterator &rhs) { - return m_iter == rhs.m_iter; - } - - bool operator!=(const AdaptedConstIterator &rhs) { - return m_iter != rhs.m_iter; - } - - // Rvalue dereferenceable - E operator*() { return (*A)(m_iter); } - - E operator->() { return (*A)(m_iter); } - - // Offset dereferenceable - E operator[](typename BackingIterator::difference_type offset) { - return AdaptedConstIterator(m_iter + offset); - } - - // Incrementable - AdaptedConstIterator &operator++() { - m_iter++; - return *this; - } - - // Decrementable - AdaptedConstIterator &operator--() { - m_iter--; - return *this; - } - - // Compound assignment - AdaptedConstIterator & - operator+=(typename BackingIterator::difference_type offset) { - m_iter += offset; - return *this; - } - - AdaptedConstIterator & - operator-=(typename BackingIterator::difference_type offset) { - m_iter -= offset; - return *this; - } - - // Arithmetic - AdaptedConstIterator - operator+(typename BackingIterator::difference_type offset) { - return AdaptedConstIterator(m_iter + offset); - } - - AdaptedConstIterator - operator-(typename BackingIterator::difference_type offset) { - 
return AdaptedConstIterator(m_iter - offset); - } - - // Comparable - bool operator<(AdaptedConstIterator &rhs) { return m_iter < rhs.m_iter; } - - bool operator<=(AdaptedConstIterator &rhs) { return m_iter <= rhs.m_iter; } - - bool operator>(AdaptedConstIterator &rhs) { return m_iter > rhs.m_iter; } - - bool operator>=(AdaptedConstIterator &rhs) { return m_iter >= rhs.m_iter; } - - template - friend AdaptedConstIterator - operator+(typename C1::const_iterator::difference_type, - AdaptedConstIterator &); - - template - friend typename C1::const_iterator::difference_type - operator-(AdaptedConstIterator &, - AdaptedConstIterator &); - - template - friend void swap(AdaptedConstIterator &, - AdaptedConstIterator &); - -private: - BackingIterator m_iter; -}; - -template -AdaptedConstIterator operator+( - typename AdaptedConstIterator::BackingIterator::difference_type - offset, - AdaptedConstIterator &rhs) { - return rhs.operator+(offset); -} - -template -typename AdaptedConstIterator::BackingIterator::difference_type -operator-(AdaptedConstIterator &lhs, - AdaptedConstIterator &rhs) { - return (lhs.m_iter - rhs.m_iter); -} - -template -void swap(AdaptedConstIterator &lhs, - AdaptedConstIterator &rhs) { - std::swap(lhs.m_iter, rhs.m_iter); -} - -template -class AdaptedIterable { -private: - const C &m_container; - -public: - AdaptedIterable(const C &container) : m_container(container) {} - - AdaptedConstIterator begin() { - return AdaptedConstIterator(m_container.begin()); - } - - AdaptedConstIterator end() { - return AdaptedConstIterator(m_container.end()); - } +template ::value_type::second_type> +struct ValueMapIterator + : llvm::iterator_adaptor_base< + ValueMapIterator, WrappedIteratorT, + typename std::iterator_traits::iterator_category, + T> { + ValueMapIterator() = default; + explicit ValueMapIterator(WrappedIteratorT u) + : ValueMapIterator::iterator_adaptor_base(std::move(u)) {} + + const T &operator*() { return (*this->I).second; } + const T &operator*() const { return (*this->I).second; } }; -template -class LockingAdaptedIterable : public AdaptedIterable { +template +class LockingAdaptedIterable : public llvm::iterator_range { public: LockingAdaptedIterable(const C &container, MutexType &mutex) - : AdaptedIterable(container), m_mutex(&mutex) { + : llvm::iterator_range(container), m_mutex(&mutex) { m_mutex->lock(); } LockingAdaptedIterable(LockingAdaptedIterable &&rhs) - : AdaptedIterable(rhs), m_mutex(rhs.m_mutex) { + : llvm::iterator_range(rhs), m_mutex(rhs.m_mutex) { rhs.m_mutex = nullptr; } diff --git a/lldb/source/Core/Telemetry.cpp b/lldb/source/Core/Telemetry.cpp index 0d0d7c1df3bb9..5222f76704f91 100644 --- a/lldb/source/Core/Telemetry.cpp +++ b/lldb/source/Core/Telemetry.cpp @@ -29,10 +29,7 @@ namespace lldb_private { namespace telemetry { -using ::llvm::Error; -using ::llvm::telemetry::Destination; -using ::llvm::telemetry::Serializer; -using ::llvm::telemetry::TelemetryInfo; +using namespace llvm::telemetry; static uint64_t ToNanosec(const SteadyTimePoint Point) { return std::chrono::nanoseconds(Point.time_since_epoch()).count(); @@ -46,28 +43,34 @@ void LLDBBaseTelemetryInfo::serialize(Serializer &serializer) const { serializer.write("end_time", ToNanosec(end_time.value())); } -[[maybe_unused]] static std::string MakeUUID(lldb_private::Debugger *debugger) { +[[maybe_unused]] static std::string MakeUUID(Debugger *debugger) { uint8_t random_bytes[16]; if (auto ec = llvm::getRandomBytes(random_bytes, 16)) { LLDB_LOG(GetLog(LLDBLog::Object), "Failed to generate random bytes 
for UUID: {0}", ec.message()); - // fallback to using timestamp + debugger ID. + // Fallback to using timestamp + debugger ID. return llvm::formatv( "{0}_{1}", std::chrono::steady_clock::now().time_since_epoch().count(), debugger->GetID()); } - return lldb_private::UUID(random_bytes).GetAsString(); + return UUID(random_bytes).GetAsString(); } -TelemetryManager::TelemetryManager( - std::unique_ptr config) +TelemetryManager::TelemetryManager(std::unique_ptr config) : m_config(std::move(config)) {} llvm::Error TelemetryManager::preDispatch(TelemetryInfo *entry) { // Do nothing for now. // In up-coming patch, this would be where the manager // attach the session_uuid to the entry. - return Error::success(); + return llvm::Error::success(); +} + +std::unique_ptr TelemetryManager::g_instance = nullptr; +TelemetryManager *TelemetryManager::getInstance() { return g_instance.get(); } + +void TelemetryManager::setInstance(std::unique_ptr manager) { + g_instance = std::move(manager); } } // namespace telemetry diff --git a/lldb/source/Plugins/ObjectFile/Mach-O/ObjectFileMachO.cpp b/lldb/source/Plugins/ObjectFile/Mach-O/ObjectFileMachO.cpp index 4e356a7c8f5d9..a19322ff1e263 100644 --- a/lldb/source/Plugins/ObjectFile/Mach-O/ObjectFileMachO.cpp +++ b/lldb/source/Plugins/ObjectFile/Mach-O/ObjectFileMachO.cpp @@ -5599,9 +5599,13 @@ bool ObjectFileMachO::GetCorefileMainBinaryInfo(addr_t &value, // struct main_bin_spec // { // uint32_t version; // currently 2 - // uint32_t type; // 0 == unspecified, 1 == kernel, + // uint32_t type; // 0 == unspecified, + // // 1 == kernel // // 2 == user process, + // dyld mach-o binary addr // // 3 == standalone binary + // // 4 == user process, + // // dyld_all_image_infos addr // uint64_t address; // UINT64_MAX if address not specified // uint64_t slide; // slide, UINT64_MAX if unspecified // // 0 if no slide needs to be applied to @@ -5652,6 +5656,7 @@ bool ObjectFileMachO::GetCorefileMainBinaryInfo(addr_t &value, // convert the "main bin spec" type into our // ObjectFile::BinaryType enum const char *typestr = "unrecognized type"; + type = eBinaryTypeInvalid; switch (binspec_type) { case 0: type = eBinaryTypeUnknown; @@ -5669,6 +5674,10 @@ bool ObjectFileMachO::GetCorefileMainBinaryInfo(addr_t &value, type = eBinaryTypeStandalone; typestr = "standalone"; break; + case 4: + type = eBinaryTypeUserAllImageInfos; + typestr = "userland dyld_all_image_infos"; + break; } LLDB_LOGF(log, "LC_NOTE 'main bin spec' found, version %d type %d " diff --git a/lldb/source/Plugins/Process/mach-core/ProcessMachCore.cpp b/lldb/source/Plugins/Process/mach-core/ProcessMachCore.cpp index eef9bd4a175ec..281f3a0db8f69 100644 --- a/lldb/source/Plugins/Process/mach-core/ProcessMachCore.cpp +++ b/lldb/source/Plugins/Process/mach-core/ProcessMachCore.cpp @@ -114,6 +114,7 @@ ProcessMachCore::ProcessMachCore(lldb::TargetSP target_sp, : PostMortemProcess(target_sp, listener_sp, core_file), m_core_aranges(), m_core_range_infos(), m_core_module_sp(), m_dyld_addr(LLDB_INVALID_ADDRESS), + m_dyld_all_image_infos_addr(LLDB_INVALID_ADDRESS), m_mach_kernel_addr(LLDB_INVALID_ADDRESS) {} // Destructor @@ -320,6 +321,9 @@ bool ProcessMachCore::LoadBinariesViaMetadata() { } else if (type == ObjectFile::eBinaryTypeUser) { m_dyld_addr = objfile_binary_value; m_dyld_plugin_name = DynamicLoaderMacOSXDYLD::GetPluginNameStatic(); + } else if (type == ObjectFile::eBinaryTypeUserAllImageInfos) { + m_dyld_all_image_infos_addr = objfile_binary_value; + m_dyld_plugin_name = DynamicLoaderMacOSXDYLD::GetPluginNameStatic(); } 
else { const bool force_symbol_search = true; const bool notify = true; @@ -466,6 +470,7 @@ void ProcessMachCore::LoadBinariesViaExhaustiveSearch() { addr_t saved_user_dyld_addr = m_dyld_addr; m_mach_kernel_addr = LLDB_INVALID_ADDRESS; m_dyld_addr = LLDB_INVALID_ADDRESS; + m_dyld_all_image_infos_addr = LLDB_INVALID_ADDRESS; addr_t better_kernel_address = DynamicLoaderDarwinKernel::SearchForDarwinKernel(this); @@ -507,6 +512,12 @@ void ProcessMachCore::LoadBinariesAndSetDYLD() { "image at 0x%" PRIx64, __FUNCTION__, m_dyld_addr); m_dyld_plugin_name = DynamicLoaderMacOSXDYLD::GetPluginNameStatic(); + } else if (m_dyld_all_image_infos_addr != LLDB_INVALID_ADDRESS) { + LLDB_LOGF(log, + "ProcessMachCore::%s: Using user process dyld " + "dyld_all_image_infos at 0x%" PRIx64, + __FUNCTION__, m_dyld_all_image_infos_addr); + m_dyld_plugin_name = DynamicLoaderMacOSXDYLD::GetPluginNameStatic(); } } else { if (m_dyld_addr != LLDB_INVALID_ADDRESS) { @@ -515,6 +526,11 @@ "image at 0x%" PRIx64, __FUNCTION__, m_dyld_addr); m_dyld_plugin_name = DynamicLoaderMacOSXDYLD::GetPluginNameStatic(); + } else if (m_dyld_all_image_infos_addr != LLDB_INVALID_ADDRESS) { + LLDB_LOGF(log, + "ProcessMachCore::%s: Using user process dyld " + "dyld_all_image_infos at 0x%" PRIx64, + __FUNCTION__, m_dyld_all_image_infos_addr); } else if (m_mach_kernel_addr != LLDB_INVALID_ADDRESS) { LLDB_LOGF(log, "ProcessMachCore::%s: Using kernel " @@ -763,19 +779,32 @@ void ProcessMachCore::Initialize() { } addr_t ProcessMachCore::GetImageInfoAddress() { - // If we found both a user-process dyld and a kernel binary, we need to - // decide which to prefer. + // The DynamicLoader plugin will call back in to this Process + // method to find the virtual address of one of these: + // 1. The xnu mach kernel binary Mach-O header + // 2. The dyld binary Mach-O header + // 3. dyld's dyld_all_image_infos object + // + // DynamicLoaderMacOSX will accept either the dyld Mach-O header + // address or the dyld_all_image_infos address interchangeably; there is + // no need to distinguish between them. It disambiguates by the Mach-O + // file magic number at the start. if (GetCorefilePreference() == eKernelCorefile) { - if (m_mach_kernel_addr != LLDB_INVALID_ADDRESS) { + if (m_mach_kernel_addr != LLDB_INVALID_ADDRESS) return m_mach_kernel_addr; - } - return m_dyld_addr; + if (m_dyld_addr != LLDB_INVALID_ADDRESS) + return m_dyld_addr; } else { - if (m_dyld_addr != LLDB_INVALID_ADDRESS) { + if (m_dyld_addr != LLDB_INVALID_ADDRESS) return m_dyld_addr; - } - return m_mach_kernel_addr; + if (m_mach_kernel_addr != LLDB_INVALID_ADDRESS) + return m_mach_kernel_addr; } + + // m_dyld_addr and m_mach_kernel_addr are both + // invalid; return m_dyld_all_image_infos_addr + // in case it has a useful value.
+ return m_dyld_all_image_infos_addr; } lldb_private::ObjectFile *ProcessMachCore::GetCoreObjectFile() { diff --git a/lldb/source/Plugins/Process/mach-core/ProcessMachCore.h b/lldb/source/Plugins/Process/mach-core/ProcessMachCore.h index 8996ae116614b..6ba9f2354edf9 100644 --- a/lldb/source/Plugins/Process/mach-core/ProcessMachCore.h +++ b/lldb/source/Plugins/Process/mach-core/ProcessMachCore.h @@ -131,6 +131,7 @@ class ProcessMachCore : public lldb_private::PostMortemProcess { VMRangeToPermissions m_core_range_infos; lldb::ModuleSP m_core_module_sp; lldb::addr_t m_dyld_addr; + lldb::addr_t m_dyld_all_image_infos_addr; lldb::addr_t m_mach_kernel_addr; llvm::StringRef m_dyld_plugin_name; }; diff --git a/lldb/unittests/Core/CMakeLists.txt b/lldb/unittests/Core/CMakeLists.txt index 949963fd40346..d4d3764b67ae3 100644 --- a/lldb/unittests/Core/CMakeLists.txt +++ b/lldb/unittests/Core/CMakeLists.txt @@ -1,3 +1,7 @@ +if (LLVM_BUILD_TELEMETRY) + set(TELEMETRY_DEPS Telemetry) +endif() + add_lldb_unittest(LLDBCoreTests CommunicationTest.cpp DiagnosticEventTest.cpp @@ -10,6 +14,7 @@ add_lldb_unittest(LLDBCoreTests RichManglingContextTest.cpp SourceLocationSpecTest.cpp SourceManagerTest.cpp + TelemetryTest.cpp UniqueCStringMapTest.cpp LINK_LIBS @@ -26,4 +31,5 @@ add_lldb_unittest(LLDBCoreTests LLVMTestingSupport LINK_COMPONENTS Support + ${TELEMETRY_DEPS} ) diff --git a/lldb/unittests/Core/TelemetryTest.cpp b/lldb/unittests/Core/TelemetryTest.cpp new file mode 100644 index 0000000000000..0f2eaccb21a2c --- /dev/null +++ b/lldb/unittests/Core/TelemetryTest.cpp @@ -0,0 +1,98 @@ +//===-- TelemetryTest.cpp ------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "llvm/Config/llvm-config.h" + +#ifdef LLVM_BUILD_TELEMETRY + +#include "lldb/Core/PluginInterface.h" +#include "lldb/Core/PluginManager.h" +#include "lldb/Core/Telemetry.h" +#include "llvm/ADT/StringRef.h" +#include "llvm/Support/Error.h" +#include "llvm/Telemetry/Telemetry.h" +#include "llvm/Testing/Support/Error.h" +#include "gtest/gtest.h" +#include <memory> +#include <vector> + +namespace lldb_private { + +struct FakeTelemetryInfo : public llvm::telemetry::TelemetryInfo { + std::string msg; +}; + +class TestDestination : public llvm::telemetry::Destination { +public: + TestDestination(std::vector<const llvm::telemetry::TelemetryInfo *> *entries) + : received_entries(entries) {} + + llvm::Error + receiveEntry(const llvm::telemetry::TelemetryInfo *entry) override { + received_entries->push_back(entry); + return llvm::Error::success(); + } + + llvm::StringLiteral name() const override { return "TestDestination"; } + +private: + std::vector<const llvm::telemetry::TelemetryInfo *> *received_entries; +}; + +class FakePlugin : public telemetry::TelemetryManager { +public: + FakePlugin() + : telemetry::TelemetryManager( + std::make_unique<llvm::telemetry::Config>(true)) {} + + // TelemetryManager interface + llvm::Error preDispatch(llvm::telemetry::TelemetryInfo *entry) override { + if (auto *fake_entry = llvm::dyn_cast<FakeTelemetryInfo>(entry)) + fake_entry->msg = "In FakePlugin"; + + return llvm::Error::success(); + } + + llvm::StringRef GetInstanceName() const override { + return "FakeTelemetryPlugin"; + } + + static void Initialize() { + telemetry::TelemetryManager::setInstance(std::make_unique<FakePlugin>()); + } + + static void Terminate() { telemetry::TelemetryManager::setInstance(nullptr); } +}; + +} // namespace lldb_private + +TEST(TelemetryTest, PluginTest) { + // This would have been called by the plugin registration in a "real" plugin. + // For tests, we just call it directly. + lldb_private::FakePlugin::Initialize(); + + auto *ins = lldb_private::telemetry::TelemetryManager::getInstance(); + ASSERT_NE(ins, nullptr); + + std::vector<const llvm::telemetry::TelemetryInfo *> expected_entries; + ins->addDestination( + std::make_unique<lldb_private::TestDestination>(&expected_entries)); + + lldb_private::FakeTelemetryInfo entry; + entry.msg = ""; + + ASSERT_THAT_ERROR(ins->dispatch(&entry), ::llvm::Succeeded()); + ASSERT_EQ(1U, expected_entries.size()); + EXPECT_EQ("In FakePlugin", + llvm::dyn_cast<lldb_private::FakeTelemetryInfo>(expected_entries[0]) + ->msg); + + ASSERT_EQ("FakeTelemetryPlugin", ins->GetInstanceName()); +} + +#endif // LLVM_BUILD_TELEMETRY diff --git a/llvm/docs/AMDGPUUsage.rst index 899b2cf3b4901..5966d1617feee 100644 --- a/llvm/docs/AMDGPUUsage.rst +++ b/llvm/docs/AMDGPUUsage.rst @@ -990,7 +990,12 @@ supported for the ``amdgcn`` target. the stride must be 0, the "add tid" flag must be 0, the swizzle enable bits must be off, and the extent must be measured in bytes. (On subtargets where bounds checking may be disabled, buffer fat pointers may choose to enable - it or not). + it or not). The cache swizzle support introduced in gfx942 may be used. + + These pointers can be created by `addrspacecast` from a buffer resource + (`ptr addrspace(8)`) or by using `llvm.amdgcn.make.buffer.rsrc` to produce a + `ptr addrspace(7)` directly, which yields a buffer fat pointer with an initial + offset of 0 and prevents the address space cast from being rewritten away. **Buffer Resource** The buffer resource pointer, in address space 8, is the newer form @@ -1027,6 +1032,12 @@ supported for the ``amdgcn`` target.
the stride is the size of a structured element, the "add tid" flag must be 0, and the swizzle enable bits must be off. + These pointers can be created by `addrspacecast` from a buffer resource + (`ptr addrspace(8)`) or by using `llvm.amdgcn.make.buffer.rsrc` to produce a + `ptr addrspace(9)` directly, which yields a buffer strided pointer whose initial + index and offset values are both 0. This prevents the address space cast from + being rewritten away. + **Streamout Registers** Dedicated registers used by the GS NGG Streamout Instructions. The register file is modelled as a memory in a distinct address space because it is indexed diff --git a/llvm/docs/GitHub.rst index b5b75db91e1c4..979b87c8d02f6 100644 --- a/llvm/docs/GitHub.rst +++ b/llvm/docs/GitHub.rst @@ -438,7 +438,7 @@ Releases Backporting Fixes to the Release Branches ----------------------------------------- You can use special comments on issues or pull requests to make backport -requests for the release branches. To do this, after your pull reuest has been +requests for the release branches. To do this, after your pull request has been merged: 1. Edit "Milestone" at the right side of the issue or pull request diff --git a/llvm/docs/OptBisect.rst index 809f54883e5a9..0e4d31acbe71e 100644 --- a/llvm/docs/OptBisect.rst +++ b/llvm/docs/OptBisect.rst @@ -157,7 +157,6 @@ to make this check uniform across all passes. These helper functions are: .. code-block:: c++ bool ModulePass::skipModule(Module &M); - bool CallGraphSCCPass::skipSCC(CallGraphSCC &SCC); bool FunctionPass::skipFunction(const Function &F); bool LoopPass::skipLoop(const Loop *L); diff --git a/llvm/include/llvm/Analysis/CallGraph.h b/llvm/include/llvm/Analysis/CallGraph.h index f5ce2322b76d9..7f977db161c20 100644 --- a/llvm/include/llvm/Analysis/CallGraph.h +++ b/llvm/include/llvm/Analysis/CallGraph.h @@ -129,10 +129,6 @@ class CallGraph { return CallsExternalNode.get(); } - /// Old node has been deleted, and New is to be used in its place, update the - /// ExternalCallingNode. - void ReplaceExternalCallEdge(CallGraphNode *Old, CallGraphNode *New); - //===--------------------------------------------------------------------- // Functions to keep a call graph up to date with a function that has been // modified. @@ -251,18 +247,6 @@ class CallGraphNode { CalledFunctions.pop_back(); } - /// Removes the edge in the node for the specified call site. - /// - /// Note that this method takes linear time, so it should be used sparingly. - void removeCallEdgeFor(CallBase &Call); - - /// Removes all call edges from this node to the specified callee - /// function. - /// - /// This takes more time to execute than removeCallEdgeTo, so it should not - /// be used unless necessary. - void removeAnyCallEdgeTo(CallGraphNode *Callee); - /// Removes one edge associated with a null callsite from this node to /// the specified callee function. void removeOneAbstractEdgeTo(CallGraphNode *Callee); diff --git a/llvm/include/llvm/Analysis/CallGraphSCCPass.h b/llvm/include/llvm/Analysis/CallGraphSCCPass.h index d0d81605436ea..e8714bae8f4d9 100644 --- a/llvm/include/llvm/Analysis/CallGraphSCCPass.h +++ b/llvm/include/llvm/Analysis/CallGraphSCCPass.h @@ -76,11 +76,6 @@ class CallGraphSCCPass : public Pass { /// the call graph. If the derived class implements this method, it should /// always explicitly call the implementation here.
void getAnalysisUsage(AnalysisUsage &Info) const override; - -protected: - /// Optional passes call this function to check whether the pass should be - /// skipped. This is the case when optimization bisect is over the limit. - bool skipSCC(CallGraphSCC &SCC) const; }; /// CallGraphSCC - This is a single SCC that a CallGraphSCCPass is run on. diff --git a/llvm/include/llvm/CodeGen/ISDOpcodes.h b/llvm/include/llvm/CodeGen/ISDOpcodes.h index 68ed812222dfd..665c4d6baad80 100644 --- a/llvm/include/llvm/CodeGen/ISDOpcodes.h +++ b/llvm/include/llvm/CodeGen/ISDOpcodes.h @@ -1459,6 +1459,23 @@ enum NodeType { VECREDUCE_UMAX, VECREDUCE_UMIN, + // PARTIAL_REDUCE_[U|S]MLA(Accumulator, Input1, Input2) + // The partial reduction nodes sign or zero extend Input1 and Input2 to the + // element type of Accumulator before multiplying their results. + // This result is concatenated to the Accumulator, and this is then reduced, + // using addition, to the result type. + // The output is only expected to either be given to another partial reduction + // operation or an equivalent vector reduce operation, so the order in which + // the elements are reduced is deliberately not specified. + // Input1 and Input2 must be the same type. Accumulator and the output must be + // the same type. + // The number of elements in Input1 and Input2 must be a positive integer + // multiple of the number of elements in the Accumulator / output type. + // Input1 and Input2 must have an element type which is the same as or smaller + // than the element type of the Accumulator and output. + PARTIAL_REDUCE_SMLA, + PARTIAL_REDUCE_UMLA, + // The `llvm.experimental.stackmap` intrinsic. // Operands: input chain, glue, , , [live0[, live1...]] // Outputs: output chain, glue diff --git a/llvm/lib/CodeGen/RegAllocEvictionAdvisor.h b/llvm/include/llvm/CodeGen/RegAllocEvictionAdvisor.h similarity index 71% rename from llvm/lib/CodeGen/RegAllocEvictionAdvisor.h rename to llvm/include/llvm/CodeGen/RegAllocEvictionAdvisor.h index 52dd946a68540..a14548ff6959e 100644 --- a/llvm/lib/CodeGen/RegAllocEvictionAdvisor.h +++ b/llvm/include/llvm/CodeGen/RegAllocEvictionAdvisor.h @@ -9,13 +9,18 @@ #ifndef LLVM_CODEGEN_REGALLOCEVICTIONADVISOR_H #define LLVM_CODEGEN_REGALLOCEVICTIONADVISOR_H +#include "llvm/ADT/Any.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/StringRef.h" +#include "llvm/CodeGen/MachineBlockFrequencyInfo.h" +#include "llvm/CodeGen/MachineLoopInfo.h" #include "llvm/CodeGen/Register.h" #include "llvm/Config/llvm-config.h" +#include "llvm/IR/PassManager.h" #include "llvm/MC/MCRegister.h" #include "llvm/Pass.h" +#include "llvm/Support/Compiler.h" namespace llvm { class AllocationOrder; @@ -149,6 +154,35 @@ class RegAllocEvictionAdvisor { const bool EnableLocalReassign; }; +/// Common provider for legacy and new pass managers. +/// This keeps the state for logging, and sets up and holds the provider. +/// The legacy pass itself used to keep the logging state and provider, +/// so this extraction helps the NPM analysis to reuse the logic. +/// TODO: Coalesce this with the NPM analysis when legacy PM is removed. 
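For reference, the PARTIAL_REDUCE_[U|S]MLA contract introduced above in ISDOpcodes.h fits in a few lines of scalar code. A reference model (illustrative only; the modulo association below is just one of the lane-to-accumulator orders the deliberately unspecified reduction permits):

#include <cstdint>
#include <vector>

// Models PARTIAL_REDUCE_SMLA(Acc, In1, In2): In1/In2 hold a positive integer
// multiple of Acc's element count; each product is sign-extended to the
// accumulator element type and folded into some accumulator lane.
std::vector<int64_t> partialReduceSMLA(std::vector<int64_t> Acc,
                                       const std::vector<int32_t> &In1,
                                       const std::vector<int32_t> &In2) {
  const size_t N = Acc.size();
  for (size_t I = 0; I < In1.size(); ++I)
    Acc[I % N] += int64_t(In1[I]) * int64_t(In2[I]); // UMLA zero-extends instead
  return Acc;
}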
+class RegAllocEvictionAdvisorProvider { +public: + enum class AdvisorMode : int { Default, Release, Development }; + RegAllocEvictionAdvisorProvider(AdvisorMode Mode, LLVMContext &Ctx) + : Ctx(Ctx), Mode(Mode) {} + + virtual ~RegAllocEvictionAdvisorProvider() = default; + + virtual void logRewardIfNeeded(const MachineFunction &MF, + llvm::function_ref GetReward) {} + + virtual std::unique_ptr + getAdvisor(const MachineFunction &MF, const RAGreedy &RA, + MachineBlockFrequencyInfo *MBFI, MachineLoopInfo *Loops) = 0; + + AdvisorMode getAdvisorMode() const { return Mode; } + +protected: + LLVMContext &Ctx; + +private: + const AdvisorMode Mode; +}; + /// ImmutableAnalysis abstraction for fetching the Eviction Advisor. We model it /// as an analysis to decouple the user from the implementation insofar as /// dependencies on other analyses goes. The motivation for it being an @@ -164,20 +198,20 @@ class RegAllocEvictionAdvisor { /// /// Because we need to offer additional services in 'development' mode, the /// implementations of this analysis need to implement RTTI support. -class RegAllocEvictionAdvisorAnalysis : public ImmutablePass { +class RegAllocEvictionAdvisorAnalysisLegacy : public ImmutablePass { public: enum class AdvisorMode : int { Default, Release, Development }; - RegAllocEvictionAdvisorAnalysis(AdvisorMode Mode) - : ImmutablePass(ID), Mode(Mode){}; + RegAllocEvictionAdvisorAnalysisLegacy(AdvisorMode Mode) + : ImmutablePass(ID), Mode(Mode) {}; static char ID; /// Get an advisor for the given context (i.e. machine function, etc) - virtual std::unique_ptr - getAdvisor(const MachineFunction &MF, const RAGreedy &RA) = 0; + RegAllocEvictionAdvisorProvider &getProvider() { return *Provider; } + AdvisorMode getAdvisorMode() const { return Mode; } virtual void logRewardIfNeeded(const MachineFunction &MF, - llvm::function_ref GetReward){}; + function_ref GetReward) {}; protected: // This analysis preserves everything, and subclasses may have additional @@ -185,19 +219,65 @@ class RegAllocEvictionAdvisorAnalysis : public ImmutablePass { void getAnalysisUsage(AnalysisUsage &AU) const override { AU.setPreservesAll(); } + std::unique_ptr Provider; private: StringRef getPassName() const override; const AdvisorMode Mode; }; +/// A MachineFunction analysis for fetching the Eviction Advisor. +/// This sets up the Provider lazily and caches it. +/// - in the ML implementation case, the evaluator is stateless but (especially +/// in the development mode) expensive to set up. With a Module Analysis, we +/// `require` it and set it up once. +/// - in the 'development' mode ML case, we want to capture the training log +/// during allocation (this is a log of features encountered and decisions +/// made), and then measure a score, potentially a few steps after allocation +/// completes. So we need a Module analysis to keep the logger state around +/// until we can make that measurement. +class RegAllocEvictionAdvisorAnalysis + : public AnalysisInfoMixin { + static AnalysisKey Key; + friend AnalysisInfoMixin; + +public: + struct Result { + // owned by this analysis + RegAllocEvictionAdvisorProvider *Provider; + + bool invalidate(MachineFunction &MF, const PreservedAnalyses &PA, + MachineFunctionAnalysisManager::Invalidator &Inv) { + // Provider is stateless and constructed only once. Do not get + // invalidated. 
+ return false; + } + }; + + Result run(MachineFunction &MF, MachineFunctionAnalysisManager &MAM); + +private: + void + initializeProvider(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode Mode, + LLVMContext &Ctx); + + std::unique_ptr Provider; +}; + /// Specialization for the API used by the analysis infrastructure to create /// an instance of the eviction advisor. -template <> Pass *callDefaultCtor(); +template <> Pass *callDefaultCtor(); + +RegAllocEvictionAdvisorAnalysisLegacy *createReleaseModeAdvisorAnalysisLegacy(); + +RegAllocEvictionAdvisorAnalysisLegacy * +createDevelopmentModeAdvisorAnalysisLegacy(); -RegAllocEvictionAdvisorAnalysis *createReleaseModeAdvisor(); +LLVM_ATTRIBUTE_RETURNS_NONNULL RegAllocEvictionAdvisorProvider * +createReleaseModeAdvisorProvider(LLVMContext &Ctx); -RegAllocEvictionAdvisorAnalysis *createDevelopmentModeAdvisor(); +RegAllocEvictionAdvisorProvider * +createDevelopmentModeAdvisorProvider(LLVMContext &Ctx); // TODO: move to RegAllocEvictionAdvisor.cpp when we move implementation // out of RegAllocGreedy.cpp diff --git a/llvm/include/llvm/CodeGen/SelectionDAG.h b/llvm/include/llvm/CodeGen/SelectionDAG.h index 461c0c1ead16d..cf8e4a3d2513b 100644 --- a/llvm/include/llvm/CodeGen/SelectionDAG.h +++ b/llvm/include/llvm/CodeGen/SelectionDAG.h @@ -1607,11 +1607,6 @@ class SelectionDAG { /// the target's desired shift amount type. SDValue getShiftAmountOperand(EVT LHSTy, SDValue Op); - /// Create the DAG equivalent of vector_partial_reduce where Op1 and Op2 are - /// its operands and ReducedTY is the intrinsic's return type. - SDValue getPartialReduceAdd(SDLoc DL, EVT ReducedTy, SDValue Op1, - SDValue Op2); - /// Expands a node with multiple results to an FP or vector libcall. The /// libcall is expected to take all the operands of the \p Node followed by /// output pointers for each of the results. \p CallRetResNo can be optionally diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h index bbecc7a6ddaee..a4c3d042fe3a4 100644 --- a/llvm/include/llvm/CodeGen/TargetLowering.h +++ b/llvm/include/llvm/CodeGen/TargetLowering.h @@ -5564,6 +5564,10 @@ class TargetLowering : public TargetLoweringBase { /// temporarily, advance store position, before re-loading the final vector. SDValue expandVECTOR_COMPRESS(SDNode *Node, SelectionDAG &DAG) const; + /// Expands PARTIAL_REDUCE_S/UMLA nodes to a series of simpler operations, + /// consisting of zext/sext, extract_subvector, mul and add operations. + SDValue expandPartialReduceMLA(SDNode *Node, SelectionDAG &DAG) const; + /// Legalize a SETCC or VP_SETCC with given LHS and RHS and condition code CC /// on the current target. A VP_SETCC will additionally be given a Mask /// and/or EVL not equal to SDValue(). diff --git a/llvm/include/llvm/CodeGen/TargetRegisterInfo.h b/llvm/include/llvm/CodeGen/TargetRegisterInfo.h index 66fd3fb9b0526..79f014edb58c8 100644 --- a/llvm/include/llvm/CodeGen/TargetRegisterInfo.h +++ b/llvm/include/llvm/CodeGen/TargetRegisterInfo.h @@ -1232,13 +1232,6 @@ class TargetRegisterInfo : public MCRegisterInfo { return nullptr; } - /// Returns the physical register number of sub-register "Index" - /// for physical register RegNo. Return zero if the sub-register does not - /// exist. 
- inline MCRegister getSubReg(MCRegister Reg, unsigned Idx) const { - return static_cast(this)->getSubReg(Reg, Idx); - } - /// Some targets have non-allocatable registers that aren't technically part /// of the explicit callee saved register list, but should be handled as such /// in certain cases. diff --git a/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h b/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h index d25077cae63e4..33b3d7bad4a71 100644 --- a/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h +++ b/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h @@ -2399,6 +2399,7 @@ class OpenMPIRBuilder { CurInfo.NonContigInfo.Strides.end()); } }; + using MapInfosOrErrorTy = Expected; /// Callback function type for functions emitting the host fallback code that /// is executed when the kernel launch fails. It takes an insertion point as @@ -2407,6 +2408,11 @@ class OpenMPIRBuilder { using EmitFallbackCallbackTy = function_ref; + // Callback function type for emitting and fetching user defined custom + // mappers. + using CustomMapperCallbackTy = + function_ref(unsigned int)>; + /// Generate a target region entry call and host fallback call. /// /// \param Loc The location at which the request originated and is fulfilled. @@ -2473,11 +2479,11 @@ class OpenMPIRBuilder { /// return nullptr by reference. Accepts a reference to a MapInfosTy object /// that contains information generated for mappable clauses, /// including base pointers, pointers, sizes, map types, user-defined mappers. - void emitOffloadingArrays( + Error emitOffloadingArrays( InsertPointTy AllocaIP, InsertPointTy CodeGenIP, MapInfosTy &CombinedInfo, - TargetDataInfo &Info, bool IsNonContiguous = false, - function_ref DeviceAddrCB = nullptr, - function_ref CustomMapperCB = nullptr); + TargetDataInfo &Info, CustomMapperCallbackTy CustomMapperCB, + bool IsNonContiguous = false, + function_ref DeviceAddrCB = nullptr); /// Allocates memory for and populates the arrays required for offloading /// (offload_{baseptrs|ptrs|mappers|sizes|maptypes|mapnames}). Then, it @@ -2485,12 +2491,12 @@ class OpenMPIRBuilder { /// library. In essence, this function is a combination of /// emitOffloadingArrays and emitOffloadingArraysArgument and should arguably /// be preferred by clients of OpenMPIRBuilder. - void emitOffloadingArraysAndArgs( + Error emitOffloadingArraysAndArgs( InsertPointTy AllocaIP, InsertPointTy CodeGenIP, TargetDataInfo &Info, TargetDataRTArgs &RTArgs, MapInfosTy &CombinedInfo, - bool IsNonContiguous = false, bool ForEndCall = false, - function_ref DeviceAddrCB = nullptr, - function_ref CustomMapperCB = nullptr); + CustomMapperCallbackTy CustomMapperCB, bool IsNonContiguous = false, + bool ForEndCall = false, + function_ref DeviceAddrCB = nullptr); /// Creates offloading entry for the provided entry ID \a ID, address \a /// Addr, size \a Size, and flags \a Flags. @@ -2950,12 +2956,12 @@ class OpenMPIRBuilder { /// \param FuncName Optional param to specify mapper function name. /// \param CustomMapperCB Optional callback to generate code related to /// custom mappers. - Function *emitUserDefinedMapper( - function_ref + Expected emitUserDefinedMapper( + function_ref PrivAndGenMapInfoCB, llvm::Type *ElemTy, StringRef FuncName, - function_ref CustomMapperCB = nullptr); + CustomMapperCallbackTy CustomMapperCB); /// Generator for '#omp target data' /// @@ -2969,21 +2975,21 @@ class OpenMPIRBuilder { /// \param IfCond Value which corresponds to the if clause condition. 
/// \param Info Stores all information related to the Target Data directive. /// \param GenMapInfoCB Callback that populates the MapInfos and returns. + /// \param CustomMapperCB Callback to generate code related to + /// custom mappers. /// \param BodyGenCB Optional Callback to generate the region code. /// \param DeviceAddrCB Optional callback to generate code related to /// use_device_ptr and use_device_addr. - /// \param CustomMapperCB Optional callback to generate code related to - /// custom mappers. InsertPointOrErrorTy createTargetData( const LocationDescription &Loc, InsertPointTy AllocaIP, InsertPointTy CodeGenIP, Value *DeviceID, Value *IfCond, TargetDataInfo &Info, GenMapInfoCallbackTy GenMapInfoCB, + CustomMapperCallbackTy CustomMapperCB, omp::RuntimeFunction *MapperFunc = nullptr, function_ref BodyGenCB = nullptr, function_ref DeviceAddrCB = nullptr, - function_ref CustomMapperCB = nullptr, Value *SrcLocInfo = nullptr); using TargetBodyGenCallbackTy = function_ref &Inputs, GenMapInfoCallbackTy GenMapInfoCB, TargetBodyGenCallbackTy BodyGenCB, TargetGenArgAccessorsCallbackTy ArgAccessorFuncCB, + CustomMapperCallbackTy CustomMapperCB, SmallVector Dependencies = {}, bool HasNowait = false); /// Returns __kmpc_for_static_init_* runtime function for the specified diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td index d5d185ebc12f6..9558f2b9b74e0 100644 --- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td +++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td @@ -1284,11 +1284,24 @@ defset list<AMDGPUImageDimIntrinsic> AMDGPUImageDimAtomicIntrinsics = { // Data type for buffer resources (V#). Maybe, in the future, we can create a // similar one for textures (T#). def AMDGPUBufferRsrcTy : LLVMQualPointerType<8>; +// Data type for buffer fat pointers, which are a buffer resource (V#) followed by +// a 32-bit offset. These don't exist in hardware and are a compiler-internal +// convenience. +def AMDGPUBufferFatPointerTy : LLVMQualPointerType<7>; let TargetPrefix = "amdgcn" in { +// Create a buffer resource wrapping `base` with the specified `stride`, +// `numrecords`, and `flags`. All of these values will need to be +// wave-uniform when the buffer instructions are invoked, so non-uniform +// inputs to this intrinsic will trigger waterfall loops. +// +// In addition to creating ptr addrspace(8), the representation of buffer +// resources, it can create the fat pointers ptr addrspace(7) and ptr addrspace(9), +// which carry additional offset bits. When this intrinsic is used to create +// these fat pointers, their offset and index fields (if applicable) are zero.
def int_amdgcn_make_buffer_rsrc : DefaultAttrsIntrinsic < - [AMDGPUBufferRsrcTy], + [llvm_anyptr_ty], [llvm_anyptr_ty, // base llvm_i16_ty, // stride (and swizzle control) llvm_i32_ty, // NumRecords / extent diff --git a/llvm/include/llvm/InitializePasses.h b/llvm/include/llvm/InitializePasses.h index da4ffcd83213a..81a602c8889d8 100644 --- a/llvm/include/llvm/InitializePasses.h +++ b/llvm/include/llvm/InitializePasses.h @@ -251,7 +251,7 @@ void initializePseudoProbeInserterPass(PassRegistry &); void initializeRAGreedyPass(PassRegistry &); void initializeReachingDefAnalysisPass(PassRegistry &); void initializeReassociateLegacyPassPass(PassRegistry &); -void initializeRegAllocEvictionAdvisorAnalysisPass(PassRegistry &); +void initializeRegAllocEvictionAdvisorAnalysisLegacyPass(PassRegistry &); void initializeRegAllocFastPass(PassRegistry &); void initializeRegAllocPriorityAdvisorAnalysisPass(PassRegistry &); void initializeRegAllocScoringPass(PassRegistry &); diff --git a/llvm/include/llvm/Passes/CodeGenPassBuilder.h b/llvm/include/llvm/Passes/CodeGenPassBuilder.h index 1458318ff021a..12781e2b84623 100644 --- a/llvm/include/llvm/Passes/CodeGenPassBuilder.h +++ b/llvm/include/llvm/Passes/CodeGenPassBuilder.h @@ -57,6 +57,7 @@ #include "llvm/CodeGen/PeepholeOptimizer.h" #include "llvm/CodeGen/PostRASchedulerList.h" #include "llvm/CodeGen/PreISelIntrinsicLowering.h" +#include "llvm/CodeGen/RegAllocEvictionAdvisor.h" #include "llvm/CodeGen/RegAllocFast.h" #include "llvm/CodeGen/RegUsageInfoCollector.h" #include "llvm/CodeGen/RegUsageInfoPropagate.h" diff --git a/llvm/include/llvm/Passes/MachinePassRegistry.def b/llvm/include/llvm/Passes/MachinePassRegistry.def index 075ebcb829553..2b5e258682585 100644 --- a/llvm/include/llvm/Passes/MachinePassRegistry.def +++ b/llvm/include/llvm/Passes/MachinePassRegistry.def @@ -114,6 +114,7 @@ MACHINE_FUNCTION_ANALYSIS("machine-post-dom-tree", MachinePostDominatorTreeAnalysis()) MACHINE_FUNCTION_ANALYSIS("machine-trace-metrics", MachineTraceMetricsAnalysis()) MACHINE_FUNCTION_ANALYSIS("pass-instrumentation", PassInstrumentationAnalysis(PIC)) +MACHINE_FUNCTION_ANALYSIS("regalloc-evict", RegAllocEvictionAdvisorAnalysis()) MACHINE_FUNCTION_ANALYSIS("slot-indexes", SlotIndexesAnalysis()) MACHINE_FUNCTION_ANALYSIS("spill-code-placement", SpillPlacementAnalysis()) MACHINE_FUNCTION_ANALYSIS("virtregmap", VirtRegMapAnalysis()) diff --git a/llvm/include/llvm/SandboxIR/Region.h b/llvm/include/llvm/SandboxIR/Region.h index 66eda34ea2edb..ecc6c2049ef8d 100644 --- a/llvm/include/llvm/SandboxIR/Region.h +++ b/llvm/include/llvm/SandboxIR/Region.h @@ -120,6 +120,10 @@ class Region { /// Set \p I as the \p Idx'th element in the auxiliary vector. /// NOTE: This is for internal use, it does not set the metadata. void setAux(unsigned Idx, Instruction *I); + /// Helper for dropping Aux metadata for \p I. + void dropAuxMetadata(Instruction *I); + /// Remove instruction \p I from Aux and drop metadata. 
+ void removeFromAux(Instruction *I); public: Region(Context &Ctx, TargetTransformInfo &TTI); diff --git a/llvm/include/llvm/TargetParser/RISCVTargetParser.h b/llvm/include/llvm/TargetParser/RISCVTargetParser.h index c237e1ddd6b38..b13a94cd56f2e 100644 --- a/llvm/include/llvm/TargetParser/RISCVTargetParser.h +++ b/llvm/include/llvm/TargetParser/RISCVTargetParser.h @@ -65,7 +65,7 @@ CPUModel getCPUModel(StringRef CPU); } // namespace RISCV -namespace RISCVII { +namespace RISCVVType { enum VLMUL : uint8_t { LMUL_1 = 0, LMUL_2, @@ -82,9 +82,7 @@ enum { TAIL_AGNOSTIC = 1, MASK_AGNOSTIC = 2, }; -} // namespace RISCVII -namespace RISCVVType { // Is this a SEW value that can be encoded into the VTYPE format. inline static bool isValidSEW(unsigned SEW) { return isPowerOf2_32(SEW) && SEW >= 8 && SEW <= 64; @@ -95,21 +93,21 @@ inline static bool isValidLMUL(unsigned LMUL, bool Fractional) { return isPowerOf2_32(LMUL) && LMUL <= 8 && (!Fractional || LMUL != 1); } -unsigned encodeVTYPE(RISCVII::VLMUL VLMUL, unsigned SEW, bool TailAgnostic, +unsigned encodeVTYPE(VLMUL VLMUL, unsigned SEW, bool TailAgnostic, bool MaskAgnostic); -inline static RISCVII::VLMUL getVLMUL(unsigned VType) { - unsigned VLMUL = VType & 0x7; - return static_cast(VLMUL); +inline static VLMUL getVLMUL(unsigned VType) { + unsigned VLMul = VType & 0x7; + return static_cast(VLMul); } // Decode VLMUL into 1,2,4,8 and fractional indicator. -std::pair decodeVLMUL(RISCVII::VLMUL VLMUL); +std::pair decodeVLMUL(VLMUL VLMul); -inline static RISCVII::VLMUL encodeLMUL(unsigned LMUL, bool Fractional) { +inline static VLMUL encodeLMUL(unsigned LMUL, bool Fractional) { assert(isValidLMUL(LMUL, Fractional) && "Unsupported LMUL"); unsigned LmulLog2 = Log2_32(LMUL); - return static_cast(Fractional ? 8 - LmulLog2 : LmulLog2); + return static_cast(Fractional ? 8 - LmulLog2 : LmulLog2); } inline static unsigned decodeVSEW(unsigned VSEW) { @@ -133,10 +131,9 @@ inline static bool isMaskAgnostic(unsigned VType) { return VType & 0x80; } void printVType(unsigned VType, raw_ostream &OS); -unsigned getSEWLMULRatio(unsigned SEW, RISCVII::VLMUL VLMul); +unsigned getSEWLMULRatio(unsigned SEW, VLMUL VLMul); -std::optional -getSameRatioLMUL(unsigned SEW, RISCVII::VLMUL VLMUL, unsigned EEW); +std::optional getSameRatioLMUL(unsigned SEW, VLMUL VLMUL, unsigned EEW); } // namespace RISCVVType } // namespace llvm diff --git a/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/Passes/BottomUpVec.h b/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/Passes/BottomUpVec.h index 147a86de4e34e..daf6499213d48 100644 --- a/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/Passes/BottomUpVec.h +++ b/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/Passes/BottomUpVec.h @@ -16,14 +16,23 @@ #include "llvm/ADT/StringRef.h" #include "llvm/SandboxIR/Constant.h" #include "llvm/SandboxIR/Pass.h" -#include "llvm/SandboxIR/PassManager.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Vectorize/SandboxVectorizer/InstrMaps.h" #include "llvm/Transforms/Vectorize/SandboxVectorizer/Legality.h" namespace llvm::sandboxir { -class BottomUpVec final : public FunctionPass { +/// This is a simple bottom-up vectorizer Region pass. +/// It expects a "seed slice" as an input in the Region's Aux vector. +/// The "seed slice" is a vector of instructions that can be used as a starting +/// point for vectorization, like stores to consecutive memory addresses. 
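+/// For example (editor's illustration, not from this patch), two stores to
+/// adjacent addresses,
+///   store i32 %a, ptr %p
+///   store i32 %b, ptr %q   ; with %q == %p + 4
+/// form a seed slice that may later be replaced by one <2 x i32> store.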
+/// Starting from the seed instructions, it walks up the def-use chain looking
+/// for more instructions that can be vectorized. This pass will generate vector
+/// code if it can legally vectorize the code, regardless of whether it is
+/// profitable or not. For now, profitability is checked at the end of the region
+/// pass pipeline by a dedicated pass that accepts or rejects the IR
+/// transaction, depending on the cost.
+class BottomUpVec final : public RegionPass {
   bool Change = false;
   std::unique_ptr Legality;
   /// The original instructions that are potentially dead after vectorization.
@@ -55,16 +64,9 @@ class BottomUpVec final : public FunctionPass {
   /// Entry point for vectorization starting from \p Seeds.
   bool tryVectorize(ArrayRef Seeds);

-  /// The PM containing the pipeline of region passes.
-  RegionPassManager RPM;
-
 public:
-  BottomUpVec(StringRef Pipeline);
-  bool runOnFunction(Function &F, const Analyses &A) final;
-  void printPipeline(raw_ostream &OS) const final {
-    OS << getName() << "\n";
-    RPM.printPipeline(OS);
-  }
+  BottomUpVec() : RegionPass("bottom-up-vec") {}
+  bool runOnRegion(Region &Rgn, const Analyses &A) final;
 };

 } // namespace llvm::sandboxir
diff --git a/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/Passes/SeedCollection.h b/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/Passes/SeedCollection.h
new file mode 100644
index 0000000000000..286d971ff4851
--- /dev/null
+++ b/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/Passes/SeedCollection.h
@@ -0,0 +1,40 @@
+//===- SeedCollection.h -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// The seed-collection pass of the bottom-up vectorizer.
+//
+#ifndef LLVM_TRANSFORMS_VECTORIZE_SANDBOXVECTORIZER_PASSES_SEEDCOLLECTION_H
+#define LLVM_TRANSFORMS_VECTORIZE_SANDBOXVECTORIZER_PASSES_SEEDCOLLECTION_H
+
+#include "llvm/SandboxIR/Pass.h"
+#include "llvm/SandboxIR/PassManager.h"
+
+namespace llvm::sandboxir {
+
+/// This pass collects the instructions that can become vectorization "seeds",
+/// like stores to consecutive memory addresses. It then goes over the collected
+/// seeds, slices them into appropriately sized chunks, creates a Region with
+/// the seed slice as the Auxiliary vector, and runs the region pass pipeline.
+class SeedCollection final : public FunctionPass {
+
+  /// The PM containing the pipeline of region passes.
+ RegionPassManager RPM; + +public: + SeedCollection(StringRef Pipeline); + bool runOnFunction(Function &F, const Analyses &A) final; + void printPipeline(raw_ostream &OS) const final { + OS << getName() << "\n"; + RPM.printPipeline(OS); + } +}; + +} // namespace llvm::sandboxir + +#endif // LLVM_TRANSFORMS_VECTORIZE_SANDBOXVECTORIZER_PASSES_SEEDCOLLECTION_H diff --git a/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/Passes/TransactionSave.h b/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/Passes/TransactionSave.h new file mode 100644 index 0000000000000..73aafabfd0b0c --- /dev/null +++ b/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/Passes/TransactionSave.h @@ -0,0 +1,28 @@ +//===- TransactionSave.h ----------------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This is a region pass that simply calls Context::save() to save the IR state. +// + +#ifndef LLVM_TRANSFORMS_VECTORIZE_SANDBOXVECTORIZER_PASSES_TRANSACTIONSAVE_H +#define LLVM_TRANSFORMS_VECTORIZE_SANDBOXVECTORIZER_PASSES_TRANSACTIONSAVE_H + +#include "llvm/SandboxIR/Pass.h" +#include "llvm/SandboxIR/Region.h" + +namespace llvm::sandboxir { + +class TransactionSave : public RegionPass { +public: + TransactionSave() : RegionPass("tr-save") {} + bool runOnRegion(Region &Rgn, const Analyses &A) final; +}; + +} // namespace llvm::sandboxir + +#endif // LLVM_TRANSFORMS_VECTORIZE_SANDBOXVECTORIZER_PASSES_TRANSACTIONSAVE_H diff --git a/llvm/lib/Analysis/CallGraph.cpp b/llvm/lib/Analysis/CallGraph.cpp index ed9cd84bb8e9b..01344ee154fa5 100644 --- a/llvm/lib/Analysis/CallGraph.cpp +++ b/llvm/lib/Analysis/CallGraph.cpp @@ -138,16 +138,6 @@ void CallGraph::print(raw_ostream &OS) const { LLVM_DUMP_METHOD void CallGraph::dump() const { print(dbgs()); } #endif -void CallGraph::ReplaceExternalCallEdge(CallGraphNode *Old, - CallGraphNode *New) { - for (auto &CR : ExternalCallingNode->CalledFunctions) - if (CR.second == Old) { - CR.second->DropRef(); - CR.second = New; - CR.second->AddRef(); - } -} - // removeFunctionFromModule - Unlink the function from this module, returning // it. Because this removes the function from the module, the call graph node // is destroyed. This is only valid if the function does not call any other @@ -203,39 +193,6 @@ void CallGraphNode::print(raw_ostream &OS) const { LLVM_DUMP_METHOD void CallGraphNode::dump() const { print(dbgs()); } #endif -/// removeCallEdgeFor - This method removes the edge in the node for the -/// specified call site. Note that this method takes linear time, so it -/// should be used sparingly. -void CallGraphNode::removeCallEdgeFor(CallBase &Call) { - for (CalledFunctionsVector::iterator I = CalledFunctions.begin(); ; ++I) { - assert(I != CalledFunctions.end() && "Cannot find callsite to remove!"); - if (I->first && *I->first == &Call) { - I->second->DropRef(); - *I = CalledFunctions.back(); - CalledFunctions.pop_back(); - - // Remove all references to callback functions if there are any. - forEachCallbackFunction(Call, [=](Function *CB) { - removeOneAbstractEdgeTo(CG->getOrInsertFunction(CB)); - }); - return; - } - } -} - -// removeAnyCallEdgeTo - This method removes any call edges from this node to -// the specified callee function. 
This takes more time to execute than -// removeCallEdgeTo, so it should not be used unless necessary. -void CallGraphNode::removeAnyCallEdgeTo(CallGraphNode *Callee) { - for (unsigned i = 0, e = CalledFunctions.size(); i != e; ++i) - if (CalledFunctions[i].second == Callee) { - Callee->DropRef(); - CalledFunctions[i] = CalledFunctions.back(); - CalledFunctions.pop_back(); - --i; --e; - } -} - /// removeOneAbstractEdgeTo - Remove one edge associated with a null callsite /// from this node to the specified callee function. void CallGraphNode::removeOneAbstractEdgeTo(CallGraphNode *Callee) { diff --git a/llvm/lib/Analysis/CallGraphSCCPass.cpp b/llvm/lib/Analysis/CallGraphSCCPass.cpp index 7caf814cdb2d7..441f0c5d2f34b 100644 --- a/llvm/lib/Analysis/CallGraphSCCPass.cpp +++ b/llvm/lib/Analysis/CallGraphSCCPass.cpp @@ -725,28 +725,6 @@ Pass *CallGraphSCCPass::createPrinterPass(raw_ostream &OS, return new PrintCallGraphPass(Banner, OS); } -static std::string getDescription(const CallGraphSCC &SCC) { - std::string Desc = "SCC ("; - ListSeparator LS; - for (CallGraphNode *CGN : SCC) { - Desc += LS; - Function *F = CGN->getFunction(); - if (F) - Desc += F->getName(); - else - Desc += "<>"; - } - Desc += ")"; - return Desc; -} - -bool CallGraphSCCPass::skipSCC(CallGraphSCC &SCC) const { - OptPassGate &Gate = - SCC.getCallGraph().getModule().getContext().getOptPassGate(); - return Gate.isEnabled() && - !Gate.shouldRunPass(this->getPassName(), getDescription(SCC)); -} - char DummyCGSCCPass::ID = 0; INITIALIZE_PASS(DummyCGSCCPass, "DummyCGSCCPass", "DummyCGSCCPass", false, diff --git a/llvm/lib/Analysis/InlineCost.cpp b/llvm/lib/Analysis/InlineCost.cpp index 8fa150f7d690e..c6b927c8eee2f 100644 --- a/llvm/lib/Analysis/InlineCost.cpp +++ b/llvm/lib/Analysis/InlineCost.cpp @@ -795,8 +795,9 @@ class InlineCostCallAnalyzer final : public CallAnalyzer { // the given instruction was assessed. if (!PrintInstructionComments) return; - InstructionCostDetailMap[I].CostBefore = Cost; - InstructionCostDetailMap[I].ThresholdBefore = Threshold; + auto &CostDetail = InstructionCostDetailMap[I]; + CostDetail.CostBefore = Cost; + CostDetail.ThresholdBefore = Threshold; } void onInstructionAnalysisFinish(const Instruction *I) override { @@ -804,8 +805,9 @@ class InlineCostCallAnalyzer final : public CallAnalyzer { // the instruction has been assessed. 
if (!PrintInstructionComments) return; - InstructionCostDetailMap[I].CostAfter = Cost; - InstructionCostDetailMap[I].ThresholdAfter = Threshold; + auto &CostDetail = InstructionCostDetailMap[I]; + CostDetail.CostAfter = Cost; + CostDetail.ThresholdAfter = Threshold; } bool isCostBenefitAnalysisEnabled() { diff --git a/llvm/lib/Analysis/LoopAccessAnalysis.cpp b/llvm/lib/Analysis/LoopAccessAnalysis.cpp index 90db89f745e89..5a22ac8abc3fc 100644 --- a/llvm/lib/Analysis/LoopAccessAnalysis.cpp +++ b/llvm/lib/Analysis/LoopAccessAnalysis.cpp @@ -42,13 +42,12 @@ #include "llvm/IR/DiagnosticInfo.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/Function.h" -#include "llvm/IR/GetElementPtrTypeIterator.h" #include "llvm/IR/InstrTypes.h" #include "llvm/IR/Instruction.h" #include "llvm/IR/Instructions.h" +#include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Operator.h" #include "llvm/IR/PassManager.h" -#include "llvm/IR/PatternMatch.h" #include "llvm/IR/Type.h" #include "llvm/IR/Value.h" #include "llvm/IR/ValueHandle.h" @@ -66,7 +65,6 @@ #include using namespace llvm; -using namespace llvm::PatternMatch; #define DEBUG_TYPE "loop-accesses" @@ -2815,50 +2813,25 @@ bool LoopAccessInfo::isInvariant(Value *V) const { return SE->isLoopInvariant(S, TheLoop); } -/// Find the operand of the GEP that should be checked for consecutive -/// stores. This ignores trailing indices that have no effect on the final -/// pointer. -static unsigned getGEPInductionOperand(const GetElementPtrInst *Gep) { - const DataLayout &DL = Gep->getDataLayout(); - unsigned LastOperand = Gep->getNumOperands() - 1; - TypeSize GEPAllocSize = DL.getTypeAllocSize(Gep->getResultElementType()); - - // Walk backwards and try to peel off zeros. - while (LastOperand > 1 && match(Gep->getOperand(LastOperand), m_Zero())) { - // Find the type we're currently indexing into. - gep_type_iterator GEPTI = gep_type_begin(Gep); - std::advance(GEPTI, LastOperand - 2); - - // If it's a type with the same allocation size as the result of the GEP we - // can peel off the zero index. - TypeSize ElemSize = GEPTI.isStruct() - ? DL.getTypeAllocSize(GEPTI.getIndexedType()) - : GEPTI.getSequentialElementStride(DL); - if (ElemSize != GEPAllocSize) - break; - --LastOperand; - } - - return LastOperand; -} - -/// If the argument is a GEP, then returns the operand identified by -/// getGEPInductionOperand. However, if there is some other non-loop-invariant -/// operand, it returns that instead. -static Value *stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp) { +/// If \p Ptr is a GEP, which has a loop-variant operand, return that operand. +/// Otherwise, return \p Ptr. +static Value *getLoopVariantGEPOperand(Value *Ptr, ScalarEvolution *SE, + Loop *Lp) { auto *GEP = dyn_cast(Ptr); if (!GEP) return Ptr; - unsigned InductionOperand = getGEPInductionOperand(GEP); - - // Check that all of the gep indices are uniform except for our induction - // operand. - for (unsigned I = 0, E = GEP->getNumOperands(); I != E; ++I) - if (I != InductionOperand && - !SE->isLoopInvariant(SE->getSCEV(GEP->getOperand(I)), Lp)) - return Ptr; - return GEP->getOperand(InductionOperand); + Value *V = Ptr; + for (const Use &U : GEP->operands()) { + if (!SE->isLoopInvariant(SE->getSCEV(U), Lp)) { + if (V == Ptr) + V = U; + else + // There must be exactly one loop-variant operand. + return Ptr; + } + } + return V; } /// Get the stride of a pointer access in a loop. 
Looks for symbolic @@ -2873,7 +2846,7 @@ static const SCEV *getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *L // pointer, otherwise, we are analyzing the index. Value *OrigPtr = Ptr; - Ptr = stripGetElementPtr(Ptr, SE, Lp); + Ptr = getLoopVariantGEPOperand(Ptr, SE, Lp); const SCEV *V = SE->getSCEV(Ptr); if (Ptr != OrigPtr) diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp index 91a5f194db9dc..e3e026f7979da 100644 --- a/llvm/lib/Analysis/ValueTracking.cpp +++ b/llvm/lib/Analysis/ValueTracking.cpp @@ -1984,7 +1984,7 @@ static void computeKnownBitsFromOperator(const Operator *I, const ConstantRange Range = getVScaleRange(II->getFunction(), BitWidth); uint64_t SEW = RISCVVType::decodeVSEW( cast(II->getArgOperand(HasAVL))->getZExtValue()); - RISCVII::VLMUL VLMUL = static_cast( + RISCVVType::VLMUL VLMUL = static_cast( cast(II->getArgOperand(1 + HasAVL))->getZExtValue()); uint64_t MaxVLEN = Range.getUnsignedMax().getZExtValue() * RISCV::RVVBitsPerBlock; diff --git a/llvm/lib/CodeGen/AsmPrinter/DbgEntityHistoryCalculator.cpp b/llvm/lib/CodeGen/AsmPrinter/DbgEntityHistoryCalculator.cpp index c9efec37b0bc6..d87649c4e6567 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DbgEntityHistoryCalculator.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/DbgEntityHistoryCalculator.cpp @@ -362,8 +362,9 @@ static void clobberRegEntries(InlinedEntity Var, unsigned RegNo, FellowRegisters.push_back(Reg); // Drop all entries that have ended. + auto &Entries = LiveEntries[Var]; for (auto Index : IndicesToErase) - LiveEntries[Var].erase(Index); + Entries.erase(Index); } /// Add a new debug value for \p Var. Closes all overlapping debug values. diff --git a/llvm/lib/CodeGen/MLRegAllocEvictAdvisor.cpp b/llvm/lib/CodeGen/MLRegAllocEvictAdvisor.cpp index 9656774c6eaae..1a8e11de909e8 100644 --- a/llvm/lib/CodeGen/MLRegAllocEvictAdvisor.cpp +++ b/llvm/lib/CodeGen/MLRegAllocEvictAdvisor.cpp @@ -11,11 +11,11 @@ //===----------------------------------------------------------------------===// #include "AllocationOrder.h" -#include "RegAllocEvictionAdvisor.h" #include "RegAllocGreedy.h" #include "llvm/Analysis/InteractiveModelRunner.h" #include "llvm/Analysis/MLModelRunner.h" #include "llvm/Analysis/TensorSpec.h" +#include "llvm/CodeGen/RegAllocEvictionAdvisor.h" #if defined(LLVM_HAVE_TF_AOT_REGALLOCEVICTMODEL) || defined(LLVM_HAVE_TFLITE) #include "llvm/Analysis/ModelUnderTrainingRunner.h" #include "llvm/Analysis/NoInferenceModelRunner.h" @@ -115,7 +115,7 @@ class RegAllocScoring : public MachineFunctionPass { /// RegAllocReward analysis usage. void getAnalysisUsage(AnalysisUsage &AU) const override { AU.setPreservesAll(); - AU.addRequired(); + AU.addRequired(); AU.addRequired(); AU.addRequired(); MachineFunctionPass::getAnalysisUsage(AU); @@ -389,11 +389,12 @@ class MLEvictAdvisor : public RegAllocEvictionAdvisor { // =================================== // Release (AOT) - specifics // =================================== -class ReleaseModeEvictionAdvisorAnalysis final - : public RegAllocEvictionAdvisorAnalysis { +/// Common provider for legacy and new pass managers. 
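+/// The provider owns the model runner and constructs advisors on demand; the
+/// legacy and new pass-manager analyses only create and cache one provider
+/// and hand out a pointer to it.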
+class ReleaseModeEvictionAdvisorProvider final + : public RegAllocEvictionAdvisorProvider { public: - ReleaseModeEvictionAdvisorAnalysis() - : RegAllocEvictionAdvisorAnalysis(AdvisorMode::Release) { + ReleaseModeEvictionAdvisorProvider(LLVMContext &Ctx) + : RegAllocEvictionAdvisorProvider(AdvisorMode::Release, Ctx) { if (EnableDevelopmentFeatures) { InputFeatures = {RA_EVICT_FEATURES_LIST( _DECL_FEATURES) RA_EVICT_FIRST_DEVELOPMENT_FEATURE(_DECL_FEATURES) @@ -403,21 +404,13 @@ class ReleaseModeEvictionAdvisorAnalysis final } } // support for isa<> and dyn_cast. - static bool classof(const RegAllocEvictionAdvisorAnalysis *R) { + static bool classof(const RegAllocEvictionAdvisorProvider *R) { return R->getAdvisorMode() == AdvisorMode::Release; } -private: - std::vector InputFeatures; - - void getAnalysisUsage(AnalysisUsage &AU) const override { - AU.addRequired(); - AU.addRequired(); - RegAllocEvictionAdvisorAnalysis::getAnalysisUsage(AU); - } - std::unique_ptr - getAdvisor(const MachineFunction &MF, const RAGreedy &RA) override { + getAdvisor(const MachineFunction &MF, const RAGreedy &RA, + MachineBlockFrequencyInfo *MBFI, MachineLoopInfo *Loops) override { if (!Runner) { if (InteractiveChannelBaseName.empty()) Runner = std::make_unique>( @@ -428,14 +421,45 @@ class ReleaseModeEvictionAdvisorAnalysis final InteractiveChannelBaseName + ".out", InteractiveChannelBaseName + ".in"); } - return std::make_unique( - MF, RA, Runner.get(), - getAnalysis().getMBFI(), - getAnalysis().getLI()); + assert(MBFI && Loops && + "Invalid provider state: must have analysis available"); + return std::make_unique(MF, RA, Runner.get(), *MBFI, + *Loops); } + +private: + std::vector InputFeatures; std::unique_ptr Runner; }; +class ReleaseModeEvictionAdvisorAnalysisLegacy final + : public RegAllocEvictionAdvisorAnalysisLegacy { +public: + ReleaseModeEvictionAdvisorAnalysisLegacy() + : RegAllocEvictionAdvisorAnalysisLegacy(AdvisorMode::Release) {} + + void logRewardIfNeeded(const MachineFunction &MF, + llvm::function_ref GetReward) override { + // No-op in release mode + } + + bool doInitialization(Module &M) override { + Provider = + std::make_unique(M.getContext()); + return false; + } + + static bool classof(const RegAllocEvictionAdvisorAnalysisLegacy *R) { + return R->getAdvisorMode() == AdvisorMode::Release; + } + + void getAnalysisUsage(AnalysisUsage &AU) const override { + AU.addRequired(); + AU.addRequired(); + RegAllocEvictionAdvisorAnalysisLegacy::getAnalysisUsage(AU); + } +}; + // =================================== // Development mode-specifics // =================================== @@ -468,11 +492,11 @@ class DevelopmentModeEvictAdvisor : public MLEvictAdvisor { Logger *const Log; }; -class DevelopmentModeEvictionAdvisorAnalysis final - : public RegAllocEvictionAdvisorAnalysis { +class DevelopmentModeEvictionAdvisorProvider final + : public RegAllocEvictionAdvisorProvider { public: - DevelopmentModeEvictionAdvisorAnalysis() - : RegAllocEvictionAdvisorAnalysis(AdvisorMode::Development) { + DevelopmentModeEvictionAdvisorProvider(LLVMContext &Ctx) + : RegAllocEvictionAdvisorProvider(AdvisorMode::Development, Ctx) { if (EnableDevelopmentFeatures) { InputFeatures = {RA_EVICT_FEATURES_LIST( _DECL_FEATURES) RA_EVICT_FIRST_DEVELOPMENT_FEATURE(_DECL_FEATURES) @@ -492,44 +516,10 @@ class DevelopmentModeEvictionAdvisorAnalysis final TensorSpec::createSpec("action_step_type", {1}), TensorSpec::createSpec("action_reward", {1})}; } - } - // support for isa<> and dyn_cast. 
- static bool classof(const RegAllocEvictionAdvisorAnalysis *R) { - return R->getAdvisorMode() == AdvisorMode::Development; - } - - void logRewardIfNeeded(const MachineFunction &MF, - llvm::function_ref GetReward) override { - if (!Log || !Log->hasAnyObservationForContext(MF.getName())) - return; - // The function pass manager would run all the function passes for a - // function, so we assume the last context belongs to this function. If - // this invariant ever changes, we can implement at that time switching - // contexts. At this point, it'd be an error - if (Log->currentContext() != MF.getName()) { - MF.getFunction().getContext().emitError( - "The training log context shouldn't have had changed."); - } - if (Log->hasObservationInProgress()) - Log->logReward(GetReward()); - } - -private: - std::vector InputFeatures; - std::vector TrainingInputFeatures; - - void getAnalysisUsage(AnalysisUsage &AU) const override { - AU.addRequired(); - AU.addRequired(); - RegAllocEvictionAdvisorAnalysis::getAnalysisUsage(AU); - } - - bool doInitialization(Module &M) override { - LLVMContext &Ctx = M.getContext(); if (ModelUnderTraining.empty() && TrainingLog.empty()) { Ctx.emitError("Regalloc development mode should be requested with at " "least logging enabled and/or a training model"); - return false; + return; } if (ModelUnderTraining.empty()) Runner = std::make_unique(Ctx, InputFeatures); @@ -538,15 +528,15 @@ class DevelopmentModeEvictionAdvisorAnalysis final Ctx, ModelUnderTraining, DecisionName, TrainingInputFeatures); if (!Runner) { Ctx.emitError("Regalloc: could not set up the model runner"); - return false; + return; } if (TrainingLog.empty()) - return false; + return; std::error_code EC; auto OS = std::make_unique(TrainingLog, EC); if (EC) { - M.getContext().emitError(EC.message() + ":" + TrainingLog); - return false; + Ctx.emitError(EC.message() + ":" + TrainingLog); + return; } std::vector LFS = InputFeatures; if (auto *MUTR = dyn_cast(Runner.get())) @@ -558,25 +548,80 @@ class DevelopmentModeEvictionAdvisorAnalysis final Log = std::make_unique(std::move(OS), LFS, Reward, /*IncludeReward*/ true); - return false; + return; + } + + // support for isa<> and dyn_cast. + static bool classof(const RegAllocEvictionAdvisorProvider *R) { + return R->getAdvisorMode() == AdvisorMode::Development; + } + + void logRewardIfNeeded(const MachineFunction &MF, + llvm::function_ref GetReward) override { + if (!Log || !Log->hasAnyObservationForContext(MF.getName())) + return; + // The function pass manager would run all the function passes for a + // function, so we assume the last context belongs to this function. If + // this invariant ever changes, we can implement at that time switching + // contexts. 
At this point, it'd be an error
+    if (Log->currentContext() != MF.getName()) {
+      MF.getFunction().getContext().emitError(
+          "The training log context shouldn't have changed.");
+    }
+    if (Log->hasObservationInProgress())
+      Log->logReward(GetReward());
   }

   std::unique_ptr
-  getAdvisor(const MachineFunction &MF, const RAGreedy &RA) override {
+  getAdvisor(const MachineFunction &MF, const RAGreedy &RA,
+             MachineBlockFrequencyInfo *MBFI, MachineLoopInfo *Loops) override {
     if (!Runner)
       return nullptr;
     if (Log)
       Log->switchContext(MF.getName());
+    assert(MBFI && Loops &&
+           "Invalid provider state: must have analysis available");
     return std::make_unique(
-        MF, RA, Runner.get(),
-        getAnalysis().getMBFI(),
-        getAnalysis().getLI(), Log.get());
+        MF, RA, Runner.get(), *MBFI, *Loops, Log.get());
   }

+private:
+  std::vector InputFeatures;
+  std::vector TrainingInputFeatures;
+
   std::unique_ptr Runner;
   std::unique_ptr Log;
 };

+class DevelopmentModeEvictionAdvisorAnalysisLegacy final
+    : public RegAllocEvictionAdvisorAnalysisLegacy {
+public:
+  DevelopmentModeEvictionAdvisorAnalysisLegacy()
+      : RegAllocEvictionAdvisorAnalysisLegacy(AdvisorMode::Development) {}
+
+  bool doInitialization(Module &M) override {
+    Provider = std::make_unique(
+        M.getContext());
+    return false;
+  }
+
+  void logRewardIfNeeded(const MachineFunction &MF,
+                         llvm::function_ref GetReward) override {
+    Provider->logRewardIfNeeded(MF, GetReward);
+  }
+
+  // support for isa<> and dyn_cast.
+  static bool classof(const RegAllocEvictionAdvisorAnalysisLegacy *R) {
+    return R->getAdvisorMode() == AdvisorMode::Development;
+  }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.addRequired();
+    AU.addRequired();
+    RegAllocEvictionAdvisorAnalysisLegacy::getAnalysisUsage(AU);
+  }
+};
+
 #endif // #ifdef LLVM_HAVE_TFLITE
 } // namespace

@@ -1127,8 +1172,9 @@ void llvm::extractMBBFrequency(
 // Development mode-specific implementations
 #ifdef LLVM_HAVE_TFLITE

-RegAllocEvictionAdvisorAnalysis *llvm::createDevelopmentModeAdvisor() {
-  return new DevelopmentModeEvictionAdvisorAnalysis();
+RegAllocEvictionAdvisorAnalysisLegacy *
+llvm::createDevelopmentModeAdvisorAnalysisLegacy() {
+  return new DevelopmentModeEvictionAdvisorAnalysisLegacy();
 }

 int64_t DevelopmentModeEvictAdvisor::tryFindEvictionCandidatePosition(
@@ -1194,18 +1240,32 @@ bool RegAllocScoring::runOnMachineFunction(MachineFunction &MF) {
     return *CachedReward;
   };

-  getAnalysis().logRewardIfNeeded(MF,
-                                  GetReward);
+  getAnalysis().logRewardIfNeeded(
+      MF, GetReward);
   getAnalysis().logRewardIfNeeded(MF,
                                   GetReward);
   return false;
 }
 #endif // #ifdef LLVM_HAVE_TFLITE

-RegAllocEvictionAdvisorAnalysis *llvm::createReleaseModeAdvisor() {
+RegAllocEvictionAdvisorProvider *
+llvm::createReleaseModeAdvisorProvider(LLVMContext &Ctx) {
+  return new ReleaseModeEvictionAdvisorProvider(Ctx);
+}
+
+RegAllocEvictionAdvisorProvider *
+llvm::createDevelopmentModeAdvisorProvider(LLVMContext &Ctx) {
+#if defined(LLVM_HAVE_TFLITE)
+  return new DevelopmentModeEvictionAdvisorProvider(Ctx);
+#endif
+  return nullptr;
+}
+
+RegAllocEvictionAdvisorAnalysisLegacy *
+llvm::createReleaseModeAdvisorAnalysisLegacy() {
   return llvm::isEmbeddedModelEvaluatorValid() ||
                 !InteractiveChannelBaseName.empty()
-             ?
new ReleaseModeEvictionAdvisorAnalysisLegacy() : nullptr; } diff --git a/llvm/lib/CodeGen/RegAllocEvictionAdvisor.cpp b/llvm/lib/CodeGen/RegAllocEvictionAdvisor.cpp index a1f441ebd0d5e..2369615ef0fb6 100644 --- a/llvm/lib/CodeGen/RegAllocEvictionAdvisor.cpp +++ b/llvm/lib/CodeGen/RegAllocEvictionAdvisor.cpp @@ -9,12 +9,14 @@ // Implementation of the default eviction advisor and of the Analysis pass. // //===----------------------------------------------------------------------===// - -#include "RegAllocEvictionAdvisor.h" +#include "llvm/CodeGen/RegAllocEvictionAdvisor.h" #include "AllocationOrder.h" #include "RegAllocGreedy.h" +#include "RegAllocPriorityAdvisor.h" #include "llvm/CodeGen/LiveRegMatrix.h" +#include "llvm/CodeGen/MachineBlockFrequencyInfo.h" #include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineLoopInfo.h" #include "llvm/CodeGen/RegisterClassInfo.h" #include "llvm/CodeGen/VirtRegMap.h" #include "llvm/IR/Module.h" @@ -26,17 +28,18 @@ using namespace llvm; -static cl::opt Mode( +static cl::opt Mode( "regalloc-enable-advisor", cl::Hidden, - cl::init(RegAllocEvictionAdvisorAnalysis::AdvisorMode::Default), + cl::init(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default), cl::desc("Enable regalloc advisor mode"), cl::values( - clEnumValN(RegAllocEvictionAdvisorAnalysis::AdvisorMode::Default, + clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default, "default", "Default"), - clEnumValN(RegAllocEvictionAdvisorAnalysis::AdvisorMode::Release, + clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Release, "release", "precompiled"), - clEnumValN(RegAllocEvictionAdvisorAnalysis::AdvisorMode::Development, - "development", "for training"))); + clEnumValN( + RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Development, + "development", "for training"))); static cl::opt EnableLocalReassignment( "enable-local-reassign", cl::Hidden, @@ -59,59 +62,112 @@ cl::opt EvictInterferenceCutoff( #define LLVM_HAVE_TF_AOT #endif -char RegAllocEvictionAdvisorAnalysis::ID = 0; -INITIALIZE_PASS(RegAllocEvictionAdvisorAnalysis, "regalloc-evict", +char RegAllocEvictionAdvisorAnalysisLegacy::ID = 0; +INITIALIZE_PASS(RegAllocEvictionAdvisorAnalysisLegacy, "regalloc-evict", "Regalloc eviction policy", false, true) namespace { -class DefaultEvictionAdvisorAnalysis final - : public RegAllocEvictionAdvisorAnalysis { +class DefaultEvictionAdvisorProvider final + : public RegAllocEvictionAdvisorProvider { public: - DefaultEvictionAdvisorAnalysis(bool NotAsRequested) - : RegAllocEvictionAdvisorAnalysis(AdvisorMode::Default), - NotAsRequested(NotAsRequested) {} + DefaultEvictionAdvisorProvider(bool NotAsRequested, LLVMContext &Ctx) + : RegAllocEvictionAdvisorProvider(AdvisorMode::Default, Ctx) { + if (NotAsRequested) + Ctx.emitError("Requested regalloc eviction advisor analysis " + "could not be created. Using default"); + } // support for isa<> and dyn_cast. 
- static bool classof(const RegAllocEvictionAdvisorAnalysis *R) { + static bool classof(const RegAllocEvictionAdvisorProvider *R) { return R->getAdvisorMode() == AdvisorMode::Default; } -private: std::unique_ptr - getAdvisor(const MachineFunction &MF, const RAGreedy &RA) override { + getAdvisor(const MachineFunction &MF, const RAGreedy &RA, + MachineBlockFrequencyInfo *, MachineLoopInfo *) override { return std::make_unique(MF, RA); } +}; + +class DefaultEvictionAdvisorAnalysisLegacy final + : public RegAllocEvictionAdvisorAnalysisLegacy { +public: + DefaultEvictionAdvisorAnalysisLegacy(bool NotAsRequested) + : RegAllocEvictionAdvisorAnalysisLegacy(AdvisorMode::Default), + NotAsRequested(NotAsRequested) {} + bool doInitialization(Module &M) override { - if (NotAsRequested) - M.getContext().emitError("Requested regalloc eviction advisor analysis " - "could not be created. Using default"); - return RegAllocEvictionAdvisorAnalysis::doInitialization(M); + Provider.reset( + new DefaultEvictionAdvisorProvider(NotAsRequested, M.getContext())); + return false; + } + + // support for isa<> and dyn_cast. + static bool classof(const RegAllocEvictionAdvisorAnalysisLegacy *R) { + return R->getAdvisorMode() == AdvisorMode::Default; } + +private: const bool NotAsRequested; }; } // namespace -template <> Pass *llvm::callDefaultCtor() { - Pass *Ret = nullptr; +AnalysisKey RegAllocEvictionAdvisorAnalysis::Key; + +void RegAllocEvictionAdvisorAnalysis::initializeProvider( + RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode Mode, LLVMContext &Ctx) { + if (Provider) + return; + switch (Mode) { + case RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default: + Provider.reset( + new DefaultEvictionAdvisorProvider(/*NotAsRequested=*/false, Ctx)); + return; + case RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Development: +#if defined(LLVM_HAVE_TFLITE) + Provider.reset(createDevelopmentModeAdvisorProvider(Ctx)); +#else + Provider.reset( + new DefaultEvictionAdvisorProvider(/*NotAsRequested=*/true, Ctx)); +#endif + return; + case RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Release: + Provider.reset(createReleaseModeAdvisorProvider(Ctx)); + return; + } +} + +RegAllocEvictionAdvisorAnalysis::Result +RegAllocEvictionAdvisorAnalysis::run(MachineFunction &MF, + MachineFunctionAnalysisManager &MFAM) { + // Lazy initialization of the provider. 
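+  // ::Mode is the -regalloc-enable-advisor command-line option; the provider
+  // is created on first use and then reused for every machine function.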
+ initializeProvider(::Mode, MF.getFunction().getContext()); + return Result{Provider.get()}; +} + +template <> +Pass *llvm::callDefaultCtor() { switch (Mode) { - case RegAllocEvictionAdvisorAnalysis::AdvisorMode::Default: - Ret = new DefaultEvictionAdvisorAnalysis(/*NotAsRequested*/ false); - break; - case RegAllocEvictionAdvisorAnalysis::AdvisorMode::Development: + case RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default: + return new DefaultEvictionAdvisorAnalysisLegacy(/*NotAsRequested=*/false); + case RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Release: { + Pass *Ret = createReleaseModeAdvisorAnalysisLegacy(); + // release mode advisor may not be supported + if (Ret) + return Ret; + return new DefaultEvictionAdvisorAnalysisLegacy(/*NotAsRequested=*/true); + } + case RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Development: #if defined(LLVM_HAVE_TFLITE) - Ret = createDevelopmentModeAdvisor(); + return createDevelopmentModeAdvisorAnalysisLegacy(); +#else + return new DefaultEvictionAdvisorAnalysisLegacy(/*NotAsRequested=*/true); #endif - break; - case RegAllocEvictionAdvisorAnalysis::AdvisorMode::Release: - Ret = createReleaseModeAdvisor(); - break; } - if (Ret) - return Ret; - return new DefaultEvictionAdvisorAnalysis(/*NotAsRequested*/ true); + llvm_unreachable("unexpected advisor mode"); } -StringRef RegAllocEvictionAdvisorAnalysis::getPassName() const { +StringRef RegAllocEvictionAdvisorAnalysisLegacy::getPassName() const { switch (getAdvisorMode()) { case AdvisorMode::Default: return "Default Regalloc Eviction Advisor"; diff --git a/llvm/lib/CodeGen/RegAllocGreedy.cpp b/llvm/lib/CodeGen/RegAllocGreedy.cpp index 7c7eb2ad52b41..9318c1df0b5e2 100644 --- a/llvm/lib/CodeGen/RegAllocGreedy.cpp +++ b/llvm/lib/CodeGen/RegAllocGreedy.cpp @@ -15,7 +15,6 @@ #include "AllocationOrder.h" #include "InterferenceCache.h" #include "RegAllocBase.h" -#include "RegAllocEvictionAdvisor.h" #include "RegAllocPriorityAdvisor.h" #include "SplitKit.h" #include "llvm/ADT/ArrayRef.h" @@ -46,6 +45,7 @@ #include "llvm/CodeGen/MachineOperand.h" #include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h" #include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/RegAllocEvictionAdvisor.h" #include "llvm/CodeGen/RegAllocRegistry.h" #include "llvm/CodeGen/RegisterClassInfo.h" #include "llvm/CodeGen/SlotIndexes.h" @@ -164,7 +164,7 @@ INITIALIZE_PASS_DEPENDENCY(LiveRegMatrixWrapperLegacy) INITIALIZE_PASS_DEPENDENCY(EdgeBundlesWrapperLegacy) INITIALIZE_PASS_DEPENDENCY(SpillPlacementWrapperLegacy) INITIALIZE_PASS_DEPENDENCY(MachineOptimizationRemarkEmitterPass) -INITIALIZE_PASS_DEPENDENCY(RegAllocEvictionAdvisorAnalysis) +INITIALIZE_PASS_DEPENDENCY(RegAllocEvictionAdvisorAnalysisLegacy) INITIALIZE_PASS_DEPENDENCY(RegAllocPriorityAdvisorAnalysis) INITIALIZE_PASS_END(RAGreedy, "greedy", "Greedy Register Allocator", false, false) @@ -219,7 +219,7 @@ void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const { AU.addRequired(); AU.addRequired(); AU.addRequired(); - AU.addRequired(); + AU.addRequired(); AU.addRequired(); MachineFunctionPass::getAnalysisUsage(AU); } @@ -2765,8 +2765,11 @@ bool RAGreedy::runOnMachineFunction(MachineFunction &mf) { : TRI->reverseLocalAssignment(); ExtraInfo.emplace(); - EvictAdvisor = - getAnalysis().getAdvisor(*MF, *this); + + auto &EvictAdvisorProvider = + getAnalysis().getProvider(); + EvictAdvisor = EvictAdvisorProvider.getAdvisor(*MF, *this, MBFI, Loops); + PriorityAdvisor = getAnalysis().getAdvisor(*MF, *this); diff --git a/llvm/lib/CodeGen/RegAllocGreedy.h 
b/llvm/lib/CodeGen/RegAllocGreedy.h index e1ec63b4a5296..1d55a8241d760 100644 --- a/llvm/lib/CodeGen/RegAllocGreedy.h +++ b/llvm/lib/CodeGen/RegAllocGreedy.h @@ -14,7 +14,6 @@ #include "InterferenceCache.h" #include "RegAllocBase.h" -#include "RegAllocEvictionAdvisor.h" #include "RegAllocPriorityAdvisor.h" #include "SplitKit.h" #include "llvm/ADT/ArrayRef.h" diff --git a/llvm/lib/CodeGen/RegAllocPriorityAdvisor.h b/llvm/lib/CodeGen/RegAllocPriorityAdvisor.h index 32e4598b71539..0758743c2b140 100644 --- a/llvm/lib/CodeGen/RegAllocPriorityAdvisor.h +++ b/llvm/lib/CodeGen/RegAllocPriorityAdvisor.h @@ -9,7 +9,7 @@ #ifndef LLVM_CODEGEN_REGALLOCPRIORITYADVISOR_H #define LLVM_CODEGEN_REGALLOCPRIORITYADVISOR_H -#include "RegAllocEvictionAdvisor.h" +#include "llvm/CodeGen/RegAllocEvictionAdvisor.h" #include "llvm/CodeGen/SlotIndexes.h" #include "llvm/Pass.h" diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp index a0f29496df777..204b323d7084a 100644 --- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp @@ -159,6 +159,11 @@ void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) { Res = PromoteIntRes_VECTOR_FIND_LAST_ACTIVE(N); break; + case ISD::PARTIAL_REDUCE_UMLA: + case ISD::PARTIAL_REDUCE_SMLA: + Res = PromoteIntRes_PARTIAL_REDUCE_MLA(N); + break; + case ISD::SIGN_EXTEND: case ISD::VP_SIGN_EXTEND: case ISD::ZERO_EXTEND: @@ -2099,6 +2104,10 @@ bool DAGTypeLegalizer::PromoteIntegerOperand(SDNode *N, unsigned OpNo) { case ISD::VECTOR_FIND_LAST_ACTIVE: Res = PromoteIntOp_VECTOR_FIND_LAST_ACTIVE(N, OpNo); break; + case ISD::PARTIAL_REDUCE_UMLA: + case ISD::PARTIAL_REDUCE_SMLA: + Res = PromoteIntOp_PARTIAL_REDUCE_MLA(N); + break; } // If the result is null, the sub-method took care of registering results etc. 
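// Editor's illustration (assumed types, not from this patch): on a target
// that promotes i8 vectors, operand promotion turns
//   v4i32 = PARTIAL_REDUCE_SMLA v4i32:acc, v16i8:a, v16i8:b
// into the same node with sign-extended multiplicands, e.g.
//   v4i32 = PARTIAL_REDUCE_SMLA v4i32:acc, v16i32:sext(a), v16i32:sext(b)
// while the PARTIAL_REDUCE_UMLA form zero-extends the two inputs instead.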
@@ -2881,6 +2890,18 @@ SDValue DAGTypeLegalizer::PromoteIntOp_VECTOR_FIND_LAST_ACTIVE(SDNode *N, return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0); } +SDValue DAGTypeLegalizer::PromoteIntOp_PARTIAL_REDUCE_MLA(SDNode *N) { + SmallVector NewOps(N->ops()); + if (N->getOpcode() == ISD::PARTIAL_REDUCE_SMLA) { + NewOps[1] = SExtPromotedInteger(N->getOperand(1)); + NewOps[2] = SExtPromotedInteger(N->getOperand(2)); + } else { + NewOps[1] = ZExtPromotedInteger(N->getOperand(1)); + NewOps[2] = ZExtPromotedInteger(N->getOperand(2)); + } + return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0); +} + //===----------------------------------------------------------------------===// // Integer Result Expansion //===----------------------------------------------------------------------===// @@ -6200,6 +6221,15 @@ SDValue DAGTypeLegalizer::PromoteIntRes_VECTOR_FIND_LAST_ACTIVE(SDNode *N) { return DAG.getNode(ISD::VECTOR_FIND_LAST_ACTIVE, SDLoc(N), NVT, N->ops()); } +SDValue DAGTypeLegalizer::PromoteIntRes_PARTIAL_REDUCE_MLA(SDNode *N) { + SDLoc DL(N); + EVT VT = N->getValueType(0); + EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT); + SDValue ExtAcc = GetPromotedInteger(N->getOperand(0)); + return DAG.getNode(N->getOpcode(), DL, NVT, ExtAcc, N->getOperand(1), + N->getOperand(2)); +} + SDValue DAGTypeLegalizer::PromoteIntRes_INSERT_VECTOR_ELT(SDNode *N) { EVT OutVT = N->getValueType(0); EVT NOutVT = TLI.getTypeToTransformTo(*DAG.getContext(), OutVT); diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h index b58c160b5c8b8..69c687a797485 100644 --- a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h +++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h @@ -379,6 +379,7 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer { SDValue PromoteIntRes_IS_FPCLASS(SDNode *N); SDValue PromoteIntRes_PATCHPOINT(SDNode *N); SDValue PromoteIntRes_VECTOR_FIND_LAST_ACTIVE(SDNode *N); + SDValue PromoteIntRes_PARTIAL_REDUCE_MLA(SDNode *N); // Integer Operand Promotion. bool PromoteIntegerOperand(SDNode *N, unsigned OpNo); @@ -430,6 +431,7 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer { SDValue PromoteIntOp_VP_SPLICE(SDNode *N, unsigned OpNo); SDValue PromoteIntOp_VECTOR_HISTOGRAM(SDNode *N, unsigned OpNo); SDValue PromoteIntOp_VECTOR_FIND_LAST_ACTIVE(SDNode *N, unsigned OpNo); + SDValue PromoteIntOp_PARTIAL_REDUCE_MLA(SDNode *N); void SExtOrZExtPromotedOperands(SDValue &LHS, SDValue &RHS); void PromoteSetCCOperands(SDValue &LHS,SDValue &RHS, ISD::CondCode Code); @@ -969,6 +971,7 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer { void SplitVecRes_VAARG(SDNode *N, SDValue &Lo, SDValue &Hi); void SplitVecRes_FP_TO_XINT_SAT(SDNode *N, SDValue &Lo, SDValue &Hi); void SplitVecRes_VP_REVERSE(SDNode *N, SDValue &Lo, SDValue &Hi); + void SplitVecRes_PARTIAL_REDUCE_MLA(SDNode *N, SDValue &Lo, SDValue &Hi); // Vector Operand Splitting: <128 x ty> -> 2 x <64 x ty>. 
bool SplitVectorOperand(SDNode *N, unsigned OpNo); @@ -1000,6 +1003,7 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer { SDValue SplitVecOp_FP_TO_XINT_SAT(SDNode *N); SDValue SplitVecOp_VP_CttzElements(SDNode *N); SDValue SplitVecOp_VECTOR_HISTOGRAM(SDNode *N); + SDValue SplitVecOp_PARTIAL_REDUCE_MLA(SDNode *N); //===--------------------------------------------------------------------===// // Vector Widening Support: LegalizeVectorTypes.cpp diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp index 7e8bae4b0f785..de4447fb0cf1a 100644 --- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp @@ -469,6 +469,8 @@ SDValue VectorLegalizer::LegalizeOp(SDValue Op) { case ISD::VECTOR_COMPRESS: case ISD::SCMP: case ISD::UCMP: + case ISD::PARTIAL_REDUCE_UMLA: + case ISD::PARTIAL_REDUCE_SMLA: Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0)); break; case ISD::SMULFIX: @@ -1197,6 +1199,10 @@ void VectorLegalizer::Expand(SDNode *Node, SmallVectorImpl &Results) { case ISD::VECREDUCE_FMINIMUM: Results.push_back(TLI.expandVecReduce(Node, DAG)); return; + case ISD::PARTIAL_REDUCE_UMLA: + case ISD::PARTIAL_REDUCE_SMLA: + Results.push_back(TLI.expandPartialReduceMLA(Node, DAG)); + return; case ISD::VECREDUCE_SEQ_FADD: case ISD::VECREDUCE_SEQ_FMUL: Results.push_back(TLI.expandVecReduceSeq(Node, DAG)); diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp index 1d8bf5427156e..9d42ec2fdf859 100644 --- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp @@ -1395,6 +1395,10 @@ void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) { case ISD::EXPERIMENTAL_VP_REVERSE: SplitVecRes_VP_REVERSE(N, Lo, Hi); break; + case ISD::PARTIAL_REDUCE_UMLA: + case ISD::PARTIAL_REDUCE_SMLA: + SplitVecRes_PARTIAL_REDUCE_MLA(N, Lo, Hi); + break; } // If Lo/Hi is null, the sub-method took care of registering results etc. @@ -3213,6 +3217,13 @@ void DAGTypeLegalizer::SplitVecRes_VP_REVERSE(SDNode *N, SDValue &Lo, std::tie(Lo, Hi) = DAG.SplitVector(Load, DL); } +void DAGTypeLegalizer::SplitVecRes_PARTIAL_REDUCE_MLA(SDNode *N, SDValue &Lo, + SDValue &Hi) { + SDLoc DL(N); + SDValue Expanded = TLI.expandPartialReduceMLA(N, DAG); + std::tie(Lo, Hi) = DAG.SplitVector(Expanded, DL); +} + void DAGTypeLegalizer::SplitVecRes_VECTOR_DEINTERLEAVE(SDNode *N) { unsigned Factor = N->getNumOperands(); @@ -3431,6 +3442,10 @@ bool DAGTypeLegalizer::SplitVectorOperand(SDNode *N, unsigned OpNo) { case ISD::EXPERIMENTAL_VECTOR_HISTOGRAM: Res = SplitVecOp_VECTOR_HISTOGRAM(N); break; + case ISD::PARTIAL_REDUCE_UMLA: + case ISD::PARTIAL_REDUCE_SMLA: + Res = SplitVecOp_PARTIAL_REDUCE_MLA(N); + break; } // If the result is null, the sub-method took care of registering results etc. 
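// Editor's note: the new PARTIAL_REDUCE_MLA result- and operand-splitting
// paths legalize by expansion rather than by splitting the node itself.
// With assumed types,
//   v8i32 = PARTIAL_REDUCE_UMLA v8i32:acc, v32i8:a, v32i8:b
// is first rewritten by TLI.expandPartialReduceMLA into extends, a multiply,
// EXTRACT_SUBVECTORs, and an ADD tree; only the resulting v8i32 value is
// then split into two v4i32 halves.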
@@ -4485,6 +4500,10 @@ SDValue DAGTypeLegalizer::SplitVecOp_VECTOR_HISTOGRAM(SDNode *N) { MMO, IndexType); } +SDValue DAGTypeLegalizer::SplitVecOp_PARTIAL_REDUCE_MLA(SDNode *N) { + return TLI.expandPartialReduceMLA(N, DAG); +} + //===----------------------------------------------------------------------===// // Result Vector Widening //===----------------------------------------------------------------------===// diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp index 9d2f87497d6fa..80c2de1d99542 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp @@ -2474,35 +2474,6 @@ SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) { return getZExtOrTrunc(Op, SDLoc(Op), ShTy); } -SDValue SelectionDAG::getPartialReduceAdd(SDLoc DL, EVT ReducedTy, SDValue Op1, - SDValue Op2) { - EVT FullTy = Op2.getValueType(); - - unsigned Stride = ReducedTy.getVectorMinNumElements(); - unsigned ScaleFactor = FullTy.getVectorMinNumElements() / Stride; - - // Collect all of the subvectors - std::deque Subvectors = {Op1}; - for (unsigned I = 0; I < ScaleFactor; I++) { - auto SourceIndex = getVectorIdxConstant(I * Stride, DL); - Subvectors.push_back( - getNode(ISD::EXTRACT_SUBVECTOR, DL, ReducedTy, {Op2, SourceIndex})); - } - - // Flatten the subvector tree - while (Subvectors.size() > 1) { - Subvectors.push_back( - getNode(ISD::ADD, DL, ReducedTy, {Subvectors[0], Subvectors[1]})); - Subvectors.pop_front(); - Subvectors.pop_front(); - } - - assert(Subvectors.size() == 1 && - "There should only be one subvector after tree flattening"); - - return Subvectors[0]; -} - /// Given a store node \p StoreNode, return true if it is safe to fold that node /// into \p FPNode, which expands to a library call with output pointers. static bool canFoldStoreIntoLibCallOutputPointers(StoreSDNode *StoreNode, @@ -7883,6 +7854,28 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, break; } + case ISD::PARTIAL_REDUCE_UMLA: + case ISD::PARTIAL_REDUCE_SMLA: { + [[maybe_unused]] EVT AccVT = N1.getValueType(); + [[maybe_unused]] EVT Input1VT = N2.getValueType(); + [[maybe_unused]] EVT Input2VT = N3.getValueType(); + assert(Input1VT.isVector() && Input1VT == Input2VT && + "Expected the second and third operands of the PARTIAL_REDUCE_MLA " + "node to have the same type!"); + assert(VT.isVector() && VT == AccVT && + "Expected the first operand of the PARTIAL_REDUCE_MLA node to have " + "the same type as its result!"); + assert(Input1VT.getVectorElementCount().hasKnownScalarFactor( + AccVT.getVectorElementCount()) && + "Expected the element count of the second and third operands of the " + "PARTIAL_REDUCE_MLA node to be a positive integer multiple of the " + "element count of the first operand and the result!"); + assert(N2.getScalarValueSizeInBits() <= N1.getScalarValueSizeInBits() && + "Expected the second and third operands of the PARTIAL_REDUCE_MLA " + "node to have an element type which is the same as or smaller than " + "the element type of the first operand and result!"); + break; + } } // Memoize node if it doesn't produce a glue result. 
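// Editor's sketch of the operand rules the new getNode() assertions encode
// (types are illustrative):
//   v4i32 = PARTIAL_REDUCE_UMLA v4i32:acc, v16i8:a, v16i8:b   // OK: 16 is a
//       multiple of 4 and i8 is no wider than i32.
//   v4i32 = PARTIAL_REDUCE_UMLA v4i32:acc, v16i64:a, v16i64:b // rejected:
//       i64 is wider than the accumulator's element type.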
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp index 78a6e24e5b8d2..1c58a7f05446c 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -8115,15 +8115,15 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, return; } case Intrinsic::experimental_vector_partial_reduce_add: { - if (!TLI.shouldExpandPartialReductionIntrinsic(cast(&I))) { visitTargetIntrinsic(I, Intrinsic); return; } - - setValue(&I, DAG.getPartialReduceAdd(sdl, EVT::getEVT(I.getType()), - getValue(I.getOperand(0)), - getValue(I.getOperand(1)))); + SDValue Acc = getValue(I.getOperand(0)); + SDValue Input = getValue(I.getOperand(1)); + setValue(&I, + DAG.getNode(ISD::PARTIAL_REDUCE_UMLA, sdl, Acc.getValueType(), Acc, + Input, DAG.getConstant(1, sdl, Input.getValueType()))); return; } case Intrinsic::experimental_cttz_elts: { diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp index 8de537173e52c..8457bee3f665b 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp @@ -569,6 +569,11 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const { case ISD::VECTOR_FIND_LAST_ACTIVE: return "find_last_active"; + case ISD::PARTIAL_REDUCE_UMLA: + return "partial_reduce_umla"; + case ISD::PARTIAL_REDUCE_SMLA: + return "partial_reduce_smla"; + // Vector Predication #define BEGIN_REGISTER_VP_SDNODE(SDID, LEGALARG, NAME, ...) \ case ISD::SDID: \ diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp index adfb96041c5c0..7771958f5adc9 100644 --- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp @@ -34,6 +34,7 @@ #include "llvm/Support/MathExtras.h" #include "llvm/Target/TargetMachine.h" #include +#include using namespace llvm; /// NOTE: The TargetMachine owns TLOF. @@ -11890,6 +11891,57 @@ SDValue TargetLowering::expandVECTOR_COMPRESS(SDNode *Node, return DAG.getLoad(VecVT, DL, Chain, StackPtr, PtrInfo); } +SDValue TargetLowering::expandPartialReduceMLA(SDNode *N, + SelectionDAG &DAG) const { + SDLoc DL(N); + SDValue Acc = N->getOperand(0); + SDValue MulLHS = N->getOperand(1); + SDValue MulRHS = N->getOperand(2); + EVT AccVT = Acc.getValueType(); + EVT MulOpVT = MulLHS.getValueType(); + + EVT ExtMulOpVT = + EVT::getVectorVT(*DAG.getContext(), AccVT.getVectorElementType(), + MulOpVT.getVectorElementCount()); + unsigned ExtOpc = N->getOpcode() == ISD::PARTIAL_REDUCE_SMLA + ? 
ISD::SIGN_EXTEND + : ISD::ZERO_EXTEND; + + if (ExtMulOpVT != MulOpVT) { + MulLHS = DAG.getNode(ExtOpc, DL, ExtMulOpVT, MulLHS); + MulRHS = DAG.getNode(ExtOpc, DL, ExtMulOpVT, MulRHS); + } + SDValue Input = MulLHS; + APInt ConstantOne; + if (!ISD::isConstantSplatVector(MulRHS.getNode(), ConstantOne) || + !ConstantOne.isOne()) + Input = DAG.getNode(ISD::MUL, DL, ExtMulOpVT, MulLHS, MulRHS); + + unsigned Stride = AccVT.getVectorMinNumElements(); + unsigned ScaleFactor = MulOpVT.getVectorMinNumElements() / Stride; + + // Collect all of the subvectors + std::deque Subvectors = {Acc}; + for (unsigned I = 0; I < ScaleFactor; I++) { + auto SourceIndex = DAG.getVectorIdxConstant(I * Stride, DL); + Subvectors.push_back( + DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, AccVT, {Input, SourceIndex})); + } + + // Flatten the subvector tree + while (Subvectors.size() > 1) { + Subvectors.push_back( + DAG.getNode(ISD::ADD, DL, AccVT, {Subvectors[0], Subvectors[1]})); + Subvectors.pop_front(); + Subvectors.pop_front(); + } + + assert(Subvectors.size() == 1 && + "There should only be one subvector after tree flattening"); + + return Subvectors[0]; +} + bool TargetLowering::LegalizeSetCCCondCode(SelectionDAG &DAG, EVT VT, SDValue &LHS, SDValue &RHS, SDValue &CC, SDValue Mask, diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp index 51cde7ce139e2..f5ea3c0b47d6a 100644 --- a/llvm/lib/CodeGen/TargetLoweringBase.cpp +++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp @@ -835,6 +835,10 @@ void TargetLoweringBase::initActions() { setOperationAction(ISD::GET_FPENV, VT, Expand); setOperationAction(ISD::SET_FPENV, VT, Expand); setOperationAction(ISD::RESET_FPENV, VT, Expand); + + // PartialReduceMLA operations default to expand. + setOperationAction({ISD::PARTIAL_REDUCE_UMLA, ISD::PARTIAL_REDUCE_SMLA}, VT, + Expand); } // Most targets ignore the @llvm.prefetch intrinsic. diff --git a/llvm/lib/ExecutionEngine/Orc/ELFNixPlatform.cpp b/llvm/lib/ExecutionEngine/Orc/ELFNixPlatform.cpp index 7f0a45941cf9b..9ac8c5ef66de6 100644 --- a/llvm/lib/ExecutionEngine/Orc/ELFNixPlatform.cpp +++ b/llvm/lib/ExecutionEngine/Orc/ELFNixPlatform.cpp @@ -469,11 +469,12 @@ void ELFNixPlatform::pushInitializersLoop( Worklist.pop_back(); // If we've already visited this JITDylib on this iteration then continue. - if (JDDepMap.count(DepJD)) + auto [It, Inserted] = JDDepMap.try_emplace(DepJD); + if (!Inserted) continue; // Add dep info. - auto &DM = JDDepMap[DepJD]; + auto &DM = It->second; DepJD->withLinkOrderDo([&](const JITDylibSearchOrder &O) { for (auto &KV : O) { if (KV.first == DepJD) diff --git a/llvm/lib/ExecutionEngine/Orc/JITLinkRedirectableSymbolManager.cpp b/llvm/lib/ExecutionEngine/Orc/JITLinkRedirectableSymbolManager.cpp index 06c545d62d76a..b5b380971d204 100644 --- a/llvm/lib/ExecutionEngine/Orc/JITLinkRedirectableSymbolManager.cpp +++ b/llvm/lib/ExecutionEngine/Orc/JITLinkRedirectableSymbolManager.cpp @@ -47,7 +47,10 @@ void JITLinkRedirectableSymbolManager::emitRedirectableSymbols( Ptr.setScope(jitlink::Scope::Hidden); auto &Stub = PtrJumpStubCreator(*G, StubsSection, Ptr); Stub.setName(Name); - Stub.setScope(jitlink::Scope::Default); + Stub.setScope(Def.getFlags().isExported() ? jitlink::Scope::Default + : jitlink::Scope::Hidden); + Stub.setLinkage(!Def.getFlags().isWeak() ? 
jitlink::Linkage::Strong + : jitlink::Linkage::Weak); NewSymbols[std::move(PtrName)] = JITSymbolFlags(); } diff --git a/llvm/lib/ExecutionEngine/Orc/LazyReexports.cpp b/llvm/lib/ExecutionEngine/Orc/LazyReexports.cpp index 7b38621eba824..80f2a1304dde7 100644 --- a/llvm/lib/ExecutionEngine/Orc/LazyReexports.cpp +++ b/llvm/lib/ExecutionEngine/Orc/LazyReexports.cpp @@ -387,7 +387,7 @@ void LazyReexportsManager::emitRedirectableSymbols( SymbolMap Redirs; size_t I = 0; for (auto &[Name, AI] : Reexports) - Redirs[Name] = (*ReentryPoints)[I++]; + Redirs[Name] = {(*ReentryPoints)[I++].getAddress(), AI.AliasFlags}; I = 0; if (!Reexports.empty()) { diff --git a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp index 7ba23b0bd377e..18bc82fc827f7 100644 --- a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp +++ b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp @@ -6555,12 +6555,11 @@ OpenMPIRBuilder::InsertPointOrErrorTy OpenMPIRBuilder::createTargetData( const LocationDescription &Loc, InsertPointTy AllocaIP, InsertPointTy CodeGenIP, Value *DeviceID, Value *IfCond, TargetDataInfo &Info, GenMapInfoCallbackTy GenMapInfoCB, - omp::RuntimeFunction *MapperFunc, + CustomMapperCallbackTy CustomMapperCB, omp::RuntimeFunction *MapperFunc, function_ref BodyGenCB, - function_ref DeviceAddrCB, - function_ref CustomMapperCB, Value *SrcLocInfo) { + function_ref DeviceAddrCB, Value *SrcLocInfo) { if (!updateToLocation(Loc)) return InsertPointTy(); @@ -6585,9 +6584,10 @@ OpenMPIRBuilder::InsertPointOrErrorTy OpenMPIRBuilder::createTargetData( auto BeginThenGen = [&](InsertPointTy AllocaIP, InsertPointTy CodeGenIP) -> Error { MapInfo = &GenMapInfoCB(Builder.saveIP()); - emitOffloadingArrays(AllocaIP, Builder.saveIP(), *MapInfo, Info, - /*IsNonContiguous=*/true, DeviceAddrCB, - CustomMapperCB); + if (Error Err = emitOffloadingArrays( + AllocaIP, Builder.saveIP(), *MapInfo, Info, CustomMapperCB, + /*IsNonContiguous=*/true, DeviceAddrCB)) + return Err; TargetDataRTArgs RTArgs; emitOffloadingArraysArgument(Builder, RTArgs, Info); @@ -7486,26 +7486,31 @@ OpenMPIRBuilder::InsertPointOrErrorTy OpenMPIRBuilder::emitTargetTask( return Builder.saveIP(); } -void OpenMPIRBuilder::emitOffloadingArraysAndArgs( +Error OpenMPIRBuilder::emitOffloadingArraysAndArgs( InsertPointTy AllocaIP, InsertPointTy CodeGenIP, TargetDataInfo &Info, - TargetDataRTArgs &RTArgs, MapInfosTy &CombinedInfo, bool IsNonContiguous, - bool ForEndCall, function_ref DeviceAddrCB, - function_ref CustomMapperCB) { - emitOffloadingArrays(AllocaIP, CodeGenIP, CombinedInfo, Info, IsNonContiguous, - DeviceAddrCB, CustomMapperCB); + TargetDataRTArgs &RTArgs, MapInfosTy &CombinedInfo, + CustomMapperCallbackTy CustomMapperCB, bool IsNonContiguous, + bool ForEndCall, function_ref DeviceAddrCB) { + if (Error Err = + emitOffloadingArrays(AllocaIP, CodeGenIP, CombinedInfo, Info, + CustomMapperCB, IsNonContiguous, DeviceAddrCB)) + return Err; emitOffloadingArraysArgument(Builder, RTArgs, Info, ForEndCall); + return Error::success(); } static void emitTargetCall(OpenMPIRBuilder &OMPBuilder, IRBuilderBase &Builder, OpenMPIRBuilder::InsertPointTy AllocaIP, + OpenMPIRBuilder::TargetDataInfo &Info, const OpenMPIRBuilder::TargetKernelDefaultAttrs &DefaultAttrs, const OpenMPIRBuilder::TargetKernelRuntimeAttrs &RuntimeAttrs, Value *IfCond, Function *OutlinedFn, Constant *OutlinedFnID, SmallVectorImpl &Args, OpenMPIRBuilder::GenMapInfoCallbackTy GenMapInfoCB, - SmallVector Dependencies = {}, - bool HasNoWait = false) { + OpenMPIRBuilder::CustomMapperCallbackTy 
CustomMapperCB, + SmallVector Dependencies, + bool HasNoWait) { // Generate a function call to the host fallback implementation of the target // region. This is called by the host when no offload entry was generated for // the target region and when the offloading call fails at runtime. @@ -7576,16 +7581,13 @@ emitTargetCall(OpenMPIRBuilder &OMPBuilder, IRBuilderBase &Builder, auto &&EmitTargetCallThen = [&](OpenMPIRBuilder::InsertPointTy AllocaIP, OpenMPIRBuilder::InsertPointTy CodeGenIP) -> Error { - OpenMPIRBuilder::TargetDataInfo Info( - /*RequiresDevicePointerInfo=*/false, - /*SeparateBeginEndCalls=*/true); - OpenMPIRBuilder::MapInfosTy &MapInfo = GenMapInfoCB(Builder.saveIP()); OpenMPIRBuilder::TargetDataRTArgs RTArgs; - OMPBuilder.emitOffloadingArraysAndArgs(AllocaIP, Builder.saveIP(), Info, - RTArgs, MapInfo, - /*IsNonContiguous=*/true, - /*ForEndCall=*/false); + if (Error Err = OMPBuilder.emitOffloadingArraysAndArgs( + AllocaIP, Builder.saveIP(), Info, RTArgs, MapInfo, CustomMapperCB, + /*IsNonContiguous=*/true, + /*ForEndCall=*/false)) + return Err; SmallVector NumTeamsC; for (auto [DefaultVal, RuntimeVal] : @@ -7687,13 +7689,15 @@ emitTargetCall(OpenMPIRBuilder &OMPBuilder, IRBuilderBase &Builder, OpenMPIRBuilder::InsertPointOrErrorTy OpenMPIRBuilder::createTarget( const LocationDescription &Loc, bool IsOffloadEntry, InsertPointTy AllocaIP, - InsertPointTy CodeGenIP, TargetRegionEntryInfo &EntryInfo, + InsertPointTy CodeGenIP, TargetDataInfo &Info, + TargetRegionEntryInfo &EntryInfo, const TargetKernelDefaultAttrs &DefaultAttrs, const TargetKernelRuntimeAttrs &RuntimeAttrs, Value *IfCond, - SmallVectorImpl &Args, GenMapInfoCallbackTy GenMapInfoCB, + SmallVectorImpl &Inputs, GenMapInfoCallbackTy GenMapInfoCB, OpenMPIRBuilder::TargetBodyGenCallbackTy CBFunc, OpenMPIRBuilder::TargetGenArgAccessorsCallbackTy ArgAccessorFuncCB, - SmallVector Dependencies, bool HasNowait) { + CustomMapperCallbackTy CustomMapperCB, SmallVector Dependencies, + bool HasNowait) { if (!updateToLocation(Loc)) return InsertPointTy(); @@ -7707,16 +7711,16 @@ OpenMPIRBuilder::InsertPointOrErrorTy OpenMPIRBuilder::createTarget( // and ArgAccessorFuncCB if (Error Err = emitTargetOutlinedFunction( *this, Builder, IsOffloadEntry, EntryInfo, DefaultAttrs, OutlinedFn, - OutlinedFnID, Args, CBFunc, ArgAccessorFuncCB)) + OutlinedFnID, Inputs, CBFunc, ArgAccessorFuncCB)) return Err; // If we are not on the target device, then we need to generate code // to make a remote call (offload) to the previously outlined function // that represents the target region. Do that now. 
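The OMPIRBuilder hunks above all apply the same plumbing change: callbacks that used to hand back raw pointers now return llvm::Expected, and the emitters return llvm::Error, so a failing mapper lookup propagates instead of being silently dropped. A minimal sketch of how a caller consumes such a callback (the names here are illustrative stand-ins, not the actual OMPIRBuilder API):

```cpp
#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/Support/Error.h"

using namespace llvm;

// Hypothetical stand-in for CustomMapperCallbackTy: the callback may fail,
// and may legitimately yield no custom mapper (a null result) for an index.
using MapperCB = function_ref<Expected<void *>(unsigned)>;

static Error emitMappers(MapperCB CB, unsigned NumMaps) {
  for (unsigned I = 0; I != NumMaps; ++I) {
    Expected<void *> Mapper = CB(I);
    if (!Mapper)
      return Mapper.takeError(); // propagate the failure to the caller
    if (*Mapper) {
      // A custom mapper exists; the builder would emit a call to it here.
    }
  }
  return Error::success(); // nothing went wrong
}
```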
if (!Config.isTargetDevice()) - emitTargetCall(*this, Builder, AllocaIP, DefaultAttrs, RuntimeAttrs, IfCond, - OutlinedFn, OutlinedFnID, Args, GenMapInfoCB, Dependencies, - HasNowait); + emitTargetCall(*this, Builder, AllocaIP, Info, DefaultAttrs, RuntimeAttrs, + IfCond, OutlinedFn, OutlinedFnID, Inputs, GenMapInfoCB, + CustomMapperCB, Dependencies, HasNowait); return Builder.saveIP(); } @@ -8041,12 +8045,11 @@ void OpenMPIRBuilder::emitUDMapperArrayInitOrDel( OffloadingArgs); } -Function *OpenMPIRBuilder::emitUserDefinedMapper( - function_ref +Expected OpenMPIRBuilder::emitUserDefinedMapper( + function_ref GenMapInfoCB, - Type *ElemTy, StringRef FuncName, - function_ref CustomMapperCB) { + Type *ElemTy, StringRef FuncName, CustomMapperCallbackTy CustomMapperCB) { SmallVector Params; Params.emplace_back(Builder.getPtrTy()); Params.emplace_back(Builder.getPtrTy()); @@ -8117,7 +8120,9 @@ Function *OpenMPIRBuilder::emitUserDefinedMapper( PtrPHI->addIncoming(PtrBegin, HeadBB); // Get map clause information. Fill up the arrays with all mapped variables. - MapInfosTy &Info = GenMapInfoCB(Builder.saveIP(), PtrPHI, BeginIn); + MapInfosOrErrorTy Info = GenMapInfoCB(Builder.saveIP(), PtrPHI, BeginIn); + if (!Info) + return Info.takeError(); // Call the runtime API __tgt_mapper_num_components to get the number of // pre-existing components. @@ -8129,20 +8134,20 @@ Function *OpenMPIRBuilder::emitUserDefinedMapper( Builder.CreateShl(PreviousSize, Builder.getInt64(getFlagMemberOffset())); // Fill up the runtime mapper handle for all components. - for (unsigned I = 0; I < Info.BasePointers.size(); ++I) { + for (unsigned I = 0; I < Info->BasePointers.size(); ++I) { Value *CurBaseArg = - Builder.CreateBitCast(Info.BasePointers[I], Builder.getPtrTy()); + Builder.CreateBitCast(Info->BasePointers[I], Builder.getPtrTy()); Value *CurBeginArg = - Builder.CreateBitCast(Info.Pointers[I], Builder.getPtrTy()); - Value *CurSizeArg = Info.Sizes[I]; - Value *CurNameArg = Info.Names.size() - ? Info.Names[I] + Builder.CreateBitCast(Info->Pointers[I], Builder.getPtrTy()); + Value *CurSizeArg = Info->Sizes[I]; + Value *CurNameArg = Info->Names.size() + ? Info->Names[I] : Constant::getNullValue(Builder.getPtrTy()); // Extract the MEMBER_OF field from the map type. Value *OriMapType = Builder.getInt64( static_cast>( - Info.Types[I])); + Info->Types[I])); Value *MemberMapType = Builder.CreateNUWAdd(OriMapType, ShiftedPreviousSize); @@ -8224,10 +8229,13 @@ Function *OpenMPIRBuilder::emitUserDefinedMapper( Value *OffloadingArgs[] = {MapperHandle, CurBaseArg, CurBeginArg, CurSizeArg, CurMapType, CurNameArg}; - Function *ChildMapperFn = nullptr; - if (CustomMapperCB && CustomMapperCB(I, &ChildMapperFn)) { + + auto ChildMapperFn = CustomMapperCB(I); + if (!ChildMapperFn) + return ChildMapperFn.takeError(); + if (*ChildMapperFn) { // Call the corresponding mapper function. - Builder.CreateCall(ChildMapperFn, OffloadingArgs)->setDoesNotThrow(); + Builder.CreateCall(*ChildMapperFn, OffloadingArgs)->setDoesNotThrow(); } else { // Call the runtime API __tgt_push_mapper_component to fill up the runtime // data structure. 
@@ -8261,18 +8269,18 @@ Function *OpenMPIRBuilder::emitUserDefinedMapper(
   return MapperFn;
 }
 
-void OpenMPIRBuilder::emitOffloadingArrays(
+Error OpenMPIRBuilder::emitOffloadingArrays(
     InsertPointTy AllocaIP, InsertPointTy CodeGenIP, MapInfosTy &CombinedInfo,
-    TargetDataInfo &Info, bool IsNonContiguous,
-    function_ref DeviceAddrCB,
-    function_ref CustomMapperCB) {
+    TargetDataInfo &Info, CustomMapperCallbackTy CustomMapperCB,
+    bool IsNonContiguous,
+    function_ref DeviceAddrCB) {
   // Reset the array information.
   Info.clearArrayInfo();
   Info.NumberOfPtrs = CombinedInfo.BasePointers.size();
 
   if (Info.NumberOfPtrs == 0)
-    return;
+    return Error::success();
 
   Builder.restoreIP(AllocaIP);
   // Detect if we have any capture size requiring runtime evaluation of the
@@ -8436,9 +8444,13 @@ void OpenMPIRBuilder::emitOffloadingArrays(
     // Fill up the mapper array.
     unsigned IndexSize = M.getDataLayout().getIndexSizeInBits(0);
     Value *MFunc = ConstantPointerNull::get(PtrTy);
-    if (CustomMapperCB)
-      if (Value *CustomMFunc = CustomMapperCB(I))
-        MFunc = Builder.CreatePointerCast(CustomMFunc, PtrTy);
+
+    auto CustomMFunc = CustomMapperCB(I);
+    if (!CustomMFunc)
+      return CustomMFunc.takeError();
+    if (*CustomMFunc)
+      MFunc = Builder.CreatePointerCast(*CustomMFunc, PtrTy);
+
     Value *MAddr = Builder.CreateInBoundsGEP(
         MappersArray->getAllocatedType(), MappersArray,
         {Builder.getIntN(IndexSize, 0), Builder.getIntN(IndexSize, I)});
@@ -8448,8 +8460,9 @@ void OpenMPIRBuilder::emitOffloadingArrays(
 
   if (!IsNonContiguous || CombinedInfo.NonContigInfo.Offsets.empty() ||
       Info.NumberOfPtrs == 0)
-    return;
+    return Error::success();
   emitNonContiguousDescriptor(AllocaIP, CodeGenIP, CombinedInfo, Info);
+  return Error::success();
 }
 
 void OpenMPIRBuilder::emitBranch(BasicBlock *Target) {
diff --git a/llvm/lib/Passes/PassBuilder.cpp b/llvm/lib/Passes/PassBuilder.cpp
index 8d5c0b3c13e01..96939f89279c6 100644
--- a/llvm/lib/Passes/PassBuilder.cpp
+++ b/llvm/lib/Passes/PassBuilder.cpp
@@ -127,6 +127,7 @@
 #include "llvm/CodeGen/PeepholeOptimizer.h"
 #include "llvm/CodeGen/PostRASchedulerList.h"
 #include "llvm/CodeGen/PreISelIntrinsicLowering.h"
+#include "llvm/CodeGen/RegAllocEvictionAdvisor.h"
 #include "llvm/CodeGen/RegAllocFast.h"
 #include "llvm/CodeGen/RegUsageInfoCollector.h"
 #include "llvm/CodeGen/RegUsageInfoPropagate.h"
diff --git a/llvm/lib/ProfileData/ProfileSummaryBuilder.cpp b/llvm/lib/ProfileData/ProfileSummaryBuilder.cpp
index 3a45113b0a2ea..23c87702eb133 100644
--- a/llvm/lib/ProfileData/ProfileSummaryBuilder.cpp
+++ b/llvm/lib/ProfileData/ProfileSummaryBuilder.cpp
@@ -77,9 +77,18 @@ static const uint32_t DefaultCutoffsData[] = {
 const ArrayRef ProfileSummaryBuilder::DefaultCutoffs =
     DefaultCutoffsData;
 
+// An entry for the 0th percentile to correctly calculate hot/cold count
+// thresholds when -profile-summary-cutoff-hot/cold is 0. If the hot cutoff is
+// 0, no sample counts are treated as hot. If the cold cutoff is 0, all sample
+// counts are treated as cold. Assumes there are no UINT64_MAX sample counts.
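A quick sanity check of the sentinel's arithmetic (plain C++; the Entry struct is an assumed stand-in for ProfileSummaryEntry, whose actual definition follows just below): with a cutoff of 0 the returned entry carries MinCount = UINT64_MAX, so a hot threshold derived from it can never be met and a cold threshold is always met.

```cpp
#include <cstdint>

struct Entry { uint64_t Cutoff, MinCount, NumCounts; }; // assumed layout

int main() {
  const Entry Zero{0, UINT64_MAX, 0};     // mirrors the sentinel below
  uint64_t HotThreshold = Zero.MinCount;  // taken from the hot-cutoff entry
  uint64_t ColdThreshold = Zero.MinCount; // taken from the cold-cutoff entry
  uint64_t AnyCount = 123456;             // any realistic sample count
  bool IsHot = AnyCount >= HotThreshold;  // false: nothing counts as hot
  bool IsCold = AnyCount <= ColdThreshold; // true: everything counts as cold
  return (IsHot || !IsCold) ? 1 : 0;      // 0, absent UINT64_MAX counts
}
```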
+static const ProfileSummaryEntry ZeroCutoffEntry = {0, UINT64_MAX, 0};
+
 const ProfileSummaryEntry &
 ProfileSummaryBuilder::getEntryForPercentile(const SummaryEntryVector &DS,
                                              uint64_t Percentile) {
+  if (Percentile == 0)
+    return ZeroCutoffEntry;
+
   auto It = partition_point(DS, [=](const ProfileSummaryEntry &Entry) {
     return Entry.Cutoff < Percentile;
   });
diff --git a/llvm/lib/SandboxIR/Region.cpp b/llvm/lib/SandboxIR/Region.cpp
index ede738c35f032..086993e6dc872 100644
--- a/llvm/lib/SandboxIR/Region.cpp
+++ b/llvm/lib/SandboxIR/Region.cpp
@@ -40,8 +40,10 @@ Region::Region(Context &Ctx, TargetTransformInfo &TTI)
   CreateInstCB = Ctx.registerCreateInstrCallback(
       [this](Instruction *NewInst) { add(NewInst); });
-  EraseInstCB = Ctx.registerEraseInstrCallback(
-      [this](Instruction *ErasedInst) { remove(ErasedInst); });
+  EraseInstCB = Ctx.registerEraseInstrCallback([this](Instruction *ErasedInst) {
+    remove(ErasedInst);
+    removeFromAux(ErasedInst);
+  });
 }
 
 Region::~Region() {
@@ -84,11 +86,22 @@ void Region::setAux(unsigned Idx, Instruction *I) {
   Aux[Idx] = I;
 }
 
+void Region::dropAuxMetadata(Instruction *I) {
+  auto *LLVMI = cast(I->Val);
+  LLVMI->setMetadata(AuxMDKind, nullptr);
+}
+
+void Region::removeFromAux(Instruction *I) {
+  auto It = find(Aux, I);
+  if (It == Aux.end())
+    return;
+  dropAuxMetadata(I);
+  Aux.erase(It);
+}
+
 void Region::clearAux() {
-  for (unsigned Idx : seq(0, Aux.size())) {
-    auto *LLVMI = cast(Aux[Idx]->Val);
-    LLVMI->setMetadata(AuxMDKind, nullptr);
-  }
+  for (unsigned Idx : seq(0, Aux.size()))
+    dropAuxMetadata(Aux[Idx]);
   Aux.clear();
 }
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 8f849af6f4d35..50be082777835 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -154,6 +154,13 @@ cl::opt EnableSVEGISel(
     cl::desc("Enable / disable SVE scalable vectors in Global ISel"),
     cl::init(false));
 
+// FIXME: This is a temporary flag, and is used to help the transition to
+// performing lowering the proper way using the new PARTIAL_REDUCE_MLA ISD
+// nodes.
+static cl::opt EnablePartialReduceNodes(
+    "aarch64-enable-partial-reduce-nodes", cl::init(false), cl::ReallyHidden,
+    cl::desc("Use the new method of lowering partial reductions."));
+
 /// Value type used for condition codes.
static const MVT MVT_CC = MVT::i32; @@ -2050,6 +2057,8 @@ bool AArch64TargetLowering::shouldExpandPartialReductionIntrinsic( const IntrinsicInst *I) const { if (I->getIntrinsicID() != Intrinsic::experimental_vector_partial_reduce_add) return true; + if (EnablePartialReduceNodes) + return true; EVT VT = EVT::getEVT(I->getType()); auto Op1 = I->getOperand(1); @@ -11780,8 +11789,9 @@ SDValue AArch64TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const { if (Align && *Align > MinSlotSize) { VAList = DAG.getNode(ISD::ADD, DL, PtrVT, VAList, DAG.getConstant(Align->value() - 1, DL, PtrVT)); - VAList = DAG.getNode(ISD::AND, DL, PtrVT, VAList, - DAG.getConstant(-(int64_t)Align->value(), DL, PtrVT)); + VAList = + DAG.getNode(ISD::AND, DL, PtrVT, VAList, + DAG.getSignedConstant(-(int64_t)Align->value(), DL, PtrVT)); } Type *ArgTy = VT.getTypeForEVT(*DAG.getContext()); @@ -16147,8 +16157,9 @@ AArch64TargetLowering::LowerWindowsDYNAMIC_STACKALLOC(SDValue Op, Chain = SP.getValue(1); SP = DAG.getNode(ISD::SUB, dl, MVT::i64, SP, Size); if (Align) - SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0), - DAG.getConstant(-(uint64_t)Align->value(), dl, VT)); + SP = + DAG.getNode(ISD::AND, dl, VT, SP.getValue(0), + DAG.getSignedConstant(-(uint64_t)Align->value(), dl, VT)); Chain = DAG.getCopyToReg(Chain, dl, AArch64::SP, SP); SDValue Ops[2] = {SP, Chain}; return DAG.getMergeValues(Ops, dl); @@ -16185,7 +16196,7 @@ AArch64TargetLowering::LowerWindowsDYNAMIC_STACKALLOC(SDValue Op, SP = DAG.getNode(ISD::SUB, dl, MVT::i64, SP, Size); if (Align) SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0), - DAG.getConstant(-(uint64_t)Align->value(), dl, VT)); + DAG.getSignedConstant(-(uint64_t)Align->value(), dl, VT)); Chain = DAG.getCopyToReg(Chain, dl, AArch64::SP, SP); Chain = DAG.getCALLSEQ_END(Chain, 0, 0, SDValue(), dl); @@ -16213,7 +16224,7 @@ AArch64TargetLowering::LowerInlineDYNAMIC_STACKALLOC(SDValue Op, SP = DAG.getNode(ISD::SUB, dl, MVT::i64, SP, Size); if (Align) SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0), - DAG.getConstant(-(uint64_t)Align->value(), dl, VT)); + DAG.getSignedConstant(-(uint64_t)Align->value(), dl, VT)); // Set the real SP to the new value with a probing loop. 
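The getConstant to getSignedConstant swaps above all materialize the same kind of value: a negative power-of-two alignment mask, which must be sign-extended to the node width rather than zero-extended or implicitly truncated. The underlying arithmetic, as a freestanding sketch:

```cpp
#include <cassert>
#include <cstdint>

// Rounding with a power-of-two alignment uses a sign-extended mask:
// -Align has all high bits set, so ANDing clears only the low bits.
uint64_t alignDown(uint64_t P, uint64_t Align) {
  return P & (uint64_t)-(int64_t)Align;
}
uint64_t alignUp(uint64_t P, uint64_t Align) {
  return (P + Align - 1) & (uint64_t)-(int64_t)Align;
}

int main() {
  assert(alignDown(0x1007, 16) == 0x1000); // stack-pointer style rounding
  assert(alignUp(0x1001, 16) == 0x1010);   // va_arg style rounding
  return 0;
}
```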
Chain = DAG.getNode(AArch64ISD::PROBED_ALLOCA, dl, MVT::Other, Chain, SP); @@ -21485,7 +21496,7 @@ static SDValue tryCombineShiftImm(unsigned IID, SDNode *N, SelectionDAG &DAG) { if (IsRightShift && ShiftAmount <= -1 && ShiftAmount >= -(int)ElemBits) { Op = DAG.getNode(Opcode, dl, VT, Op, - DAG.getConstant(-ShiftAmount, dl, MVT::i32)); + DAG.getSignedConstant(-ShiftAmount, dl, MVT::i32)); if (N->getValueType(0) == MVT::i64) Op = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Op, DAG.getConstant(0, dl, MVT::i64)); @@ -21976,8 +21987,11 @@ static SDValue performIntrinsicCombine(SDNode *N, return Dot; if (SDValue WideAdd = tryLowerPartialReductionToWideAdd(N, Subtarget, DAG)) return WideAdd; - return DAG.getPartialReduceAdd(SDLoc(N), N->getValueType(0), - N->getOperand(1), N->getOperand(2)); + SDLoc DL(N); + SDValue Input = N->getOperand(2); + return DAG.getNode(ISD::PARTIAL_REDUCE_UMLA, DL, N->getValueType(0), + N->getOperand(1), Input, + DAG.getConstant(1, DL, Input.getValueType())); } case Intrinsic::aarch64_neon_vcvtfxs2fp: case Intrinsic::aarch64_neon_vcvtfxu2fp: @@ -27364,10 +27378,10 @@ static void ReplaceATOMIC_LOAD_128Results(SDNode *N, SDLoc dl(Val128); Val2x64.first = DAG.getNode(ISD::XOR, dl, MVT::i64, - DAG.getConstant(-1ULL, dl, MVT::i64), Val2x64.first); + DAG.getAllOnesConstant(dl, MVT::i64), Val2x64.first); Val2x64.second = DAG.getNode(ISD::XOR, dl, MVT::i64, - DAG.getConstant(-1ULL, dl, MVT::i64), Val2x64.second); + DAG.getAllOnesConstant(dl, MVT::i64), Val2x64.second); } SDValue Ops[] = {Val2x64.first, Val2x64.second, Ptr, Chain}; diff --git a/llvm/lib/Target/AMDGPU/AMDGPU.h b/llvm/lib/Target/AMDGPU/AMDGPU.h index 4a0e5ef58ac93..42392e22643b2 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPU.h +++ b/llvm/lib/Target/AMDGPU/AMDGPU.h @@ -448,7 +448,7 @@ ModulePass *createAMDGPUOpenCLEnqueuedBlockLoweringLegacyPass(); void initializeAMDGPUOpenCLEnqueuedBlockLoweringLegacyPass(PassRegistry &); extern char &AMDGPUOpenCLEnqueuedBlockLoweringLegacyID; -void initializeGCNNSAReassignPass(PassRegistry &); +void initializeGCNNSAReassignLegacyPass(PassRegistry &); extern char &GCNNSAReassignID; void initializeGCNPreRALongBranchRegLegacyPass(PassRegistry &); diff --git a/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp b/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp index b0b6c4df8e982..86b2c4f78fc3e 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp @@ -2078,6 +2078,7 @@ static bool isRemovablePointerIntrinsic(Intrinsic::ID IID) { switch (IID) { default: return false; + case Intrinsic::amdgcn_make_buffer_rsrc: case Intrinsic::ptrmask: case Intrinsic::invariant_start: case Intrinsic::invariant_end: @@ -2092,6 +2093,25 @@ PtrParts SplitPtrStructs::visitIntrinsicInst(IntrinsicInst &I) { switch (IID) { default: break; + case Intrinsic::amdgcn_make_buffer_rsrc: { + if (!isSplitFatPtr(I.getType())) + return {nullptr, nullptr}; + Value *Base = I.getArgOperand(0); + Value *Stride = I.getArgOperand(1); + Value *NumRecords = I.getArgOperand(2); + Value *Flags = I.getArgOperand(3); + auto *SplitType = cast(I.getType()); + Type *RsrcType = SplitType->getElementType(0); + Type *OffType = SplitType->getElementType(1); + IRB.SetInsertPoint(&I); + Value *Rsrc = IRB.CreateIntrinsic(IID, {RsrcType, Base->getType()}, + {Base, Stride, NumRecords, Flags}); + copyMetadata(Rsrc, &I); + Rsrc->takeName(&I); + Value *Zero = Constant::getNullValue(OffType); + SplitUsers.insert(&I); + return {Rsrc, Zero}; + } case 
Intrinsic::ptrmask: { Value *Ptr = I.getArgOperand(0); if (!isSplitFatPtr(Ptr->getType())) diff --git a/llvm/lib/Target/AMDGPU/AMDGPUMCInstLower.cpp b/llvm/lib/Target/AMDGPU/AMDGPUMCInstLower.cpp index 38272b9d4840d..895d1e77bf1c4 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUMCInstLower.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUMCInstLower.cpp @@ -114,9 +114,63 @@ bool AMDGPUMCInstLower::lowerOperand(const MachineOperand &MO, llvm_unreachable("unknown operand type"); } -void AMDGPUMCInstLower::lower(const MachineInstr *MI, MCInst &OutMI) const { +// Lower true16 D16 Pseudo instruction to d16_lo/d16_hi MCInst based on +// Dst/Data's .l/.h selection +void AMDGPUMCInstLower::lowerT16D16Helper(const MachineInstr *MI, + MCInst &OutMI) const { unsigned Opcode = MI->getOpcode(); const auto *TII = static_cast(ST.getInstrInfo()); + const SIRegisterInfo &TRI = TII->getRegisterInfo(); + const auto *Info = AMDGPU::getT16D16Helper(Opcode); + + llvm::AMDGPU::OpName OpName; + if (TII->isDS(Opcode)) { + if (MI->mayLoad()) + OpName = llvm::AMDGPU::OpName::vdst; + else if (MI->mayStore()) + OpName = llvm::AMDGPU::OpName::data0; + else + llvm_unreachable("LDS load or store expected"); + } else { + OpName = AMDGPU::hasNamedOperand(Opcode, llvm::AMDGPU::OpName::vdata) + ? llvm::AMDGPU::OpName::vdata + : llvm::AMDGPU::OpName::vdst; + } + + // select Dst/Data + int VDstOrVDataIdx = AMDGPU::getNamedOperandIdx(Opcode, OpName); + const MachineOperand &MIVDstOrVData = MI->getOperand(VDstOrVDataIdx); + + // select hi/lo MCInst + bool IsHi = AMDGPU::isHi16Reg(MIVDstOrVData.getReg(), TRI); + Opcode = IsHi ? Info->HiOp : Info->LoOp; + + int MCOpcode = TII->pseudoToMCOpcode(Opcode); + assert(MCOpcode != -1 && + "Pseudo instruction doesn't have a target-specific version"); + OutMI.setOpcode(MCOpcode); + + // lower operands + for (int I = 0, E = MI->getNumExplicitOperands(); I < E; I++) { + const MachineOperand &MO = MI->getOperand(I); + MCOperand MCOp; + if (I == VDstOrVDataIdx) + MCOp = MCOperand::createReg(TRI.get32BitRegister(MIVDstOrVData.getReg())); + else + lowerOperand(MO, MCOp); + OutMI.addOperand(MCOp); + } + + if (AMDGPU::hasNamedOperand(MCOpcode, AMDGPU::OpName::vdst_in)) { + MCOperand MCOp; + lowerOperand(MIVDstOrVData, MCOp); + OutMI.addOperand(MCOp); + } +} + +void AMDGPUMCInstLower::lower(const MachineInstr *MI, MCInst &OutMI) const { + unsigned Opcode = MI->getOpcode(); + const auto *TII = static_cast(ST.getInstrInfo()); // FIXME: Should be able to handle this with lowerPseudoInstExpansion. We // need to select it to the subtarget specific version, and there's no way to @@ -137,6 +191,9 @@ void AMDGPUMCInstLower::lower(const MachineInstr *MI, MCInst &OutMI) const { Opcode == AMDGPU::SI_TCRETURN_GFX) { // TODO: How to use branch immediate and avoid register+add? 
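The new lowerT16D16Helper above boils down to a keyed table lookup plus a register-half test. A condensed sketch of that control flow (the struct mirrors the patch; the table and lookup scaffolding here are assumptions, not the generated AMDGPU API):

```cpp
#include <map>

// Mirrors the True16D16Info rows generated from the .td GenericTable.
struct True16D16Info { unsigned T16Op, HiOp, LoOp; };

// Assumed lookup keyed on the pseudo opcode, in the spirit of
// AMDGPU::getT16D16Helper.
const True16D16Info *lookupT16D16(const std::map<unsigned, True16D16Info> &T,
                                  unsigned Opc) {
  auto It = T.find(Opc);
  return It == T.end() ? nullptr : &It->second;
}

// Pick the d16_hi or d16_lo MC opcode from the .h/.l half that the
// pseudo's vdst/vdata register selects.
unsigned selectMCOpcode(const True16D16Info &Info, bool IsHiHalf) {
  return IsHiHalf ? Info.HiOp : Info.LoOp;
}
```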
Opcode = AMDGPU::S_SETPC_B64; + } else if (AMDGPU::getT16D16Helper(Opcode)) { + lowerT16D16Helper(MI, OutMI); + return; } int MCOpcode = TII->pseudoToMCOpcode(Opcode); diff --git a/llvm/lib/Target/AMDGPU/AMDGPUMCInstLower.h b/llvm/lib/Target/AMDGPU/AMDGPUMCInstLower.h index 7176cc5d3439b..5ddf1ca2ab06d 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUMCInstLower.h +++ b/llvm/lib/Target/AMDGPU/AMDGPUMCInstLower.h @@ -39,6 +39,8 @@ class AMDGPUMCInstLower { /// Lower a MachineInstr to an MCInst void lower(const MachineInstr *MI, MCInst &OutMI) const; + + void lowerT16D16Helper(const MachineInstr *MI, MCInst &OutMI) const; }; namespace { diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def b/llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def index 225f84725874b..fd1341e8c91b2 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def +++ b/llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def @@ -100,6 +100,7 @@ MACHINE_FUNCTION_PASS("amdgpu-isel", AMDGPUISelDAGToDAGPass(*this)) MACHINE_FUNCTION_PASS("amdgpu-pre-ra-long-branch-reg", GCNPreRALongBranchRegPass()) MACHINE_FUNCTION_PASS("amdgpu-rewrite-partial-reg-uses", GCNRewritePartialRegUsesPass()) MACHINE_FUNCTION_PASS("amdgpu-pre-ra-optimizations", GCNPreRAOptimizationsPass()) +MACHINE_FUNCTION_PASS("amdgpu-nsa-reassign", GCNNSAReassignPass()) MACHINE_FUNCTION_PASS("gcn-dpp-combine", GCNDPPCombinePass()) MACHINE_FUNCTION_PASS("si-fix-sgpr-copies", SIFixSGPRCopiesPass()) MACHINE_FUNCTION_PASS("si-fix-vgpr-copies", SIFixVGPRCopiesPass()) @@ -120,7 +121,6 @@ MACHINE_FUNCTION_PASS("si-wqm", SIWholeQuadModePass()) #define DUMMY_MACHINE_FUNCTION_PASS(NAME, CREATE_PASS) DUMMY_MACHINE_FUNCTION_PASS("amdgpu-insert-delay-alu", AMDGPUInsertDelayAluPass()) -DUMMY_MACHINE_FUNCTION_PASS("amdgpu-nsa-reassign", GCNNSAReassignPass()) DUMMY_MACHINE_FUNCTION_PASS("amdgpu-pre-ra-optimizations", GCNPreRAOptimizationsPass()) DUMMY_MACHINE_FUNCTION_PASS("amdgpu-rewrite-partial-reg-uses", GCNRewritePartialRegUsesPass()) DUMMY_MACHINE_FUNCTION_PASS("amdgpu-set-wave-priority", AMDGPUSetWavePriorityPass()) diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp index 92ab106dd4a98..7c9377e61230b 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp @@ -32,6 +32,7 @@ #include "AMDGPUWaitSGPRHazards.h" #include "GCNDPPCombine.h" #include "GCNIterativeScheduler.h" +#include "GCNNSAReassign.h" #include "GCNPreRALongBranchReg.h" #include "GCNPreRAOptimizations.h" #include "GCNRewritePartialRegUses.h" @@ -550,7 +551,7 @@ extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUTarget() { initializeAMDGPUImageIntrinsicOptimizerPass(*PR); initializeAMDGPUPrintfRuntimeBindingPass(*PR); initializeAMDGPUResourceUsageAnalysisPass(*PR); - initializeGCNNSAReassignPass(*PR); + initializeGCNNSAReassignLegacyPass(*PR); initializeGCNPreRAOptimizationsLegacyPass(*PR); initializeGCNPreRALongBranchRegLegacyPass(*PR); initializeGCNRewritePartialRegUsesLegacyPass(*PR); @@ -2112,6 +2113,12 @@ Error AMDGPUCodeGenPassBuilder::addInstSelector(AddMachinePass &addPass) const { return Error::success(); } +void AMDGPUCodeGenPassBuilder::addPreRewrite(AddMachinePass &addPass) const { + if (EnableRegReassign) { + addPass(GCNNSAReassignPass()); + } +} + void AMDGPUCodeGenPassBuilder::addMachineSSAOptimization( AddMachinePass &addPass) const { Base::addMachineSSAOptimization(addPass); diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.h b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.h index 
1455494d0ef7d..eb5a9ca1f86d6 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.h +++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.h @@ -177,6 +177,7 @@ class AMDGPUCodeGenPassBuilder void addILPOpts(AddMachinePass &) const; void addAsmPrinter(AddMachinePass &, CreateMCStreamer) const; Error addInstSelector(AddMachinePass &) const; + void addPreRewrite(AddMachinePass &) const; void addMachineSSAOptimization(AddMachinePass &) const; void addPostRegAlloc(AddMachinePass &) const; diff --git a/llvm/lib/Target/AMDGPU/AMDGPUWaitSGPRHazards.cpp b/llvm/lib/Target/AMDGPU/AMDGPUWaitSGPRHazards.cpp index e70d6aab306fe..4df55eac5d76b 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUWaitSGPRHazards.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUWaitSGPRHazards.cpp @@ -384,13 +384,14 @@ class AMDGPUWaitSGPRHazards { } } - bool Changed = State != BlockState[&MBB].Out; + BlockHazardState &BS = BlockState[&MBB]; + bool Changed = State != BS.Out; if (Emit) { assert(!Changed && "Hazard state should not change on emit pass"); return Emitted; } if (Changed) - BlockState[&MBB].Out = State; + BS.Out = State; return Changed; } diff --git a/llvm/lib/Target/AMDGPU/FLATInstructions.td b/llvm/lib/Target/AMDGPU/FLATInstructions.td index 8fa708b74dde3..ea6e703eba5d9 100644 --- a/llvm/lib/Target/AMDGPU/FLATInstructions.td +++ b/llvm/lib/Target/AMDGPU/FLATInstructions.td @@ -16,6 +16,12 @@ let WantsRoot = true in { def ScratchSVAddr : ComplexPattern; } +class True16D16Table { + Instruction T16Op = !cast(NAME); + Instruction HiOp = !cast(hiOp); + Instruction LoOp = !cast(loOp); +} + //===----------------------------------------------------------------------===// // FLAT classes //===----------------------------------------------------------------------===// @@ -226,6 +232,12 @@ class FLAT_Load_Pseudo { + def "" : FLAT_Load_Pseudo; + let True16Predicate = UseRealTrue16Insts in + def _t16 : FLAT_Load_Pseudo, True16D16Table; +} + class FLAT_Store_Pseudo : FLAT_Pseudo< opName, @@ -662,12 +674,12 @@ def FLAT_STORE_DWORDX3 : FLAT_Store_Pseudo <"flat_store_dwordx3", VReg_96>; let SubtargetPredicate = HasD16LoadStore in { let TiedSourceNotRead = 1 in { -def FLAT_LOAD_UBYTE_D16 : FLAT_Load_Pseudo <"flat_load_ubyte_d16", VGPR_32, 1>; def FLAT_LOAD_UBYTE_D16_HI : FLAT_Load_Pseudo <"flat_load_ubyte_d16_hi", VGPR_32, 1>; -def FLAT_LOAD_SBYTE_D16 : FLAT_Load_Pseudo <"flat_load_sbyte_d16", VGPR_32, 1>; +defm FLAT_LOAD_UBYTE_D16 : FLAT_Load_Pseudo_t16 <"flat_load_ubyte_d16">; def FLAT_LOAD_SBYTE_D16_HI : FLAT_Load_Pseudo <"flat_load_sbyte_d16_hi", VGPR_32, 1>; -def FLAT_LOAD_SHORT_D16 : FLAT_Load_Pseudo <"flat_load_short_d16", VGPR_32, 1>; +defm FLAT_LOAD_SBYTE_D16 : FLAT_Load_Pseudo_t16 <"flat_load_sbyte_d16">; def FLAT_LOAD_SHORT_D16_HI : FLAT_Load_Pseudo <"flat_load_short_d16_hi", VGPR_32, 1>; +defm FLAT_LOAD_SHORT_D16 : FLAT_Load_Pseudo_t16 <"flat_load_short_d16">; } def FLAT_STORE_BYTE_D16_HI : FLAT_Store_Pseudo <"flat_store_byte_d16_hi", VGPR_32>; @@ -1049,6 +1061,11 @@ class FlatLoadPat_D16 : (inst $vaddr, $offset, 0, $in) >; +class FlatLoadPat_D16_t16 : GCNPat < + (vt (node (FlatOffset (i64 VReg_64:$vaddr), i32:$offset))), + (inst $vaddr, $offset, (i32 0)) +>; + class FlatSignedLoadPat_D16 : GCNPat < (node (GlobalOffset (i64 VReg_64:$vaddr), i32:$offset), vt:$in), (inst $vaddr, $offset, 0, $in) @@ -1371,16 +1388,29 @@ def : FlatLoadPat ; def : FlatLoadPat ; def : FlatLoadPat ; def : FlatLoadPat ; -def : FlatLoadPat ; -def : FlatLoadPat ; -def : FlatLoadPat ; def : FlatLoadPat ; def : FlatLoadPat ; -def : FlatLoadPat ; def : FlatLoadPat ; 
def : FlatLoadPat ; def : FlatLoadPat ; +foreach p = [NotHasTrue16BitInsts, UseFakeTrue16Insts] in +let True16Predicate = p in { + def : FlatLoadPat ; + def : FlatLoadPat ; + def : FlatLoadPat ; + def : FlatLoadPat ; + def : FlatStorePat ; + def : FlatStorePat ; +} + +let OtherPredicates = [D16PreservesUnusedBits, HasFlatAddressSpace], True16Predicate = UseRealTrue16Insts in { + def : FlatLoadPat_D16_t16; + def : FlatLoadPat_D16_t16; + def : FlatLoadPat_D16_t16; + def : FlatLoadPat_D16_t16; +} // End let OtherPredicates = [D16PreservesUnusedBits, HasFlatAddressSpace], True16Predicate = UseRealTrue16Insts + def : FlatLoadPat ; def : FlatLoadPat ; @@ -2761,3 +2791,11 @@ defm SCRATCH_STORE_SHORT_D16_HI : VSCRATCH_Real_AllAddr_gfx12<0x25, "scratch_ defm SCRATCH_LOAD_BLOCK : VSCRATCH_Real_AllAddr_gfx12<0x53>; defm SCRATCH_STORE_BLOCK : VSCRATCH_Real_AllAddr_gfx12<0x54>; + +def True16D16Table : GenericTable { + let FilterClass = "True16D16Table"; + let CppTypeName = "True16D16Info"; + let Fields = ["T16Op", "HiOp", "LoOp"]; + let PrimaryKey = ["T16Op"]; + let PrimaryKeyName = "getT16D16Helper"; +} diff --git a/llvm/lib/Target/AMDGPU/GCNNSAReassign.cpp b/llvm/lib/Target/AMDGPU/GCNNSAReassign.cpp index 85e79aa4b7595..13eb0ca539a4c 100644 --- a/llvm/lib/Target/AMDGPU/GCNNSAReassign.cpp +++ b/llvm/lib/Target/AMDGPU/GCNNSAReassign.cpp @@ -13,6 +13,7 @@ /// //===----------------------------------------------------------------------===// +#include "GCNNSAReassign.h" #include "AMDGPU.h" #include "GCNSubtarget.h" #include "SIMachineFunctionInfo.h" @@ -34,26 +35,12 @@ STATISTIC(NumNSAConverted, "Number of NSA instructions changed to sequential"); namespace { - -class GCNNSAReassign : public MachineFunctionPass { +class GCNNSAReassignImpl { public: - static char ID; - - GCNNSAReassign() : MachineFunctionPass(ID) { - initializeGCNNSAReassignPass(*PassRegistry::getPassRegistry()); - } - - bool runOnMachineFunction(MachineFunction &MF) override; + GCNNSAReassignImpl(VirtRegMap *VM, LiveRegMatrix *LM, LiveIntervals *LS) + : VRM(VM), LRM(LM), LIS(LS) {} - StringRef getPassName() const override { return "GCN NSA Reassign"; } - - void getAnalysisUsage(AnalysisUsage &AU) const override { - AU.addRequired(); - AU.addRequired(); - AU.addRequired(); - AU.setPreservesAll(); - MachineFunctionPass::getAnalysisUsage(AU); - } + bool run(MachineFunction &MF); private: using NSA_Status = enum { @@ -90,24 +77,43 @@ class GCNNSAReassign : public MachineFunctionPass { bool scavengeRegs(SmallVectorImpl &Intervals) const; }; +class GCNNSAReassignLegacy : public MachineFunctionPass { +public: + static char ID; + + GCNNSAReassignLegacy() : MachineFunctionPass(ID) { + initializeGCNNSAReassignLegacyPass(*PassRegistry::getPassRegistry()); + } + + bool runOnMachineFunction(MachineFunction &MF) override; + + StringRef getPassName() const override { return "GCN NSA Reassign"; }; + + void getAnalysisUsage(AnalysisUsage &AU) const override { + AU.addRequired(); + AU.addRequired(); + AU.addRequired(); + AU.setPreservesAll(); + MachineFunctionPass::getAnalysisUsage(AU); + } +}; + } // End anonymous namespace. 
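The GCNNSAReassign restructuring below is the stock legacy-to-new-pass-manager migration: hoist the logic into an Impl class that receives its analyses from whichever manager is driving, then keep a thin legacy wrapper and add a PassInfoMixin wrapper. Schematically (a sketch with the analysis plumbing elided, not the verbatim pass):

```cpp
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachinePassManager.h"

using namespace llvm;

class ReassignImpl {
public:
  // Analyses are injected, so the same code runs under either manager.
  bool run(MachineFunction &MF) { return false; /* real work goes here */ }
};

struct ReassignLegacy : MachineFunctionPass {
  static char ID;
  ReassignLegacy() : MachineFunctionPass(ID) {}
  bool runOnMachineFunction(MachineFunction &MF) override {
    return ReassignImpl().run(MF); // analyses via getAnalysis<...>()
  }
};
char ReassignLegacy::ID = 0;

struct ReassignPass : PassInfoMixin<ReassignPass> {
  PreservedAnalyses run(MachineFunction &MF,
                        MachineFunctionAnalysisManager &MFAM) {
    ReassignImpl().run(MF); // analyses via MFAM.getResult<...>()
    return PreservedAnalyses::all();
  }
};
```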
-INITIALIZE_PASS_BEGIN(GCNNSAReassign, DEBUG_TYPE, "GCN NSA Reassign", +INITIALIZE_PASS_BEGIN(GCNNSAReassignLegacy, DEBUG_TYPE, "GCN NSA Reassign", false, false) INITIALIZE_PASS_DEPENDENCY(LiveIntervalsWrapperPass) INITIALIZE_PASS_DEPENDENCY(VirtRegMapWrapperLegacy) INITIALIZE_PASS_DEPENDENCY(LiveRegMatrixWrapperLegacy) -INITIALIZE_PASS_END(GCNNSAReassign, DEBUG_TYPE, "GCN NSA Reassign", - false, false) - +INITIALIZE_PASS_END(GCNNSAReassignLegacy, DEBUG_TYPE, "GCN NSA Reassign", false, + false) -char GCNNSAReassign::ID = 0; +char GCNNSAReassignLegacy::ID = 0; -char &llvm::GCNNSAReassignID = GCNNSAReassign::ID; +char &llvm::GCNNSAReassignID = GCNNSAReassignLegacy::ID; -bool -GCNNSAReassign::tryAssignRegisters(SmallVectorImpl &Intervals, - unsigned StartReg) const { +bool GCNNSAReassignImpl::tryAssignRegisters( + SmallVectorImpl &Intervals, unsigned StartReg) const { unsigned NumRegs = Intervals.size(); for (unsigned N = 0; N < NumRegs; ++N) @@ -124,7 +130,7 @@ GCNNSAReassign::tryAssignRegisters(SmallVectorImpl &Intervals, return true; } -bool GCNNSAReassign::canAssign(unsigned StartReg, unsigned NumRegs) const { +bool GCNNSAReassignImpl::canAssign(unsigned StartReg, unsigned NumRegs) const { for (unsigned N = 0; N < NumRegs; ++N) { unsigned Reg = StartReg + N; if (!MRI->isAllocatable(Reg)) @@ -139,8 +145,8 @@ bool GCNNSAReassign::canAssign(unsigned StartReg, unsigned NumRegs) const { return true; } -bool -GCNNSAReassign::scavengeRegs(SmallVectorImpl &Intervals) const { +bool GCNNSAReassignImpl::scavengeRegs( + SmallVectorImpl &Intervals) const { unsigned NumRegs = Intervals.size(); if (NumRegs > MaxNumVGPRs) @@ -158,8 +164,8 @@ GCNNSAReassign::scavengeRegs(SmallVectorImpl &Intervals) const { return false; } -GCNNSAReassign::NSA_Status -GCNNSAReassign::CheckNSA(const MachineInstr &MI, bool Fast) const { +GCNNSAReassignImpl::NSA_Status +GCNNSAReassignImpl::CheckNSA(const MachineInstr &MI, bool Fast) const { const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(MI.getOpcode()); if (!Info) return NSA_Status::NOT_NSA; @@ -235,16 +241,13 @@ GCNNSAReassign::CheckNSA(const MachineInstr &MI, bool Fast) const { return NSA ? 
NSA_Status::NON_CONTIGUOUS : NSA_Status::CONTIGUOUS; } -bool GCNNSAReassign::runOnMachineFunction(MachineFunction &MF) { +bool GCNNSAReassignImpl::run(MachineFunction &MF) { ST = &MF.getSubtarget(); if (!ST->hasNSAEncoding() || !ST->hasNonNSAEncoding()) return false; MRI = &MF.getRegInfo(); TRI = ST->getRegisterInfo(); - VRM = &getAnalysis().getVRM(); - LRM = &getAnalysis().getLRM(); - LIS = &getAnalysis().getLIS(); const SIMachineFunctionInfo *MFI = MF.getInfo(); MaxNumVGPRs = ST->getMaxNumVGPRs(MF); @@ -367,3 +370,24 @@ bool GCNNSAReassign::runOnMachineFunction(MachineFunction &MF) { return Changed; } + +bool GCNNSAReassignLegacy::runOnMachineFunction(MachineFunction &MF) { + auto *VRM = &getAnalysis().getVRM(); + auto *LRM = &getAnalysis().getLRM(); + auto *LIS = &getAnalysis().getLIS(); + + GCNNSAReassignImpl Impl(VRM, LRM, LIS); + return Impl.run(MF); +} + +PreservedAnalyses +GCNNSAReassignPass::run(MachineFunction &MF, + MachineFunctionAnalysisManager &MFAM) { + auto &VRM = MFAM.getResult(MF); + auto &LRM = MFAM.getResult(MF); + auto &LIS = MFAM.getResult(MF); + + GCNNSAReassignImpl Impl(&VRM, &LRM, &LIS); + Impl.run(MF); + return PreservedAnalyses::all(); +} diff --git a/llvm/lib/Target/AMDGPU/GCNNSAReassign.h b/llvm/lib/Target/AMDGPU/GCNNSAReassign.h new file mode 100644 index 0000000000000..97a72e7ddbb24 --- /dev/null +++ b/llvm/lib/Target/AMDGPU/GCNNSAReassign.h @@ -0,0 +1,22 @@ +//===- GCNNSAReassign.h -----------------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIB_TARGET_AMDGPU_GCNNSAREASSIGN_H +#define LLVM_LIB_TARGET_AMDGPU_GCNNSAREASSIGN_H + +#include "llvm/CodeGen/MachinePassManager.h" + +namespace llvm { +class GCNNSAReassignPass : public PassInfoMixin { +public: + PreservedAnalyses run(MachineFunction &MF, + MachineFunctionAnalysisManager &MFAM); +}; +} // namespace llvm + +#endif // LLVM_LIB_TARGET_AMDGPU_GCNNSAREASSIGN_H diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp index 999553bfaff38..ab396929162d0 100644 --- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp +++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp @@ -123,7 +123,7 @@ class SIFoldOperandsImpl { SmallVectorImpl &FoldList, SmallVectorImpl &CopiesToReplace) const; - MachineOperand *getImmOrMaterializedImm(MachineOperand &Op) const; + std::optional getImmOrMaterializedImm(MachineOperand &Op) const; bool tryConstantFoldOp(MachineInstr *MI) const; bool tryFoldCndMask(MachineInstr &MI) const; bool tryFoldZeroHighBits(MachineInstr &MI) const; @@ -824,8 +824,7 @@ bool SIFoldOperandsImpl::tryToFoldACImm( return false; uint8_t OpTy = Desc.operands()[UseOpIdx].OperandType; - if (OpToFold.isImm() && TII->isInlineConstant(OpToFold, OpTy) && - TII->isOperandLegal(*UseMI, UseOpIdx, &OpToFold)) { + if (OpToFold.isImm() && TII->isOperandLegal(*UseMI, UseOpIdx, &OpToFold)) { UseMI->getOperand(UseOpIdx).ChangeToImmediate(OpToFold.getImm()); return true; } @@ -845,8 +844,7 @@ bool SIFoldOperandsImpl::tryToFoldACImm( MachineOperand &UseOp = UseMI->getOperand(UseOpIdx); if (!UseOp.getSubReg() && Def && TII->isFoldableCopy(*Def)) { MachineOperand &DefOp = Def->getOperand(1); - if (DefOp.isImm() && TII->isInlineConstant(DefOp, OpTy) && - TII->isOperandLegal(*UseMI, UseOpIdx, &DefOp)) { + 
if (DefOp.isImm() && TII->isOperandLegal(*UseMI, UseOpIdx, &DefOp)) { UseMI->getOperand(UseOpIdx).ChangeToImmediate(DefOp.getImm()); return true; } @@ -1298,21 +1296,22 @@ static void mutateCopyOp(MachineInstr &MI, const MCInstrDesc &NewDesc) { MI.removeOperand(I); } -MachineOperand * +std::optional SIFoldOperandsImpl::getImmOrMaterializedImm(MachineOperand &Op) const { - // If this has a subregister, it obviously is a register source. - if (!Op.isReg() || Op.getSubReg() != AMDGPU::NoSubRegister || - !Op.getReg().isVirtual()) - return &Op; + if (Op.isImm()) + return Op.getImm(); - MachineInstr *Def = MRI->getVRegDef(Op.getReg()); + if (!Op.isReg() || !Op.getReg().isVirtual()) + return std::nullopt; + + const MachineInstr *Def = MRI->getVRegDef(Op.getReg()); if (Def && Def->isMoveImmediate()) { - MachineOperand &ImmSrc = Def->getOperand(1); + const MachineOperand &ImmSrc = Def->getOperand(1); if (ImmSrc.isImm()) - return &ImmSrc; + return TII->extractSubregFromImm(ImmSrc.getImm(), Op.getSubReg()); } - return &Op; + return std::nullopt; } // Try to simplify operations with a constant that may appear after instruction @@ -1327,12 +1326,14 @@ bool SIFoldOperandsImpl::tryConstantFoldOp(MachineInstr *MI) const { int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0); if (Src0Idx == -1) return false; - MachineOperand *Src0 = getImmOrMaterializedImm(MI->getOperand(Src0Idx)); + + MachineOperand *Src0 = &MI->getOperand(Src0Idx); + std::optional Src0Imm = getImmOrMaterializedImm(*Src0); if ((Opc == AMDGPU::V_NOT_B32_e64 || Opc == AMDGPU::V_NOT_B32_e32 || Opc == AMDGPU::S_NOT_B32) && - Src0->isImm()) { - MI->getOperand(1).ChangeToImmediate(~Src0->getImm()); + Src0Imm) { + MI->getOperand(1).ChangeToImmediate(~*Src0Imm); mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_NOT_B32))); return true; } @@ -1340,17 +1341,19 @@ bool SIFoldOperandsImpl::tryConstantFoldOp(MachineInstr *MI) const { int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1); if (Src1Idx == -1) return false; - MachineOperand *Src1 = getImmOrMaterializedImm(MI->getOperand(Src1Idx)); - if (!Src0->isImm() && !Src1->isImm()) + MachineOperand *Src1 = &MI->getOperand(Src1Idx); + std::optional Src1Imm = getImmOrMaterializedImm(*Src1); + + if (!Src0Imm && !Src1Imm) return false; // and k0, k1 -> v_mov_b32 (k0 & k1) // or k0, k1 -> v_mov_b32 (k0 | k1) // xor k0, k1 -> v_mov_b32 (k0 ^ k1) - if (Src0->isImm() && Src1->isImm()) { + if (Src0Imm && Src1Imm) { int32_t NewImm; - if (!evalBinaryInstruction(Opc, NewImm, Src0->getImm(), Src1->getImm())) + if (!evalBinaryInstruction(Opc, NewImm, *Src0Imm, *Src1Imm)) return false; bool IsSGPR = TRI->isSGPRReg(*MRI, MI->getOperand(0).getReg()); @@ -1366,12 +1369,13 @@ bool SIFoldOperandsImpl::tryConstantFoldOp(MachineInstr *MI) const { if (!MI->isCommutable()) return false; - if (Src0->isImm() && !Src1->isImm()) { + if (Src0Imm && !Src1Imm) { std::swap(Src0, Src1); std::swap(Src0Idx, Src1Idx); + std::swap(Src0Imm, Src1Imm); } - int32_t Src1Val = static_cast(Src1->getImm()); + int32_t Src1Val = static_cast(*Src1Imm); if (Opc == AMDGPU::V_OR_B32_e64 || Opc == AMDGPU::V_OR_B32_e32 || Opc == AMDGPU::S_OR_B32) { @@ -1428,9 +1432,12 @@ bool SIFoldOperandsImpl::tryFoldCndMask(MachineInstr &MI) const { MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0); MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1); if (!Src1->isIdenticalTo(*Src0)) { - auto *Src0Imm = getImmOrMaterializedImm(*Src0); - auto *Src1Imm = getImmOrMaterializedImm(*Src1); - if 
(!Src1Imm->isIdenticalTo(*Src0Imm)) + std::optional Src1Imm = getImmOrMaterializedImm(*Src1); + if (!Src1Imm) + return false; + + std::optional Src0Imm = getImmOrMaterializedImm(*Src0); + if (!Src0Imm || *Src0Imm != *Src1Imm) return false; } @@ -1463,8 +1470,8 @@ bool SIFoldOperandsImpl::tryFoldZeroHighBits(MachineInstr &MI) const { MI.getOpcode() != AMDGPU::V_AND_B32_e32) return false; - MachineOperand *Src0 = getImmOrMaterializedImm(MI.getOperand(1)); - if (!Src0->isImm() || Src0->getImm() != 0xffff) + std::optional Src0Imm = getImmOrMaterializedImm(MI.getOperand(1)); + if (!Src0Imm || *Src0Imm != 0xffff) return false; Register Src1 = MI.getOperand(2).getReg(); diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp index 4ee5ebd7681b8..ceab6c9dcca34 100644 --- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp +++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp @@ -3437,6 +3437,30 @@ void SIInstrInfo::removeModOperands(MachineInstr &MI) const { } } +std::optional SIInstrInfo::extractSubregFromImm(int64_t Imm, + unsigned SubRegIndex) { + switch (SubRegIndex) { + case AMDGPU::NoSubRegister: + return Imm; + case AMDGPU::sub0: + return Lo_32(Imm); + case AMDGPU::sub1: + return Hi_32(Imm); + case AMDGPU::lo16: + return SignExtend64<16>(Imm); + case AMDGPU::hi16: + return SignExtend64<16>(Imm >> 16); + case AMDGPU::sub1_lo16: + return SignExtend64<16>(Imm >> 32); + case AMDGPU::sub1_hi16: + return SignExtend64<16>(Imm >> 48); + default: + return std::nullopt; + } + + llvm_unreachable("covered subregister switch"); +} + bool SIInstrInfo::foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, Register Reg, MachineRegisterInfo *MRI) const { if (!MRI->hasOneNonDBGUse(Reg)) @@ -3446,25 +3470,6 @@ bool SIInstrInfo::foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, if (!getConstValDefinedInReg(DefMI, Reg, Imm)) return false; - auto getImmFor = [=](const MachineOperand &UseOp) -> int64_t { - switch (UseOp.getSubReg()) { - default: - return Imm; - case AMDGPU::sub0: - return Lo_32(Imm); - case AMDGPU::sub1: - return Hi_32(Imm); - case AMDGPU::lo16: - return SignExtend64<16>(Imm); - case AMDGPU::hi16: - return SignExtend64<16>(Imm >> 16); - case AMDGPU::sub1_lo16: - return SignExtend64<16>(Imm >> 32); - case AMDGPU::sub1_hi16: - return SignExtend64<16>(Imm >> 48); - } - }; - assert(!DefMI.getOperand(0).getSubReg() && "Expected SSA form"); unsigned Opc = UseMI.getOpcode(); @@ -3480,7 +3485,11 @@ bool SIInstrInfo::foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, : AMDGPU::V_MOV_B32_e32 : Is64Bit ? AMDGPU::S_MOV_B64_IMM_PSEUDO : AMDGPU::S_MOV_B32; - APInt Imm(Is64Bit ? 64 : 32, getImmFor(UseMI.getOperand(1)), + + std::optional SubRegImm = + extractSubregFromImm(Imm, UseMI.getOperand(1).getSubReg()); + + APInt Imm(Is64Bit ? 64 : 32, *SubRegImm, /*isSigned=*/true, /*implicitTrunc=*/true); if (RI.isAGPR(*MRI, DstReg)) { @@ -3591,7 +3600,8 @@ bool SIInstrInfo::foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, if (NewOpc == AMDGPU::V_FMAMK_F16_fake16) return false; - const int64_t Imm = getImmFor(RegSrc == Src1 ? *Src0 : *Src1); + const std::optional SubRegImm = extractSubregFromImm( + Imm, RegSrc == Src1 ? Src0->getSubReg() : Src1->getSubReg()); // FIXME: This would be a lot easier if we could return a new instruction // instead of having to modify in place. 
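The lambda that foldImmediate used for this becomes the static helper extractSubregFromImm, which makes the slicing rule easy to state: a sub0/sub1 use reads a 32-bit half of the materialized constant, and the 16-bit subregisters read sign-extended 16-bit slices. A freestanding sketch of the same arithmetic (names and the Shift/Bits parameterization are illustrative):

```cpp
#include <cstdint>
#include <optional>

// Slice a 64-bit materialized immediate the way a subregister use would:
// sub0/sub1 read 32-bit halves (Lo_32/Hi_32), while the 16-bit
// subregisters read sign-extended slices (SignExtend64<16>).
std::optional<int64_t> sliceImm(int64_t Imm, unsigned Shift, unsigned Bits) {
  if (Bits == 32)
    return (int64_t)(uint32_t)(Imm >> Shift); // sub0 (Shift=0), sub1 (32)
  if (Bits == 16)
    return (int64_t)(int16_t)(Imm >> Shift);  // lo16, hi16, sub1_lo16, ...
  return std::nullopt;                        // unhandled subregister index
}
// e.g. sliceImm(K, 32, 32) matches Hi_32(K), and sliceImm(K, 16, 16)
// matches SignExtend64<16>(K >> 16).
```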
@@ -3608,7 +3618,7 @@ bool SIInstrInfo::foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
       UseMI.untieRegOperand(
           AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2));
 
-      Src1->ChangeToImmediate(Imm);
+      Src1->ChangeToImmediate(*SubRegImm);
 
       removeModOperands(UseMI);
       UseMI.setDesc(get(NewOpc));
@@ -3679,8 +3689,11 @@ bool SIInstrInfo::foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
       UseMI.untieRegOperand(
           AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2));
 
+      const std::optional SubRegImm =
+          extractSubregFromImm(Imm, Src2->getSubReg());
+
       // ChangingToImmediate adds Src2 back to the instruction.
-      Src2->ChangeToImmediate(getImmFor(*Src2));
+      Src2->ChangeToImmediate(*SubRegImm);
 
       // These come before src2.
       removeModOperands(UseMI);
@@ -5918,11 +5931,17 @@ bool SIInstrInfo::isOperandLegal(const MachineInstr &MI, unsigned OpIdx,
   if (!MO)
     MO = &MI.getOperand(OpIdx);
 
+  const MachineOperand *UsedLiteral = nullptr;
+
   int ConstantBusLimit = ST.getConstantBusLimit(MI.getOpcode());
   int LiteralLimit = !isVOP3(MI) || ST.hasVOP3Literal() ? 1 : 0;
   if (isVALU(MI) && usesConstantBus(MRI, *MO, OpInfo)) {
-    if (!MO->isReg() && !isInlineConstant(*MO, OpInfo) && !LiteralLimit--)
-      return false;
+    if (!MO->isReg() && !isInlineConstant(*MO, OpInfo)) {
+      if (!LiteralLimit--)
+        return false;
+
+      UsedLiteral = MO;
+    }
 
     SmallDenseSet SGPRsUsed;
     if (MO->isReg())
@@ -5943,6 +5962,12 @@ bool SIInstrInfo::isOperandLegal(const MachineInstr &MI, unsigned OpIdx,
       }
     } else if (AMDGPU::isSISrcOperand(InstDesc, i) &&
                !isInlineConstant(Op, InstDesc.operands()[i])) {
+      // The same literal may be used multiple times.
+      if (!UsedLiteral)
+        UsedLiteral = &Op;
+      else if (UsedLiteral->isIdenticalTo(Op))
+        continue;
+
       if (!LiteralLimit--)
         return false;
       if (--ConstantBusLimit <= 0)
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.h b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
index ddd15e1766f70..79ecc2a657ed0 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
@@ -401,6 +401,15 @@ class SIInstrInfo final : public AMDGPUGenInstrInfo {
 
   void removeModOperands(MachineInstr &MI) const;
 
+  /// Return the extracted immediate value in a subregister use from a constant
+  /// materialized in a super register.
+  ///
+  /// e.g. %imm = S_MOV_B64 K[0:63]
+  ///      USE %imm.sub1
+  /// This will return K[32:63]
+  static std::optional extractSubregFromImm(int64_t ImmVal,
+                                            unsigned SubRegIndex);
+
   bool foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, Register Reg,
                      MachineRegisterInfo *MRI) const final;
 
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.td b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
index bb78e77a9dc1a..e30e257da6873 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
@@ -913,7 +913,7 @@ class VGPRImm : PatLeaf
-  return CurDAG->getConstant(-N->getSExtValue(), SDLoc(N), MVT::i32);
+  return CurDAG->getSignedConstant(-N->getSExtValue(), SDLoc(N), MVT::i32);
 }]>;
 
 // TODO: When FP inline imm values work?
@@ -2483,8 +2483,15 @@ class getHasExt {
+    // This type of operand is only used in pseudo instructions helping
+    // code generation and thus doesn't need encoding and decoding methods.
+    // It also doesn't need to support AGPRs, because GFX908/A/40 do not
+    // support True16.
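Stepping back to the isOperandLegal change above: the UsedLiteral bookkeeping encodes a simple budget rule, at most one distinct 32-bit literal may be encoded, but reusing that same literal across several operands costs nothing extra. Extracted into a standalone predicate (an illustration of the rule, not the LLVM function):

```cpp
#include <cstdint>
#include <vector>

// True if the non-inline immediates fit the encoding budget of
// LiteralLimit distinct literals (typically 1); duplicates are free.
bool fitsLiteralBudget(const std::vector<int64_t> &NonInlineImms,
                       int LiteralLimit) {
  const int64_t *UsedLiteral = nullptr;
  for (const int64_t &Imm : NonInlineImms) {
    if (UsedLiteral && *UsedLiteral == Imm)
      continue;            // same literal again: no extra cost
    if (LiteralLimit-- <= 0)
      return false;        // another distinct literal will not encode
    UsedLiteral = &Imm;
  }
  return true;
}
// fitsLiteralBudget({0x1234, 0x1234}, 1) -> true
// fitsLiteralBudget({0x1234, 0x5678}, 1) -> false
```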
+ defvar VLdSt_16 = RegisterOperand; + RegisterOperand ret = - !cond(!eq(RC.Size, 32) : AVLdSt_32, + !cond(!eq(RC.Size, 16) : VLdSt_16, + !eq(RC.Size, 32) : AVLdSt_32, !eq(RC.Size, 64) : AVLdSt_64, !eq(RC.Size, 96) : AVLdSt_96, !eq(RC.Size, 128) : AVLdSt_128, diff --git a/llvm/lib/Target/AMDGPU/SIProgramInfo.cpp b/llvm/lib/Target/AMDGPU/SIProgramInfo.cpp index 5179288084010..b4d740422b94a 100644 --- a/llvm/lib/Target/AMDGPU/SIProgramInfo.cpp +++ b/llvm/lib/Target/AMDGPU/SIProgramInfo.cpp @@ -212,11 +212,16 @@ uint64_t SIProgramInfo::getFunctionCodeSize(const MachineFunction &MF) { uint64_t CodeSize = 0; for (const MachineBasicBlock &MBB : MF) { + // The amount of padding to align code can be both underestimated and + // overestimated. In case of inline asm used getInstSizeInBytes() will + // return a maximum size of a single instruction, where the real size may + // differ. At this point CodeSize may be already off. + CodeSize = alignTo(CodeSize, MBB.getAlignment()); + for (const MachineInstr &MI : MBB) { // TODO: CodeSize should account for multiple functions. - // TODO: Should we count size of debug info? - if (MI.isDebugInstr()) + if (MI.isMetaInstruction()) continue; CodeSize += TII->getInstSizeInBytes(MI); diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp index 59afcbed35294..c521d0dd3ad2d 100644 --- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp +++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp @@ -430,6 +430,7 @@ struct FP4FP8DstByteSelInfo { #define GET_VOPDPairs_IMPL #define GET_VOPTrue16Table_DECL #define GET_VOPTrue16Table_IMPL +#define GET_True16D16Table_IMPL #define GET_WMMAOpcode2AddrMappingTable_DECL #define GET_WMMAOpcode2AddrMappingTable_IMPL #define GET_WMMAOpcode3AddrMappingTable_DECL diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h index e458b6b9604b6..103993e6435de 100644 --- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h +++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h @@ -113,6 +113,12 @@ struct CvtScaleF32_F32F16ToF8F4_Info { unsigned Opcode; }; +struct True16D16Info { + unsigned T16Op; + unsigned HiOp; + unsigned LoOp; +}; + #define GET_MIMGBaseOpcode_DECL #define GET_MIMGDim_DECL #define GET_MIMGEncoding_DECL @@ -123,6 +129,7 @@ struct CvtScaleF32_F32F16ToF8F4_Info { #define GET_MAIInstInfoTable_DECL #define GET_isMFMA_F8F6F4Table_DECL #define GET_isCvtScaleF32_F32F16ToF8F4Table_DECL +#define GET_True16D16Table_DECL #include "AMDGPUGenSearchableTables.inc" namespace IsaInfo { diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp index 2bac1d0086041..eb1491feb611e 100644 --- a/llvm/lib/Target/ARM/ARMISelLowering.cpp +++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp @@ -20786,9 +20786,9 @@ ARMTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const Chain = SP.getValue(1); SP = DAG.getNode(ISD::SUB, DL, MVT::i32, SP, Size); if (Align) - SP = - DAG.getNode(ISD::AND, DL, MVT::i32, SP.getValue(0), - DAG.getConstant(-(uint64_t)Align->value(), DL, MVT::i32)); + SP = DAG.getNode( + ISD::AND, DL, MVT::i32, SP.getValue(0), + DAG.getSignedConstant(-(uint64_t)Align->value(), DL, MVT::i32)); Chain = DAG.getCopyToReg(Chain, DL, ARM::SP, SP); SDValue Ops[2] = { SP, Chain }; return DAG.getMergeValues(Ops, DL); diff --git a/llvm/lib/Target/DirectX/DXILRootSignature.cpp b/llvm/lib/Target/DirectX/DXILRootSignature.cpp index 0fecbd698bc5f..fd390cdbf9057 100644 --- 
a/llvm/lib/Target/DirectX/DXILRootSignature.cpp +++ b/llvm/lib/Target/DirectX/DXILRootSignature.cpp @@ -150,11 +150,17 @@ analyzeModule(Module &M) { continue; } - MDNode *RootElementListNode = - dyn_cast(RSDefNode->getOperand(1).get()); + Metadata *RootElementListOperand = RSDefNode->getOperand(1).get(); + if (RootElementListOperand == nullptr) { + reportError(Ctx, "Root Element mdnode is null."); + continue; + } + + MDNode *RootElementListNode = dyn_cast(RootElementListOperand); if (RootElementListNode == nullptr) { - reportError(Ctx, "Missing Root Element List Metadata node."); + reportError(Ctx, "Root Element is not a metadata node."); + continue; } mcdxbc::RootSignatureDesc RSD; diff --git a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp index 10db4f552cdcf..c0baf301e0624 100644 --- a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp +++ b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp @@ -617,7 +617,8 @@ void HexagonDAGToDAGISel::SelectSHL(SDNode *N) { if (ConstantSDNode *C2 = dyn_cast(Shl2_1)) { int32_t ValConst = 1 << (ShlConst + C2->getSExtValue()); if (isInt<9>(-ValConst)) { - SDValue Val = CurDAG->getTargetConstant(-ValConst, dl, MVT::i32); + SDValue Val = + CurDAG->getSignedTargetConstant(-ValConst, dl, MVT::i32); SDNode *Result = CurDAG->getMachineNode(Hexagon::M2_mpysmi, dl, MVT::i32, Shl2_0, Val); ReplaceNode(N, Result); diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp index b31360b4096da..d66e3e306d2ff 100644 --- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp +++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp @@ -3491,7 +3491,7 @@ HexagonTargetLowering::PerformDAGCombine(SDNode *N, SDValue P = Op.getOperand(0); switch (P.getOpcode()) { case HexagonISD::PTRUE: - return DCI.DAG.getConstant(-1, dl, ty(Op)); + return DCI.DAG.getAllOnesConstant(dl, ty(Op)); case HexagonISD::PFALSE: return getZero(dl, ty(Op), DCI.DAG); default: diff --git a/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp b/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp index 4e4467c76aff5..0760d712f9afd 100644 --- a/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp +++ b/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp @@ -1174,8 +1174,8 @@ auto AlignVectors::realignLoadGroup(IRBuilderBase &Builder, for (const ByteSpan::Block &B : VSpan) { ByteSpan ASection = ASpan.section(B.Pos, B.Seg.Size); for (const ByteSpan::Block &S : ASection) { - EarliestUser[S.Seg.Val] = std::min( - EarliestUser[S.Seg.Val], earliestUser(B.Seg.Val->uses()), isEarlier); + auto &EU = EarliestUser[S.Seg.Val]; + EU = std::min(EU, earliestUser(B.Seg.Val->uses()), isEarlier); } } diff --git a/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp b/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp index d34f45fcac008..e42e738b9973f 100644 --- a/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp +++ b/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp @@ -84,7 +84,7 @@ void NVPTXInstPrinter::printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O) { const MCOperand &Op = MI->getOperand(OpNo); if (Op.isReg()) { - unsigned Reg = Op.getReg(); + MCRegister Reg = Op.getReg(); printRegName(O, Reg); } else if (Op.isImm()) { markup(O, Markup::Immediate) << formatImm(Op.getImm()); diff --git a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCInstPrinter.cpp b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCInstPrinter.cpp index 7511e24f705c1..74404822757ed 100644 --- a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCInstPrinter.cpp +++ 
b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCInstPrinter.cpp @@ -492,9 +492,9 @@ void PPCInstPrinter::printAbsBranchOperand(const MCInst *MI, unsigned OpNo, void PPCInstPrinter::printcrbitm(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI, raw_ostream &O) { - unsigned CCReg = MI->getOperand(OpNo).getReg(); + MCRegister CCReg = MI->getOperand(OpNo).getReg(); unsigned RegNo; - switch (CCReg) { + switch (CCReg.id()) { default: llvm_unreachable("Unknown CR register"); case PPC::CR0: RegNo = 0; break; case PPC::CR1: RegNo = 1; break; @@ -648,7 +648,7 @@ void PPCInstPrinter::printOperand(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI, raw_ostream &O) { const MCOperand &Op = MI->getOperand(OpNo); if (Op.isReg()) { - unsigned Reg = Op.getReg(); + MCRegister Reg = Op.getReg(); if (!ShowVSRNumsAsVR) Reg = PPC::getRegNumForOperand(MII.get(MI->getOpcode()), Reg, OpNo); diff --git a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp index cef3b7c851c79..2539c6e49686a 100644 --- a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp +++ b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp @@ -488,7 +488,7 @@ getMachineOpValue(const MCInst &MI, const MCOperand &MO, MI.getOpcode() != PPC::MFOCRF && MI.getOpcode() != PPC::MFOCRF8) || MO.getReg() < PPC::CR0 || MO.getReg() > PPC::CR7); unsigned OpNo = getOpIdxForMO(MI, MO); - unsigned Reg = + MCRegister Reg = PPC::getRegNumForOperand(MCII.get(MI.getOpcode()), MO.getReg(), OpNo); return CTX.getRegisterInfo()->getEncodingValue(Reg); } diff --git a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp index 61be6abaacd2f..0a0facb10e48a 100644 --- a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp +++ b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp @@ -117,8 +117,8 @@ const char *PPC::stripRegisterPrefix(const char *RegName) { /// The operand number argument will be useful when we need to extend this /// to instructions that use both Altivec and VSX numbering (for different /// operands). -unsigned PPC::getRegNumForOperand(const MCInstrDesc &Desc, unsigned Reg, - unsigned OpNo) { +MCRegister PPC::getRegNumForOperand(const MCInstrDesc &Desc, MCRegister Reg, + unsigned OpNo) { int16_t regClass = Desc.operands()[OpNo].RegClass; switch (regClass) { // We store F0-F31, VF0-VF31 in MCOperand and it should be F0-F31, diff --git a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.h b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.h index 579ee5e8facb6..d6744014949ce 100644 --- a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.h +++ b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.h @@ -47,8 +47,8 @@ const char *stripRegisterPrefix(const char *RegName); /// The operand number argument will be useful when we need to extend this /// to instructions that use both Altivec and VSX numbering (for different /// operands). 
-unsigned getRegNumForOperand(const MCInstrDesc &Desc, unsigned Reg, - unsigned OpNo); +MCRegister getRegNumForOperand(const MCInstrDesc &Desc, MCRegister Reg, + unsigned OpNo); } // namespace PPC diff --git a/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp b/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp index 5784fe43879fe..ae42da6ea6e42 100644 --- a/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp +++ b/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp @@ -984,7 +984,7 @@ void PPCAsmPrinter::emitInstruction(const MachineInstr *MI) { // Get the offset from the GOT Base Register to the GOT LowerPPCMachineInstrToMCInst(MI, TmpInst, *this); if (Subtarget->isSecurePlt() && isPositionIndependent() ) { - unsigned PICR = TmpInst.getOperand(0).getReg(); + MCRegister PICR = TmpInst.getOperand(0).getReg(); MCSymbol *BaseSymbol = OutContext.getOrCreateSymbol( M->getPICLevel() == PICLevel::SmallPIC ? "_GLOBAL_OFFSET_TABLE_" : ".LTOC"); diff --git a/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp b/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp index ba775c4a679d0..f88af657c8ad5 100644 --- a/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp +++ b/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp @@ -267,7 +267,7 @@ static bool hasNonRISpills(const MachineFunction &MF) { /// MustSaveLR - Return true if this function requires that we save the LR /// register onto the stack in the prolog and restore it in the epilog of the /// function. -static bool MustSaveLR(const MachineFunction &MF, unsigned LR) { +static bool MustSaveLR(const MachineFunction &MF, MCRegister LR) { const PPCFunctionInfo *MFI = MF.getInfo(); // We need a save/restore of LR if there is any def of LR (which is @@ -311,7 +311,7 @@ PPCFrameLowering::determineFrameLayout(const MachineFunction &MF, const PPCRegisterInfo *RegInfo = Subtarget.getRegisterInfo(); - unsigned LR = RegInfo->getRARegister(); + MCRegister LR = RegInfo->getRARegister(); bool DisableRedZone = MF.getFunction().hasFnAttribute(Attribute::NoRedZone); bool CanUseRedZone = !MFI.hasVarSizedObjects() && // No dynamic alloca. !MFI.adjustsStack() && // No calls. @@ -1987,7 +1987,7 @@ void PPCFrameLowering::determineCalleeSaves(MachineFunction &MF, // Save and clear the LR state. PPCFunctionInfo *FI = MF.getInfo(); - unsigned LR = RegInfo->getRARegister(); + MCRegister LR = RegInfo->getRARegister(); FI->setMustSaveLR(MustSaveLR(MF, LR)); SavedRegs.reset(LR); @@ -2344,8 +2344,8 @@ bool PPCFrameLowering::assignCalleeSavedSpillSlots( for (auto &CalleeSaveReg : CSI) { MCPhysReg Reg = CalleeSaveReg.getReg(); - MCPhysReg Lower = RegInfo->getSubReg(Reg, 1); - MCPhysReg Higher = RegInfo->getSubReg(Reg, 2); + MCRegister Lower = RegInfo->getSubReg(Reg, PPC::sub_32); + MCRegister Higher = RegInfo->getSubReg(Reg, PPC::sub_32_hi_phony); if ( // Check only for SuperRegs. 
Lower && diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp index 767d1ded8de3a..4720928f472b3 100644 --- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp +++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp @@ -8883,8 +8883,8 @@ SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op, Round = DAG.getNode(ISD::ADD, dl, MVT::i64, Round, DAG.getConstant(2047, dl, MVT::i64)); Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT); - Round = DAG.getNode(ISD::AND, dl, MVT::i64, - Round, DAG.getConstant(-2048, dl, MVT::i64)); + Round = DAG.getNode(ISD::AND, dl, MVT::i64, Round, + DAG.getSignedConstant(-2048, dl, MVT::i64)); // However, we cannot use that value unconditionally: if the magnitude // of the input value is small, the bit-twiddling we did above might @@ -9244,7 +9244,7 @@ SDValue PPCTargetLowering::LowerGET_ROUNDING(SDValue Op, SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const { EVT VT = Op.getValueType(); - unsigned BitWidth = VT.getSizeInBits(); + uint64_t BitWidth = VT.getSizeInBits(); SDLoc dl(Op); assert(Op.getNumOperands() == 3 && VT == Op.getOperand(1).getValueType() && @@ -9263,7 +9263,7 @@ SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const { SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1); SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3); SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, - DAG.getConstant(-BitWidth, dl, AmtVT)); + DAG.getSignedConstant(-BitWidth, dl, AmtVT)); SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5); SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt); @@ -9274,7 +9274,7 @@ SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const { SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const { EVT VT = Op.getValueType(); SDLoc dl(Op); - unsigned BitWidth = VT.getSizeInBits(); + uint64_t BitWidth = VT.getSizeInBits(); assert(Op.getNumOperands() == 3 && VT == Op.getOperand(1).getValueType() && "Unexpected SRL!"); @@ -9292,7 +9292,7 @@ SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const { SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, - DAG.getConstant(-BitWidth, dl, AmtVT)); + DAG.getSignedConstant(-BitWidth, dl, AmtVT)); SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5); SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt); @@ -9303,7 +9303,7 @@ SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const { SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const { SDLoc dl(Op); EVT VT = Op.getValueType(); - unsigned BitWidth = VT.getSizeInBits(); + uint64_t BitWidth = VT.getSizeInBits(); assert(Op.getNumOperands() == 3 && VT == Op.getOperand(1).getValueType() && "Unexpected SRA!"); @@ -9320,7 +9320,7 @@ SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const { SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, - DAG.getConstant(-BitWidth, dl, AmtVT)); + DAG.getSignedConstant(-BitWidth, dl, AmtVT)); SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5); SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt); SDValue OutLo 
= DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, dl, AmtVT), @@ -18308,7 +18308,7 @@ static SDValue combineADDToADDZE(SDNode *N, SelectionDAG &DAG, SDValue AddOrZ = NegConstant != 0 ? Add : Z; SDValue Addc = DAG.getNode(ISD::UADDO_CARRY, DL, DAG.getVTList(MVT::i64, CarryType), - AddOrZ, DAG.getConstant(-1ULL, DL, MVT::i64), + AddOrZ, DAG.getAllOnesConstant(DL, MVT::i64), DAG.getConstant(0, DL, CarryType)); return DAG.getNode(ISD::UADDO_CARRY, DL, VTs, LHS, DAG.getConstant(0, DL, MVT::i64), diff --git a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp index ac87d72b7595c..6d4466b7abf53 100644 --- a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp +++ b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp @@ -2311,7 +2311,7 @@ ParseStatus RISCVAsmParser::parseVTypeI(OperandVector &Operands) { } if (getLexer().is(AsmToken::EndOfStatement) && State == VTypeState_Done) { - RISCVII::VLMUL VLMUL = RISCVVType::encodeLMUL(Lmul, Fractional); + RISCVVType::VLMUL VLMUL = RISCVVType::encodeLMUL(Lmul, Fractional); if (Fractional) { unsigned ELEN = STI->hasFeature(RISCV::FeatureStdExtZve64x) ? 64 : 32; unsigned MaxSEW = ELEN / Lmul; diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp index 6f0645965d737..56b1639143d8b 100644 --- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp +++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp @@ -1120,7 +1120,7 @@ bool RISCVLegalizerInfo::legalizeExtractSubvector(MachineInstr &MI, // divide exactly. assert( RISCVVType::decodeVLMUL(RISCVTargetLowering::getLMUL(LitTyMVT)).second || - RISCVTargetLowering::getLMUL(LitTyMVT) == RISCVII::VLMUL::LMUL_1); + RISCVTargetLowering::getLMUL(LitTyMVT) == RISCVVType::LMUL_1); // If the vector type is an LMUL-group type, extract a subvector equal to the // nearest full vector register type. @@ -1143,7 +1143,7 @@ bool RISCVLegalizerInfo::legalizeExtractSubvector(MachineInstr &MI, const LLT XLenTy(STI.getXLenVT()); auto SlidedownAmt = MIB.buildVScale(XLenTy, RemIdx); auto [Mask, VL] = buildDefaultVLOps(LitTy, MIB, MRI); - uint64_t Policy = RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC; + uint64_t Policy = RISCVVType::TAIL_AGNOSTIC | RISCVVType::MASK_AGNOSTIC; auto Slidedown = MIB.buildInstr( RISCV::G_VSLIDEDOWN_VL, {InterLitTy}, {MIB.buildUndef(InterLitTy), Vec, SlidedownAmt, Mask, VL, Policy}); @@ -1265,10 +1265,10 @@ bool RISCVLegalizerInfo::legalizeInsertSubvector(MachineInstr &MI, // Use tail agnostic policy if we're inserting over InterLitTy's tail. 
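// Hedged aside, not from the patch itself: the RISCVVType policy constants
// used above are single-bit flags (TAIL_UNDISTURBED_MASK_UNDISTURBED == 0,
// TAIL_AGNOSTIC == 1, MASK_AGNOSTIC == 2), so a policy operand is built by
// OR-ing them together; the conditions below are illustrative names only:
//
//   uint64_t Policy = RISCVVType::TAIL_UNDISTURBED_MASK_UNDISTURBED; // TUMU
//   if (TailLanesAreDead)
//     Policy |= RISCVVType::TAIL_AGNOSTIC;
//   if (MaskedOffLanesAreDead)
//     Policy |= RISCVVType::MASK_AGNOSTIC;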
ElementCount EndIndex = ElementCount::getScalable(RemIdx) + LitTy.getElementCount();
- uint64_t Policy = RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED;
+ uint64_t Policy = RISCVVType::TAIL_UNDISTURBED_MASK_UNDISTURBED;
if (STI.expandVScale(EndIndex) == STI.expandVScale(InterLitTy.getElementCount()))
- Policy = RISCVII::TAIL_AGNOSTIC;
+ Policy = RISCVVType::TAIL_AGNOSTIC;
Inserted = MIB.buildInstr(RISCV::G_VSLIDEUP_VL, {InsertedDst},
diff --git a/llvm/lib/Target/RISCV/MCA/RISCVCustomBehaviour.cpp b/llvm/lib/Target/RISCV/MCA/RISCVCustomBehaviour.cpp index fb0dc482e6081..0881de90700ab 100644 --- a/llvm/lib/Target/RISCV/MCA/RISCVCustomBehaviour.cpp +++ b/llvm/lib/Target/RISCV/MCA/RISCVCustomBehaviour.cpp
@@ -107,32 +107,32 @@ RISCVInstrumentManager::createInstruments(const MCInst &Inst) { LLVM_DEBUG(dbgs() << "RVCB: Found VSETVLI and creating instrument for it: " << Inst << "\n"); unsigned VTypeI = Inst.getOperand(2).getImm();
- RISCVII::VLMUL VLMUL = RISCVVType::getVLMUL(VTypeI);
+ RISCVVType::VLMUL VLMUL = RISCVVType::getVLMUL(VTypeI);
StringRef LMUL; switch (VLMUL) {
- case RISCVII::LMUL_1: + case RISCVVType::LMUL_1: LMUL = "M1"; break;
- case RISCVII::LMUL_2: + case RISCVVType::LMUL_2: LMUL = "M2"; break;
- case RISCVII::LMUL_4: + case RISCVVType::LMUL_4: LMUL = "M4"; break;
- case RISCVII::LMUL_8: + case RISCVVType::LMUL_8: LMUL = "M8"; break;
- case RISCVII::LMUL_F2: + case RISCVVType::LMUL_F2: LMUL = "MF2"; break;
- case RISCVII::LMUL_F4: + case RISCVVType::LMUL_F4: LMUL = "MF4"; break;
- case RISCVII::LMUL_F8: + case RISCVVType::LMUL_F8: LMUL = "MF8"; break;
- case RISCVII::LMUL_RESERVED: + case RISCVVType::LMUL_RESERVED: llvm_unreachable("Cannot create instrument for LMUL_RESERVED"); }
SmallVector<UniqueInstrument> Instruments;
@@ -166,7 +166,7 @@ RISCVInstrumentManager::createInstruments(const MCInst &Inst) { } static std::pair<uint8_t, uint8_t>
-getEEWAndEMUL(unsigned Opcode, RISCVII::VLMUL LMUL, uint8_t SEW) {
+getEEWAndEMUL(unsigned Opcode, RISCVVType::VLMUL LMUL, uint8_t SEW) {
uint8_t EEW; switch (Opcode) { case RISCV::VLM_V:
@@ -249,7 +249,7 @@ unsigned RISCVInstrumentManager::getSchedClassID( const RISCVVInversePseudosTable::PseudoInfo *RVV = nullptr; if (opcodeHasEEWAndEMULInfo(Opcode)) {
- RISCVII::VLMUL VLMUL = static_cast<RISCVII::VLMUL>(LMUL);
+ RISCVVType::VLMUL VLMUL = static_cast<RISCVVType::VLMUL>(LMUL);
auto [EEW, EMUL] = getEEWAndEMUL(Opcode, VLMUL, SEW); RVV = RISCVVInversePseudosTable::getBaseInfo(Opcode, EMUL, EEW); } else {
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h index 2f4b569041a6f..58eb48ed613df 100644 --- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h +++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
@@ -65,13 +65,9 @@ enum { VLMulShift = ConstraintShift + 3, VLMulMask = 0b111 << VLMulShift,
- // Force a tail agnostic policy even this instruction has a tied destination.
- ForceTailAgnosticShift = VLMulShift + 3,
- ForceTailAgnosticMask = 1 << ForceTailAgnosticShift,
-
// Is this a _TIED vector pseudo instruction. For these instructions we // shouldn't skip the tied operand when converting to MC instructions.
- IsTiedPseudoShift = ForceTailAgnosticShift + 1,
+ IsTiedPseudoShift = VLMulShift + 3,
IsTiedPseudoMask = 1 << IsTiedPseudoShift,
// Does this instruction have a SEW operand. It will be the last explicit
@@ -145,12 +141,8 @@ static inline unsigned getFormat(uint64_t TSFlags) { return (TSFlags & InstFormatMask) >> InstFormatShift; }
/// \returns the LMUL for the instruction.
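// Hedged sketch, not in the patch: since deleting ForceTailAgnostic repacks
// every later TSFlags field one bit lower (IsTiedPseudoShift becomes
// VLMulShift + 3), a compile-time guard along these lines could catch an
// accidental overlap during this kind of renumbering:
//
//   static_assert((VLMulMask & IsTiedPseudoMask) == 0,
//                 "TSFlags fields must not overlap after repacking");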
-static inline VLMUL getLMul(uint64_t TSFlags) {
- return static_cast<VLMUL>((TSFlags & VLMulMask) >> VLMulShift);
-}
-/// \returns true if tail agnostic is enforced for the instruction.
-static inline bool doesForceTailAgnostic(uint64_t TSFlags) {
- return TSFlags & ForceTailAgnosticMask;
+static inline RISCVVType::VLMUL getLMul(uint64_t TSFlags) {
+ return static_cast<RISCVVType::VLMUL>((TSFlags & VLMulMask) >> VLMulShift);
}
/// \returns true if this is a _TIED pseudo. static inline bool isTiedPseudo(uint64_t TSFlags) {
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.cpp index d5254719b3839..a4a40862a67c6 100644 --- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.cpp +++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.cpp
@@ -210,7 +210,7 @@ void RISCVInstPrinter::printVTypeI(const MCInst *MI, unsigned OpNo, unsigned Imm = MI->getOperand(OpNo).getImm(); // Print the raw immediate for reserved values: vlmul[2:0]=4, vsew[2:0]=0b1xx, // or non-zero in bits 8 and above.
- if (RISCVVType::getVLMUL(Imm) == RISCVII::VLMUL::LMUL_RESERVED ||
+ if (RISCVVType::getVLMUL(Imm) == RISCVVType::VLMUL::LMUL_RESERVED ||
RISCVVType::getSEW(Imm) > 64 || (Imm >> 8) != 0) { O << formatImm(Imm); return;
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp index fb2c5c62ef871..7ea4bd94c0065 100644 --- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -279,7 +279,7 @@ void RISCVDAGToDAGISel::addVectorLoadStoreOperands( // none of the others do. All have passthru operands. For our pseudos, // all loads have policy operands. if (IsLoad) {
- uint64_t Policy = RISCVII::MASK_AGNOSTIC;
+ uint64_t Policy = RISCVVType::MASK_AGNOSTIC;
if (IsMasked) Policy = Node->getConstantOperandVal(CurOp++); SDValue PolicyOp = CurDAG->getTargetConstant(Policy, DL, XLenVT);
@@ -294,7 +294,7 @@ void RISCVDAGToDAGISel::selectVLSEG(SDNode *Node, unsigned NF, bool IsMasked, SDLoc DL(Node); MVT VT = Node->getSimpleValueType(0); unsigned Log2SEW = Node->getConstantOperandVal(Node->getNumOperands() - 1);
- RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+ RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
unsigned CurOp = 2; SmallVector<SDValue, 8> Operands;
@@ -324,7 +324,7 @@ void RISCVDAGToDAGISel::selectVLSEGFF(SDNode *Node, unsigned NF, MVT VT = Node->getSimpleValueType(0); MVT XLenVT = Subtarget->getXLenVT(); unsigned Log2SEW = Node->getConstantOperandVal(Node->getNumOperands() - 1);
- RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+ RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
unsigned CurOp = 2; SmallVector<SDValue, 8> Operands;
@@ -355,7 +355,7 @@ void RISCVDAGToDAGISel::selectVLXSEG(SDNode *Node, unsigned NF, bool IsMasked, SDLoc DL(Node); MVT VT = Node->getSimpleValueType(0); unsigned Log2SEW = Node->getConstantOperandVal(Node->getNumOperands() - 1);
- RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+ RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
unsigned CurOp = 2; SmallVector<SDValue, 8> Operands;
@@ -379,7 +379,7 @@ void RISCVDAGToDAGISel::selectVLXSEG(SDNode *Node, unsigned NF, bool IsMasked, "Element count mismatch"); #endif
- RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
+ RISCVVType::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits()); if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) { report_fatal_error("The V extension does not support EEW=64 for index "
@@ -404,7 +404,7 @@ void RISCVDAGToDAGISel::selectVSSEG(SDNode *Node, unsigned NF, bool IsMasked, SDLoc DL(Node); MVT VT = Node->getOperand(2)->getSimpleValueType(0); unsigned Log2SEW = Node->getConstantOperandVal(Node->getNumOperands() - 1);
- RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+ RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
unsigned CurOp = 2; SmallVector<SDValue, 8> Operands;
@@ -430,7 +430,7 @@ void RISCVDAGToDAGISel::selectVSXSEG(SDNode *Node, unsigned NF, bool IsMasked, SDLoc DL(Node); MVT VT = Node->getOperand(2)->getSimpleValueType(0); unsigned Log2SEW = Node->getConstantOperandVal(Node->getNumOperands() - 1);
- RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+ RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
unsigned CurOp = 2; SmallVector<SDValue, 8> Operands;
@@ -454,7 +454,7 @@ void RISCVDAGToDAGISel::selectVSXSEG(SDNode *Node, unsigned NF, bool IsMasked, "Element count mismatch"); #endif
- RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
+ RISCVVType::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits()); if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) { report_fatal_error("The V extension does not support EEW=64 for index "
@@ -495,7 +495,7 @@ void RISCVDAGToDAGISel::selectVSETVLI(SDNode *Node) { unsigned SEW = RISCVVType::decodeVSEW(Node->getConstantOperandVal(Offset) & 0x7);
- RISCVII::VLMUL VLMul = static_cast<RISCVII::VLMUL>(
+ RISCVVType::VLMUL VLMul = static_cast<RISCVVType::VLMUL>(
Node->getConstantOperandVal(Offset + 1) & 0x7); unsigned VTypeI = RISCVVType::encodeVTYPE(VLMul, SEW, /*TailAgnostic*/ true,
@@ -1672,7 +1672,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) { default: llvm_unreachable("Unexpected LMUL!"); #define CASE_VMSLT_OPCODES(lmulenum, suffix) \
- case RISCVII::VLMUL::lmulenum: \
+ case RISCVVType::lmulenum: \
VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix \ : RISCV::PseudoVMSLT_VX_##suffix; \ VMSGTOpcode = IsUnsigned ? RISCV::PseudoVMSGTU_VX_##suffix \
@@ -1692,7 +1692,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) { default: llvm_unreachable("Unexpected LMUL!"); #define CASE_VMNAND_VMSET_OPCODES(lmulenum, suffix) \
- case RISCVII::VLMUL::lmulenum: \
+ case RISCVVType::lmulenum: \
VMNANDOpcode = RISCV::PseudoVMNAND_MM_##suffix; \ VMSetOpcode = RISCV::PseudoVMSET_M_##suffix; \ break;
@@ -1768,7 +1768,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) { default: llvm_unreachable("Unexpected LMUL!"); #define CASE_VMSLT_OPCODES(lmulenum, suffix) \
- case RISCVII::VLMUL::lmulenum: \
+ case RISCVVType::lmulenum: \
VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix \ : RISCV::PseudoVMSLT_VX_##suffix; \ VMSLTMaskOpcode = IsUnsigned ?
RISCV::PseudoVMSLTU_VX_##suffix##_MASK \
@@ -1790,7 +1790,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) { default: llvm_unreachable("Unexpected LMUL!"); #define CASE_VMXOR_VMANDN_VMOR_OPCODES(lmulenum, suffix) \
- case RISCVII::VLMUL::lmulenum: \
+ case RISCVVType::lmulenum: \
VMXOROpcode = RISCV::PseudoVMXOR_MM_##suffix; \ VMANDNOpcode = RISCV::PseudoVMANDN_MM_##suffix; \ VMOROpcode = RISCV::PseudoVMOR_MM_##suffix; \ break;
@@ -1838,13 +1838,16 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) { return; }
+ SDValue PolicyOp =
+ CurDAG->getTargetConstant(RISCVVType::TAIL_AGNOSTIC, DL, XLenVT);
+
if (IsCmpConstant) { SDValue Imm = selectImm(CurDAG, SDLoc(Src2), XLenVT, CVal - 1, *Subtarget); ReplaceNode(Node, CurDAG->getMachineNode( VMSGTMaskOpcode, DL, VT,
- {MaskedOff, Src1, Imm, Mask, VL, SEW}));
+ {MaskedOff, Src1, Imm, Mask, VL, SEW, PolicyOp}));
return; }
@@ -1853,10 +1856,10 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) { // The result is mask undisturbed. // We use the same instructions to emulate mask agnostic behavior, because // the agnostic result can be either undisturbed or all 1.
- SDValue Cmp = SDValue(
- CurDAG->getMachineNode(VMSLTMaskOpcode, DL, VT,
- {MaskedOff, Src1, Src2, Mask, VL, SEW}),
- 0);
+ SDValue Cmp = SDValue(CurDAG->getMachineNode(VMSLTMaskOpcode, DL, VT,
+ {MaskedOff, Src1, Src2, Mask,
+ VL, SEW, PolicyOp}),
+ 0);
// vmxor.mm vd, vd, v0 is used to update active value. ReplaceNode(Node, CurDAG->getMachineNode(VMXOROpcode, DL, VT, {Cmp, Mask, VL, MaskSEW}));
@@ -2002,8 +2005,8 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) { assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() && "Element count mismatch");
- RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
- RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
+ RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+ RISCVVType::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits()); if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) { report_fatal_error("The V extension does not support EEW=64 for index "
@@ -2055,7 +2058,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) { addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided, Operands, /*IsLoad=*/true);
- RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+ RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
const RISCV::VLEPseudo *P = RISCV::getVLEPseudo(IsMasked, IsStrided, /*FF*/ false, Log2SEW, static_cast<unsigned>(LMUL));
@@ -2082,7 +2085,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) { /*IsStridedOrIndexed*/ false, Operands, /*IsLoad=*/true);
- RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+ RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
const RISCV::VLEPseudo *P = RISCV::getVLEPseudo(IsMasked, /*Strided*/ false, /*FF*/ true, Log2SEW, static_cast<unsigned>(LMUL));
@@ -2208,8 +2211,8 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) { assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() && "Element count mismatch");
- RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
- RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
+ RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+ RISCVVType::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits()); if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) { report_fatal_error("The V extension does not support EEW=64 for index "
@@ -2247,7 +2250,7 @@ void
RISCVDAGToDAGISel::Select(SDNode *Node) { addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided, Operands);
- RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+ RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
const RISCV::VSEPseudo *P = RISCV::getVSEPseudo( IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL)); MachineSDNode *Store =
@@ -2314,11 +2317,12 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) { if (Idx != 0) break;
- RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecContainerVT);
+ RISCVVType::VLMUL SubVecLMUL =
+ RISCVTargetLowering::getLMUL(SubVecContainerVT);
[[maybe_unused]] bool IsSubVecPartReg =
- SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
- SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
- SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
+ SubVecLMUL == RISCVVType::VLMUL::LMUL_F2 ||
+ SubVecLMUL == RISCVVType::VLMUL::LMUL_F4 ||
+ SubVecLMUL == RISCVVType::VLMUL::LMUL_F8;
assert((V.getValueType().isRISCVVectorTuple() || !IsSubVecPartReg || V.isUndef()) && "Expecting lowering to have created legal INSERT_SUBVECTORs when "
@@ -2439,11 +2443,11 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) { Ld->getBasePtr()}; if (IsStrided) Operands.push_back(CurDAG->getRegister(RISCV::X0, XLenVT));
- uint64_t Policy = RISCVII::MASK_AGNOSTIC | RISCVII::TAIL_AGNOSTIC;
+ uint64_t Policy = RISCVVType::MASK_AGNOSTIC | RISCVVType::TAIL_AGNOSTIC;
SDValue PolicyOp = CurDAG->getTargetConstant(Policy, DL, XLenVT); Operands.append({VL, SEW, PolicyOp, Ld->getChain()});
- RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+ RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
const RISCV::VLEPseudo *P = RISCV::getVLEPseudo( /*IsMasked*/ false, IsStrided, /*FF*/ false, Log2SEW, static_cast<unsigned>(LMUL));
@@ -3792,9 +3796,9 @@ bool RISCVDAGToDAGISel::doPeepholeMaskedRVV(MachineSDNode *N) { const MCInstrDesc &MaskedMCID = TII->get(N->getMachineOpcode()); const bool MaskedHasPassthru = RISCVII::isFirstDefTiedToFirstUse(MaskedMCID);
- assert(RISCVII::hasVecPolicyOp(MaskedMCID.TSFlags) ==
- RISCVII::hasVecPolicyOp(MCID.TSFlags) &&
- "Masked and unmasked pseudos are inconsistent");
+ assert((RISCVII::hasVecPolicyOp(MaskedMCID.TSFlags) ||
+ !RISCVII::hasVecPolicyOp(MCID.TSFlags)) &&
+ "Unmasked pseudo has policy but masked pseudo doesn't?");
assert(RISCVII::hasVecPolicyOp(MCID.TSFlags) == HasPassthru && "Unexpected pseudo structure"); assert(!(HasPassthru && !MaskedHasPassthru) &&
@@ -3803,11 +3807,18 @@ bool RISCVDAGToDAGISel::doPeepholeMaskedRVV(MachineSDNode *N) { SmallVector<SDValue, 8> Ops; // Skip the passthru operand at index 0 if the unmasked pseudo doesn't have one. bool ShouldSkip = !HasPassthru && MaskedHasPassthru;
+ bool DropPolicy = !RISCVII::hasVecPolicyOp(MCID.TSFlags) &&
+ RISCVII::hasVecPolicyOp(MaskedMCID.TSFlags);
+ bool HasChainOp =
+ N->getOperand(N->getNumOperands() - 1).getValueType() == MVT::Other;
+ unsigned LastOpNum = N->getNumOperands() - 1 - HasChainOp;
for (unsigned I = ShouldSkip, E = N->getNumOperands(); I != E; I++) { // Skip the mask SDValue Op = N->getOperand(I); if (I == MaskOpIdx) continue;
+ if (DropPolicy && I == LastOpNum)
+ continue;
Ops.push_back(Op); }
@@ -3975,7 +3986,7 @@ bool RISCVDAGToDAGISel::performCombineVMergeAndVOps(SDNode *N) { // preserve them. bool MergeVLShrunk = VL != OrigVL; uint64_t Policy = (isImplicitDef(Passthru) && !MergeVLShrunk) ?
RISCVVType::TAIL_AGNOSTIC : /*TUMU*/ 0; SDValue PolicyOp = CurDAG->getTargetConstant(Policy, DL, Subtarget->getXLenVT()); diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp index c40ab0d09bdf6..98c25bc93a8a2 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -1110,7 +1110,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM, setOperationAction(FloatingPointLibCallOps, VT, Expand); // Custom split nxv32[b]f16 since nxv32[b]f32 is not legal. - if (getLMUL(VT) == RISCVII::VLMUL::LMUL_8) { + if (getLMUL(VT) == RISCVVType::LMUL_8) { setOperationAction(ZvfhminZvfbfminPromoteOps, VT, Custom); setOperationAction(ZvfhminZvfbfminPromoteVPOps, VT, Custom); } else { @@ -2361,25 +2361,25 @@ static void translateSetCCForBranch(const SDLoc &DL, SDValue &LHS, SDValue &RHS, } } -RISCVII::VLMUL RISCVTargetLowering::getLMUL(MVT VT) { +RISCVVType::VLMUL RISCVTargetLowering::getLMUL(MVT VT) { if (VT.isRISCVVectorTuple()) { if (VT.SimpleTy >= MVT::riscv_nxv1i8x2 && VT.SimpleTy <= MVT::riscv_nxv1i8x8) - return RISCVII::LMUL_F8; + return RISCVVType::LMUL_F8; if (VT.SimpleTy >= MVT::riscv_nxv2i8x2 && VT.SimpleTy <= MVT::riscv_nxv2i8x8) - return RISCVII::LMUL_F4; + return RISCVVType::LMUL_F4; if (VT.SimpleTy >= MVT::riscv_nxv4i8x2 && VT.SimpleTy <= MVT::riscv_nxv4i8x8) - return RISCVII::LMUL_F2; + return RISCVVType::LMUL_F2; if (VT.SimpleTy >= MVT::riscv_nxv8i8x2 && VT.SimpleTy <= MVT::riscv_nxv8i8x8) - return RISCVII::LMUL_1; + return RISCVVType::LMUL_1; if (VT.SimpleTy >= MVT::riscv_nxv16i8x2 && VT.SimpleTy <= MVT::riscv_nxv16i8x4) - return RISCVII::LMUL_2; + return RISCVVType::LMUL_2; if (VT.SimpleTy == MVT::riscv_nxv32i8x2) - return RISCVII::LMUL_4; + return RISCVVType::LMUL_4; llvm_unreachable("Invalid vector tuple type LMUL."); } @@ -2392,56 +2392,54 @@ RISCVII::VLMUL RISCVTargetLowering::getLMUL(MVT VT) { default: llvm_unreachable("Invalid LMUL."); case 8: - return RISCVII::VLMUL::LMUL_F8; + return RISCVVType::LMUL_F8; case 16: - return RISCVII::VLMUL::LMUL_F4; + return RISCVVType::LMUL_F4; case 32: - return RISCVII::VLMUL::LMUL_F2; + return RISCVVType::LMUL_F2; case 64: - return RISCVII::VLMUL::LMUL_1; + return RISCVVType::LMUL_1; case 128: - return RISCVII::VLMUL::LMUL_2; + return RISCVVType::LMUL_2; case 256: - return RISCVII::VLMUL::LMUL_4; + return RISCVVType::LMUL_4; case 512: - return RISCVII::VLMUL::LMUL_8; + return RISCVVType::LMUL_8; } } -unsigned RISCVTargetLowering::getRegClassIDForLMUL(RISCVII::VLMUL LMul) { +unsigned RISCVTargetLowering::getRegClassIDForLMUL(RISCVVType::VLMUL LMul) { switch (LMul) { default: llvm_unreachable("Invalid LMUL."); - case RISCVII::VLMUL::LMUL_F8: - case RISCVII::VLMUL::LMUL_F4: - case RISCVII::VLMUL::LMUL_F2: - case RISCVII::VLMUL::LMUL_1: + case RISCVVType::LMUL_F8: + case RISCVVType::LMUL_F4: + case RISCVVType::LMUL_F2: + case RISCVVType::LMUL_1: return RISCV::VRRegClassID; - case RISCVII::VLMUL::LMUL_2: + case RISCVVType::LMUL_2: return RISCV::VRM2RegClassID; - case RISCVII::VLMUL::LMUL_4: + case RISCVVType::LMUL_4: return RISCV::VRM4RegClassID; - case RISCVII::VLMUL::LMUL_8: + case RISCVVType::LMUL_8: return RISCV::VRM8RegClassID; } } unsigned RISCVTargetLowering::getSubregIndexByMVT(MVT VT, unsigned Index) { - RISCVII::VLMUL LMUL = getLMUL(VT); - if (LMUL == RISCVII::VLMUL::LMUL_F8 || - LMUL == RISCVII::VLMUL::LMUL_F4 || - LMUL == RISCVII::VLMUL::LMUL_F2 || - LMUL == RISCVII::VLMUL::LMUL_1) { + RISCVVType::VLMUL LMUL = getLMUL(VT); + if 
(LMUL == RISCVVType::LMUL_F8 || LMUL == RISCVVType::LMUL_F4 || + LMUL == RISCVVType::LMUL_F2 || LMUL == RISCVVType::LMUL_1) {
static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7, "Unexpected subreg numbering"); return RISCV::sub_vrm1_0 + Index; }
- if (LMUL == RISCVII::VLMUL::LMUL_2) {
+ if (LMUL == RISCVVType::LMUL_2) {
static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3, "Unexpected subreg numbering"); return RISCV::sub_vrm2_0 + Index; }
- if (LMUL == RISCVII::VLMUL::LMUL_4) {
+ if (LMUL == RISCVVType::LMUL_4) {
static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1, "Unexpected subreg numbering"); return RISCV::sub_vrm4_0 + Index;
@@ -3347,9 +3345,9 @@ static SDValue getVSlidedown(SelectionDAG &DAG, const RISCVSubtarget &Subtarget, const SDLoc &DL, EVT VT, SDValue Passthru, SDValue Op, SDValue Offset, SDValue Mask, SDValue VL,
- unsigned Policy = RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED) {
+ unsigned Policy = RISCVVType::TAIL_UNDISTURBED_MASK_UNDISTURBED) {
if (Passthru.isUndef())
- Policy = RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC;
+ Policy = RISCVVType::TAIL_AGNOSTIC | RISCVVType::MASK_AGNOSTIC;
SDValue PolicyOp = DAG.getTargetConstant(Policy, DL, Subtarget.getXLenVT()); SDValue Ops[] = {Passthru, Op, Offset, Mask, VL, PolicyOp}; return DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, VT, Ops);
@@ -3359,9 +3357,9 @@ static SDValue getVSlideup(SelectionDAG &DAG, const RISCVSubtarget &Subtarget, const SDLoc &DL, EVT VT, SDValue Passthru, SDValue Op, SDValue Offset, SDValue Mask, SDValue VL,
- unsigned Policy = RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED) {
+ unsigned Policy = RISCVVType::TAIL_UNDISTURBED_MASK_UNDISTURBED) {
if (Passthru.isUndef())
- Policy = RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC;
+ Policy = RISCVVType::TAIL_AGNOSTIC | RISCVVType::MASK_AGNOSTIC;
SDValue PolicyOp = DAG.getTargetConstant(Policy, DL, Subtarget.getXLenVT()); SDValue Ops[] = {Passthru, Op, Offset, Mask, VL, PolicyOp}; return DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, VT, Ops);
@@ -4245,13 +4243,13 @@ static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, InstructionCost PerSlideCost = 1; switch (RISCVTargetLowering::getLMUL(ContainerVT)) { default: break;
- case RISCVII::VLMUL::LMUL_2:
+ case RISCVVType::LMUL_2:
PerSlideCost = 2; break;
- case RISCVII::VLMUL::LMUL_4:
+ case RISCVVType::LMUL_4:
PerSlideCost = 4; break;
- case RISCVII::VLMUL::LMUL_8:
+ case RISCVVType::LMUL_8:
PerSlideCost = 8; break; }
@@ -4281,7 +4279,7 @@ static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, VT.getVectorElementType().getSizeInBits() <= Subtarget.getFLen()) && "Illegal type which will result in reserved encoding");
- const unsigned Policy = RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC;
+ const unsigned Policy = RISCVVType::TAIL_AGNOSTIC | RISCVVType::MASK_AGNOSTIC;
SDValue Vec; UndefCount = 0;
@@ -4773,11 +4771,12 @@ static SDValue lowerVECTOR_SHUFFLEAsVSlideup(const SDLoc &DL, MVT VT, auto TrueMask = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).first; // We slide up by the index that the subvector is being inserted at, and set // VL to the index + the number of elements being inserted.
- unsigned Policy = RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED | RISCVII::MASK_AGNOSTIC;
+ unsigned Policy =
+ RISCVVType::TAIL_UNDISTURBED_MASK_UNDISTURBED | RISCVVType::MASK_AGNOSTIC;
// If we're adding a suffix to the in-place vector, i.e. inserting right // up to the very end of it, then we don't actually care about the tail.
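// Hedged worked example for the check below: with NumElts = 8, inserting
// NumSubElts = 3 at Index = 5 writes lanes 5..7, i.e. the suffix runs to the
// very end of the vector, so no live lanes sit past the insert and the
// slideup may go tail agnostic; at Index = 2 lanes 5..7 stay live and the
// tail must remain undisturbed.
//
//   // 3 + 5 >= 8  ->  Policy |= RISCVVType::TAIL_AGNOSTIC;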
if (NumSubElts + Index >= (int)NumElts)
- Policy |= RISCVII::TAIL_AGNOSTIC;
+ Policy |= RISCVVType::TAIL_AGNOSTIC;
InPlace = convertToScalableVector(ContainerVT, InPlace, DAG, Subtarget); ToInsert = convertToScalableVector(ContainerVT, ToInsert, DAG, Subtarget);
@@ -5570,7 +5569,7 @@ static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG, if (LoV) Res = getVSlideup(DAG, Subtarget, DL, ContainerVT, Res, LoV, DAG.getConstant(InvRotate, DL, XLenVT), TrueMask, VL,
- RISCVII::TAIL_AGNOSTIC);
+ RISCVVType::TAIL_AGNOSTIC);
return convertFromScalableVector(VT, Res, DAG, Subtarget); }
@@ -9457,10 +9456,10 @@ SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op, DAG.getNode(ISD::ADD, DL, XLenVT, Idx, DAG.getConstant(1, DL, XLenVT)); // Use tail agnostic policy if Idx is the last index of Vec.
- unsigned Policy = RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED;
+ unsigned Policy = RISCVVType::TAIL_UNDISTURBED_MASK_UNDISTURBED;
if (VecVT.isFixedLengthVector() && isa<ConstantSDNode>(Idx) && Idx->getAsZExtVal() + 1 == VecVT.getVectorNumElements())
- Policy = RISCVII::TAIL_AGNOSTIC;
+ Policy = RISCVVType::TAIL_AGNOSTIC;
SDValue Slideup = getVSlideup(DAG, Subtarget, DL, ContainerVT, Vec, ValInVec, Idx, Mask, InsertVL, Policy);
@@ -9740,7 +9739,7 @@ static SDValue lowerVectorIntrinsicScalars(SDValue Op, SelectionDAG &DAG, } } if (!I32VL) {
- RISCVII::VLMUL Lmul = RISCVTargetLowering::getLMUL(VT);
+ RISCVVType::VLMUL Lmul = RISCVTargetLowering::getLMUL(VT);
SDValue LMUL = DAG.getConstant(Lmul, DL, XLenVT); unsigned Sew = RISCVVType::encodeSEW(VT.getScalarSizeInBits()); SDValue SEW = DAG.getConstant(Sew, DL, XLenVT);
@@ -9791,7 +9790,7 @@ static SDValue lowerVectorIntrinsicScalars(SDValue Op, SelectionDAG &DAG, if (MaskedOff.isUndef()) return Vec; // TAMU
- if (Policy == RISCVII::TAIL_AGNOSTIC)
+ if (Policy == RISCVVType::TAIL_AGNOSTIC)
return DAG.getNode(RISCVISD::VMERGE_VL, DL, VT, Mask, Vec, MaskedOff, DAG.getUNDEF(VT), AVL); // TUMA or TUMU: Currently we always emit tumu policy regardless of tuma.
@@ -10547,7 +10546,7 @@ static SDValue lowerReductionSeq(unsigned RVVOpcode, MVT ResVT, DAG.getNode(ISD::INSERT_SUBVECTOR, DL, M1VT, DAG.getUNDEF(M1VT), InitialValue, DAG.getVectorIdxConstant(0, DL)); SDValue PassThru = NonZeroAVL ? DAG.getUNDEF(M1VT) : InitialValue;
- SDValue Policy = DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT);
+ SDValue Policy = DAG.getTargetConstant(RISCVVType::TAIL_AGNOSTIC, DL, XLenVT);
SDValue Ops[] = {PassThru, Vec, InitialValue, Mask, VL, Policy}; SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, Ops); return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Reduction,
@@ -10807,9 +10806,9 @@ SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op, SDValue VL = DAG.getConstant(EndIndex, DL, XLenVT); // Use tail agnostic policy if we're inserting over Vec's tail.
- unsigned Policy = RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED;
+ unsigned Policy = RISCVVType::TAIL_UNDISTURBED_MASK_UNDISTURBED;
if (VecVT.isFixedLengthVector() && EndIndex == VecVT.getVectorNumElements())
- Policy = RISCVII::TAIL_AGNOSTIC;
+ Policy = RISCVVType::TAIL_AGNOSTIC;
// If we're inserting into the lowest elements, use a tail undisturbed // vmv.v.v.
@@ -10933,10 +10932,10 @@ SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op, VL = DAG.getElementCount(DL, XLenVT, SubVecVT.getVectorElementCount()); // Use tail agnostic policy if we're inserting over InterSubVT's tail.
- unsigned Policy = RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED;
+ unsigned Policy = RISCVVType::TAIL_UNDISTURBED_MASK_UNDISTURBED;
if (Subtarget.expandVScale(EndIndex) == Subtarget.expandVScale(InterSubVT.getVectorElementCount()))
- Policy = RISCVII::TAIL_AGNOSTIC;
+ Policy = RISCVVType::TAIL_AGNOSTIC;
// If we're inserting into the lowest elements, use a tail undisturbed // vmv.v.v.
@@ -11108,7 +11107,7 @@ SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op, // was > M1 then the index would need to be a multiple of VLMAX, and so would // divide exactly. assert(RISCVVType::decodeVLMUL(getLMUL(ContainerSubVecVT)).second ||
- getLMUL(ContainerSubVecVT) == RISCVII::VLMUL::LMUL_1);
+ getLMUL(ContainerSubVecVT) == RISCVVType::LMUL_1);
// If the vector type is an LMUL-group type, extract a subvector equal to the // nearest full vector register type.
@@ -11719,7 +11718,7 @@ SDValue RISCVTargetLowering::lowerVECTOR_SPLICE(SDValue Op, DownOffset, TrueMask, UpOffset); return getVSlideup(DAG, Subtarget, DL, VecVT, SlideDown, V2, UpOffset, TrueMask, DAG.getRegister(RISCV::X0, XLenVT),
- RISCVII::TAIL_AGNOSTIC);
+ RISCVVType::TAIL_AGNOSTIC);
} SDValue
@@ -11883,7 +11882,7 @@ SDValue RISCVTargetLowering::lowerMaskedLoad(SDValue Op, Ops.push_back(Mask); Ops.push_back(VL); if (IntID == Intrinsic::riscv_vle_mask)
- Ops.push_back(DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT));
+ Ops.push_back(DAG.getTargetConstant(RISCVVType::TAIL_AGNOSTIC, DL, XLenVT));
SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
@@ -11902,7 +11901,7 @@ SDValue RISCVTargetLowering::lowerMaskedLoad(SDValue Op, // overflow. if (IndexEltVT == MVT::i8 && VT.getVectorNumElements() > 256) { // FIXME: We need to do vector splitting manually for LMUL=8 cases.
- assert(getLMUL(IndexVT) != RISCVII::LMUL_8);
+ assert(getLMUL(IndexVT) != RISCVVType::LMUL_8);
IndexVT = IndexVT.changeVectorElementType(MVT::i16); UseVRGATHEREI16 = true; }
@@ -12698,7 +12697,7 @@ RISCVTargetLowering::lowerVPSpliceExperimental(SDValue Op, getVSlidedown(DAG, Subtarget, DL, ContainerVT, DAG.getUNDEF(ContainerVT), Op1, DownOffset, Mask, UpOffset); SDValue Result = getVSlideup(DAG, Subtarget, DL, ContainerVT, SlideDown, Op2,
- UpOffset, Mask, EVL2, RISCVII::TAIL_AGNOSTIC);
+ UpOffset, Mask, EVL2, RISCVVType::TAIL_AGNOSTIC);
if (IsMaskVector) { // Truncate Result back to a mask vector (Result has same EVL as Op2)
@@ -12915,7 +12914,8 @@ SDValue RISCVTargetLowering::lowerVPStridedLoad(SDValue Op, } Ops.push_back(VPNode->getVectorLength()); if (!IsUnmasked) {
- SDValue Policy = DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT);
+ SDValue Policy =
+ DAG.getTargetConstant(RISCVVType::TAIL_AGNOSTIC, DL, XLenVT);
Ops.push_back(Policy); }
@@ -13053,7 +13053,7 @@ SDValue RISCVTargetLowering::lowerMaskedGather(SDValue Op, Ops.push_back(Mask); Ops.push_back(VL); if (!IsUnmasked)
- Ops.push_back(DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT));
+ Ops.push_back(DAG.getTargetConstant(RISCVVType::TAIL_AGNOSTIC, DL, XLenVT));
SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other}); SDValue Result =
@@ -19553,8 +19553,8 @@ void RISCVTargetLowering::computeKnownBitsForTargetNode(const SDValue Op, case Intrinsic::riscv_vsetvlimax: { bool HasAVL = IntNo == Intrinsic::riscv_vsetvli; unsigned VSEW = Op.getConstantOperandVal(HasAVL + 1);
- RISCVII::VLMUL VLMUL =
- static_cast<RISCVII::VLMUL>(Op.getConstantOperandVal(HasAVL + 2));
+ RISCVVType::VLMUL VLMUL =
+ static_cast<RISCVVType::VLMUL>(Op.getConstantOperandVal(HasAVL + 2));
unsigned SEW = RISCVVType::decodeVSEW(VSEW); auto
[LMul, Fractional] = RISCVVType::decodeVLMUL(VLMUL); uint64_t MaxVL = Subtarget.getRealMaxVLen() / SEW; @@ -20168,7 +20168,7 @@ static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI, // Helper to find Masked Pseudo instruction from MC instruction, LMUL and SEW. static const RISCV::RISCVMaskedPseudoInfo * -lookupMaskedIntrinsic(uint16_t MCOpcode, RISCVII::VLMUL LMul, unsigned SEW) { +lookupMaskedIntrinsic(uint16_t MCOpcode, RISCVVType::VLMUL LMul, unsigned SEW) { const RISCVVInversePseudosTable::PseudoInfo *Inverse = RISCVVInversePseudosTable::getBaseInfo(MCOpcode, LMul, SEW); assert(Inverse && "Unexpected LMUL and SEW pair for instruction"); @@ -20211,7 +20211,7 @@ static MachineBasicBlock *emitVFROUND_NOEXCEPT_MASK(MachineInstr &MI, /*IsImp*/ true)); // Emit a VFCVT_F_X - RISCVII::VLMUL LMul = RISCVII::getLMul(MI.getDesc().TSFlags); + RISCVVType::VLMUL LMul = RISCVII::getLMul(MI.getDesc().TSFlags); unsigned Log2SEW = MI.getOperand(RISCVII::getSEWOpNum(MI.getDesc())).getImm(); // There is no E8 variant for VFCVT_F_X. assert(Log2SEW >= 4); @@ -23262,13 +23262,13 @@ bool RISCVTargetLowering::lowerDeinterleavedIntrinsicToVPLoad( Load->getModule(), IntrMaskIds[Factor - 2], {VecTupTy, Mask->getType(), EVL->getType()}); - Value *Operands[] = { - PoisonVal, - Load->getArgOperand(0), - Mask, - EVL, - ConstantInt::get(XLenTy, RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC), - ConstantInt::get(XLenTy, Log2_64(SEW))}; + Value *Operands[] = {PoisonVal, + Load->getArgOperand(0), + Mask, + EVL, + ConstantInt::get(XLenTy, RISCVVType::TAIL_AGNOSTIC | + RISCVVType::MASK_AGNOSTIC), + ConstantInt::get(XLenTy, Log2_64(SEW))}; CallInst *VlsegN = Builder.CreateCall(VlsegNFunc, Operands); diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h index e9dd8ff96fa37..26b888653c81d 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.h +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h @@ -823,7 +823,7 @@ class RISCVTargetLowering : public TargetLowering { // Return the value of VLMax for the given vector type (i.e. SEW and LMUL) SDValue computeVLMax(MVT VecVT, const SDLoc &DL, SelectionDAG &DAG) const; - static RISCVII::VLMUL getLMUL(MVT VT); + static RISCVVType::VLMUL getLMUL(MVT VT); inline static unsigned computeVLMAX(unsigned VectorBits, unsigned EltSize, unsigned MinSize) { // Original equation: @@ -839,7 +839,7 @@ class RISCVTargetLowering : public TargetLowering { static std::pair computeVLMAXBounds(MVT ContainerVT, const RISCVSubtarget &Subtarget); - static unsigned getRegClassIDForLMUL(RISCVII::VLMUL LMul); + static unsigned getRegClassIDForLMUL(RISCVVType::VLMUL LMul); static unsigned getSubregIndexByMVT(MVT VT, unsigned Index); static unsigned getRegClassIDForVecVT(MVT VT); static std::pair diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp index 4a74906ed3cc3..7433603daff85 100644 --- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp +++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp @@ -342,7 +342,7 @@ inline raw_ostream &operator<<(raw_ostream &OS, const DemandedFields &DF) { } #endif -static bool isLMUL1OrSmaller(RISCVII::VLMUL LMUL) { +static bool isLMUL1OrSmaller(RISCVVType::VLMUL LMUL) { auto [LMul, Fractional] = RISCVVType::decodeVLMUL(LMUL); return Fractional || LMul == 1; } @@ -564,7 +564,7 @@ class VSETVLIInfo { } State = Uninitialized; // Fields from VTYPE. 
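// For orientation (per the RVV spec, not something this patch adds): the
// fields below mirror the vtype CSR layout that encodeVTYPE() reproduces,
// i.e. vlmul in bits 2:0, vsew in bits 5:3, vta in bit 6, vma in bit 7.
// A hedged sketch of that packing, with VSEWEncoded = log2(SEW) - 3:
//
//   unsigned VType = (MaskAgnostic << 7) | (TailAgnostic << 6) |
//                    (VSEWEncoded << 3) | static_cast<unsigned>(VLMul);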
- RISCVII::VLMUL VLMul = RISCVII::LMUL_1; + RISCVVType::VLMUL VLMul = RISCVVType::LMUL_1; uint8_t SEW = 0; uint8_t TailAgnostic : 1; uint8_t MaskAgnostic : 1; @@ -642,7 +642,7 @@ class VSETVLIInfo { } unsigned getSEW() const { return SEW; } - RISCVII::VLMUL getVLMUL() const { return VLMul; } + RISCVVType::VLMUL getVLMUL() const { return VLMul; } bool getTailAgnostic() const { return TailAgnostic; } bool getMaskAgnostic() const { return MaskAgnostic; } @@ -707,7 +707,7 @@ class VSETVLIInfo { TailAgnostic = RISCVVType::isTailAgnostic(VType); MaskAgnostic = RISCVVType::isMaskAgnostic(VType); } - void setVTYPE(RISCVII::VLMUL L, unsigned S, bool TA, bool MA) { + void setVTYPE(RISCVVType::VLMUL L, unsigned S, bool TA, bool MA) { assert(isValid() && !isUnknown() && "Can't set VTYPE for uninitialized or unknown"); VLMul = L; @@ -716,7 +716,7 @@ class VSETVLIInfo { MaskAgnostic = MA; } - void setVLMul(RISCVII::VLMUL VLMul) { this->VLMul = VLMul; } + void setVLMul(RISCVVType::VLMUL VLMul) { this->VLMul = VLMul; } unsigned encodeVTYPE() const { assert(isValid() && !isUnknown() && !SEWLMULRatioOnly && @@ -1018,7 +1018,7 @@ RISCVInsertVSETVLI::getInfoForVSETVLI(const MachineInstr &MI) const { } static unsigned computeVLMAX(unsigned VLEN, unsigned SEW, - RISCVII::VLMUL VLMul) { + RISCVVType::VLMUL VLMul) { auto [LMul, Fractional] = RISCVVType::decodeVLMUL(VLMul); if (Fractional) VLEN = VLEN / LMul; @@ -1043,22 +1043,18 @@ RISCVInsertVSETVLI::computeInfoForInstr(const MachineInstr &MI) const { if (RISCVII::hasVecPolicyOp(TSFlags)) { const MachineOperand &Op = MI.getOperand(MI.getNumExplicitOperands() - 1); uint64_t Policy = Op.getImm(); - assert(Policy <= (RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC) && + assert(Policy <= + (RISCVVType::TAIL_AGNOSTIC | RISCVVType::MASK_AGNOSTIC) && "Invalid Policy Value"); - TailAgnostic = Policy & RISCVII::TAIL_AGNOSTIC; - MaskAgnostic = Policy & RISCVII::MASK_AGNOSTIC; + TailAgnostic = Policy & RISCVVType::TAIL_AGNOSTIC; + MaskAgnostic = Policy & RISCVVType::MASK_AGNOSTIC; } - // Some pseudo instructions force a tail agnostic policy despite having a - // tied def. - if (RISCVII::doesForceTailAgnostic(TSFlags)) - TailAgnostic = true; - if (!RISCVII::usesMaskPolicy(TSFlags)) MaskAgnostic = true; } - RISCVII::VLMUL VLMul = RISCVII::getLMul(TSFlags); + RISCVVType::VLMUL VLMul = RISCVII::getLMul(TSFlags); unsigned Log2SEW = MI.getOperand(getSEWOpNum(MI)).getImm(); // A Log2SEW of 0 is an operation on mask registers only. @@ -1250,8 +1246,7 @@ void RISCVInsertVSETVLI::transferBefore(VSETVLIInfo &Info, // be coalesced into another vsetvli since we won't demand any fields. 
VSETVLIInfo NewInfo; // Need a new VSETVLIInfo to clear SEWLMULRatioOnly NewInfo.setAVLImm(1); - NewInfo.setVTYPE(RISCVII::VLMUL::LMUL_1, /*sew*/ 8, /*ta*/ true, - /*ma*/ true); + NewInfo.setVTYPE(RISCVVType::LMUL_1, /*sew*/ 8, /*ta*/ true, /*ma*/ true); Info = NewInfo; return; } diff --git a/llvm/lib/Target/RISCV/RISCVInstrFormats.td b/llvm/lib/Target/RISCV/RISCVInstrFormats.td index cea28bdce284c..47fe51bafd17c 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrFormats.td +++ b/llvm/lib/Target/RISCV/RISCVInstrFormats.td @@ -193,36 +193,33 @@ class RVInstCommon VLMul = 0; let TSFlags{10-8} = VLMul; - bit ForceTailAgnostic = false; - let TSFlags{11} = ForceTailAgnostic; - bit IsTiedPseudo = 0; - let TSFlags{12} = IsTiedPseudo; + let TSFlags{11} = IsTiedPseudo; bit HasSEWOp = 0; - let TSFlags{13} = HasSEWOp; + let TSFlags{12} = HasSEWOp; bit HasVLOp = 0; - let TSFlags{14} = HasVLOp; + let TSFlags{13} = HasVLOp; bit HasVecPolicyOp = 0; - let TSFlags{15} = HasVecPolicyOp; + let TSFlags{14} = HasVecPolicyOp; bit IsRVVWideningReduction = 0; - let TSFlags{16} = IsRVVWideningReduction; + let TSFlags{15} = IsRVVWideningReduction; bit UsesMaskPolicy = 0; - let TSFlags{17} = UsesMaskPolicy; + let TSFlags{16} = UsesMaskPolicy; // Indicates that the result can be considered sign extended from bit 31. Some // instructions with this flag aren't W instructions, but are either sign // extended from a smaller size, always outputs a small integer, or put zeros // in bits 63:31. Used by the SExtWRemoval pass. bit IsSignExtendingOpW = 0; - let TSFlags{18} = IsSignExtendingOpW; + let TSFlags{17} = IsSignExtendingOpW; bit HasRoundModeOp = 0; - let TSFlags{19} = HasRoundModeOp; + let TSFlags{18} = HasRoundModeOp; // This is only valid when HasRoundModeOp is set to 1. HasRoundModeOp is set // to 1 for vector fixed-point or floating-point intrinsics. This bit is @@ -230,7 +227,7 @@ class RVInstCommon narrowing case // 3 -> widening case bits<2> TargetOverlapConstraintType = 0; - let TSFlags{22-21} = TargetOverlapConstraintType; + let TSFlags{21-20} = TargetOverlapConstraintType; // Most vector instructions are elementwise, but some may depend on the value // of VL (e.g. vslide1down.vx), and others may depend on the VL and mask // (e.g. vredsum.vs, viota.m). Mark these instructions so that peepholes avoid // changing their VL and/or mask. EltDeps ElementsDependOn = EltDepsNone; - let TSFlags{23} = ElementsDependOn.VL; - let TSFlags{24} = ElementsDependOn.Mask; + let TSFlags{22} = ElementsDependOn.VL; + let TSFlags{23} = ElementsDependOn.Mask; // Indicates the EEW of a vector instruction's destination operand. EEW DestEEW = EEWSEWx1; - let TSFlags{26-25} = DestEEW.Value; + let TSFlags{25-24} = DestEEW.Value; } class RVInstgetOperand(2).getImm(); - RISCVII::VLMUL FirstLMul = RISCVVType::getVLMUL(FirstVType); + RISCVVType::VLMUL FirstLMul = RISCVVType::getVLMUL(FirstVType); FirstSEW = RISCVVType::getSEW(FirstVType); // The first encountered vsetvli must have the same lmul as the // register class of COPY. 
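// Hedged illustration of the encoding-alignment rule used by GetCopyInfo in
// the copyPhysRegVector hunk below: a forward whole-register copy may use a
// grouped move only when both register encodings are aligned to the group
// size. For example, copying v8..v15 into v16..v23:
//
//   // SrcEncoding = 8, DstEncoding = 16
//   // 8 % 8 == 0 && 16 % 8 == 0  ->  one VMV8R_V instead of eight VMV1R_V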
@@ -326,7 +326,7 @@ void RISCVInstrInfo::copyPhysRegVector( const DebugLoc &DL, MCRegister DstReg, MCRegister SrcReg, bool KillSrc, const TargetRegisterClass *RegClass) const { const TargetRegisterInfo *TRI = STI.getRegisterInfo();
- RISCVII::VLMUL LMul = RISCVRI::getLMul(RegClass->TSFlags);
+ RISCVVType::VLMUL LMul = RISCVRI::getLMul(RegClass->TSFlags);
unsigned NF = RISCVRI::getNF(RegClass->TSFlags); uint16_t SrcEncoding = TRI->getEncodingValue(SrcReg);
@@ -345,7 +345,7 @@ void RISCVInstrInfo::copyPhysRegVector( unsigned I = 0; auto GetCopyInfo = [&](uint16_t SrcEncoding, uint16_t DstEncoding)
- -> std::tuple<RISCVII::VLMUL, const TargetRegisterClass &, unsigned, unsigned, unsigned> {
+ -> std::tuple<RISCVVType::VLMUL, const TargetRegisterClass &, unsigned, unsigned, unsigned> {
if (ReversedCopy) { // For reversed copying, if there are enough aligned registers(8/4/2), we uint16_t Diff = DstEncoding - SrcEncoding; if (I + 8 <= NumRegs && Diff >= 8 && SrcEncoding % 8 == 7 && DstEncoding % 8 == 7)
- return {RISCVII::LMUL_8, RISCV::VRM8RegClass, RISCV::VMV8R_V,
+ return {RISCVVType::LMUL_8, RISCV::VRM8RegClass, RISCV::VMV8R_V,
RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
if (I + 4 <= NumRegs && Diff >= 4 && SrcEncoding % 4 == 3 && DstEncoding % 4 == 3)
- return {RISCVII::LMUL_4, RISCV::VRM4RegClass, RISCV::VMV4R_V,
+ return {RISCVVType::LMUL_4, RISCV::VRM4RegClass, RISCV::VMV4R_V,
RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
if (I + 2 <= NumRegs && Diff >= 2 && SrcEncoding % 2 == 1 && DstEncoding % 2 == 1)
- return {RISCVII::LMUL_2, RISCV::VRM2RegClass, RISCV::VMV2R_V,
+ return {RISCVVType::LMUL_2, RISCV::VRM2RegClass, RISCV::VMV2R_V,
RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
// Or we should do LMUL1 copying.
- return {RISCVII::LMUL_1, RISCV::VRRegClass, RISCV::VMV1R_V,
+ return {RISCVVType::LMUL_1, RISCV::VRRegClass, RISCV::VMV1R_V,
RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};
}
// For forward copying, if source register encoding and destination register encoding are aligned to 8/4/2, we can do a LMUL8/4/2 copying.
if (I + 8 <= NumRegs && SrcEncoding % 8 == 0 && DstEncoding % 8 == 0)
- return {RISCVII::LMUL_8, RISCV::VRM8RegClass, RISCV::VMV8R_V,
+ return {RISCVVType::LMUL_8, RISCV::VRM8RegClass, RISCV::VMV8R_V,
RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
if (I + 4 <= NumRegs && SrcEncoding % 4 == 0 && DstEncoding % 4 == 0)
- return {RISCVII::LMUL_4, RISCV::VRM4RegClass, RISCV::VMV4R_V,
+ return {RISCVVType::LMUL_4, RISCV::VRM4RegClass, RISCV::VMV4R_V,
RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
if (I + 2 <= NumRegs && SrcEncoding % 2 == 0 && DstEncoding % 2 == 0)
- return {RISCVII::LMUL_2, RISCV::VRM2RegClass, RISCV::VMV2R_V,
+ return {RISCVVType::LMUL_2, RISCV::VRM2RegClass, RISCV::VMV2R_V,
RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
// Or we should do LMUL1 copying.
- return {RISCVII::LMUL_1, RISCV::VRRegClass, RISCV::VMV1R_V, + return {RISCVVType::LMUL_1, RISCV::VRRegClass, RISCV::VMV1R_V, RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1}; }; auto FindRegWithEncoding = [TRI](const TargetRegisterClass &RegClass, uint16_t Encoding) { MCRegister Reg = RISCV::V0 + Encoding; - if (RISCVRI::getLMul(RegClass.TSFlags) == RISCVII::LMUL_1) + if (RISCVRI::getLMul(RegClass.TSFlags) == RISCVVType::LMUL_1) return Reg; return TRI->getMatchingSuperReg(Reg, RISCV::sub_vrm1_0, &RegClass); }; @@ -2580,7 +2580,8 @@ bool RISCVInstrInfo::verifyInstruction(const MachineInstr &MI, Ok = Imm >= 0 && Imm < RISCVCC::COND_INVALID; break; case RISCVOp::OPERAND_VEC_POLICY: - Ok = (Imm & (RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC)) == Imm; + Ok = (Imm & + (RISCVVType::TAIL_AGNOSTIC | RISCVVType::MASK_AGNOSTIC)) == Imm; break; case RISCVOp::OPERAND_SEW: Ok = (isUInt<5>(Imm) && RISCVVType::isValidSEW(1 << Imm)); @@ -2648,7 +2649,7 @@ bool RISCVInstrInfo::verifyInstruction(const MachineInstr &MI, return false; } uint64_t Policy = MI.getOperand(OpIdx).getImm(); - if (Policy > (RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC)) { + if (Policy > (RISCVVType::TAIL_AGNOSTIC | RISCVVType::MASK_AGNOSTIC)) { ErrInfo = "Invalid Policy Value"; return false; } @@ -3234,10 +3235,10 @@ std::string RISCVInstrInfo::createMIROperandComment( } case RISCVOp::OPERAND_VEC_POLICY: unsigned Policy = Op.getImm(); - assert(Policy <= (RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC) && + assert(Policy <= (RISCVVType::TAIL_AGNOSTIC | RISCVVType::MASK_AGNOSTIC) && "Invalid Policy Value"); - OS << (Policy & RISCVII::TAIL_AGNOSTIC ? "ta" : "tu") << ", " - << (Policy & RISCVII::MASK_AGNOSTIC ? "ma" : "mu"); + OS << (Policy & RISCVVType::TAIL_AGNOSTIC ? "ta" : "tu") << ", " + << (Policy & RISCVVType::MASK_AGNOSTIC ? "ma" : "mu"); break; } @@ -4336,3 +4337,53 @@ RISCVInstrInfo::analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const { return std::make_unique(LHS, RHS, Cond); } + +// FIXME: We should remove this if we have a default generic scheduling model. +bool RISCVInstrInfo::isHighLatencyDef(int Opc) const { + unsigned RVVMCOpcode = RISCV::getRVVMCOpcode(Opc); + Opc = RVVMCOpcode ? RVVMCOpcode : Opc; + switch (Opc) { + default: + return false; + // Integer div/rem. + case RISCV::DIV: + case RISCV::DIVW: + case RISCV::DIVU: + case RISCV::DIVUW: + case RISCV::REM: + case RISCV::REMW: + case RISCV::REMU: + case RISCV::REMUW: + // Floating-point div/sqrt. + case RISCV::FDIV_H: + case RISCV::FDIV_S: + case RISCV::FDIV_D: + case RISCV::FDIV_H_INX: + case RISCV::FDIV_S_INX: + case RISCV::FDIV_D_INX: + case RISCV::FDIV_D_IN32X: + case RISCV::FSQRT_H: + case RISCV::FSQRT_S: + case RISCV::FSQRT_D: + case RISCV::FSQRT_H_INX: + case RISCV::FSQRT_S_INX: + case RISCV::FSQRT_D_INX: + case RISCV::FSQRT_D_IN32X: + // Vector integer div/rem + case RISCV::VDIV_VV: + case RISCV::VDIV_VX: + case RISCV::VDIVU_VV: + case RISCV::VDIVU_VX: + case RISCV::VREM_VV: + case RISCV::VREM_VX: + case RISCV::VREMU_VV: + case RISCV::VREMU_VX: + // Vector floating-point div/sqrt. 
+ case RISCV::VFDIV_VV: + case RISCV::VFDIV_VF: + case RISCV::VFRDIV_VF: + case RISCV::VFSQRT_V: + case RISCV::VFRSQRT7_V: + return true; + } +} diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.h b/llvm/lib/Target/RISCV/RISCVInstrInfo.h index ec628620d2982..afbc8df50b452 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfo.h +++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.h @@ -300,6 +300,8 @@ class RISCVInstrInfo : public RISCVGenInstrInfo { std::unique_ptr analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const override; + bool isHighLatencyDef(int Opc) const override; + protected: const RISCVSubtarget &STI; diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td index 77f41e3c202c7..cc58cdf02e09c 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td @@ -122,6 +122,7 @@ def DecImm : SDNodeXForm, + VMaskOp:$vm, AVL:$vl, sew:$sew, vec_policy:$policy), []>, RISCVVPseudo { let mayLoad = 0; let mayStore = 0; @@ -1426,6 +1427,7 @@ class VPseudoBinaryMOutMask, SchedUnary<"WriteVMSFSV", "ReadVMSFSV", mx, forcePassthruRead=true>; - let ForceTailAgnostic = true in def "_M_" # mti.BX # "_MASK" : VPseudoUnaryMask, SchedUnary<"WriteVMSFSV", "ReadVMSFSV", mx, @@ -2622,7 +2623,6 @@ multiclass VPseudoBinaryM { VPseudoBinaryNoMask; - let ForceTailAgnostic = true in def "_" # m.MX # "_MASK" : VPseudoBinaryMOutMask(inst#"_M_"#mti.BX#"_MASK") (mti.Mask VR:$passthru), (mti.Mask VR:$rs2), - (mti.Mask VMV0:$vm), GPR:$vl, mti.Log2SEW, TU_MU)>; + (mti.Mask VMV0:$vm), GPR:$vl, mti.Log2SEW, TA_MU)>; class VPatUnaryAnyMask; + (mask_type VMV0:$vm), GPR:$vl, sew, TA_MU)>; class VPatBinaryMaskPolicy; + (mask_type VMV0:$vm), GPR:$vl, sew, TA_MU)>; class VPatTiedBinaryNoMask; + (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TA_MU)>; } } diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td index ffa3d3982647d..43cfc9d1e77ca 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td @@ -1020,7 +1020,7 @@ multiclass VPatIntegerSetCCVL_VV; + (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TA_MU)>; } // Inherits from VPatIntegerSetCCVL_VV and adds a pattern with operands swapped. @@ -1034,7 +1034,8 @@ multiclass VPatIntegerSetCCVL_VV_Swappable(instruction_name#"_VV_"#vti.LMul.MX#"_MASK") VR:$passthru, vti.RegClass:$rs1, - vti.RegClass:$rs2, (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW)>; + vti.RegClass:$rs2, (vti.Mask VMV0:$vm), GPR:$vl, + vti.Log2SEW, TA_MU)>; } multiclass VPatIntegerSetCCVL_VX_Swappable; + GPR:$rs2, (vti.Mask VMV0:$vm), GPR:$vl, + vti.Log2SEW, TA_MU)>; def : Pat<(vti.Mask (riscv_setcc_vl (SplatPat (XLenVT GPR:$rs2)), (vti.Vector vti.RegClass:$rs1), invcc, VR:$passthru, (vti.Mask VMV0:$vm), VLOpFrag)), (instruction_masked VR:$passthru, vti.RegClass:$rs1, - GPR:$rs2, (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW)>; + GPR:$rs2, (vti.Mask VMV0:$vm), GPR:$vl, + vti.Log2SEW, TA_MU)>; } multiclass VPatIntegerSetCCVL_VI_Swappable; + vti.Log2SEW, TA_MU)>; // FIXME: Can do some canonicalization to remove these patterns. 
def : Pat<(vti.Mask (riscv_setcc_vl (splatpat_kind simm5:$rs2),
@@ -1077,7 +1080,7 @@ multiclass VPatIntegerSetCCVL_VI_Swappable; + vti.Log2SEW, TA_MU)>; }
multiclass VPatFPSetCCVL_VV_VF_FV(inst_name#"_VV_"#fvti.LMul.MX#"_MASK") VR:$passthru, fvti.RegClass:$rs1, fvti.RegClass:$rs2, (fvti.Mask VMV0:$vm),
- GPR:$vl, fvti.Log2SEW)>;
+ GPR:$vl, fvti.Log2SEW, TA_MU)>;
def : Pat<(fvti.Mask (vop (fvti.Vector fvti.RegClass:$rs1), (SplatFPOp fvti.ScalarRegClass:$rs2), cc,
@@ -1104,7 +1107,7 @@ multiclass VPatFPSetCCVL_VV_VF_FV(inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK") VR:$passthru, fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2, (fvti.Mask VMV0:$vm),
- GPR:$vl, fvti.Log2SEW)>;
+ GPR:$vl, fvti.Log2SEW, TA_MU)>;
def : Pat<(fvti.Mask (vop (SplatFPOp fvti.ScalarRegClass:$rs2), (fvti.Vector fvti.RegClass:$rs1), cc,
@@ -1114,7 +1117,7 @@ multiclass VPatFPSetCCVL_VV_VF_FV(swapped_op_inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK") VR:$passthru, fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2, (fvti.Mask VMV0:$vm),
- GPR:$vl, fvti.Log2SEW)>;
+ GPR:$vl, fvti.Log2SEW, TA_MU)>;
} } }
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.h b/llvm/lib/Target/RISCV/RISCVRegisterInfo.h index 6c4e9c7b1bdc7..0830191dde3f4 100644 --- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.h +++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.h
@@ -43,8 +43,9 @@ static inline bool isVRegClass(uint64_t TSFlags) { }
/// \returns the LMUL for the register class.
-static inline RISCVII::VLMUL getLMul(uint64_t TSFlags) {
- return static_cast<RISCVII::VLMUL>((TSFlags & VLMulShiftMask) >> VLMulShift);
+static inline RISCVVType::VLMUL getLMul(uint64_t TSFlags) {
+ return static_cast<RISCVVType::VLMUL>((TSFlags & VLMulShiftMask) >>
+ VLMulShift);
}
/// \returns the NF for the register class.
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp index da77bae18962c..79e3b9ee09744 100644 --- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp +++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -765,9 +765,11 @@ InstructionCost RISCVTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, }
static unsigned isM1OrSmaller(MVT VT) {
- RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
- return (LMUL == RISCVII::VLMUL::LMUL_F8 || LMUL == RISCVII::VLMUL::LMUL_F4 ||
- LMUL == RISCVII::VLMUL::LMUL_F2 || LMUL == RISCVII::VLMUL::LMUL_1);
+ RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+ return (LMUL == RISCVVType::VLMUL::LMUL_F8 ||
+ LMUL == RISCVVType::VLMUL::LMUL_F4 ||
+ LMUL == RISCVVType::VLMUL::LMUL_F2 ||
+ LMUL == RISCVVType::VLMUL::LMUL_1);
}
InstructionCost RISCVTTIImpl::getScalarizationOverhead(
diff --git a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp index 1ba7f0b522a2b..e5a98598370ec 100644 --- a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp +++ b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
@@ -65,13 +65,13 @@ class RISCVVLOptimizer : public MachineFunctionPass { /// Represents the EMUL and EEW of a MachineOperand. struct OperandInfo { // Represent as 1,2,4,8, ... and fractional indicator. This is because
- // EMUL can take on values that don't map to RISCVII::VLMUL values exactly.
+ // EMUL can take on values that don't map to RISCVVType::VLMUL values exactly.
// For example, a mask operand can have an EMUL less than MF8.
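// Hedged arithmetic example for the comment above, using
// EMUL = (EEW / SEW) * LMUL: a mask operand has an effective EEW of 1, so
// for an SEW=64, LMUL=1 instruction its EMUL is 1/64, smaller than MF8
// (1/8) and hence only representable as the {Fixed, Fractional} pair:
//
//   // (1 / 64) * 1 = 1/64  ->  EMUL = std::make_pair(64u, /*Fractional=*/true)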
std::optional> EMUL; unsigned Log2EEW; - OperandInfo(RISCVII::VLMUL EMUL, unsigned Log2EEW) + OperandInfo(RISCVVType::VLMUL EMUL, unsigned Log2EEW) : EMUL(RISCVVType::decodeVLMUL(EMUL)), Log2EEW(Log2EEW) {} OperandInfo(std::pair EMUL, unsigned Log2EEW) @@ -141,7 +141,7 @@ static raw_ostream &operator<<(raw_ostream &OS, /// SEW are from the TSFlags of MI. static std::pair getEMULEqualsEEWDivSEWTimesLMUL(unsigned Log2EEW, const MachineInstr &MI) { - RISCVII::VLMUL MIVLMUL = RISCVII::getLMul(MI.getDesc().TSFlags); + RISCVVType::VLMUL MIVLMUL = RISCVII::getLMul(MI.getDesc().TSFlags); auto [MILMUL, MILMULIsFractional] = RISCVVType::decodeVLMUL(MIVLMUL); unsigned MILog2SEW = MI.getOperand(RISCVII::getSEWOpNum(MI.getDesc())).getImm(); diff --git a/llvm/lib/Target/RISCV/RISCVVectorMaskDAGMutation.cpp b/llvm/lib/Target/RISCV/RISCVVectorMaskDAGMutation.cpp index 0bddbacc89e3e..ee90868d252e4 100644 --- a/llvm/lib/Target/RISCV/RISCVVectorMaskDAGMutation.cpp +++ b/llvm/lib/Target/RISCV/RISCVVectorMaskDAGMutation.cpp @@ -123,7 +123,7 @@ class RISCVVectorMaskDAGMutation : public ScheduleDAGMutation { // For LMUL=8 cases, there will be more possibilities to spill. // FIXME: We should use RegPressureTracker to do fine-grained // controls. - RISCVII::getLMul(MI->getDesc().TSFlags) != RISCVII::LMUL_8) + RISCVII::getLMul(MI->getDesc().TSFlags) != RISCVVType::LMUL_8) DAG->addEdge(&SU, SDep(NearestUseV0SU, SDep::Artificial)); } } diff --git a/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp b/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp index a4e7219c39f37..7c05ff1f1a70e 100644 --- a/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp +++ b/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp @@ -371,7 +371,7 @@ bool RISCVVectorPeephole::convertAllOnesVMergeToVMv(MachineInstr &MI) const { MI.removeOperand(2); // False operand MI.removeOperand(3); // Mask operand MI.addOperand( - MachineOperand::CreateImm(RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED)); + MachineOperand::CreateImm(RISCVVType::TAIL_UNDISTURBED_MASK_UNDISTURBED)); // vmv.v.v doesn't have a mask operand, so we may be able to inflate the // register class for the destination and passthru operands e.g. VRNoV0 -> VR @@ -438,7 +438,7 @@ bool RISCVVectorPeephole::convertSameMaskVMergeToVMv(MachineInstr &MI) { MI.removeOperand(2); // False operand MI.removeOperand(3); // Mask operand MI.addOperand( - MachineOperand::CreateImm(RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED)); + MachineOperand::CreateImm(RISCVVType::TAIL_UNDISTURBED_MASK_UNDISTURBED)); // vmv.v.v doesn't have a mask operand, so we may be able to inflate the // register class for the destination and passthru operands e.g. 
VRNoV0 -> VR @@ -466,9 +466,9 @@ bool RISCVVectorPeephole::convertToUnmasked(MachineInstr &MI) const { RISCVII::hasVecPolicyOp(MCID.TSFlags); const bool HasPassthru = RISCVII::isFirstDefTiedToFirstUse(MCID); const MCInstrDesc &MaskedMCID = TII->get(MI.getOpcode()); - assert(RISCVII::hasVecPolicyOp(MaskedMCID.TSFlags) == - RISCVII::hasVecPolicyOp(MCID.TSFlags) && - "Masked and unmasked pseudos are inconsistent"); + assert((RISCVII::hasVecPolicyOp(MaskedMCID.TSFlags) || + !RISCVII::hasVecPolicyOp(MCID.TSFlags)) && + "Unmasked pseudo has policy but masked pseudo doesn't?"); assert(HasPolicyOp == HasPassthru && "Unexpected pseudo structure"); assert(!(HasPassthru && !RISCVII::isFirstDefTiedToFirstUse(MaskedMCID)) && "Unmasked with passthru but masked with no passthru?"); @@ -476,6 +476,11 @@ bool RISCVVectorPeephole::convertToUnmasked(MachineInstr &MI) const { MI.setDesc(MCID); + // Drop the policy operand if unmasked doesn't need it. + if (RISCVII::hasVecPolicyOp(MaskedMCID.TSFlags) && + !RISCVII::hasVecPolicyOp(MCID.TSFlags)) + MI.removeOperand(RISCVII::getVecPolicyOpNum(MaskedMCID)); + // TODO: Increment all MaskOpIdxs in tablegen by num of explicit defs? unsigned MaskOpIdx = I->MaskOpIdx + MI.getNumExplicitDefs(); MI.removeOperand(MaskOpIdx); @@ -575,7 +580,7 @@ bool RISCVVectorPeephole::foldUndefPassthruVMV_V_V(MachineInstr &MI) { Src->getOperand(RISCVII::getVecPolicyOpNum(Src->getDesc())); if (RISCV::isVLKnownLE(MIVL, SrcVL)) - SrcPolicy.setImm(SrcPolicy.getImm() | RISCVII::TAIL_AGNOSTIC); + SrcPolicy.setImm(SrcPolicy.getImm() | RISCVVType::TAIL_AGNOSTIC); } MRI->replaceRegWith(MI.getOperand(0).getReg(), MI.getOperand(2).getReg()); @@ -641,10 +646,10 @@ bool RISCVVectorPeephole::foldVMV_V_V(MachineInstr &MI) { } // If MI was tail agnostic and the VL didn't increase, preserve it. - int64_t Policy = RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED; - if ((MI.getOperand(5).getImm() & RISCVII::TAIL_AGNOSTIC) && + int64_t Policy = RISCVVType::TAIL_UNDISTURBED_MASK_UNDISTURBED; + if ((MI.getOperand(5).getImm() & RISCVVType::TAIL_AGNOSTIC) && RISCV::isVLKnownLE(MI.getOperand(3), SrcVL)) - Policy |= RISCVII::TAIL_AGNOSTIC; + Policy |= RISCVVType::TAIL_AGNOSTIC; Src->getOperand(RISCVII::getVecPolicyOpNum(Src->getDesc())).setImm(Policy); MRI->replaceRegWith(MI.getOperand(0).getReg(), Src->getOperand(0).getReg()); diff --git a/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp b/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp index a7a34e0439ab1..7b897f7e34c6f 100644 --- a/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp @@ -216,6 +216,7 @@ std::string lookupBuiltinNameHelper(StringRef DemangledCall, // Check if the extracted name begins with: // - "__spirv_ImageSampleExplicitLod" // - "__spirv_ImageRead" + // - "__spirv_ImageWrite" // - "__spirv_ImageQuerySizeLod" // - "__spirv_UDotKHR" // - "__spirv_SDotKHR" @@ -233,20 +234,21 @@ std::string lookupBuiltinNameHelper(StringRef DemangledCall, // - "__spirv_SConvert" // - "__spirv_FConvert" // - "__spirv_SatConvert" - // and contains return type information at the end "_R". + // and maybe contains return type information at the end "_R". // If so, extract the plain builtin name without the type information. 
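The regex change that follows makes the trailing "_R<type>" suffix optional and wraps it in a new capture group, which is why the decoration postfix moves from Match[3] to Match[4]. A reduced, runnable model of the matching (a simplified pattern, not the full SpvWithR):

```cpp
#include <iostream>
#include <regex>
#include <string>

int main() {
  // Simplified pattern: plain name in group 1, optional "_R..." tail in
  // group 3, optional "_<postfix>" (e.g. a saturation marker) in group 4.
  static const std::regex SpvWithR(
      "(__spirv_(ImageRead|ImageWrite|SConvert)[^_]*)(_R[^_]*_?(\\w+)?.*)?");
  for (std::string Name :
       {"__spirv_ImageWrite", "__spirv_SConvert_Ruchar2_sat"}) {
    std::smatch Match;
    if (std::regex_match(Name, Match, SpvWithR))
      std::cout << Match[1] << " | postfix: " << Match[4] << "\n";
  }
  // Prints "__spirv_ImageWrite | postfix: " and
  // "__spirv_SConvert | postfix: sat".
}
```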
static const std::regex SpvWithR( - "(__spirv_(ImageSampleExplicitLod|ImageRead|ImageQuerySizeLod|UDotKHR|" + "(__spirv_(ImageSampleExplicitLod|ImageRead|ImageWrite|ImageQuerySizeLod|" + "UDotKHR|" "SDotKHR|SUDotKHR|SDotAccSatKHR|UDotAccSatKHR|SUDotAccSatKHR|" "ReadClockKHR|SubgroupBlockReadINTEL|SubgroupImageBlockReadINTEL|" "SubgroupImageMediaBlockReadINTEL|SubgroupImageMediaBlockWriteINTEL|" "Convert|" - "UConvert|SConvert|FConvert|SatConvert).*)_R[^_]*_?(\\w+)?.*"); + "UConvert|SConvert|FConvert|SatConvert)[^_]*)(_R[^_]*_?(\\w+)?.*)?"); std::smatch Match; if (std::regex_match(BuiltinName, Match, SpvWithR) && Match.size() > 1) { std::ssub_match SubMatch; if (DecorationId && Match.size() > 3) { - SubMatch = Match[3]; + SubMatch = Match[4]; *DecorationId = demangledPostfixToDecorationId(SubMatch.str()); } SubMatch = Match[1]; @@ -1779,6 +1781,7 @@ static bool generateGetQueryInst(const SPIRV::IncomingCall *Call, SPIRV::BuiltIn::BuiltIn Value = SPIRV::lookupGetBuiltin(Call->Builtin->Name, Call->Builtin->Set)->Value; uint64_t IsDefault = (Value == SPIRV::BuiltIn::GlobalSize || + Value == SPIRV::BuiltIn::NumWorkgroups || Value == SPIRV::BuiltIn::WorkgroupSize || Value == SPIRV::BuiltIn::EnqueuedWorkgroupSize); return genWorkgroupQuery(Call, MIRBuilder, GR, Value, IsDefault ? 1 : 0); @@ -1931,6 +1934,9 @@ static bool generateReadImageInst(const StringRef DemangledCall, const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR) { + if (Call->isSpirvOp()) + return buildOpFromWrapper(MIRBuilder, SPIRV::OpImageRead, Call, + GR->getSPIRVTypeID(Call->ReturnType)); Register Image = Call->Arguments[0]; MachineRegisterInfo *MRI = MIRBuilder.getMRI(); bool HasOclSampler = DemangledCall.contains_insensitive("ocl_sampler"); @@ -2010,6 +2016,9 @@ static bool generateReadImageInst(const StringRef DemangledCall, static bool generateWriteImageInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR) { + if (Call->isSpirvOp()) + return buildOpFromWrapper(MIRBuilder, SPIRV::OpImageWrite, Call, + Register(0)); MIRBuilder.buildInstr(SPIRV::OpImageWrite) .addUse(Call->Arguments[0]) // Image. .addUse(Call->Arguments[1]) // Coordinate. 
diff --git a/llvm/lib/Target/SPIRV/SPIRVBuiltins.td b/llvm/lib/Target/SPIRV/SPIRVBuiltins.td index 16f4252173e33..85f42fc08a4e0 100644 --- a/llvm/lib/Target/SPIRV/SPIRVBuiltins.td +++ b/llvm/lib/Target/SPIRV/SPIRVBuiltins.td @@ -110,11 +110,13 @@ def : DemangledBuiltin<"__spirv_DotAccSat", OpenCL_std, IntegerDot, 3, 3>; def : DemangledBuiltin<"read_imagei", OpenCL_std, ReadImage, 2, 4>; def : DemangledBuiltin<"read_imageui", OpenCL_std, ReadImage, 2, 4>; def : DemangledBuiltin<"read_imagef", OpenCL_std, ReadImage, 2, 4>; +def : DemangledBuiltin<"__spirv_ImageRead", OpenCL_std, ReadImage, 2, 0>; def : DemangledBuiltin<"write_imagef", OpenCL_std, WriteImage, 3, 4>; def : DemangledBuiltin<"write_imagei", OpenCL_std, WriteImage, 3, 4>; def : DemangledBuiltin<"write_imageui", OpenCL_std, WriteImage, 3, 4>; def : DemangledBuiltin<"write_imageh", OpenCL_std, WriteImage, 3, 4>; +def : DemangledBuiltin<"__spirv_ImageWrite", OpenCL_std, WriteImage, 3, 0>; def : DemangledBuiltin<"__translate_sampler_initializer", OpenCL_std, SampleImage, 1, 1>; def : DemangledBuiltin<"__spirv_SampledImage", OpenCL_std, SampleImage, 2, 2>; @@ -1323,6 +1325,15 @@ multiclass DemangledGetBuiltin; +defm : DemangledGetBuiltin<"get_local_linear_id", OpenCL_std, Variable, LocalInvocationIndex>; +defm : DemangledGetBuiltin<"get_work_dim", OpenCL_std, Variable, WorkDim>; +defm : DemangledGetBuiltin<"get_sub_group_size", OpenCL_std, Variable, SubgroupSize>; +defm : DemangledGetBuiltin<"get_max_sub_group_size", OpenCL_std, Variable, SubgroupMaxSize>; +defm : DemangledGetBuiltin<"get_num_sub_groups", OpenCL_std, Variable, NumSubgroups>; +defm : DemangledGetBuiltin<"get_enqueued_num_sub_groups", OpenCL_std, Variable, NumEnqueuedSubgroups>; +defm : DemangledGetBuiltin<"get_sub_group_id", OpenCL_std, Variable, SubgroupId>; +defm : DemangledGetBuiltin<"get_sub_group_local_id", OpenCL_std, Variable, SubgroupLocalInvocationId>; defm : DemangledGetBuiltin<"get_sub_group_eq_mask", OpenCL_std, Variable, SubgroupEqMask>; defm : DemangledGetBuiltin<"get_sub_group_ge_mask", OpenCL_std, Variable, SubgroupGeMask>; defm : DemangledGetBuiltin<"get_sub_group_gt_mask", OpenCL_std, Variable, SubgroupGtMask>; @@ -1339,6 +1350,7 @@ defm : DemangledGetBuiltin<"get_global_size", OpenCL_std, GetQuery, GlobalSize>; defm : DemangledGetBuiltin<"get_group_id", OpenCL_std, GetQuery, WorkgroupId>; defm : DemangledGetBuiltin<"get_enqueued_local_size", OpenCL_std, GetQuery, EnqueuedWorkgroupSize>; defm : DemangledGetBuiltin<"get_num_groups", OpenCL_std, GetQuery, NumWorkgroups>; +defm : DemangledGetBuiltin<"get_global_offset", OpenCL_std, GetQuery, GlobalOffset>; defm : DemangledGetBuiltin<"__hlsl_wave_get_lane_index", GLSL_std_450, Wave, SubgroupLocalInvocationId>; //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/VE/VEISelLowering.cpp b/llvm/lib/Target/VE/VEISelLowering.cpp index aff058868f306..62064579b4bdf 100644 --- a/llvm/lib/Target/VE/VEISelLowering.cpp +++ b/llvm/lib/Target/VE/VEISelLowering.cpp @@ -1216,8 +1216,9 @@ SDValue VETargetLowering::lowerATOMIC_SWAP(SDValue Op, SDValue NewVal = prepareTS1AM(Op, DAG, Flag, Bits); SDValue Ptr = N->getOperand(1); - SDValue Aligned = DAG.getNode(ISD::AND, DL, Ptr.getValueType(), - {Ptr, DAG.getConstant(-4, DL, MVT::i64)}); + SDValue Aligned = + DAG.getNode(ISD::AND, DL, Ptr.getValueType(), + {Ptr, DAG.getSignedConstant(-4, DL, MVT::i64)}); SDValue TS1AM = DAG.getAtomic(VEISD::TS1AM, DL, N->getMemoryVT(), DAG.getVTList(Op.getNode()->getValueType(0), 
Op.getNode()->getValueType(1)), @@ -1235,8 +1236,9 @@ SDValue VETargetLowering::lowerATOMIC_SWAP(SDValue Op, SDValue NewVal = prepareTS1AM(Op, DAG, Flag, Bits); SDValue Ptr = N->getOperand(1); - SDValue Aligned = DAG.getNode(ISD::AND, DL, Ptr.getValueType(), - {Ptr, DAG.getConstant(-4, DL, MVT::i64)}); + SDValue Aligned = + DAG.getNode(ISD::AND, DL, Ptr.getValueType(), + {Ptr, DAG.getSignedConstant(-4, DL, MVT::i64)}); SDValue TS1AM = DAG.getAtomic(VEISD::TS1AM, DL, N->getMemoryVT(), DAG.getVTList(Op.getNode()->getValueType(0), Op.getNode()->getValueType(1)), @@ -1601,7 +1603,7 @@ SDValue VETargetLowering::lowerVAARG(SDValue Op, SelectionDAG &DAG) const { VAList = DAG.getNode(ISD::ADD, DL, PtrVT, VAList, DAG.getConstant(Align - 1, DL, PtrVT)); VAList = DAG.getNode(ISD::AND, DL, PtrVT, VAList, - DAG.getConstant(-Align, DL, PtrVT)); + DAG.getSignedConstant(-Align, DL, PtrVT)); // Increment the pointer, VAList, by 16 to the next vaarg. NextPtr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList, DAG.getIntPtrConstant(16, DL)); diff --git a/llvm/lib/Target/X86/X86FastISel.cpp b/llvm/lib/Target/X86/X86FastISel.cpp index 039b8929c93a8..23de30275e2a1 100644 --- a/llvm/lib/Target/X86/X86FastISel.cpp +++ b/llvm/lib/Target/X86/X86FastISel.cpp @@ -3830,7 +3830,8 @@ unsigned X86FastISel::X86MaterializeFP(const ConstantFP *CFP, MVT VT) { .addConstantPoolIndex(CPI, 0, OpFlag); MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg); - addRegReg(MIB, AddrReg, false, PICBase, false); + addRegReg(MIB, AddrReg, false, X86::NoSubRegister, PICBase, false, + X86::NoSubRegister); MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand( MachinePointerInfo::getConstantPool(*FuncInfo.MF), MachineMemOperand::MOLoad, DL.getPointerSize(), Alignment); diff --git a/llvm/lib/Target/X86/X86FrameLowering.cpp b/llvm/lib/Target/X86/X86FrameLowering.cpp index 50c56c9dd08b3..10fb6994b51b6 100644 --- a/llvm/lib/Target/X86/X86FrameLowering.cpp +++ b/llvm/lib/Target/X86/X86FrameLowering.cpp @@ -2639,11 +2639,11 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF, if (Terminator == MBB.end() || !isTailCallOpcode(Terminator->getOpcode())) { // Add the return addr area delta back since we are not tail calling. - int64_t Offset = -1 * X86FI->getTCReturnAddrDelta(); - assert(Offset >= 0 && "TCDelta should never be positive"); - if (Offset) { + int64_t Delta = X86FI->getTCReturnAddrDelta(); + assert(Delta <= 0 && "TCDelta should never be positive"); + if (Delta) { // Check for possible merge with preceding ADD instruction. - Offset = mergeSPAdd(MBB, Terminator, Offset, true); + int64_t Offset = mergeSPAdd(MBB, Terminator, -Delta, true); emitSPUpdate(MBB, Terminator, DL, Offset, /*InEpilogue=*/true); } } diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp index 6ed69dbd6dae0..696bb14292dd0 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -41629,23 +41629,28 @@ static SDValue canonicalizeShuffleWithOp(SDValue N, SelectionDAG &DAG, case X86ISD::PSHUFD: case X86ISD::PSHUFHW: case X86ISD::PSHUFLW: + case X86ISD::VPERMV: case X86ISD::VPERMI: case X86ISD::VPERMILPI: { - if (N.getOperand(0).getValueType() == ShuffleVT && - N->isOnlyUserOf(N.getOperand(0).getNode())) { - SDValue N0 = peekThroughOneUseBitcasts(N.getOperand(0)); + unsigned SrcIdx = Opc == X86ISD::VPERMV ? 
1 : 0; + if (N.getOperand(SrcIdx).getValueType() == ShuffleVT && + N->isOnlyUserOf(N.getOperand(SrcIdx).getNode())) { + SDValue N0 = peekThroughOneUseBitcasts(N.getOperand(SrcIdx)); unsigned SrcOpcode = N0.getOpcode(); EVT OpVT = N0.getValueType(); if (TLI.isBinOp(SrcOpcode) && IsSafeToMoveShuffle(N0, SrcOpcode)) { SDValue Op00 = peekThroughOneUseBitcasts(N0.getOperand(0)); SDValue Op01 = peekThroughOneUseBitcasts(N0.getOperand(1)); - bool FoldShuf = Opc != X86ISD::VPERMI; + bool FoldShuf = Opc != X86ISD::VPERMI && Opc != X86ISD::VPERMV; if (IsMergeableWithShuffle(Op00, FoldShuf) || IsMergeableWithShuffle(Op01, FoldShuf)) { SDValue LHS, RHS; Op00 = DAG.getBitcast(ShuffleVT, Op00); Op01 = DAG.getBitcast(ShuffleVT, Op01); - if (N.getNumOperands() == 2) { + if (Opc == X86ISD::VPERMV) { + LHS = DAG.getNode(Opc, DL, ShuffleVT, N.getOperand(0), Op00); + RHS = DAG.getNode(Opc, DL, ShuffleVT, N.getOperand(0), Op01); + } else if (N.getNumOperands() == 2) { LHS = DAG.getNode(Opc, DL, ShuffleVT, Op00, N.getOperand(1)); RHS = DAG.getNode(Opc, DL, ShuffleVT, Op01, N.getOperand(1)); } else { @@ -41661,11 +41666,13 @@ static SDValue canonicalizeShuffleWithOp(SDValue N, SelectionDAG &DAG, if (SrcOpcode == ISD::SINT_TO_FP && IsSafeToMoveShuffle(N0, SrcOpcode) && OpVT.getScalarSizeInBits() == N0.getOperand(0).getScalarValueSizeInBits()) { - SDValue Op00 = DAG.getBitcast(ShuffleVT, N0.getOperand(0)); - SDValue Res = - N.getNumOperands() == 2 - ? DAG.getNode(Opc, DL, ShuffleVT, Op00, N.getOperand(1)) - : DAG.getNode(Opc, DL, ShuffleVT, Op00); + SDValue Res = DAG.getBitcast(ShuffleVT, N0.getOperand(0)); + if (Opc == X86ISD::VPERMV) + Res = DAG.getNode(Opc, DL, ShuffleVT, N.getOperand(0), Res); + else if (N.getNumOperands() == 2) + Res = DAG.getNode(Opc, DL, ShuffleVT, Res, N.getOperand(1)); + else + Res = DAG.getNode(Opc, DL, ShuffleVT, Res); Res = DAG.getBitcast(N0.getOperand(0).getValueType(), Res); return DAG.getBitcast(ShuffleVT, DAG.getNode(SrcOpcode, DL, OpVT, Res)); } @@ -57883,8 +57890,10 @@ static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT, case ISD::SIGN_EXTEND_VECTOR_INREG: case ISD::ZERO_EXTEND_VECTOR_INREG: { // TODO: Handle ANY_EXTEND combos with SIGN/ZERO_EXTEND. - if (!IsSplat && NumOps == 2 && VT.is256BitVector() && - Subtarget.hasInt256() && + if (!IsSplat && NumOps == 2 && + ((VT.is256BitVector() && Subtarget.hasInt256()) || + (VT.is512BitVector() && Subtarget.useAVX512Regs() && + (EltSizeInBits >= 32 || Subtarget.useBWIRegs()))) && Op0.getOperand(0).getValueType().is128BitVector() && Op0.getOperand(0).getValueType() == Ops[0].getOperand(0).getValueType()) { diff --git a/llvm/lib/Target/X86/X86InstrBuilder.h b/llvm/lib/Target/X86/X86InstrBuilder.h index 07079ef87fd46..45c5f8aa82e97 100644 --- a/llvm/lib/Target/X86/X86InstrBuilder.h +++ b/llvm/lib/Target/X86/X86InstrBuilder.h @@ -161,11 +161,14 @@ addRegOffset(const MachineInstrBuilder &MIB, /// addRegReg - This function is used to add a memory reference of the form: /// [Reg + Reg]. 
-static inline const MachineInstrBuilder &addRegReg(const MachineInstrBuilder &MIB, - unsigned Reg1, bool isKill1, - unsigned Reg2, bool isKill2) { - return MIB.addReg(Reg1, getKillRegState(isKill1)).addImm(1) - .addReg(Reg2, getKillRegState(isKill2)).addImm(0).addReg(0); +static inline const MachineInstrBuilder & +addRegReg(const MachineInstrBuilder &MIB, unsigned Reg1, bool isKill1, + unsigned SubReg1, unsigned Reg2, bool isKill2, unsigned SubReg2) { + return MIB.addReg(Reg1, getKillRegState(isKill1), SubReg1) + .addImm(1) + .addReg(Reg2, getKillRegState(isKill2), SubReg2) + .addImm(0) + .addReg(0); } static inline const MachineInstrBuilder & diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp index 44db5b6865c42..d756e73659a24 100644 --- a/llvm/lib/Target/X86/X86InstrInfo.cpp +++ b/llvm/lib/Target/X86/X86InstrInfo.cpp @@ -1158,8 +1158,9 @@ static bool findRedundantFlagInstr(MachineInstr &CmpInstr, bool X86InstrInfo::classifyLEAReg(MachineInstr &MI, const MachineOperand &Src, unsigned Opc, bool AllowSP, Register &NewSrc, - bool &isKill, MachineOperand &ImplicitOp, - LiveVariables *LV, LiveIntervals *LIS) const { + unsigned &NewSrcSubReg, bool &isKill, + MachineOperand &ImplicitOp, LiveVariables *LV, + LiveIntervals *LIS) const { MachineFunction &MF = *MI.getParent()->getParent(); const TargetRegisterClass *RC; if (AllowSP) { @@ -1168,12 +1169,16 @@ bool X86InstrInfo::classifyLEAReg(MachineInstr &MI, const MachineOperand &Src, RC = Opc != X86::LEA32r ? &X86::GR64_NOSPRegClass : &X86::GR32_NOSPRegClass; } Register SrcReg = Src.getReg(); + unsigned SubReg = Src.getSubReg(); isKill = MI.killsRegister(SrcReg, /*TRI=*/nullptr); + NewSrcSubReg = X86::NoSubRegister; + // For both LEA64 and LEA32 the register already has essentially the right // type (32-bit or 64-bit) we may just need to forbid SP. if (Opc != X86::LEA64_32r) { NewSrc = SrcReg; + NewSrcSubReg = SubReg; assert(!Src.isUndef() && "Undef op doesn't need optimization"); if (NewSrc.isVirtual() && !MF.getRegInfo().constrainRegClass(NewSrc, RC)) @@ -1189,16 +1194,18 @@ bool X86InstrInfo::classifyLEAReg(MachineInstr &MI, const MachineOperand &Src, ImplicitOp.setImplicit(); NewSrc = getX86SubSuperRegister(SrcReg, 64); + assert(!SubReg && "no superregister for source"); assert(NewSrc.isValid() && "Invalid Operand"); assert(!Src.isUndef() && "Undef op doesn't need optimization"); } else { // Virtual register of the wrong class, we have to create a temporary 64-bit // vreg to feed into the LEA. NewSrc = MF.getRegInfo().createVirtualRegister(RC); + NewSrcSubReg = X86::NoSubRegister; MachineInstr *Copy = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(TargetOpcode::COPY)) .addReg(NewSrc, RegState::Define | RegState::Undef, X86::sub_32bit) - .addReg(SrcReg, getKillRegState(isKill)); + .addReg(SrcReg, getKillRegState(isKill), SubReg); // Which is obviously going to be dead after we're done with it. isKill = true; @@ -1258,7 +1265,9 @@ MachineInstr *X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc, MachineBasicBlock::iterator MBBI = MI.getIterator(); Register Dest = MI.getOperand(0).getReg(); Register Src = MI.getOperand(1).getReg(); + unsigned SrcSubReg = MI.getOperand(1).getSubReg(); Register Src2; + unsigned Src2SubReg; bool IsDead = MI.getOperand(0).isDead(); bool IsKill = MI.getOperand(1).isKill(); unsigned SubReg = Is8BitOp ? 
X86::sub_8bit : X86::sub_16bit; @@ -1268,7 +1277,7 @@ MachineInstr *X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc, MachineInstr *InsMI = BuildMI(MBB, MBBI, MI.getDebugLoc(), get(TargetOpcode::COPY)) .addReg(InRegLEA, RegState::Define, SubReg) - .addReg(Src, getKillRegState(IsKill)); + .addReg(Src, getKillRegState(IsKill), SrcSubReg); MachineInstr *ImpDef2 = nullptr; MachineInstr *InsMI2 = nullptr; @@ -1306,12 +1315,14 @@ MachineInstr *X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc, case X86::ADD16rr: case X86::ADD16rr_DB: { Src2 = MI.getOperand(2).getReg(); + Src2SubReg = MI.getOperand(2).getSubReg(); bool IsKill2 = MI.getOperand(2).isKill(); assert(!MI.getOperand(2).isUndef() && "Undef op doesn't need optimization"); if (Src == Src2) { // ADD8rr/ADD16rr killed %reg1028, %reg1028 // just a single insert_subreg. - addRegReg(MIB, InRegLEA, true, InRegLEA, false); + addRegReg(MIB, InRegLEA, true, X86::NoSubRegister, InRegLEA, false, + X86::NoSubRegister); } else { if (Subtarget.is64Bit()) InRegLEA2 = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass); @@ -1323,8 +1334,9 @@ MachineInstr *X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc, InRegLEA2); InsMI2 = BuildMI(MBB, &*MIB, MI.getDebugLoc(), get(TargetOpcode::COPY)) .addReg(InRegLEA2, RegState::Define, SubReg) - .addReg(Src2, getKillRegState(IsKill2)); - addRegReg(MIB, InRegLEA, true, InRegLEA2, true); + .addReg(Src2, getKillRegState(IsKill2), Src2SubReg); + addRegReg(MIB, InRegLEA, true, X86::NoSubRegister, InRegLEA2, true, + X86::NoSubRegister); } if (LV && IsKill2 && InsMI2) LV->replaceKillInstruction(Src2, MI, *InsMI2); @@ -1428,6 +1440,7 @@ MachineInstr *X86InstrInfo::convertToThreeAddress(MachineInstr &MI, MachineInstr *NewMI = nullptr; Register SrcReg, SrcReg2; + unsigned SrcSubReg, SrcSubReg2; bool Is64Bit = Subtarget.is64Bit(); bool Is8BitOp = false; @@ -1467,17 +1480,18 @@ MachineInstr *X86InstrInfo::convertToThreeAddress(MachineInstr &MI, // LEA can't handle ESP. bool isKill; MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false); - if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/false, SrcReg, isKill, - ImplicitOp, LV, LIS)) + if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/false, SrcReg, SrcSubReg, + isKill, ImplicitOp, LV, LIS)) return nullptr; - MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc)) - .add(Dest) - .addReg(0) - .addImm(1LL << ShAmt) - .addReg(SrcReg, getKillRegState(isKill)) - .addImm(0) - .addReg(0); + MachineInstrBuilder MIB = + BuildMI(MF, MI.getDebugLoc(), get(Opc)) + .add(Dest) + .addReg(0) + .addImm(1LL << ShAmt) + .addReg(SrcReg, getKillRegState(isKill), SrcSubReg) + .addImm(0) + .addReg(0); if (ImplicitOp.getReg() != 0) MIB.add(ImplicitOp); NewMI = MIB; @@ -1505,8 +1519,8 @@ MachineInstr *X86InstrInfo::convertToThreeAddress(MachineInstr &MI, : (Is64Bit ? 
X86::LEA64_32r : X86::LEA32r); bool isKill; MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false); - if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/false, SrcReg, isKill, - ImplicitOp, LV, LIS)) + if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/false, SrcReg, SrcSubReg, + isKill, ImplicitOp, LV, LIS)) return nullptr; MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc)) @@ -1531,8 +1545,8 @@ MachineInstr *X86InstrInfo::convertToThreeAddress(MachineInstr &MI, bool isKill; MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false); - if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/false, SrcReg, isKill, - ImplicitOp, LV, LIS)) + if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/false, SrcReg, SrcSubReg, + isKill, ImplicitOp, LV, LIS)) return nullptr; MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc)) @@ -1569,8 +1583,8 @@ MachineInstr *X86InstrInfo::convertToThreeAddress(MachineInstr &MI, const MachineOperand &Src2 = MI.getOperand(2); bool isKill2; MachineOperand ImplicitOp2 = MachineOperand::CreateReg(0, false); - if (!classifyLEAReg(MI, Src2, Opc, /*AllowSP=*/false, SrcReg2, isKill2, - ImplicitOp2, LV, LIS)) + if (!classifyLEAReg(MI, Src2, Opc, /*AllowSP=*/false, SrcReg2, SrcSubReg2, + isKill2, ImplicitOp2, LV, LIS)) return nullptr; bool isKill; @@ -1580,9 +1594,10 @@ MachineInstr *X86InstrInfo::convertToThreeAddress(MachineInstr &MI, // the first call inserted a COPY from Src2 and marked it as killed. isKill = isKill2; SrcReg = SrcReg2; + SrcSubReg = SrcSubReg2; } else { - if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/true, SrcReg, isKill, - ImplicitOp, LV, LIS)) + if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/true, SrcReg, SrcSubReg, + isKill, ImplicitOp, LV, LIS)) return nullptr; } @@ -1592,7 +1607,8 @@ MachineInstr *X86InstrInfo::convertToThreeAddress(MachineInstr &MI, if (ImplicitOp2.getReg() != 0) MIB.add(ImplicitOp2); - NewMI = addRegReg(MIB, SrcReg, isKill, SrcReg2, isKill2); + NewMI = + addRegReg(MIB, SrcReg, isKill, SrcSubReg, SrcReg2, isKill2, SrcSubReg2); // Add kills if classifyLEAReg created a new register. 
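Throughout these LEA rewrites, the new sub-register indices are simply threaded into addReg() next to the kill state rather than being dropped. A standalone model of the five-part X86 memory reference that addRegReg builds, using hypothetical plain structs instead of the LLVM operand classes:

```cpp
#include <cstdio>

// Hypothetical model of an X86 memory reference: base, scale, index,
// displacement, segment. Sub-register index 0 means the whole register.
struct MemOperand {
  unsigned BaseReg, BaseSubReg;
  unsigned Scale;
  unsigned IndexReg, IndexSubReg;
  int Disp;
  unsigned SegReg; // 0 = default segment
};

// Mirrors addRegReg: [Reg1 + 1*Reg2 + 0], now carrying the sub-register
// indices of both registers instead of discarding them.
MemOperand addRegReg(unsigned Reg1, unsigned SubReg1, unsigned Reg2,
                     unsigned SubReg2) {
  return {Reg1, SubReg1, /*Scale=*/1, Reg2, SubReg2, /*Disp=*/0, /*SegReg=*/0};
}

int main() {
  MemOperand M = addRegReg(/*Reg1=*/1, /*SubReg1=*/0, /*Reg2=*/2, /*SubReg2=*/0);
  std::printf("[base=%u + %u*index=%u + %d]\n", M.BaseReg, M.Scale, M.IndexReg,
              M.Disp);
}
```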
if (LV) { @@ -1625,13 +1641,14 @@ MachineInstr *X86InstrInfo::convertToThreeAddress(MachineInstr &MI, bool isKill; MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false); - if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/true, SrcReg, isKill, - ImplicitOp, LV, LIS)) + if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/true, SrcReg, SrcSubReg, + isKill, ImplicitOp, LV, LIS)) return nullptr; - MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc)) - .add(Dest) - .addReg(SrcReg, getKillRegState(isKill)); + MachineInstrBuilder MIB = + BuildMI(MF, MI.getDebugLoc(), get(Opc)) + .add(Dest) + .addReg(SrcReg, getKillRegState(isKill), SrcSubReg); if (ImplicitOp.getReg() != 0) MIB.add(ImplicitOp); @@ -1665,13 +1682,14 @@ MachineInstr *X86InstrInfo::convertToThreeAddress(MachineInstr &MI, bool isKill; MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false); - if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/true, SrcReg, isKill, - ImplicitOp, LV, LIS)) + if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/true, SrcReg, SrcSubReg, + isKill, ImplicitOp, LV, LIS)) return nullptr; - MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc)) - .add(Dest) - .addReg(SrcReg, getKillRegState(isKill)); + MachineInstrBuilder MIB = + BuildMI(MF, MI.getDebugLoc(), get(Opc)) + .add(Dest) + .addReg(SrcReg, getKillRegState(isKill), SrcSubReg); if (ImplicitOp.getReg() != 0) MIB.add(ImplicitOp); diff --git a/llvm/lib/Target/X86/X86InstrInfo.h b/llvm/lib/Target/X86/X86InstrInfo.h index 5f87e02fe67c4..e499f925f48ec 100644 --- a/llvm/lib/Target/X86/X86InstrInfo.h +++ b/llvm/lib/Target/X86/X86InstrInfo.h @@ -310,8 +310,9 @@ class X86InstrInfo final : public X86GenInstrInfo { /// operand to the LEA instruction. bool classifyLEAReg(MachineInstr &MI, const MachineOperand &Src, unsigned LEAOpcode, bool AllowSP, Register &NewSrc, - bool &isKill, MachineOperand &ImplicitOp, - LiveVariables *LV, LiveIntervals *LIS) const; + unsigned &NewSrcSubReg, bool &isKill, + MachineOperand &ImplicitOp, LiveVariables *LV, + LiveIntervals *LIS) const; /// convertToThreeAddress - This method must be implemented by targets that /// set the M_CONVERTIBLE_TO_3_ADDR flag. 
When this flag is set, the target diff --git a/llvm/lib/Target/X86/X86LoadValueInjectionLoadHardening.cpp b/llvm/lib/Target/X86/X86LoadValueInjectionLoadHardening.cpp index 31a93f9c2a6ef..c9e495c1eba1f 100644 --- a/llvm/lib/Target/X86/X86LoadValueInjectionLoadHardening.cpp +++ b/llvm/lib/Target/X86/X86LoadValueInjectionLoadHardening.cpp @@ -339,10 +339,10 @@ X86LoadValueInjectionLoadHardeningPass::getGadgetGraph( DenseMap NodeMap; int FenceCount = 0, GadgetCount = 0; auto MaybeAddNode = [&NodeMap, &Builder](MachineInstr *MI) { - auto Ref = NodeMap.find(MI); - if (Ref == NodeMap.end()) { + auto [Ref, Inserted] = NodeMap.try_emplace(MI); + if (Inserted) { auto I = Builder.addVertex(MI); - NodeMap[MI] = I; + Ref->second = I; return std::pair{I, true}; } return std::pair{Ref->getSecond(), false}; diff --git a/llvm/lib/TargetParser/RISCVTargetParser.cpp b/llvm/lib/TargetParser/RISCVTargetParser.cpp index 625645a99e12f..4111f8bfd2662 100644 --- a/llvm/lib/TargetParser/RISCVTargetParser.cpp +++ b/llvm/lib/TargetParser/RISCVTargetParser.cpp @@ -165,12 +165,12 @@ namespace RISCVVType { // 6 | vta | Vector tail agnostic // 5:3 | vsew[2:0] | Standard element width (SEW) setting // 2:0 | vlmul[2:0] | Vector register group multiplier (LMUL) setting -unsigned encodeVTYPE(RISCVII::VLMUL VLMUL, unsigned SEW, bool TailAgnostic, +unsigned encodeVTYPE(VLMUL VLMul, unsigned SEW, bool TailAgnostic, bool MaskAgnostic) { assert(isValidSEW(SEW) && "Invalid SEW"); - unsigned VLMULBits = static_cast(VLMUL); + unsigned VLMulBits = static_cast(VLMul); unsigned VSEWBits = encodeSEW(SEW); - unsigned VTypeI = (VSEWBits << 3) | (VLMULBits & 0x7); + unsigned VTypeI = (VSEWBits << 3) | (VLMulBits & 0x7); if (TailAgnostic) VTypeI |= 0x40; if (MaskAgnostic) @@ -179,19 +179,19 @@ unsigned encodeVTYPE(RISCVII::VLMUL VLMUL, unsigned SEW, bool TailAgnostic, return VTypeI; } -std::pair decodeVLMUL(RISCVII::VLMUL VLMUL) { - switch (VLMUL) { +std::pair decodeVLMUL(VLMUL VLMul) { + switch (VLMul) { default: llvm_unreachable("Unexpected LMUL value!"); - case RISCVII::VLMUL::LMUL_1: - case RISCVII::VLMUL::LMUL_2: - case RISCVII::VLMUL::LMUL_4: - case RISCVII::VLMUL::LMUL_8: - return std::make_pair(1 << static_cast(VLMUL), false); - case RISCVII::VLMUL::LMUL_F2: - case RISCVII::VLMUL::LMUL_F4: - case RISCVII::VLMUL::LMUL_F8: - return std::make_pair(1 << (8 - static_cast(VLMUL)), true); + case LMUL_1: + case LMUL_2: + case LMUL_4: + case LMUL_8: + return std::make_pair(1 << static_cast(VLMul), false); + case LMUL_F2: + case LMUL_F4: + case LMUL_F8: + return std::make_pair(1 << (8 - static_cast(VLMul)), true); } } @@ -220,7 +220,7 @@ void printVType(unsigned VType, raw_ostream &OS) { OS << ", mu"; } -unsigned getSEWLMULRatio(unsigned SEW, RISCVII::VLMUL VLMul) { +unsigned getSEWLMULRatio(unsigned SEW, VLMUL VLMul) { unsigned LMul; bool Fractional; std::tie(LMul, Fractional) = decodeVLMUL(VLMul); @@ -232,9 +232,8 @@ unsigned getSEWLMULRatio(unsigned SEW, RISCVII::VLMUL VLMul) { return (SEW * 8) / LMul; } -std::optional -getSameRatioLMUL(unsigned SEW, RISCVII::VLMUL VLMUL, unsigned EEW) { - unsigned Ratio = RISCVVType::getSEWLMULRatio(SEW, VLMUL); +std::optional getSameRatioLMUL(unsigned SEW, VLMUL VLMul, unsigned EEW) { + unsigned Ratio = RISCVVType::getSEWLMULRatio(SEW, VLMul); unsigned EMULFixedPoint = (EEW * 8) / Ratio; bool Fractional = EMULFixedPoint < 8; unsigned EMUL = Fractional ? 
8 / EMULFixedPoint : EMULFixedPoint / 8; diff --git a/llvm/lib/Transforms/IPO/AlwaysInliner.cpp b/llvm/lib/Transforms/IPO/AlwaysInliner.cpp index 20fc630a74a86..921fe8c18aa72 100644 --- a/llvm/lib/Transforms/IPO/AlwaysInliner.cpp +++ b/llvm/lib/Transforms/IPO/AlwaysInliner.cpp @@ -126,7 +126,7 @@ struct AlwaysInlinerLegacyPass : public ModulePass { initializeAlwaysInlinerLegacyPassPass(*PassRegistry::getPassRegistry()); } - /// Main run interface method. We override here to avoid calling skipSCC(). + /// Main run interface method. bool runOnModule(Module &M) override { auto &PSI = getAnalysis().getPSI(); diff --git a/llvm/lib/Transforms/IPO/FunctionAttrs.cpp b/llvm/lib/Transforms/IPO/FunctionAttrs.cpp index a66d7ce9c3f50..02b0fcb3981a7 100644 --- a/llvm/lib/Transforms/IPO/FunctionAttrs.cpp +++ b/llvm/lib/Transforms/IPO/FunctionAttrs.cpp @@ -651,9 +651,9 @@ struct ArgumentUsesSummary { SmallDenseMap UsesPerBlock; }; -ArgumentAccessInfo getArgmentAccessInfo(const Instruction *I, - const ArgumentUse &ArgUse, - const DataLayout &DL) { +ArgumentAccessInfo getArgumentAccessInfo(const Instruction *I, + const ArgumentUse &ArgUse, + const DataLayout &DL) { auto GetTypeAccessRange = [&DL](Type *Ty, std::optional Offset) -> std::optional { @@ -805,7 +805,7 @@ ArgumentUsesSummary collectArgumentUsesPerBlock(Argument &A, Function &F) { } auto *I = cast(U); - bool HasWrite = UpdateUseInfo(I, getArgmentAccessInfo(I, ArgUse, DL)); + bool HasWrite = UpdateUseInfo(I, getArgumentAccessInfo(I, ArgUse, DL)); Result.HasAnyWrite |= HasWrite; diff --git a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp index 7ef95800975db..90cd279e8a457 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp @@ -1613,6 +1613,22 @@ Instruction *InstCombinerImpl::visitLShr(BinaryOperator &I) { if (Instruction *Overflow = foldLShrOverflowBit(I)) return Overflow; + // Transform ((pow2 << x) >> cttz(pow2 << y)) -> ((1 << x) >> y) + Value *Shl0_Op0, *Shl0_Op1, *Shl1_Op1; + BinaryOperator *Shl1; + if (match(Op0, m_Shl(m_Value(Shl0_Op0), m_Value(Shl0_Op1))) && + match(Op1, m_Intrinsic(m_BinOp(Shl1))) && + match(Shl1, m_Shl(m_Specific(Shl0_Op0), m_Value(Shl1_Op1))) && + isKnownToBeAPowerOfTwo(Shl0_Op0, /*OrZero=*/true, 0, &I)) { + auto *Shl0 = cast(Op0); + bool HasNUW = Shl0->hasNoUnsignedWrap() && Shl1->hasNoUnsignedWrap(); + bool HasNSW = Shl0->hasNoSignedWrap() && Shl1->hasNoSignedWrap(); + if (HasNUW || HasNSW) { + Value *NewShl = Builder.CreateShl(ConstantInt::get(Shl1->getType(), 1), + Shl0_Op1, "", HasNUW, HasNSW); + return BinaryOperator::CreateLShr(NewShl, Shl1_Op1); + } + } return nullptr; } diff --git a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp index 38454053b039e..a1649c276de83 100644 --- a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp +++ b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp @@ -50,6 +50,7 @@ #include "llvm/Analysis/TargetLibraryInfo.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/IR/Argument.h" +#include "llvm/IR/AttributeMask.h" #include "llvm/IR/BasicBlock.h" #include "llvm/IR/Constant.h" #include "llvm/IR/ConstantRangeList.h" @@ -563,6 +564,43 @@ static void shortenAssignment(Instruction *Inst, Value *OriginalDest, for_each(LinkedDVRAssigns, InsertAssignForOverlap); } +/// Update the attributes given that a memory access is updated (the +/// dereferenced pointer could be moved forward when 
shortening a +/// mem intrinsic). +static void adjustArgAttributes(AnyMemIntrinsic *Intrinsic, unsigned ArgNo, + uint64_t PtrOffset) { + // Remember old attributes. + AttributeSet OldAttrs = Intrinsic->getParamAttributes(ArgNo); + + // Find attributes that should be kept, and remove the rest. + AttributeMask AttrsToRemove; + for (auto &Attr : OldAttrs) { + if (Attr.hasKindAsEnum()) { + switch (Attr.getKindAsEnum()) { + default: + break; + case Attribute::Alignment: + // Only keep alignment if PtrOffset satisfies the alignment. + if (isAligned(Attr.getAlignment().valueOrOne(), PtrOffset)) + continue; + break; + case Attribute::Dereferenceable: + case Attribute::DereferenceableOrNull: + // We could reduce the size of these attributes according to + // PtrOffset. But we simply drop these for now. + break; + case Attribute::NonNull: + case Attribute::NoUndef: + continue; + } + } + AttrsToRemove.addAttribute(Attr); + } + + // Remove the attributes that should be dropped. + Intrinsic->removeParamAttrs(ArgNo, AttrsToRemove); +} + static bool tryToShorten(Instruction *DeadI, int64_t &DeadStart, uint64_t &DeadSize, int64_t KillingStart, uint64_t KillingSize, bool IsOverwriteEnd) { @@ -644,6 +682,7 @@ static bool tryToShorten(Instruction *DeadI, int64_t &DeadStart, DeadI->getIterator()); NewDestGEP->setDebugLoc(DeadIntrinsic->getDebugLoc()); DeadIntrinsic->setDest(NewDestGEP); + adjustArgAttributes(DeadIntrinsic, 0, ToRemoveSize); } // Update attached dbg.assign intrinsics. Assume 8-bit byte. diff --git a/llvm/lib/Transforms/Vectorize/CMakeLists.txt b/llvm/lib/Transforms/Vectorize/CMakeLists.txt index 872e055294d55..38670ba304e53 100644 --- a/llvm/lib/Transforms/Vectorize/CMakeLists.txt +++ b/llvm/lib/Transforms/Vectorize/CMakeLists.txt @@ -9,7 +9,9 @@ add_llvm_component_library(LLVMVectorize SandboxVectorizer/Legality.cpp SandboxVectorizer/Passes/BottomUpVec.cpp SandboxVectorizer/Passes/RegionsFromMetadata.cpp + SandboxVectorizer/Passes/SeedCollection.cpp SandboxVectorizer/Passes/TransactionAcceptOrRevert.cpp + SandboxVectorizer/Passes/TransactionSave.cpp SandboxVectorizer/SandboxVectorizer.cpp SandboxVectorizer/SandboxVectorizerPassBuilder.cpp SandboxVectorizer/Scheduler.cpp diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp index 8c41f896ad622..e8a5db28ea0a4 100644 --- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp +++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp @@ -9298,6 +9298,7 @@ static void addExitUsersForFirstOrderRecurrences( VPlanPtr LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(VFRange &Range) { + using namespace llvm::VPlanPatternMatch; SmallPtrSet *, 1> InterleaveGroups; // --------------------------------------------------------------------------- @@ -9321,6 +9322,10 @@ LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(VFRange &Range) { PSE, RequiresScalarEpilogueCheck, CM.foldTailByMasking(), OrigLoop); + // Build hierarchical CFG. + VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan); + HCFGBuilder.buildHierarchicalCFG(); + // Don't use getDecisionAndClampRange here, because we don't know the UF // so this function is better to be conservative, rather than to split // it up into different VPlans.
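adjustArgAttributes keeps align only when the forward offset still satisfies it, and conservatively drops dereferenceable/dereferenceable_or_null rather than shrinking them. A standalone illustration of the alignment rule (mirrors the isAligned() check, simplified):

```cpp
#include <cassert>
#include <cstdint>

// Alignment survives the forward adjustment only if the byte offset is a
// multiple of it.
bool keepAlignAttribute(uint64_t AlignBytes, uint64_t PtrOffset) {
  return PtrOffset % AlignBytes == 0;
}

int main() {
  assert(keepAlignAttribute(16, 32)); // dest + 32 is still align(16)
  assert(!keepAlignAttribute(16, 8)); // dest + 8 is only align(8): drop it
}
```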
@@ -9371,13 +9376,8 @@ LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(VFRange &Range) { // Construct recipes for the instructions in the loop // --------------------------------------------------------------------------- - // Scan the body of the loop in a topological order to visit each basic block - // after having visited its predecessor basic blocks. - LoopBlocksDFS DFS(OrigLoop); - DFS.perform(LI); - - VPBasicBlock *HeaderVPBB = Plan->getVectorLoopRegion()->getEntryBasicBlock(); - VPBasicBlock *VPBB = HeaderVPBB; + VPRegionBlock *LoopRegion = Plan->getVectorLoopRegion(); + VPBasicBlock *HeaderVPBB = LoopRegion->getEntryBasicBlock(); BasicBlock *HeaderBB = OrigLoop->getHeader(); bool NeedsMasks = CM.foldTailByMasking() || @@ -9389,26 +9389,70 @@ LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(VFRange &Range) { RecipeBuilder.collectScaledReductions(Range); auto *MiddleVPBB = Plan->getMiddleBlock(); + + // Scan the body of the loop in a topological order to visit each basic block + // after having visited its predecessor basic blocks. + ReversePostOrderTraversal> RPOT( + HeaderVPBB); + VPBasicBlock::iterator MBIP = MiddleVPBB->getFirstNonPhi(); - for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) { - // Relevant instructions from basic block BB will be grouped into VPRecipe - // ingredients and fill a new VPBasicBlock. - if (VPBB != HeaderVPBB) - VPBB->setName(BB->getName()); - Builder.setInsertPoint(VPBB); + VPBlockBase *PrevVPBB = nullptr; + for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly(RPOT)) { + // Handle VPBBs down to the latch. + if (VPBB == LoopRegion->getExiting()) { + assert(!HCFGBuilder.getIRBBForVPB(VPBB) && + "the latch block shouldn't have a corresponding IRBB"); + VPBlockUtils::connectBlocks(PrevVPBB, VPBB); + break; + } - if (VPBB == HeaderVPBB) + // Create mask based on the IR BB corresponding to VPBB. + // TODO: Predicate directly based on VPlan. + Builder.setInsertPoint(VPBB, VPBB->begin()); + if (VPBB == HeaderVPBB) { + Builder.setInsertPoint(VPBB, VPBB->getFirstNonPhi()); RecipeBuilder.createHeaderMask(); - else if (NeedsMasks) - RecipeBuilder.createBlockInMask(BB); + } else if (NeedsMasks) { + // FIXME: At the moment, masks need to be placed at the beginning of the + // block, as blends introduced for phi nodes need to use it. The created + // blends should be sunk after the mask recipes. + RecipeBuilder.createBlockInMask(HCFGBuilder.getIRBBForVPB(VPBB)); + } + + // Convert input VPInstructions to widened recipes. + for (VPRecipeBase &R : make_early_inc_range(*VPBB)) { + auto *SingleDef = cast(&R); + auto *UnderlyingValue = SingleDef->getUnderlyingValue(); + // Skip recipes that do not need transforming, including canonical IV, + // wide canonical IV and VPInstructions without underlying values. The + // latter are added above for masking. + // FIXME: Migrate code relying on the underlying instruction from VPlan0 + // to construct recipes below to not use the underlying instruction. + if (isa(&R) || + (isa(&R) && !UnderlyingValue)) + continue; + + // FIXME: VPlan0, which models a copy of the original scalar loop, should + // not use VPWidenPHIRecipe to model the phis. + assert((isa(&R) || isa(&R)) && + UnderlyingValue && "unsupported recipe"); - // Introduce each ingredient into VPlan. - // TODO: Model and preserve debug intrinsics in VPlan. 
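The construction loop now walks VPlan's own CFG in reverse post-order instead of re-running a DFS over the IR loop, preserving the visit-predecessors-first guarantee. A standalone sketch of reverse post-order, for reference:

```cpp
#include <algorithm>
#include <vector>

// DFS post-order, then reverse: every block appears before its successors
// (back edges aside), which is the order the RPOT above guarantees.
void postOrder(int BB, const std::vector<std::vector<int>> &Succs,
               std::vector<bool> &Seen, std::vector<int> &Out) {
  Seen[BB] = true;
  for (int S : Succs[BB])
    if (!Seen[S])
      postOrder(S, Succs, Seen, Out);
  Out.push_back(BB);
}

std::vector<int> rpo(int Entry, const std::vector<std::vector<int>> &Succs) {
  std::vector<bool> Seen(Succs.size());
  std::vector<int> Out;
  postOrder(Entry, Succs, Seen, Out);
  std::reverse(Out.begin(), Out.end());
  return Out;
}
// For the diamond 0->{1,2}->3, rpo(0, ...) yields 0 before 1 and 2, and 3 last.
```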
- for (Instruction &I : drop_end(BB->instructionsWithoutDebug(false))) { - Instruction *Instr = &I; + if (isa(&R) && + (cast(&R)->getOpcode() == + VPInstruction::BranchOnCond || + (cast(&R)->getOpcode() == Instruction::Switch))) { + R.eraseFromParent(); + break; + } + + // TODO: Gradually replace uses of underlying instruction by analyses on + // VPlan. + Instruction *Instr = cast(UnderlyingValue); + Builder.setInsertPoint(SingleDef); SmallVector Operands; auto *Phi = dyn_cast(Instr); if (Phi && Phi->getParent() == HeaderBB) { + // The backedge value will be added in fixHeaderPhis later. Operands.push_back(Plan->getOrAddLiveIn( Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()))); } else { @@ -9420,15 +9464,16 @@ LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(VFRange &Range) { // in the exit block, a uniform store recipe will be created for the final // invariant store of the reduction. StoreInst *SI; - if ((SI = dyn_cast(&I)) && + if ((SI = dyn_cast(Instr)) && Legal->isInvariantAddressOfReduction(SI->getPointerOperand())) { // Only create recipe for the final invariant store of the reduction. - if (!Legal->isInvariantStoreOfReduction(SI)) - continue; - auto *Recipe = new VPReplicateRecipe( - SI, make_range(Operands.begin(), Operands.end()), - true /* IsUniform */); - Recipe->insertBefore(*MiddleVPBB, MBIP); + if (Legal->isInvariantStoreOfReduction(SI)) { + auto *Recipe = new VPReplicateRecipe( + SI, make_range(Operands.begin(), Operands.end()), + true /* IsUniform */); + Recipe->insertBefore(*MiddleVPBB, MBIP); + } + R.eraseFromParent(); continue; } @@ -9438,30 +9483,31 @@ LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(VFRange &Range) { Recipe = RecipeBuilder.handleReplication(Instr, Operands, Range); RecipeBuilder.setRecipe(Instr, Recipe); - if (isa(Recipe)) { - // VPHeaderPHIRecipes must be kept in the phi section of HeaderVPBB. In - // the following cases, VPHeaderPHIRecipes may be created after non-phi - // recipes and need to be moved to the phi section of HeaderVPBB: - // * tail-folding (non-phi recipes computing the header mask are - // introduced earlier than regular header phi recipes, and should appear - // after them) - // * Optimizing truncates to VPWidenIntOrFpInductionRecipe. - - assert((HeaderVPBB->getFirstNonPhi() == VPBB->end() || - CM.foldTailByMasking() || isa(Instr)) && - "unexpected recipe needs moving"); + if (isa(Recipe) && isa(Instr)) { + // Optimized a truncate to VPWidenIntOrFpInductionRecipe. It needs to be + // moved to the phi section in the header. Recipe->insertBefore(*HeaderVPBB, HeaderVPBB->getFirstNonPhi()); - } else - VPBB->appendRecipe(Recipe); + } else { + Builder.insert(Recipe); + } + if (Recipe->getNumDefinedValues() == 1) + SingleDef->replaceAllUsesWith(Recipe->getVPSingleValue()); + else + assert(Recipe->getNumDefinedValues() == 0 && + "Unexpected multidef recipe"); + R.eraseFromParent(); } - VPBlockUtils::insertBlockAfter(Plan->createVPBasicBlock(""), VPBB); - VPBB = cast(VPBB->getSingleSuccessor()); + // Flatten the CFG in the loop. Masks for blocks have already been generated + // and added to recipes as needed. To do so, first disconnect VPBB from its + // successors. Then connect VPBB to the previously visited VPBB. + for (auto *Succ : to_vector(VPBB->getSuccessors())) + VPBlockUtils::disconnectBlocks(VPBB, Succ); + if (PrevVPBB) + VPBlockUtils::connectBlocks(PrevVPBB, VPBB); + PrevVPBB = VPBB; } - // After here, VPBB should not be used. 
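Each placeholder VPInstruction is converted by rewiring all of its uses to the newly built recipe and only then erasing it. The replace-then-erase discipline in miniature (hypothetical Def/Use types, not the VPlan classes):

```cpp
#include <vector>

struct Def; // a single definition with an explicit use-list

struct Use {
  Def **Slot; // the user's operand slot pointing at the used def
};

struct Def {
  std::vector<Use> Uses;
};

// Rewire every use of Old to New before Old is erased, so no dangling
// operand survives; analogous to replaceAllUsesWith followed by
// eraseFromParent in the loop above.
void replaceAllUsesWith(Def &Old, Def &New) {
  for (Use &U : Old.Uses) {
    *U.Slot = &New;
    New.Uses.push_back(U);
  }
  Old.Uses.clear(); // Old is now dead; erasing it is safe
}
```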
- VPBB = nullptr; - assert(isa(Plan->getVectorLoopRegion()) && !Plan->getVectorLoopRegion()->getEntryBasicBlock()->empty() && "entry block must be set to a VPRegionBlock having a non-empty entry " diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp index e946620406c2e..f2aa0e8328585 100644 --- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp +++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp @@ -5724,6 +5724,10 @@ BoUpSLP::getReorderingData(const TreeEntry &TE, bool TopToBottom) { auto CompareByBasicBlocks = [&](BasicBlock *BB1, BasicBlock *BB2) { assert(BB1 != BB2 && "Expected different basic blocks."); + if (!DT->isReachableFromEntry(BB1)) + return false; + if (!DT->isReachableFromEntry(BB2)) + return true; auto *NodeA = DT->getNode(BB1); auto *NodeB = DT->getNode(BB2); assert(NodeA && "Should only process reachable instructions"); @@ -12130,6 +12134,30 @@ bool BoUpSLP::isTreeTinyAndNotFullyVectorizable(bool ForReduction) const { })) return true; + // Do not vectorize small tree of phis only, if all vector phis are also + // gathered. + if (!ForReduction && SLPCostThreshold.getNumOccurrences() && + VectorizableTree.size() <= Limit && + all_of(VectorizableTree, + [&](const std::unique_ptr &TE) { + return (TE->isGather() && + (!TE->hasState() || + TE->getOpcode() != Instruction::ExtractElement) && + count_if(TE->Scalars, IsaPred) <= + Limit) || + (TE->hasState() && + (TE->getOpcode() == Instruction::InsertElement || + (TE->getOpcode() == Instruction::PHI && + all_of(TE->Scalars, [&](Value *V) { + return isa(V) || MustGather.contains(V); + })))); + }) && + any_of(VectorizableTree, [&](const std::unique_ptr &TE) { + return TE->State == TreeEntry::Vectorize && + TE->getOpcode() == Instruction::PHI; + })) + return true; + // We can vectorize the tree if its size is greater than or equal to the // minimum size specified by the MinTreeSize command line option. if (VectorizableTree.size() >= MinTreeSize) diff --git a/llvm/lib/Transforms/Vectorize/SandboxVectorizer/DependencyGraph.cpp b/llvm/lib/Transforms/Vectorize/SandboxVectorizer/DependencyGraph.cpp index c9a6098860c10..3da52b5b4a6f1 100644 --- a/llvm/lib/Transforms/Vectorize/SandboxVectorizer/DependencyGraph.cpp +++ b/llvm/lib/Transforms/Vectorize/SandboxVectorizer/DependencyGraph.cpp @@ -483,19 +483,37 @@ void DependencyGraph::notifyEraseInstr(Instruction *I) { if (Ctx->getTracker().getState() == Tracker::TrackerState::Reverting) // We don't maintain the DAG while reverting. return; - // Update the MemDGNode chain if this is a memory node. - if (auto *MemN = dyn_cast_or_null(getNodeOrNull(I))) { + auto *N = getNode(I); + if (N == nullptr) + // Early return if there is no DAG node for `I`. + return; + if (auto *MemN = dyn_cast(getNode(I))) { + // Update the MemDGNode chain if this is a memory node. auto *PrevMemN = getMemDGNodeBefore(MemN, /*IncludingN=*/false); auto *NextMemN = getMemDGNodeAfter(MemN, /*IncludingN=*/false); if (PrevMemN != nullptr) PrevMemN->NextMemN = NextMemN; if (NextMemN != nullptr) NextMemN->PrevMemN = PrevMemN; - } + // Drop the memory dependencies from both predecessors and successors. + while (!MemN->memPreds().empty()) { + auto *PredN = *MemN->memPreds().begin(); + MemN->removeMemPred(PredN); + } + while (!MemN->memSuccs().empty()) { + auto *SuccN = *MemN->memSuccs().begin(); + SuccN->removeMemPred(MemN); + } + // NOTE: The unscheduled succs for MemNodes get updated by setMemPred().
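notifyEraseInstr now fully detaches an erased memory node: it is spliced out of the Prev/Next memory chain and its dependency edges are dropped in both directions. The chain surgery in miniature (illustrative node type, not the real MemDGNode):

```cpp
struct MemNode {
  MemNode *PrevMemN = nullptr;
  MemNode *NextMemN = nullptr;
};

// Splice N out of the doubly-linked memory chain, exactly like the
// PrevMemN/NextMemN updates above.
void unlinkFromMemChain(MemNode *N) {
  if (N->PrevMemN != nullptr)
    N->PrevMemN->NextMemN = N->NextMemN;
  if (N->NextMemN != nullptr)
    N->NextMemN->PrevMemN = N->PrevMemN;
  N->PrevMemN = N->NextMemN = nullptr;
}
```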
+ } else { + // If this is a non-mem node we only need to update UnscheduledSuccs. + if (!N->scheduled()) + for (auto *PredN : N->preds(*this)) + PredN->decrUnscheduledSuccs(); + } + // Finally erase the Node. InstrToNodeMap.erase(I); - - // TODO: Update the dependencies. } void DependencyGraph::notifySetUse(const Use &U, Value *NewSrc) { diff --git a/llvm/lib/Transforms/Vectorize/SandboxVectorizer/Passes/BottomUpVec.cpp b/llvm/lib/Transforms/Vectorize/SandboxVectorizer/Passes/BottomUpVec.cpp index 0ccef5aecd28b..d57732090dcd6 100644 --- a/llvm/lib/Transforms/Vectorize/SandboxVectorizer/Passes/BottomUpVec.cpp +++ b/llvm/lib/Transforms/Vectorize/SandboxVectorizer/Passes/BottomUpVec.cpp @@ -14,20 +14,10 @@ #include "llvm/SandboxIR/Module.h" #include "llvm/SandboxIR/Region.h" #include "llvm/SandboxIR/Utils.h" -#include "llvm/Transforms/Vectorize/SandboxVectorizer/SandboxVectorizerPassBuilder.h" -#include "llvm/Transforms/Vectorize/SandboxVectorizer/SeedCollector.h" #include "llvm/Transforms/Vectorize/SandboxVectorizer/VecUtils.h" namespace llvm { -static cl::opt - OverrideVecRegBits("sbvec-vec-reg-bits", cl::init(0), cl::Hidden, - cl::desc("Override the vector register size in bits, " - "which is otherwise found by querying TTI.")); -static cl::opt - AllowNonPow2("sbvec-allow-non-pow2", cl::init(false), cl::Hidden, - cl::desc("Allow non-power-of-2 vectorization.")); - #ifndef NDEBUG static cl::opt AlwaysVerify("sbvec-always-verify", cl::init(false), cl::Hidden, @@ -37,10 +27,6 @@ static cl::opt namespace sandboxir { -BottomUpVec::BottomUpVec(StringRef Pipeline) - : FunctionPass("bottom-up-vec"), - RPM("rpm", Pipeline, SandboxVectorizerPassBuilder::createRegionPass) {} - static SmallVector getOperand(ArrayRef Bndl, unsigned OpIdx) { SmallVector Operands; @@ -413,6 +399,7 @@ Value *BottomUpVec::vectorizeRec(ArrayRef Bndl, } bool BottomUpVec::tryVectorize(ArrayRef Bndl) { + Change = false; DeadInstrCandidates.clear(); Legality->clear(); vectorizeRec(Bndl, {}, /*Depth=*/0); @@ -420,83 +407,21 @@ bool BottomUpVec::tryVectorize(ArrayRef Bndl) { return Change; } -bool BottomUpVec::runOnFunction(Function &F, const Analyses &A) { +bool BottomUpVec::runOnRegion(Region &Rgn, const Analyses &A) { + const auto &SeedSlice = Rgn.getAux(); + assert(SeedSlice.size() >= 2 && "Bad slice!"); + Function &F = *SeedSlice[0]->getParent()->getParent(); IMaps = std::make_unique(F.getContext()); Legality = std::make_unique( A.getAA(), A.getScalarEvolution(), F.getParent()->getDataLayout(), F.getContext(), *IMaps); - Change = false; - const auto &DL = F.getParent()->getDataLayout(); - unsigned VecRegBits = - OverrideVecRegBits != 0 - ? OverrideVecRegBits - : A.getTTI() - .getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector) - .getFixedValue(); - - // TODO: Start from innermost BBs first - for (auto &BB : F) { - SeedCollector SC(&BB, A.getScalarEvolution()); - for (SeedBundle &Seeds : SC.getStoreSeeds()) { - unsigned ElmBits = - Utils::getNumBits(VecUtils::getElementType(Utils::getExpectedType( - Seeds[Seeds.getFirstUnusedElementIdx()])), - DL); - - auto DivideBy2 = [](unsigned Num) { - auto Floor = VecUtils::getFloorPowerOf2(Num); - if (Floor == Num) - return Floor / 2; - return Floor; - }; - // Try to create the largest vector supported by the target. If it fails - // reduce the vector size by half. 
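The DivideBy2 helper implements a halving schedule over power-of-two slice sizes, with a non-power-of-two start first snapping down to its floor. A standalone equivalent, assuming C++20 std::bit_floor as a stand-in for VecUtils::getFloorPowerOf2:

```cpp
#include <bit>

// Next slice width to try after NumElms fails to vectorize.
unsigned nextSliceElms(unsigned NumElms) {
  unsigned Floor = std::bit_floor(NumElms);
  return Floor == NumElms ? Floor / 2 : Floor;
}
// e.g. 24 -> 16 -> 8 -> 4 -> 2; the enclosing loop stops once SliceElms < 2.
```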
- for (unsigned SliceElms = std::min(VecRegBits / ElmBits, - Seeds.getNumUnusedBits() / ElmBits); - SliceElms >= 2u; SliceElms = DivideBy2(SliceElms)) { - if (Seeds.allUsed()) - break; - // Keep trying offsets after FirstUnusedElementIdx, until we vectorize - // the slice. This could be quite expensive, so we enforce a limit. - for (unsigned Offset = Seeds.getFirstUnusedElementIdx(), - OE = Seeds.size(); - Offset + 1 < OE; Offset += 1) { - // Seeds are getting used as we vectorize, so skip them. - if (Seeds.isUsed(Offset)) - continue; - if (Seeds.allUsed()) - break; - auto SeedSlice = - Seeds.getSlice(Offset, SliceElms * ElmBits, !AllowNonPow2); - if (SeedSlice.empty()) - continue; - - assert(SeedSlice.size() >= 2 && "Should have been rejected!"); - - // TODO: Refactor to remove the unnecessary copy to SeedSliceVals. - SmallVector SeedSliceVals(SeedSlice.begin(), - SeedSlice.end()); - // Create an empty region. Instructions get added to the region - // automatically by the callbacks. - auto &Ctx = F.getContext(); - Region Rgn(Ctx, A.getTTI()); - // Save the state of the IR before we make any changes. The - // transaction gets accepted/reverted by the tr-accept-or-revert pass. - Ctx.save(); - // Try to vectorize starting from the seed slice. The returned value - // is true if we found vectorizable code and generated some vector - // code for it. It does not mean that the code is profitable. - bool VecSuccess = tryVectorize(SeedSliceVals); - if (VecSuccess) - // WARNING: All passes should return false, except those that - // accept/revert the state. - Change |= RPM.runOnRegion(Rgn, A); - } - } - } - } - return Change; + // TODO: Refactor to remove the unnecessary copy to SeedSliceVals. + SmallVector SeedSliceVals(SeedSlice.begin(), SeedSlice.end()); + // Try to vectorize starting from the seed slice. The returned value + // is true if we found vectorizable code and generated some vector + // code for it. It does not mean that the code is profitable. 
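The removed loop spells out the transactional contract that this refactor splits into dedicated region passes: checkpoint the IR first, then let tr-accept-or-revert keep or roll back the region's changes. A standalone sketch of that control flow, with a hypothetical Context stand-in for the sandbox IR's checkpointing:

```cpp
struct Context {
  bool HaveCheckpoint = false;
  void save() { HaveCheckpoint = true; }    // tr-save
  void accept() { HaveCheckpoint = false; } // keep the new IR
  void revert() { HaveCheckpoint = false; } // restore the checkpoint
};

// One region trip through the pipeline: checkpoint, vectorize, then decide.
bool runPipelineOnRegion(Context &Ctx, bool (*TryVectorize)(),
                         bool Profitable) {
  Ctx.save();
  bool Changed = TryVectorize(); // bottom-up-vec; reports change only
  if (Changed && Profitable) {   // tr-accept-or-revert makes the decision
    Ctx.accept();
    return true;
  }
  Ctx.revert();
  return false;
}
```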
+ return tryVectorize(SeedSliceVals); } } // namespace sandboxir diff --git a/llvm/lib/Transforms/Vectorize/SandboxVectorizer/Passes/PassRegistry.def b/llvm/lib/Transforms/Vectorize/SandboxVectorizer/Passes/PassRegistry.def index f3aa12729860f..f745073a1eba6 100644 --- a/llvm/lib/Transforms/Vectorize/SandboxVectorizer/Passes/PassRegistry.def +++ b/llvm/lib/Transforms/Vectorize/SandboxVectorizer/Passes/PassRegistry.def @@ -19,8 +19,10 @@ REGION_PASS("null", ::llvm::sandboxir::NullPass) REGION_PASS("print-instruction-count", ::llvm::sandboxir::PrintInstructionCount) +REGION_PASS("tr-save", ::llvm::sandboxir::TransactionSave) REGION_PASS("tr-accept", ::llvm::sandboxir::TransactionAlwaysAccept) REGION_PASS("tr-accept-or-revert", ::llvm::sandboxir::TransactionAcceptOrRevert) +REGION_PASS("bottom-up-vec", ::llvm::sandboxir::BottomUpVec) #undef REGION_PASS @@ -28,7 +30,7 @@ REGION_PASS("tr-accept-or-revert", ::llvm::sandboxir::TransactionAcceptOrRevert) #define FUNCTION_PASS_WITH_PARAMS(NAME, CLASS_NAME) #endif -FUNCTION_PASS_WITH_PARAMS("bottom-up-vec", ::llvm::sandboxir::BottomUpVec) +FUNCTION_PASS_WITH_PARAMS("seed-collection", ::llvm::sandboxir::SeedCollection) FUNCTION_PASS_WITH_PARAMS("regions-from-metadata", ::llvm::sandboxir::RegionsFromMetadata) #undef FUNCTION_PASS_WITH_PARAMS diff --git a/llvm/lib/Transforms/Vectorize/SandboxVectorizer/Passes/SeedCollection.cpp b/llvm/lib/Transforms/Vectorize/SandboxVectorizer/Passes/SeedCollection.cpp new file mode 100644 index 0000000000000..f3b62e36e5115 --- /dev/null +++ b/llvm/lib/Transforms/Vectorize/SandboxVectorizer/Passes/SeedCollection.cpp @@ -0,0 +1,96 @@ +//===- SeedCollection.cpp - Seed collection pass --------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "llvm/Transforms/Vectorize/SandboxVectorizer/Passes/SeedCollection.h" +#include "llvm/Analysis/TargetTransformInfo.h" +#include "llvm/SandboxIR/Module.h" +#include "llvm/SandboxIR/Region.h" +#include "llvm/Transforms/Vectorize/SandboxVectorizer/SandboxVectorizerPassBuilder.h" +#include "llvm/Transforms/Vectorize/SandboxVectorizer/SeedCollector.h" +#include "llvm/Transforms/Vectorize/SandboxVectorizer/VecUtils.h" + +namespace llvm { + +static cl::opt + OverrideVecRegBits("sbvec-vec-reg-bits", cl::init(0), cl::Hidden, + cl::desc("Override the vector register size in bits, " + "which is otherwise found by querying TTI.")); +static cl::opt + AllowNonPow2("sbvec-allow-non-pow2", cl::init(false), cl::Hidden, + cl::desc("Allow non-power-of-2 vectorization.")); + +namespace sandboxir { +SeedCollection::SeedCollection(StringRef Pipeline) + : FunctionPass("seed-collection"), + RPM("rpm", Pipeline, SandboxVectorizerPassBuilder::createRegionPass) {} + +bool SeedCollection::runOnFunction(Function &F, const Analyses &A) { + bool Change = false; + const auto &DL = F.getParent()->getDataLayout(); + unsigned VecRegBits = + OverrideVecRegBits != 0 + ? 
diff --git a/llvm/lib/Transforms/Vectorize/SandboxVectorizer/Passes/SeedCollection.cpp b/llvm/lib/Transforms/Vectorize/SandboxVectorizer/Passes/SeedCollection.cpp
new file mode 100644
index 0000000000000..f3b62e36e5115
--- /dev/null
+++ b/llvm/lib/Transforms/Vectorize/SandboxVectorizer/Passes/SeedCollection.cpp
@@ -0,0 +1,96 @@
+//===- SeedCollection.cpp - Seed collection pass --------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Transforms/Vectorize/SandboxVectorizer/Passes/SeedCollection.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/SandboxIR/Module.h"
+#include "llvm/SandboxIR/Region.h"
+#include "llvm/Transforms/Vectorize/SandboxVectorizer/SandboxVectorizerPassBuilder.h"
+#include "llvm/Transforms/Vectorize/SandboxVectorizer/SeedCollector.h"
+#include "llvm/Transforms/Vectorize/SandboxVectorizer/VecUtils.h"
+
+namespace llvm {
+
+static cl::opt<unsigned>
+    OverrideVecRegBits("sbvec-vec-reg-bits", cl::init(0), cl::Hidden,
+                       cl::desc("Override the vector register size in bits, "
+                                "which is otherwise found by querying TTI."));
+static cl::opt<bool>
+    AllowNonPow2("sbvec-allow-non-pow2", cl::init(false), cl::Hidden,
+                 cl::desc("Allow non-power-of-2 vectorization."));
+
+namespace sandboxir {
+SeedCollection::SeedCollection(StringRef Pipeline)
+    : FunctionPass("seed-collection"),
+      RPM("rpm", Pipeline, SandboxVectorizerPassBuilder::createRegionPass) {}
+
+bool SeedCollection::runOnFunction(Function &F, const Analyses &A) {
+  bool Change = false;
+  const auto &DL = F.getParent()->getDataLayout();
+  unsigned VecRegBits =
+      OverrideVecRegBits != 0
+          ? OverrideVecRegBits
+          : A.getTTI()
+                .getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
+                .getFixedValue();
+
+  // TODO: Start from innermost BBs first
+  for (auto &BB : F) {
+    SeedCollector SC(&BB, A.getScalarEvolution());
+    for (SeedBundle &Seeds : SC.getStoreSeeds()) {
+      unsigned ElmBits =
+          Utils::getNumBits(VecUtils::getElementType(Utils::getExpectedType(
+                                Seeds[Seeds.getFirstUnusedElementIdx()])),
+                            DL);
+
+      auto DivideBy2 = [](unsigned Num) {
+        auto Floor = VecUtils::getFloorPowerOf2(Num);
+        if (Floor == Num)
+          return Floor / 2;
+        return Floor;
+      };
+      // Try to create the largest vector supported by the target. If it fails
+      // reduce the vector size by half.
+      for (unsigned SliceElms = std::min(VecRegBits / ElmBits,
+                                         Seeds.getNumUnusedBits() / ElmBits);
+           SliceElms >= 2u; SliceElms = DivideBy2(SliceElms)) {
+        if (Seeds.allUsed())
+          break;
+        // Keep trying offsets after FirstUnusedElementIdx, until we vectorize
+        // the slice. This could be quite expensive, so we enforce a limit.
+        for (unsigned Offset = Seeds.getFirstUnusedElementIdx(),
+                      OE = Seeds.size();
+             Offset + 1 < OE; Offset += 1) {
+          // Seeds are getting used as we vectorize, so skip them.
+          if (Seeds.isUsed(Offset))
+            continue;
+          if (Seeds.allUsed())
+            break;
+
+          auto SeedSlice =
+              Seeds.getSlice(Offset, SliceElms * ElmBits, !AllowNonPow2);
+          if (SeedSlice.empty())
+            continue;
+
+          assert(SeedSlice.size() >= 2 && "Should have been rejected!");
+
+          // Create a region containing the seed slice.
+          auto &Ctx = F.getContext();
+          Region Rgn(Ctx, A.getTTI());
+          Rgn.setAux(SeedSlice);
+          // Run the region pass pipeline.
+          Change |= RPM.runOnRegion(Rgn, A);
+          Rgn.clearAux();
+        }
+      }
+    }
+  }
+  return Change;
+}
+} // namespace sandboxir
+} // namespace llvm
diff --git a/llvm/lib/Transforms/Vectorize/SandboxVectorizer/Passes/TransactionSave.cpp b/llvm/lib/Transforms/Vectorize/SandboxVectorizer/Passes/TransactionSave.cpp
new file mode 100644
index 0000000000000..8d39d971273b4
--- /dev/null
+++ b/llvm/lib/Transforms/Vectorize/SandboxVectorizer/Passes/TransactionSave.cpp
@@ -0,0 +1,20 @@
+//===- TransactionSave.cpp - Save the IR state ----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Transforms/Vectorize/SandboxVectorizer/Passes/TransactionSave.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/InstructionCost.h"
+
+namespace llvm::sandboxir {
+
+bool TransactionSave::runOnRegion(Region &Rgn, const Analyses &A) {
+  Rgn.getContext().save();
+  return false;
+}
+
+} // namespace llvm::sandboxir
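TransactionSave checkpoints the IR at the head of each region pipeline, so the accept-or-revert pass at the tail can roll back everything the vectorizer did in between. The net effect over one region, as a sketch (Context::save() is what the pass above calls; accept()/revert() are assumed to be the matching transaction operations, and the profitability condition is purely illustrative):

    Ctx.save();                                    // tr-save
    bool VecSuccess = tryVectorize(SeedSliceVals); // bottom-up-vec
    if (VecSuccess && NewCost <= OldCost)          // illustrative check only
      Ctx.accept();                                // keep the vectorized IR
    else
      Ctx.revert();                                // roll back to checkpoint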
diff --git a/llvm/lib/Transforms/Vectorize/SandboxVectorizer/SandboxVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SandboxVectorizer/SandboxVectorizer.cpp
index b233d35212f94..5837cc16fcbac 100644
--- a/llvm/lib/Transforms/Vectorize/SandboxVectorizer/SandboxVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SandboxVectorizer/SandboxVectorizer.cpp
@@ -32,9 +32,11 @@ static cl::opt<std::string> UserDefinedPassPipeline(
 SandboxVectorizerPass::SandboxVectorizerPass() : FPM("fpm") {
   if (UserDefinedPassPipeline == DefaultPipelineMagicStr) {
     // TODO: Add passes to the default pipeline. It currently contains:
-    // - the bottom-up-vectorizer pass
+    // - Seed collection, which creates seed regions and runs the pipeline
+    // - Bottom-up Vectorizer pass that starts from a seed
+    // - Accept or revert IR state pass
     FPM.setPassPipeline(
-        "bottom-up-vec",
+        "seed-collection<tr-save,bottom-up-vec,tr-accept-or-revert>",
         sandboxir::SandboxVectorizerPassBuilder::createFunctionPass);
   } else {
     // Create the user-defined pipeline.
diff --git a/llvm/lib/Transforms/Vectorize/SandboxVectorizer/SandboxVectorizerPassBuilder.cpp b/llvm/lib/Transforms/Vectorize/SandboxVectorizer/SandboxVectorizerPassBuilder.cpp
index 0c1ab55e91a5c..389f9cc4cae7c 100644
--- a/llvm/lib/Transforms/Vectorize/SandboxVectorizer/SandboxVectorizerPassBuilder.cpp
+++ b/llvm/lib/Transforms/Vectorize/SandboxVectorizer/SandboxVectorizerPassBuilder.cpp
@@ -4,8 +4,10 @@
 #include "llvm/Transforms/Vectorize/SandboxVectorizer/Passes/NullPass.h"
 #include "llvm/Transforms/Vectorize/SandboxVectorizer/Passes/PrintInstructionCount.h"
 #include "llvm/Transforms/Vectorize/SandboxVectorizer/Passes/RegionsFromMetadata.h"
+#include "llvm/Transforms/Vectorize/SandboxVectorizer/Passes/SeedCollection.h"
 #include "llvm/Transforms/Vectorize/SandboxVectorizer/Passes/TransactionAcceptOrRevert.h"
 #include "llvm/Transforms/Vectorize/SandboxVectorizer/Passes/TransactionAlwaysAccept.h"
+#include "llvm/Transforms/Vectorize/SandboxVectorizer/Passes/TransactionSave.h"
 
 namespace llvm::sandboxir {
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.cpp b/llvm/lib/Transforms/Vectorize/VPlan.cpp
index 1332e50252978..cd111365c134c 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlan.cpp
@@ -600,16 +600,25 @@ static bool hasConditionalTerminator(const VPBasicBlock *VPBB) {
   }
 
   const VPRecipeBase *R = &VPBB->back();
+  bool IsSwitch = isa<VPInstruction>(R) &&
+                  cast<VPInstruction>(R)->getOpcode() == Instruction::Switch;
   bool IsCondBranch = isa<VPBranchOnMaskRecipe>(R) ||
                       match(R, m_BranchOnCond(m_VPValue())) ||
                       match(R, m_BranchOnCount(m_VPValue(), m_VPValue()));
   (void)IsCondBranch;
-
-  if (VPBB->getNumSuccessors() >= 2 ||
+  (void)IsSwitch;
+  if (VPBB->getNumSuccessors() == 2 ||
       (VPBB->isExiting() && !VPBB->getParent()->isReplicator())) {
-    assert(IsCondBranch && "block with multiple successors not terminated by "
-                           "conditional branch recipe");
+    assert((IsCondBranch || IsSwitch) &&
+           "block with multiple successors not terminated by "
+           "conditional branch nor switch recipe");
+
+    return true;
+  }
 
+  if (VPBB->getNumSuccessors() > 2) {
+    assert(IsSwitch && "block with more than 2 successors not terminated by "
+                       "a switch recipe");
     return true;
   }
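hasConditionalTerminator now accepts two shapes: a block with exactly two successors may end in a conditional-branch recipe or a switch, while a block with more than two successors must end in a switch. The builder changes below encode an IR switch as a single VPInstruction; a sketch of the correspondence, with names mirroring that code:

    // IR:    switch i32 %c, label %default [ i32 0, label %bb0
    //                                         i32 1, label %bb1 ]
    // VPlan: VPInstruction with opcode Instruction::Switch, where
    //        operands   = { %c, 0, 1 }          (condition, then case values)
    //        successors = { default, bb0, bb1 } (default first, then cases)
    SmallVector<VPValue *> Ops = {getOrCreateVPOperand(SI->getCondition())};
    for (auto Case : SI->cases())
      Ops.push_back(getOrCreateVPOperand(Case.getCaseValue()));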
diff --git a/llvm/lib/Transforms/Vectorize/VPlanHCFGBuilder.cpp b/llvm/lib/Transforms/Vectorize/VPlanHCFGBuilder.cpp
index 70d8575ba82c5..22c2f91ff55f6 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanHCFGBuilder.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanHCFGBuilder.cpp
@@ -75,7 +75,7 @@ class PlainCFGBuilder {
       : TheLoop(Lp), LI(LI), Plan(P) {}
 
   /// Build plain CFG for TheLoop and connects it to Plan's entry.
-  void buildPlainCFG();
+  void buildPlainCFG(DenseMap<const VPBlockBase *, BasicBlock *> &VPB2IRBB);
 };
 } // anonymous namespace
 
@@ -242,10 +242,10 @@ bool PlainCFGBuilder::isExternalDef(Value *Val) {
     // Instruction definition is in outermost loop PH.
     return false;
 
-  // Check whether Instruction definition is in the loop exit.
-  BasicBlock *Exit = TheLoop->getUniqueExitBlock();
-  assert(Exit && "Expected loop with single exit.");
-  if (InstParent == Exit) {
+  // Check whether Instruction definition is in a loop exit.
+  SmallVector<BasicBlock *> ExitBlocks;
+  TheLoop->getExitBlocks(ExitBlocks);
+  if (is_contained(ExitBlocks, InstParent)) {
     // Instruction definition is in outermost loop exit.
     return false;
   }
@@ -288,6 +288,7 @@ VPValue *PlainCFGBuilder::getOrCreateVPOperand(Value *IRVal) {
 void PlainCFGBuilder::createVPInstructionsForVPBB(VPBasicBlock *VPBB,
                                                   BasicBlock *BB) {
   VPIRBuilder.setInsertPoint(VPBB);
+  // TODO: Model and preserve debug intrinsics in VPlan.
   for (Instruction &InstRef : BB->instructionsWithoutDebug(false)) {
     Instruction *Inst = &InstRef;
@@ -313,6 +314,14 @@ void PlainCFGBuilder::createVPInstructionsForVPBB(VPBasicBlock *VPBB,
       continue;
     }
 
+    if (auto *SI = dyn_cast<SwitchInst>(Inst)) {
+      SmallVector<VPValue *> Ops = {getOrCreateVPOperand(SI->getCondition())};
+      for (auto Case : SI->cases())
+        Ops.push_back(getOrCreateVPOperand(Case.getCaseValue()));
+      VPIRBuilder.createNaryOp(Instruction::Switch, Ops, Inst);
+      continue;
+    }
+
     VPValue *NewVPV;
     if (auto *Phi = dyn_cast<PHINode>(Inst)) {
       // Phi node's operands may have not been visited at this point. We create
@@ -339,7 +348,8 @@ void PlainCFGBuilder::createVPInstructionsForVPBB(VPBasicBlock *VPBB,
 }
 
 // Main interface to build the plain CFG.
-void PlainCFGBuilder::buildPlainCFG() {
+void PlainCFGBuilder::buildPlainCFG(
+    DenseMap<const VPBlockBase *, BasicBlock *> &VPB2IRBB) {
   // 0. Reuse the top-level region, vector-preheader and exit VPBBs from the
   // skeleton. These were created directly rather than via getOrCreateVPBB(),
   // revisit them now to update BB2VPBB. Note that header/entry and
@@ -428,6 +438,14 @@ void PlainCFGBuilder::buildPlainCFG() {
     // Set VPBB successors. We create empty VPBBs for successors if they don't
     // exist already. Recipes will be created when the successor is visited
    // during the RPO traversal.
+    if (auto *SI = dyn_cast<SwitchInst>(BB->getTerminator())) {
+      SmallVector<VPBlockBase *> Succs = {
+          getOrCreateVPBB(SI->getDefaultDest())};
+      for (auto Case : SI->cases())
+        Succs.push_back(getOrCreateVPBB(Case.getCaseSuccessor()));
+      VPBB->setSuccessors(Succs);
+      continue;
+    }
     auto *BI = cast<BranchInst>(BB->getTerminator());
     unsigned NumSuccs = succ_size(BB);
     if (NumSuccs == 1) {
@@ -481,11 +499,14 @@ void PlainCFGBuilder::buildPlainCFG() {
   // have a VPlan counterpart. Fix VPlan phi nodes by adding their corresponding
   // VPlan operands.
   fixPhiNodes();
+
+  for (const auto &[IRBB, VPB] : BB2VPBB)
+    VPB2IRBB[VPB] = IRBB;
 }
 
 void VPlanHCFGBuilder::buildPlainCFG() {
   PlainCFGBuilder PCFGBuilder(TheLoop, LI, Plan);
-  PCFGBuilder.buildPlainCFG();
+  PCFGBuilder.buildPlainCFG(VPB2IRBB);
 }
 
 // Public interface to build a H-CFG.
diff --git a/llvm/lib/Transforms/Vectorize/VPlanHCFGBuilder.h b/llvm/lib/Transforms/Vectorize/VPlanHCFGBuilder.h
index ad6e2ad90a961..bc853bf7a1395 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanHCFGBuilder.h
+++ b/llvm/lib/Transforms/Vectorize/VPlanHCFGBuilder.h
@@ -53,6 +53,10 @@ class VPlanHCFGBuilder {
   // are introduced.
   VPDominatorTree VPDomTree;
 
+  /// Map of created VP blocks to their input IR basic blocks, if they have
+  /// been created for an input IR basic block.
+  DenseMap<const VPBlockBase *, BasicBlock *> VPB2IRBB;
+
   /// Build plain CFG for TheLoop and connects it to Plan's entry.
   void buildPlainCFG();
 
@@ -62,6 +66,14 @@ class VPlanHCFGBuilder {
 
   /// Build H-CFG for TheLoop and update Plan accordingly.
   void buildHierarchicalCFG();
+
+  /// Return the input IR BasicBlock corresponding to \p VPB. Returns nullptr
+  /// if there is no such corresponding block.
+  /// FIXME: This is a temporary workaround to drive the createBlockInMask.
+  /// Remove once mask creation is done on VPlan.
+ BasicBlock *getIRBBForVPB(const VPBlockBase *VPB) const { + return VPB2IRBB.lookup(VPB); + } }; } // namespace llvm diff --git a/llvm/test/Analysis/BasicAA/escape-source-aggregate.ll b/llvm/test/Analysis/BasicAA/escape-source-aggregate.ll new file mode 100644 index 0000000000000..cef11b94f3873 --- /dev/null +++ b/llvm/test/Analysis/BasicAA/escape-source-aggregate.ll @@ -0,0 +1,24 @@ +; RUN: opt -passes=aa-eval -print-all-alias-modref-info -disable-output 2>&1 < %s | FileCheck %s + +declare { ptr, i1 } @get_struct() +declare <2 x ptr> @get_vec() + +; CHECK: MayAlias: i32* %a, i32* %extract +define i32 @test_extractvalue() { + %a = alloca i32 + %call = call { ptr, i1 } @get_struct() + %extract = extractvalue { ptr, i1 } %call, 0 + store i32 0, ptr %extract + %v = load i32, ptr %a + ret i32 %v +} + +; CHECK: MayAlias: i32* %a, i32* %extract +define i32 @test_extractelement() { + %a = alloca i32 + %call = call <2 x ptr> @get_vec() + %extract = extractelement <2 x ptr> %call, i32 0 + store i32 0, ptr %extract + %v = load i32, ptr %a + ret i32 %v +} diff --git a/llvm/test/Analysis/LoopAccessAnalysis/symbolic-stride.ll b/llvm/test/Analysis/LoopAccessAnalysis/symbolic-stride.ll index 525995156481c..8603417081067 100644 --- a/llvm/test/Analysis/LoopAccessAnalysis/symbolic-stride.ll +++ b/llvm/test/Analysis/LoopAccessAnalysis/symbolic-stride.ll @@ -140,6 +140,53 @@ exit: ret void } +; Test with multiple GEP indices +define void @single_stride_array(ptr noalias %A, ptr noalias %B, i64 %N, i64 %stride) { +; CHECK-LABEL: 'single_stride_array' +; CHECK-NEXT: loop: +; CHECK-NEXT: Report: unsafe dependent memory operations in loop. Use #pragma clang loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop +; CHECK-NEXT: Backward loop carried data dependence. +; CHECK-NEXT: Dependences: +; CHECK-NEXT: Backward: +; CHECK-NEXT: %load = load [2 x i32], ptr %gep.A, align 4 -> +; CHECK-NEXT: store [2 x i32] %ins, ptr %gep.A.next, align 4 +; CHECK-EMPTY: +; CHECK-NEXT: Run-time memory checks: +; CHECK-NEXT: Grouped accesses: +; CHECK-EMPTY: +; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop. 
+; CHECK-NEXT: SCEV assumptions: +; CHECK-NEXT: Equal predicate: %stride == 1 +; CHECK-EMPTY: +; CHECK-NEXT: Expressions re-written: +; CHECK-NEXT: [PSE] %gep.A = getelementptr inbounds [2 x i32], ptr %A, i64 %mul, i64 1: +; CHECK-NEXT: {(4 + %A),+,(8 * %stride)}<%loop> +; CHECK-NEXT: --> {(4 + %A),+,8}<%loop> +; +entry: + br label %loop + +loop: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] + %mul = mul i64 %iv, %stride + %gep.A = getelementptr inbounds [2 x i32], ptr %A, i64 %mul, i64 1 + %load = load [2 x i32], ptr %gep.A, align 4 + %gep.B = getelementptr inbounds [2 x i32], ptr %B, i64 %iv + %load_1 = load [2 x i32], ptr %gep.B, align 4 + %v1 = extractvalue [2 x i32] %load, 0 + %v2 = extractvalue [2 x i32] %load_1, 0 + %add = add i32 %v1, %v2 + %ins = insertvalue [2 x i32] poison, i32 %add, 0 + %iv.next = add nuw nsw i64 %iv, 1 + %gep.A.next = getelementptr inbounds [2 x i32], ptr %A, i64 %iv.next + store [2 x i32] %ins, ptr %gep.A.next, align 4 + %exitcond = icmp eq i64 %iv.next, %N + br i1 %exitcond, label %exit, label %loop + +exit: + ret void +} + define void @single_stride_castexpr(i32 %offset, ptr %src, ptr %dst, i1 %cond) { ; CHECK-LABEL: 'single_stride_castexpr' ; CHECK-NEXT: inner.loop: diff --git a/llvm/test/Analysis/ProfileSummary/basic.ll b/llvm/test/Analysis/ProfileSummary/basic.ll index 966a1117c47d1..c4f48ccafde86 100644 --- a/llvm/test/Analysis/ProfileSummary/basic.ll +++ b/llvm/test/Analysis/ProfileSummary/basic.ll @@ -2,12 +2,16 @@ ; RUN: opt < %s -disable-output -profile-summary-hot-count=500 -passes=print-profile-summary -S 2>&1 | FileCheck %s -check-prefixes=OVERRIDE-HOT ; RUN: opt < %s -disable-output -profile-summary-cold-count=0 -passes=print-profile-summary -S 2>&1 | FileCheck %s -check-prefixes=OVERRIDE-COLD ; RUN: opt < %s -disable-output -profile-summary-cold-count=200 -profile-summary-hot-count=1000 -passes=print-profile-summary -S 2>&1 | FileCheck %s -check-prefixes=OVERRIDE-BOTH +; RUN: opt < %s -disable-output -profile-summary-cutoff-hot=0 -passes=print-profile-summary -S 2>&1 | FileCheck %s -check-prefixes=HOT-CUTOFF-0 +; RUN: opt < %s -disable-output -profile-summary-cutoff-cold=0 -profile-summary-hot-count=18446744073709551615 -passes=print-profile-summary -S 2>&1 | FileCheck %s -check-prefixes=COLD-CUTOFF-0 define void @f1() !prof !20 { ; CHECK-LABEL: f1 :hot ; OVERRIDE-HOT-LABEL: f1 ; OVERRIDE-COLD-LABEL: f1 :hot ; OVERRIDE-BOTH-LABEL: f1 +; HOT-CUTOFF-0-LABEL: f1{{$}} +; COLD-CUTOFF-0-LABEL: f1 :cold ret void } @@ -17,6 +21,8 @@ define void @f2() !prof !21 { ; OVERRIDE-HOT-LABEL: f2 :cold ; OVERRIDE-COLD-LABEL: f2 ; OVERRIDE-BOTH-LABEL: f2 +; HOT-CUTOFF-0-LABEL: f2 :cold +; COLD-CUTOFF-0-LABEL: f2 :cold ret void } @@ -26,6 +32,8 @@ define void @f3() !prof !22 { ; OVERRIDE-HOT-LABEL: f3 ; OVERRIDE-COLD-LABEL: f3 ; OVERRIDE-BOTH-LABEL: f3 +; HOT-CUTOFF-0-LABEL: f3{{$}} +; COLD-CUTOFF-0-LABEL: f3 :cold ret void } diff --git a/llvm/test/Analysis/ScalarEvolution/trunc-simplify.ll b/llvm/test/Analysis/ScalarEvolution/trunc-simplify.ll index f26478cb13fa3..b461b6a600c65 100644 --- a/llvm/test/Analysis/ScalarEvolution/trunc-simplify.ll +++ b/llvm/test/Analysis/ScalarEvolution/trunc-simplify.ll @@ -1,13 +1,19 @@ +; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py UTC_ARGS: --version 5 ; RUN: opt < %s -disable-output "-passes=print" 2>&1 | FileCheck %s ; Check that we convert ; trunc(C * a) -> trunc(C) * trunc(a) ; if C is a constant. 
-; CHECK-LABEL: @trunc_of_mul define i8 @trunc_of_mul(i32 %a) { +; CHECK-LABEL: 'trunc_of_mul' +; CHECK-NEXT: Classifying expressions for: @trunc_of_mul +; CHECK-NEXT: %b = mul i32 %a, 100 +; CHECK-NEXT: --> (100 * %a) U: [0,-3) S: [-2147483648,2147483645) +; CHECK-NEXT: %c = trunc i32 %b to i8 +; CHECK-NEXT: --> (100 * (trunc i32 %a to i8)) U: [0,-3) S: [-128,125) +; CHECK-NEXT: Determining loop execution counts for: @trunc_of_mul +; %b = mul i32 %a, 100 - ; CHECK: %c - ; CHECK-NEXT: --> (100 * (trunc i32 %a to i8)) %c = trunc i32 %b to i8 ret i8 %c } @@ -15,31 +21,43 @@ define i8 @trunc_of_mul(i32 %a) { ; Check that we convert ; trunc(C + a) -> trunc(C) + trunc(a) ; if C is a constant. -; CHECK-LABEL: @trunc_of_add define i8 @trunc_of_add(i32 %a) { +; CHECK-LABEL: 'trunc_of_add' +; CHECK-NEXT: Classifying expressions for: @trunc_of_add +; CHECK-NEXT: %b = add i32 %a, 100 +; CHECK-NEXT: --> (100 + %a) U: full-set S: full-set +; CHECK-NEXT: %c = trunc i32 %b to i8 +; CHECK-NEXT: --> (100 + (trunc i32 %a to i8)) U: full-set S: full-set +; CHECK-NEXT: Determining loop execution counts for: @trunc_of_add +; %b = add i32 %a, 100 - ; CHECK: %c - ; CHECK-NEXT: --> (100 + (trunc i32 %a to i8)) %c = trunc i32 %b to i8 ret i8 %c } ; Check that we truncate to zero values assumed to have at least as many ; trailing zeros as the target type. -; CHECK-LABEL: @trunc_to_assumed_zeros define i8 @trunc_to_assumed_zeros(ptr %p) { +; CHECK-LABEL: 'trunc_to_assumed_zeros' +; CHECK-NEXT: Classifying expressions for: @trunc_to_assumed_zeros +; CHECK-NEXT: %a = load i32, ptr %p, align 4 +; CHECK-NEXT: --> %a U: [0,-255) S: [-2147483648,2147483393) +; CHECK-NEXT: %and = and i32 %a, 255 +; CHECK-NEXT: --> 0 U: [0,1) S: [0,1) +; CHECK-NEXT: %c = trunc i32 %a to i8 +; CHECK-NEXT: --> 0 U: [0,1) S: [0,1) +; CHECK-NEXT: %d = trunc i32 %a to i1 +; CHECK-NEXT: --> false U: [0,-1) S: [0,-1) +; CHECK-NEXT: %e = trunc i32 %a to i16 +; CHECK-NEXT: --> (trunc i32 %a to i16) U: [0,-255) S: [-32768,32513) +; CHECK-NEXT: Determining loop execution counts for: @trunc_to_assumed_zeros +; %a = load i32, ptr %p %and = and i32 %a, 255 %cmp = icmp eq i32 %and, 0 tail call void @llvm.assume(i1 %cmp) - ; CHECK: %c - ; CHECK-NEXT: --> 0 %c = trunc i32 %a to i8 - ; CHECK: %d - ; CHECK-NEXT: --> false %d = trunc i32 %a to i1 - ; CHECK: %e - ; CHECK-NEXT: --> (trunc i32 %a to i16) %e = trunc i32 %a to i16 ret i8 %c } diff --git a/llvm/test/CodeGen/AArch64/neon-partial-reduce-dot-product.ll b/llvm/test/CodeGen/AArch64/neon-partial-reduce-dot-product.ll index 9ece9edb84343..40daf8ffb63ea 100644 --- a/llvm/test/CodeGen/AArch64/neon-partial-reduce-dot-product.ll +++ b/llvm/test/CodeGen/AArch64/neon-partial-reduce-dot-product.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple aarch64 -mattr=+neon,+dotprod < %s | FileCheck %s --check-prefixes=CHECK,CHECK-DOT,CHECK-NOI8MM ; RUN: llc -mtriple aarch64 -mattr=+neon < %s | FileCheck %s --check-prefixes=CHECK,CHECK-NOI8MM,CHECK-NODOT ; RUN: llc -mtriple aarch64 -mattr=+neon,+dotprod,+i8mm < %s | FileCheck %s --check-prefixes=CHECK,CHECK-DOT,CHECK-I8MM +; RUN: llc -mtriple aarch64 -mattr=+neon,+dotprod,+i8mm -aarch64-enable-partial-reduce-nodes < %s | FileCheck %s --check-prefixes=CHECK,CHECK-NOI8MM,CHECK-NODOT define <4 x i32> @udot(<4 x i32> %acc, <16 x i8> %u, <16 x i8> %s) { ; CHECK-DOT-LABEL: udot: diff --git a/llvm/test/CodeGen/AArch64/sve-partial-reduce-dot-product.ll b/llvm/test/CodeGen/AArch64/sve-partial-reduce-dot-product.ll index 66f83c658ff4f..455231dd37be6 100644 --- 
a/llvm/test/CodeGen/AArch64/sve-partial-reduce-dot-product.ll +++ b/llvm/test/CodeGen/AArch64/sve-partial-reduce-dot-product.ll @@ -1,12 +1,36 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 ; RUN: llc -mtriple=aarch64 -mattr=+sve2,+i8mm %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-I8MM ; RUN: llc -mtriple=aarch64 -mattr=+sve2 %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-NOI8MM +; RUN: llc -mtriple=aarch64 -mattr=+sve2,+i8mm -aarch64-enable-partial-reduce-nodes %s -o - | FileCheck %s --check-prefixes=CHECK-NEWLOWERING define @udot( %acc, %a, %b) { ; CHECK-LABEL: udot: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: udot z0.s, z1.b, z2.b ; CHECK-NEXT: ret +; +; CHECK-NEWLOWERING-LABEL: udot: +; CHECK-NEWLOWERING: // %bb.0: // %entry +; CHECK-NEWLOWERING-NEXT: uunpklo z3.h, z1.b +; CHECK-NEWLOWERING-NEXT: uunpklo z4.h, z2.b +; CHECK-NEWLOWERING-NEXT: uunpkhi z1.h, z1.b +; CHECK-NEWLOWERING-NEXT: uunpkhi z2.h, z2.b +; CHECK-NEWLOWERING-NEXT: ptrue p0.s +; CHECK-NEWLOWERING-NEXT: uunpklo z5.s, z3.h +; CHECK-NEWLOWERING-NEXT: uunpkhi z3.s, z3.h +; CHECK-NEWLOWERING-NEXT: uunpklo z6.s, z4.h +; CHECK-NEWLOWERING-NEXT: uunpkhi z4.s, z4.h +; CHECK-NEWLOWERING-NEXT: uunpklo z7.s, z1.h +; CHECK-NEWLOWERING-NEXT: uunpkhi z1.s, z1.h +; CHECK-NEWLOWERING-NEXT: uunpklo z24.s, z2.h +; CHECK-NEWLOWERING-NEXT: uunpkhi z2.s, z2.h +; CHECK-NEWLOWERING-NEXT: mla z0.s, p0/m, z5.s, z6.s +; CHECK-NEWLOWERING-NEXT: mul z3.s, z3.s, z4.s +; CHECK-NEWLOWERING-NEXT: mla z0.s, p0/m, z1.s, z2.s +; CHECK-NEWLOWERING-NEXT: movprfx z1, z3 +; CHECK-NEWLOWERING-NEXT: mla z1.s, p0/m, z7.s, z24.s +; CHECK-NEWLOWERING-NEXT: add z0.s, z1.s, z0.s +; CHECK-NEWLOWERING-NEXT: ret entry: %a.wide = zext %a to %b.wide = zext %b to @@ -20,6 +44,29 @@ define @udot_wide( %acc, ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: udot z0.d, z1.h, z2.h ; CHECK-NEXT: ret +; +; CHECK-NEWLOWERING-LABEL: udot_wide: +; CHECK-NEWLOWERING: // %bb.0: // %entry +; CHECK-NEWLOWERING-NEXT: uunpklo z3.s, z1.h +; CHECK-NEWLOWERING-NEXT: uunpklo z4.s, z2.h +; CHECK-NEWLOWERING-NEXT: uunpkhi z1.s, z1.h +; CHECK-NEWLOWERING-NEXT: uunpkhi z2.s, z2.h +; CHECK-NEWLOWERING-NEXT: ptrue p0.d +; CHECK-NEWLOWERING-NEXT: uunpklo z5.d, z3.s +; CHECK-NEWLOWERING-NEXT: uunpkhi z3.d, z3.s +; CHECK-NEWLOWERING-NEXT: uunpklo z6.d, z4.s +; CHECK-NEWLOWERING-NEXT: uunpkhi z4.d, z4.s +; CHECK-NEWLOWERING-NEXT: uunpklo z7.d, z1.s +; CHECK-NEWLOWERING-NEXT: uunpkhi z1.d, z1.s +; CHECK-NEWLOWERING-NEXT: uunpklo z24.d, z2.s +; CHECK-NEWLOWERING-NEXT: uunpkhi z2.d, z2.s +; CHECK-NEWLOWERING-NEXT: mla z0.d, p0/m, z5.d, z6.d +; CHECK-NEWLOWERING-NEXT: mul z3.d, z3.d, z4.d +; CHECK-NEWLOWERING-NEXT: mla z0.d, p0/m, z1.d, z2.d +; CHECK-NEWLOWERING-NEXT: movprfx z1, z3 +; CHECK-NEWLOWERING-NEXT: mla z1.d, p0/m, z7.d, z24.d +; CHECK-NEWLOWERING-NEXT: add z0.d, z1.d, z0.d +; CHECK-NEWLOWERING-NEXT: ret entry: %a.wide = zext %a to %b.wide = zext %b to @@ -33,6 +80,29 @@ define @sdot( %accc, %a, ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: sdot z0.s, z1.b, z2.b ; CHECK-NEXT: ret +; +; CHECK-NEWLOWERING-LABEL: sdot: +; CHECK-NEWLOWERING: // %bb.0: // %entry +; CHECK-NEWLOWERING-NEXT: sunpklo z3.h, z1.b +; CHECK-NEWLOWERING-NEXT: sunpklo z4.h, z2.b +; CHECK-NEWLOWERING-NEXT: sunpkhi z1.h, z1.b +; CHECK-NEWLOWERING-NEXT: sunpkhi z2.h, z2.b +; CHECK-NEWLOWERING-NEXT: ptrue p0.s +; CHECK-NEWLOWERING-NEXT: sunpklo z5.s, z3.h +; CHECK-NEWLOWERING-NEXT: sunpkhi z3.s, z3.h +; CHECK-NEWLOWERING-NEXT: sunpklo z6.s, z4.h +; CHECK-NEWLOWERING-NEXT: 
sunpkhi z4.s, z4.h +; CHECK-NEWLOWERING-NEXT: sunpklo z7.s, z1.h +; CHECK-NEWLOWERING-NEXT: sunpkhi z1.s, z1.h +; CHECK-NEWLOWERING-NEXT: sunpklo z24.s, z2.h +; CHECK-NEWLOWERING-NEXT: sunpkhi z2.s, z2.h +; CHECK-NEWLOWERING-NEXT: mla z0.s, p0/m, z5.s, z6.s +; CHECK-NEWLOWERING-NEXT: mul z3.s, z3.s, z4.s +; CHECK-NEWLOWERING-NEXT: mla z0.s, p0/m, z1.s, z2.s +; CHECK-NEWLOWERING-NEXT: movprfx z1, z3 +; CHECK-NEWLOWERING-NEXT: mla z1.s, p0/m, z7.s, z24.s +; CHECK-NEWLOWERING-NEXT: add z0.s, z1.s, z0.s +; CHECK-NEWLOWERING-NEXT: ret entry: %a.wide = sext %a to %b.wide = sext %b to @@ -46,6 +116,29 @@ define @sdot_wide( %acc, ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: sdot z0.d, z1.h, z2.h ; CHECK-NEXT: ret +; +; CHECK-NEWLOWERING-LABEL: sdot_wide: +; CHECK-NEWLOWERING: // %bb.0: // %entry +; CHECK-NEWLOWERING-NEXT: sunpklo z3.s, z1.h +; CHECK-NEWLOWERING-NEXT: sunpklo z4.s, z2.h +; CHECK-NEWLOWERING-NEXT: sunpkhi z1.s, z1.h +; CHECK-NEWLOWERING-NEXT: sunpkhi z2.s, z2.h +; CHECK-NEWLOWERING-NEXT: ptrue p0.d +; CHECK-NEWLOWERING-NEXT: sunpklo z5.d, z3.s +; CHECK-NEWLOWERING-NEXT: sunpkhi z3.d, z3.s +; CHECK-NEWLOWERING-NEXT: sunpklo z6.d, z4.s +; CHECK-NEWLOWERING-NEXT: sunpkhi z4.d, z4.s +; CHECK-NEWLOWERING-NEXT: sunpklo z7.d, z1.s +; CHECK-NEWLOWERING-NEXT: sunpkhi z1.d, z1.s +; CHECK-NEWLOWERING-NEXT: sunpklo z24.d, z2.s +; CHECK-NEWLOWERING-NEXT: sunpkhi z2.d, z2.s +; CHECK-NEWLOWERING-NEXT: mla z0.d, p0/m, z5.d, z6.d +; CHECK-NEWLOWERING-NEXT: mul z3.d, z3.d, z4.d +; CHECK-NEWLOWERING-NEXT: mla z0.d, p0/m, z1.d, z2.d +; CHECK-NEWLOWERING-NEXT: movprfx z1, z3 +; CHECK-NEWLOWERING-NEXT: mla z1.d, p0/m, z7.d, z24.d +; CHECK-NEWLOWERING-NEXT: add z0.d, z1.d, z0.d +; CHECK-NEWLOWERING-NEXT: ret entry: %a.wide = sext %a to %b.wide = sext %b to @@ -82,6 +175,29 @@ define @usdot( %acc, %a, ; CHECK-NOI8MM-NEXT: mla z1.s, p0/m, z7.s, z24.s ; CHECK-NOI8MM-NEXT: add z0.s, z1.s, z0.s ; CHECK-NOI8MM-NEXT: ret +; +; CHECK-NEWLOWERING-LABEL: usdot: +; CHECK-NEWLOWERING: // %bb.0: // %entry +; CHECK-NEWLOWERING-NEXT: uunpklo z3.h, z1.b +; CHECK-NEWLOWERING-NEXT: sunpklo z4.h, z2.b +; CHECK-NEWLOWERING-NEXT: uunpkhi z1.h, z1.b +; CHECK-NEWLOWERING-NEXT: sunpkhi z2.h, z2.b +; CHECK-NEWLOWERING-NEXT: ptrue p0.s +; CHECK-NEWLOWERING-NEXT: uunpklo z5.s, z3.h +; CHECK-NEWLOWERING-NEXT: uunpkhi z3.s, z3.h +; CHECK-NEWLOWERING-NEXT: sunpklo z6.s, z4.h +; CHECK-NEWLOWERING-NEXT: sunpkhi z4.s, z4.h +; CHECK-NEWLOWERING-NEXT: uunpklo z7.s, z1.h +; CHECK-NEWLOWERING-NEXT: uunpkhi z1.s, z1.h +; CHECK-NEWLOWERING-NEXT: sunpklo z24.s, z2.h +; CHECK-NEWLOWERING-NEXT: sunpkhi z2.s, z2.h +; CHECK-NEWLOWERING-NEXT: mla z0.s, p0/m, z5.s, z6.s +; CHECK-NEWLOWERING-NEXT: mul z3.s, z3.s, z4.s +; CHECK-NEWLOWERING-NEXT: mla z0.s, p0/m, z1.s, z2.s +; CHECK-NEWLOWERING-NEXT: movprfx z1, z3 +; CHECK-NEWLOWERING-NEXT: mla z1.s, p0/m, z7.s, z24.s +; CHECK-NEWLOWERING-NEXT: add z0.s, z1.s, z0.s +; CHECK-NEWLOWERING-NEXT: ret entry: %a.wide = zext %a to %b.wide = sext %b to @@ -118,6 +234,29 @@ define @sudot( %acc, %a, ; CHECK-NOI8MM-NEXT: mla z1.s, p0/m, z7.s, z24.s ; CHECK-NOI8MM-NEXT: add z0.s, z1.s, z0.s ; CHECK-NOI8MM-NEXT: ret +; +; CHECK-NEWLOWERING-LABEL: sudot: +; CHECK-NEWLOWERING: // %bb.0: // %entry +; CHECK-NEWLOWERING-NEXT: sunpklo z3.h, z1.b +; CHECK-NEWLOWERING-NEXT: uunpklo z4.h, z2.b +; CHECK-NEWLOWERING-NEXT: sunpkhi z1.h, z1.b +; CHECK-NEWLOWERING-NEXT: uunpkhi z2.h, z2.b +; CHECK-NEWLOWERING-NEXT: ptrue p0.s +; CHECK-NEWLOWERING-NEXT: sunpklo z5.s, z3.h +; CHECK-NEWLOWERING-NEXT: sunpkhi z3.s, z3.h +; 
CHECK-NEWLOWERING-NEXT: uunpklo z6.s, z4.h +; CHECK-NEWLOWERING-NEXT: uunpkhi z4.s, z4.h +; CHECK-NEWLOWERING-NEXT: sunpklo z7.s, z1.h +; CHECK-NEWLOWERING-NEXT: sunpkhi z1.s, z1.h +; CHECK-NEWLOWERING-NEXT: uunpklo z24.s, z2.h +; CHECK-NEWLOWERING-NEXT: uunpkhi z2.s, z2.h +; CHECK-NEWLOWERING-NEXT: mla z0.s, p0/m, z5.s, z6.s +; CHECK-NEWLOWERING-NEXT: mul z3.s, z3.s, z4.s +; CHECK-NEWLOWERING-NEXT: mla z0.s, p0/m, z1.s, z2.s +; CHECK-NEWLOWERING-NEXT: movprfx z1, z3 +; CHECK-NEWLOWERING-NEXT: mla z1.s, p0/m, z7.s, z24.s +; CHECK-NEWLOWERING-NEXT: add z0.s, z1.s, z0.s +; CHECK-NEWLOWERING-NEXT: ret entry: %a.wide = sext %a to %b.wide = zext %b to @@ -136,6 +275,63 @@ define @udot_8to64( %acc, %a to %b.wide = zext %b to @@ -155,6 +351,63 @@ define @sdot_8to64( %acc, %a to %b.wide = sext %b to @@ -231,6 +484,63 @@ define @usdot_8to64( %acc, %a to %b.wide = sext %b to @@ -307,6 +617,63 @@ define @sudot_8to64( %acc, %a to %b.wide = zext %b to @@ -322,6 +689,20 @@ define @udot_no_bin_op( %acc, %a to %partial.reduce = tail call @llvm.experimental.vector.partial.reduce.add.nxv4i32.nxv16i32( %acc, %a.ext) ret %partial.reduce @@ -333,6 +714,20 @@ define @sdot_no_bin_op( %acc, %a to %partial.reduce = tail call @llvm.experimental.vector.partial.reduce.add.nxv4i32.nxv16i32( %acc, %a.ext) ret %partial.reduce @@ -344,6 +739,20 @@ define @udot_no_bin_op_wide( %acc, %a to %partial.reduce = tail call @llvm.experimental.vector.partial.reduce.add.nxv2i64.nxv8i64( %acc, %a.wide) @@ -356,6 +765,20 @@ define @sdot_no_bin_op_wide( %acc, %a to %partial.reduce = tail call @llvm.experimental.vector.partial.reduce.add.nxv2i64.nxv8i64( %acc, %a.wide) @@ -373,6 +796,32 @@ define @udot_no_bin_op_8to64( %acc, %a to %partial.reduce = tail call @llvm.experimental.vector.partial.reduce.add.nxv4i64.nxv16i64( %acc, %a.ext) ret %partial.reduce @@ -389,6 +838,32 @@ define @sdot_no_bin_op_8to64( %acc, %a to %partial.reduce = tail call @llvm.experimental.vector.partial.reduce.add.nxv4i64.nxv16i64( %acc, %a.ext) ret %partial.reduce @@ -407,6 +882,19 @@ define @not_udot( %acc, % ; CHECK-NEXT: mla z0.s, p0/m, z3.s, z4.s ; CHECK-NEXT: mla z0.s, p0/m, z1.s, z2.s ; CHECK-NEXT: ret +; +; CHECK-NEWLOWERING-LABEL: not_udot: +; CHECK-NEWLOWERING: // %bb.0: // %entry +; CHECK-NEWLOWERING-NEXT: and z1.h, z1.h, #0xff +; CHECK-NEWLOWERING-NEXT: and z2.h, z2.h, #0xff +; CHECK-NEWLOWERING-NEXT: ptrue p0.s +; CHECK-NEWLOWERING-NEXT: uunpklo z3.s, z1.h +; CHECK-NEWLOWERING-NEXT: uunpklo z4.s, z2.h +; CHECK-NEWLOWERING-NEXT: uunpkhi z1.s, z1.h +; CHECK-NEWLOWERING-NEXT: uunpkhi z2.s, z2.h +; CHECK-NEWLOWERING-NEXT: mla z0.s, p0/m, z3.s, z4.s +; CHECK-NEWLOWERING-NEXT: mla z0.s, p0/m, z1.s, z2.s +; CHECK-NEWLOWERING-NEXT: ret entry: %a.wide = zext %a to %b.wide = zext %b to @@ -428,6 +916,19 @@ define @not_udot_wide( %acc, %a to %b.wide = zext %b to @@ -459,6 +960,29 @@ define @not_usdot( %acc, ; CHECK-NEXT: mla z1.d, p0/m, z7.d, z24.d ; CHECK-NEXT: add z0.d, z1.d, z0.d ; CHECK-NEXT: ret +; +; CHECK-NEWLOWERING-LABEL: not_usdot: +; CHECK-NEWLOWERING: // %bb.0: // %entry +; CHECK-NEWLOWERING-NEXT: uunpklo z3.s, z1.h +; CHECK-NEWLOWERING-NEXT: sunpklo z4.s, z2.h +; CHECK-NEWLOWERING-NEXT: uunpkhi z1.s, z1.h +; CHECK-NEWLOWERING-NEXT: sunpkhi z2.s, z2.h +; CHECK-NEWLOWERING-NEXT: ptrue p0.d +; CHECK-NEWLOWERING-NEXT: uunpklo z5.d, z3.s +; CHECK-NEWLOWERING-NEXT: uunpkhi z3.d, z3.s +; CHECK-NEWLOWERING-NEXT: sunpklo z6.d, z4.s +; CHECK-NEWLOWERING-NEXT: sunpkhi z4.d, z4.s +; CHECK-NEWLOWERING-NEXT: uunpklo z7.d, z1.s +; CHECK-NEWLOWERING-NEXT: uunpkhi 
z1.d, z1.s +; CHECK-NEWLOWERING-NEXT: sunpklo z24.d, z2.s +; CHECK-NEWLOWERING-NEXT: sunpkhi z2.d, z2.s +; CHECK-NEWLOWERING-NEXT: mla z0.d, p0/m, z5.d, z6.d +; CHECK-NEWLOWERING-NEXT: mul z3.d, z3.d, z4.d +; CHECK-NEWLOWERING-NEXT: mla z0.d, p0/m, z1.d, z2.d +; CHECK-NEWLOWERING-NEXT: movprfx z1, z3 +; CHECK-NEWLOWERING-NEXT: mla z1.d, p0/m, z7.d, z24.d +; CHECK-NEWLOWERING-NEXT: add z0.d, z1.d, z0.d +; CHECK-NEWLOWERING-NEXT: ret entry: %a.wide = zext %a to %b.wide = sext %b to @@ -490,6 +1014,29 @@ define @not_sudot( %acc, ; CHECK-NEXT: mla z1.d, p0/m, z7.d, z24.d ; CHECK-NEXT: add z0.d, z1.d, z0.d ; CHECK-NEXT: ret +; +; CHECK-NEWLOWERING-LABEL: not_sudot: +; CHECK-NEWLOWERING: // %bb.0: // %entry +; CHECK-NEWLOWERING-NEXT: sunpklo z3.s, z1.h +; CHECK-NEWLOWERING-NEXT: uunpklo z4.s, z2.h +; CHECK-NEWLOWERING-NEXT: sunpkhi z1.s, z1.h +; CHECK-NEWLOWERING-NEXT: uunpkhi z2.s, z2.h +; CHECK-NEWLOWERING-NEXT: ptrue p0.d +; CHECK-NEWLOWERING-NEXT: sunpklo z5.d, z3.s +; CHECK-NEWLOWERING-NEXT: sunpkhi z3.d, z3.s +; CHECK-NEWLOWERING-NEXT: uunpklo z6.d, z4.s +; CHECK-NEWLOWERING-NEXT: uunpkhi z4.d, z4.s +; CHECK-NEWLOWERING-NEXT: sunpklo z7.d, z1.s +; CHECK-NEWLOWERING-NEXT: sunpkhi z1.d, z1.s +; CHECK-NEWLOWERING-NEXT: uunpklo z24.d, z2.s +; CHECK-NEWLOWERING-NEXT: uunpkhi z2.d, z2.s +; CHECK-NEWLOWERING-NEXT: mla z0.d, p0/m, z5.d, z6.d +; CHECK-NEWLOWERING-NEXT: mul z3.d, z3.d, z4.d +; CHECK-NEWLOWERING-NEXT: mla z0.d, p0/m, z1.d, z2.d +; CHECK-NEWLOWERING-NEXT: movprfx z1, z3 +; CHECK-NEWLOWERING-NEXT: mla z1.d, p0/m, z7.d, z24.d +; CHECK-NEWLOWERING-NEXT: add z0.d, z1.d, z0.d +; CHECK-NEWLOWERING-NEXT: ret entry: %a.wide = sext %a to %b.wide = zext %b to @@ -522,6 +1069,30 @@ define @udot_different_types( %acc, %a to %b.wide = zext %b to @@ -555,6 +1126,31 @@ define @sdot_different_types( %acc, %a to %b.wide = sext %b to @@ -588,6 +1184,31 @@ define @usdot_different_types( %acc, %a to %b.wide = sext %b to @@ -620,6 +1241,30 @@ define @sudot_different_types( %acc, %a to %b.wide = zext %b to @@ -627,3 +1272,89 @@ entry: %partial.reduce = tail call @llvm.experimental.vector.partial.reduce.add.nxv2i64.nxv8i64( %acc, %mult) ret %partial.reduce } + +define @udot_nxv8i8_promote ( %acc, %a, %b){ +; CHECK-LABEL: udot_nxv8i8_promote: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: and z1.h, z1.h, #0xff +; CHECK-NEXT: and z2.h, z2.h, #0xff +; CHECK-NEXT: mul z1.h, z1.h, z2.h +; CHECK-NEXT: uunpklo z2.s, z1.h +; CHECK-NEXT: uunpkhi z1.s, z1.h +; CHECK-NEXT: uunpklo z3.d, z2.s +; CHECK-NEXT: uunpklo z4.d, z1.s +; CHECK-NEXT: uunpkhi z2.d, z2.s +; CHECK-NEXT: uunpkhi z1.d, z1.s +; CHECK-NEXT: add z0.d, z0.d, z3.d +; CHECK-NEXT: add z2.d, z2.d, z4.d +; CHECK-NEXT: add z0.d, z1.d, z0.d +; CHECK-NEXT: add z0.d, z2.d, z0.d +; CHECK-NEXT: ret +; +; CHECK-NEWLOWERING-LABEL: udot_nxv8i8_promote: +; CHECK-NEWLOWERING: // %bb.0: // %entry +; CHECK-NEWLOWERING-NEXT: and z1.h, z1.h, #0xff +; CHECK-NEWLOWERING-NEXT: and z2.h, z2.h, #0xff +; CHECK-NEWLOWERING-NEXT: mul z1.h, z1.h, z2.h +; CHECK-NEWLOWERING-NEXT: uunpklo z2.s, z1.h +; CHECK-NEWLOWERING-NEXT: uunpkhi z1.s, z1.h +; CHECK-NEWLOWERING-NEXT: uunpklo z3.d, z2.s +; CHECK-NEWLOWERING-NEXT: uunpklo z4.d, z1.s +; CHECK-NEWLOWERING-NEXT: uunpkhi z2.d, z2.s +; CHECK-NEWLOWERING-NEXT: uunpkhi z1.d, z1.s +; CHECK-NEWLOWERING-NEXT: add z0.d, z0.d, z3.d +; CHECK-NEWLOWERING-NEXT: add z2.d, z2.d, z4.d +; CHECK-NEWLOWERING-NEXT: add z0.d, z1.d, z0.d +; CHECK-NEWLOWERING-NEXT: add z0.d, z2.d, z0.d +; CHECK-NEWLOWERING-NEXT: ret +entry: + %a.wide = zext %a to + %b.wide = 
zext %b to + %mult = mul nuw nsw %a.wide, %b.wide + %partial.reduce = tail call @llvm.experimental.vector.partial.reduce.add.nxv2i16.nxv8i16( %acc, %mult) + ret %partial.reduce +} + +define @sdot_nxv8i8_promote ( %acc, %a, %b){ +; CHECK-LABEL: sdot_nxv8i8_promote: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ptrue p0.h +; CHECK-NEXT: sxtb z1.h, p0/m, z1.h +; CHECK-NEXT: sxtb z2.h, p0/m, z2.h +; CHECK-NEXT: mul z1.h, z1.h, z2.h +; CHECK-NEXT: uunpklo z2.s, z1.h +; CHECK-NEXT: uunpkhi z1.s, z1.h +; CHECK-NEXT: uunpklo z3.d, z2.s +; CHECK-NEXT: uunpklo z4.d, z1.s +; CHECK-NEXT: uunpkhi z2.d, z2.s +; CHECK-NEXT: uunpkhi z1.d, z1.s +; CHECK-NEXT: add z0.d, z0.d, z3.d +; CHECK-NEXT: add z2.d, z2.d, z4.d +; CHECK-NEXT: add z0.d, z1.d, z0.d +; CHECK-NEXT: add z0.d, z2.d, z0.d +; CHECK-NEXT: ret +; +; CHECK-NEWLOWERING-LABEL: sdot_nxv8i8_promote: +; CHECK-NEWLOWERING: // %bb.0: // %entry +; CHECK-NEWLOWERING-NEXT: ptrue p0.h +; CHECK-NEWLOWERING-NEXT: sxtb z1.h, p0/m, z1.h +; CHECK-NEWLOWERING-NEXT: sxtb z2.h, p0/m, z2.h +; CHECK-NEWLOWERING-NEXT: mul z1.h, z1.h, z2.h +; CHECK-NEWLOWERING-NEXT: uunpklo z2.s, z1.h +; CHECK-NEWLOWERING-NEXT: uunpkhi z1.s, z1.h +; CHECK-NEWLOWERING-NEXT: uunpklo z3.d, z2.s +; CHECK-NEWLOWERING-NEXT: uunpklo z4.d, z1.s +; CHECK-NEWLOWERING-NEXT: uunpkhi z2.d, z2.s +; CHECK-NEWLOWERING-NEXT: uunpkhi z1.d, z1.s +; CHECK-NEWLOWERING-NEXT: add z0.d, z0.d, z3.d +; CHECK-NEWLOWERING-NEXT: add z2.d, z2.d, z4.d +; CHECK-NEWLOWERING-NEXT: add z0.d, z1.d, z0.d +; CHECK-NEWLOWERING-NEXT: add z0.d, z2.d, z0.d +; CHECK-NEWLOWERING-NEXT: ret +entry: + %a.wide = sext %a to + %b.wide = sext %b to + %mult = mul nuw nsw %a.wide, %b.wide + %partial.reduce = tail call @llvm.experimental.vector.partial.reduce.add.nxv2i16.nxv8i16( %acc, %mult) + ret %partial.reduce +} diff --git a/llvm/test/CodeGen/AArch64/sve-partial-reduce-wide-add.ll b/llvm/test/CodeGen/AArch64/sve-partial-reduce-wide-add.ll index b4b946c68566e..11fb60ead4fb2 100644 --- a/llvm/test/CodeGen/AArch64/sve-partial-reduce-wide-add.ll +++ b/llvm/test/CodeGen/AArch64/sve-partial-reduce-wide-add.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 ; RUN: llc -mtriple=aarch64 -mattr=+sve2 %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SVE2 ; RUN: llc -mtriple=aarch64 -mattr=+sve %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SVE +; RUN: llc -mtriple=aarch64 -mattr=+sve2 -aarch64-enable-partial-reduce-nodes %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-NEWLOWERING define @signed_wide_add_nxv4i32( %acc, %input){ ; CHECK-SVE2-LABEL: signed_wide_add_nxv4i32: @@ -16,6 +17,14 @@ define @signed_wide_add_nxv4i32( %acc, %input to %partial.reduce = tail call @llvm.experimental.vector.partial.reduce.add.nxv2i64.nxv4i64( %acc, %input.wide) @@ -36,6 +45,14 @@ define @unsigned_wide_add_nxv4i32( %acc, %input to %partial.reduce = tail call @llvm.experimental.vector.partial.reduce.add.nxv2i64.nxv4i64( %acc, %input.wide) @@ -56,6 +73,14 @@ define @signed_wide_add_nxv8i16( %acc, %input to %partial.reduce = tail call @llvm.experimental.vector.partial.reduce.add.nxv4i32.nxv8i32( %acc, %input.wide) @@ -76,6 +101,14 @@ define @unsigned_wide_add_nxv8i16( %acc, %input to %partial.reduce = tail call @llvm.experimental.vector.partial.reduce.add.nxv4i32.nxv8i32( %acc, %input.wide) @@ -96,6 +129,14 @@ define @signed_wide_add_nxv16i8( %acc, %input to %partial.reduce = tail call @llvm.experimental.vector.partial.reduce.add.nxv8i16.nxv16i16( %acc, %input.wide) @@ -116,6 +157,14 @@ 
define @unsigned_wide_add_nxv16i8( %acc, %input to %partial.reduce = tail call @llvm.experimental.vector.partial.reduce.add.nxv8i16.nxv16i16( %acc, %input.wide) diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.make.buffer.rsrc.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.make.buffer.rsrc.ll index 4a151aeca87e4..6171c73d8d2dc 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.make.buffer.rsrc.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.make.buffer.rsrc.ll @@ -25,7 +25,7 @@ define amdgpu_ps ptr addrspace(8) @basic_raw_buffer(ptr inreg %p) { ; CHECK-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec ; CHECK-NEXT: $sgpr3 = COPY [[V_READFIRSTLANE_B32_3]] ; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $sgpr3 - %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p0(ptr %p, i16 0, i32 1234, i32 5678) + %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %p, i16 0, i32 1234, i32 5678) ret ptr addrspace(8) %rsrc } @@ -43,7 +43,7 @@ define amdgpu_ps float @read_raw_buffer(ptr addrspace(1) inreg %p) { ; CHECK-NEXT: [[BUFFER_LOAD_DWORD_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 4, 0, 0, implicit $exec :: (dereferenceable load (s32) from %ir.rsrc, align 1, addrspace 8) ; CHECK-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFSET]] ; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 - %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p1(ptr addrspace(1) %p, i16 0, i32 0, i32 0) + %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) %p, i16 0, i32 0, i32 0) %loaded = call float @llvm.amdgcn.raw.ptr.buffer.load(ptr addrspace(8) %rsrc, i32 4, i32 0, i32 0) ret float %loaded } @@ -74,7 +74,7 @@ define amdgpu_ps ptr addrspace(8) @basic_struct_buffer(ptr inreg %p) { ; CHECK-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec ; CHECK-NEXT: $sgpr3 = COPY [[V_READFIRSTLANE_B32_3]] ; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $sgpr3 - %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p0(ptr %p, i16 4, i32 1234, i32 5678) + %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %p, i16 4, i32 1234, i32 5678) ret ptr addrspace(8) %rsrc } @@ -104,7 +104,7 @@ define amdgpu_ps ptr addrspace(8) @variable_top_half(ptr inreg %p, i32 inreg %nu ; CHECK-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec ; CHECK-NEXT: $sgpr3 = COPY [[V_READFIRSTLANE_B32_3]] ; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $sgpr3 - %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p0(ptr %p, i16 4, i32 %numVals, i32 %flags) + %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %p, i16 4, i32 %numVals, i32 %flags) ret ptr addrspace(8) %rsrc } @@ -136,7 +136,7 @@ define amdgpu_ps ptr addrspace(8) @general_case(ptr inreg %p, i16 inreg %stride, ; CHECK-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32 = V_READFIRSTLANE_B32 [[COPY8]], implicit $exec ; CHECK-NEXT: $sgpr3 = COPY [[V_READFIRSTLANE_B32_3]] ; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $sgpr3 - %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p0(ptr %p, i16 %stride, i32 %numVals, i32 %flags) + %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %p, 
i16 %stride, i32 %numVals, i32 %flags) ret ptr addrspace(8) %rsrc } @@ -161,7 +161,7 @@ define amdgpu_ps float @general_case_load(ptr inreg %p, i16 inreg %stride, i32 i ; CHECK-NEXT: [[BUFFER_LOAD_DWORD_IDXEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_IDXEN [[COPY5]], [[REG_SEQUENCE]], [[S_MOV_B32_2]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32) from %ir.rsrc, align 1, addrspace 8) ; CHECK-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_IDXEN]] ; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 - %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p0(ptr %p, i16 %stride, i32 %numVals, i32 %flags) + %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %p, i16 %stride, i32 %numVals, i32 %flags) %value = call float @llvm.amdgcn.struct.ptr.buffer.load(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0, i32 0) ret float %value } @@ -221,12 +221,52 @@ define amdgpu_ps float @general_case_load_with_waterfall(ptr %p, i16 %stride, i3 ; CHECK-NEXT: bb.5: ; CHECK-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_IDXEN]] ; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 - %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p0(ptr %p, i16 %stride, i32 %numVals, i32 %flags) + %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %p, i16 %stride, i32 %numVals, i32 %flags) %value = call float @llvm.amdgcn.struct.ptr.buffer.load(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0, i32 0) ret float %value } -declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p0(ptr nocapture readnone, i16, i32, i32) -declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p1(ptr addrspace(1) nocapture readnone, i16, i32, i32) +define amdgpu_ps float @read_buffer_fat_ptr_p0(ptr inreg %p) { + ; CHECK-LABEL: name: read_buffer_fat_ptr_p0 + ; CHECK: bb.1 (%ir-block.0): + ; CHECK-NEXT: liveins: $sgpr0, $sgpr1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; CHECK-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; CHECK-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 65535 + ; CHECK-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[COPY1]], [[S_MOV_B32_1]], implicit-def dead $scc + ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[S_AND_B32_]], %subreg.sub1, [[S_MOV_B32_]], %subreg.sub2, [[S_MOV_B32_]], %subreg.sub3 + ; CHECK-NEXT: [[BUFFER_LOAD_DWORD_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32) from %ir.ptr, align 1, addrspace 8) + ; CHECK-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFSET]] + ; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + %ptr = call ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p0(ptr %p, i16 0, i32 0, i32 0) + %loaded = load float, ptr addrspace(7) %ptr + ret float %loaded +} + +define amdgpu_ps float @read_buffer_fat_ptr_p1(ptr addrspace(1) inreg %p) { + ; CHECK-LABEL: name: read_buffer_fat_ptr_p1 + ; CHECK: bb.1 (%ir-block.0): + ; CHECK-NEXT: liveins: $sgpr0, $sgpr1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; CHECK-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; CHECK-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 65535 + ; CHECK-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[COPY1]], [[S_MOV_B32_1]], implicit-def dead $scc + ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[S_AND_B32_]], %subreg.sub1, 
[[S_MOV_B32_]], %subreg.sub2, [[S_MOV_B32_]], %subreg.sub3 + ; CHECK-NEXT: [[BUFFER_LOAD_DWORD_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32) from %ir.ptr, align 1, addrspace 8) + ; CHECK-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFSET]] + ; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + %ptr = call ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p1(ptr addrspace(1) %p, i16 0, i32 0, i32 0) + %loaded = load float, ptr addrspace(7) %ptr + ret float %loaded +} + +declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr nocapture readnone, i16, i32, i32) +declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) nocapture readnone, i16, i32, i32) +declare ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p0(ptr nocapture readnone, i16, i32, i32) +declare ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p1(ptr addrspace(1) nocapture readnone, i16, i32, i32) declare float @llvm.amdgcn.raw.ptr.buffer.load(ptr addrspace(8) nocapture readonly, i32, i32, i32 immarg) declare float @llvm.amdgcn.struct.ptr.buffer.load(ptr addrspace(8) nocapture readonly, i32, i32, i32, i32 immarg) diff --git a/llvm/test/CodeGen/AMDGPU/chain-hi-to-lo.ll b/llvm/test/CodeGen/AMDGPU/chain-hi-to-lo.ll index 21a2ae80574e0..db9a89a2a7370 100644 --- a/llvm/test/CodeGen/AMDGPU/chain-hi-to-lo.ll +++ b/llvm/test/CodeGen/AMDGPU/chain-hi-to-lo.ll @@ -3,8 +3,10 @@ ; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs -mattr=-unaligned-access-mode -mattr=+enable-flat-scratch < %s | FileCheck -check-prefixes=GCN,FLATSCR %s ; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -verify-machineinstrs -mattr=-unaligned-access-mode < %s | FileCheck -check-prefixes=GFX10,GFX10_DEFAULT %s ; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -verify-machineinstrs -mattr=-unaligned-access-mode -mattr=+enable-flat-scratch < %s | FileCheck -check-prefixes=GFX10,FLATSCR_GFX10 %s -; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -verify-machineinstrs -mattr=-unaligned-access-mode < %s | FileCheck -check-prefixes=GFX11 %s -; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -verify-machineinstrs -mattr=-unaligned-access-mode -mattr=+enable-flat-scratch < %s | FileCheck -check-prefixes=GFX11 %s +; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -verify-machineinstrs -mattr=-unaligned-access-mode,+real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-TRUE16 %s +; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -verify-machineinstrs -mattr=-unaligned-access-mode,-real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-FAKE16 %s +; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -verify-machineinstrs -mattr=-unaligned-access-mode -mattr=+enable-flat-scratch,+real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-TRUE16 %s +; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -verify-machineinstrs -mattr=-unaligned-access-mode -mattr=+enable-flat-scratch,-real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-FAKE16 %s define <2 x half> @chain_hi_to_lo_private() { ; GFX900-LABEL: chain_hi_to_lo_private: @@ -156,14 +158,23 @@ define <2 x half> @chain_hi_to_lo_arithmatic(ptr addrspace(5) %base, half %in) { ; FLATSCR_GFX10-NEXT: v_mov_b32_e32 v0, v1 ; FLATSCR_GFX10-NEXT: s_setpc_b64 s[30:31] ; -; GFX11-LABEL: chain_hi_to_lo_arithmatic: -; GFX11: ; %bb.0: ; %bb -; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NEXT: v_add_f16_e32 v1, 1.0, v1 -; GFX11-NEXT: scratch_load_d16_hi_b16 v1, 
v0, off -; GFX11-NEXT: s_waitcnt vmcnt(0) -; GFX11-NEXT: v_mov_b32_e32 v0, v1 -; GFX11-NEXT: s_setpc_b64 s[30:31] +; GFX11-TRUE16-LABEL: chain_hi_to_lo_arithmatic: +; GFX11-TRUE16: ; %bb.0: ; %bb +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: v_add_f16_e32 v1.l, 1.0, v1.l +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v1, v0, off +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v1 +; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-FAKE16-LABEL: chain_hi_to_lo_arithmatic: +; GFX11-FAKE16: ; %bb.0: ; %bb +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_add_f16_e32 v1, 1.0, v1 +; GFX11-FAKE16-NEXT: scratch_load_d16_hi_b16 v1, v0, off +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v1 +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] bb: %arith_lo = fadd half %in, 1.0 %load_hi = load half, ptr addrspace(5) %base @@ -361,18 +372,31 @@ define <2 x half> @chain_hi_to_lo_flat() { ; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX10-NEXT: s_setpc_b64 s[30:31] ; -; GFX11-LABEL: chain_hi_to_lo_flat: -; GFX11: ; %bb.0: ; %bb -; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NEXT: v_mov_b32_e32 v0, 2 -; GFX11-NEXT: v_mov_b32_e32 v1, 0 -; GFX11-NEXT: flat_load_u16 v0, v[0:1] -; GFX11-NEXT: v_mov_b32_e32 v1, 0 -; GFX11-NEXT: v_mov_b32_e32 v2, 0 -; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) -; GFX11-NEXT: flat_load_d16_hi_b16 v0, v[1:2] -; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) -; GFX11-NEXT: s_setpc_b64 s[30:31] +; GFX11-TRUE16-LABEL: chain_hi_to_lo_flat: +; GFX11-TRUE16: ; %bb.0: ; %bb +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, 2 +; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, 0 +; GFX11-TRUE16-NEXT: flat_load_d16_b16 v0, v[0:1] +; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, 0 +; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, 0 +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: flat_load_d16_hi_b16 v0, v[1:2] +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-FAKE16-LABEL: chain_hi_to_lo_flat: +; GFX11-FAKE16: ; %bb.0: ; %bb +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, 2 +; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, 0 +; GFX11-FAKE16-NEXT: flat_load_u16 v0, v[0:1] +; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, 0 +; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, 0 +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: flat_load_d16_hi_b16 v0, v[1:2] +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] bb: %gep_lo = getelementptr inbounds half, ptr null, i64 1 %load_lo = load half, ptr %gep_lo @@ -403,14 +427,23 @@ define <2 x half> @chain_hi_to_lo_flat_different_bases(ptr %base_lo, ptr %base_h ; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX10-NEXT: s_setpc_b64 s[30:31] ; -; GFX11-LABEL: chain_hi_to_lo_flat_different_bases: -; GFX11: ; %bb.0: ; %bb -; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NEXT: flat_load_u16 v0, v[0:1] -; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) -; GFX11-NEXT: flat_load_d16_hi_b16 v0, v[2:3] -; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) -; GFX11-NEXT: s_setpc_b64 s[30:31] +; GFX11-TRUE16-LABEL: chain_hi_to_lo_flat_different_bases: +; GFX11-TRUE16: ; %bb.0: ; %bb +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: flat_load_d16_b16 v0, v[0:1] +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) 
lgkmcnt(0) +; GFX11-TRUE16-NEXT: flat_load_d16_hi_b16 v0, v[2:3] +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-FAKE16-LABEL: chain_hi_to_lo_flat_different_bases: +; GFX11-FAKE16: ; %bb.0: ; %bb +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: flat_load_u16 v0, v[0:1] +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: flat_load_d16_hi_b16 v0, v[2:3] +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] bb: %load_lo = load half, ptr %base_lo %load_hi = load half, ptr %base_hi @@ -864,17 +897,31 @@ define <2 x i16> @chain_hi_to_lo_flat_other_dep(ptr addrspace(0) %ptr) { ; GFX10-NEXT: v_bfi_b32 v0, 0xffff, v2, v0 ; GFX10-NEXT: s_setpc_b64 s[30:31] ; -; GFX11-LABEL: chain_hi_to_lo_flat_other_dep: -; GFX11: ; %bb.0: ; %bb -; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NEXT: flat_load_u16 v2, v[0:1] offset:2 glc dlc -; GFX11-NEXT: s_waitcnt vmcnt(0) -; GFX11-NEXT: flat_load_d16_hi_b16 v0, v[0:1] glc dlc -; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) -; GFX11-NEXT: v_pk_add_u16 v0, v0, 12 op_sel_hi:[1,0] -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-NEXT: v_bfi_b32 v0, 0xffff, v2, v0 -; GFX11-NEXT: s_setpc_b64 s[30:31] +; GFX11-TRUE16-LABEL: chain_hi_to_lo_flat_other_dep: +; GFX11-TRUE16: ; %bb.0: ; %bb +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: flat_load_d16_b16 v2, v[0:1] offset:2 glc dlc +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-TRUE16-NEXT: flat_load_d16_hi_b16 v0, v[0:1] glc dlc +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(1) +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.l +; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, v0, 12 op_sel_hi:[1,0] +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_bfi_b32 v0, 0xffff, v1, v0 +; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-FAKE16-LABEL: chain_hi_to_lo_flat_other_dep: +; GFX11-FAKE16: ; %bb.0: ; %bb +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: flat_load_u16 v2, v[0:1] offset:2 glc dlc +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-FAKE16-NEXT: flat_load_d16_hi_b16 v0, v[0:1] glc dlc +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_pk_add_u16 v0, v0, 12 op_sel_hi:[1,0] +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-FAKE16-NEXT: v_bfi_b32 v0, 0xffff, v2, v0 +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] bb: %gep_lo = getelementptr inbounds i16, ptr addrspace(0) %ptr, i64 1 %load_lo = load volatile i16, ptr addrspace(0) %gep_lo diff --git a/llvm/test/CodeGen/AMDGPU/code-size-estimate.mir b/llvm/test/CodeGen/AMDGPU/code-size-estimate.mir index 9e46c58b6b5a9..9ae536af6f0e9 100644 --- a/llvm/test/CodeGen/AMDGPU/code-size-estimate.mir +++ b/llvm/test/CodeGen/AMDGPU/code-size-estimate.mir @@ -18,3 +18,105 @@ body: | $vgpr16 = V_MOV_B32_indirect_read undef $vgpr1, implicit $exec, implicit $m0, implicit $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 V_MOV_B32_indirect_write undef $vgpr0, undef $vgpr3, implicit $exec, implicit $m0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3, implicit killed $vgpr0_vgpr1_vgpr2_vgpr3(tied-def 4) ... 
+ +# CHECK: meta: ; @meta +# CHECK: ; wave barrier +# CHECK: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; encoding: [0x00,0x00,0x8c,0xbf] +# CHECK: ; codeLenInByte = 4 +--- +name: meta +tracksRegLiveness: true +body: | + bb.0: + + WAVE_BARRIER +... + +# CHECK: align4: ; @align4 +# CHECK: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; encoding: [0x00,0x00,0x8c,0xbf] +# CHECK: s_cbranch_scc1 .LBB{{[0-9_]+}} ; encoding: [A,A,0x85,0xbf] +# CHECK: s_barrier ; encoding: [0x00,0x00,0x8a,0xbf] +# CHECK: .p2align 2 +# CHECK: s_endpgm ; encoding: [0x00,0x00,0x81,0xbf] +# CHECK: ; codeLenInByte = 16 + +--- +name: align4 +tracksRegLiveness: true +body: | + bb.0: + $scc = IMPLICIT_DEF + S_CBRANCH_SCC1 %bb.2, implicit $scc + + bb.1: + S_BARRIER + + bb.2 (align 4): + S_ENDPGM 0 +... + +# CHECK: align8: ; @align8 +# CHECK: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; encoding: [0x00,0x00,0x8c,0xbf] +# CHECK: s_cbranch_scc1 .LBB{{[0-9_]+}} ; encoding: [A,A,0x85,0xbf] +# CHECK: s_barrier ; encoding: [0x00,0x00,0x8a,0xbf] +# CHECK: .p2align 3 +# CHECK: s_endpgm ; encoding: [0x00,0x00,0x81,0xbf] +# CHECK: ; codeLenInByte = 20 +--- +name: align8 +tracksRegLiveness: true +body: | + bb.0: + $scc = IMPLICIT_DEF + S_CBRANCH_SCC1 %bb.2, implicit $scc + + bb.1: + S_BARRIER + + bb.2 (align 8): + S_ENDPGM 0 +... + +# CHECK: align16: ; @align16 +# CHECK: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; encoding: [0x00,0x00,0x8c,0xbf] +# CHECK: s_cbranch_scc1 .LBB{{[0-9_]+}} ; encoding: [A,A,0x85,0xbf] +# CHECK: s_barrier ; encoding: [0x00,0x00,0x8a,0xbf] +# CHECK: .p2align 4 +# CHECK: s_endpgm ; encoding: [0x00,0x00,0x81,0xbf] +# CHECK: ; codeLenInByte = 20 +--- +name: align16 +tracksRegLiveness: true +body: | + bb.0: + $scc = IMPLICIT_DEF + S_CBRANCH_SCC1 %bb.2, implicit $scc + + bb.1: + S_BARRIER + + bb.2 (align 16): + S_ENDPGM 0 +... + +# CHECK: align32: ; @align32 +# CHECK: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; encoding: [0x00,0x00,0x8c,0xbf] +# CHECK: s_cbranch_scc1 .LBB{{[0-9_]+}} ; encoding: [A,A,0x85,0xbf] +# CHECK: s_barrier ; encoding: [0x00,0x00,0x8a,0xbf] +# CHECK: .p2align 5 +# CHECK: s_endpgm ; encoding: [0x00,0x00,0x81,0xbf] +# CHECK: ; codeLenInByte = 36 +--- +name: align32 +tracksRegLiveness: true +body: | + bb.0: + $scc = IMPLICIT_DEF + S_CBRANCH_SCC1 %bb.2, implicit $scc + + bb.1: + S_BARRIER + + bb.2 (align 32): + S_ENDPGM 0 +... diff --git a/llvm/test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir b/llvm/test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir index 39b5076ebe5ac..807eaf2160b3c 100644 --- a/llvm/test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir +++ b/llvm/test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir @@ -927,3 +927,37 @@ body: | S_ENDPGM 0, implicit %3 ... + +--- +name: constant_s_xor_b32_uses_subreg +tracksRegLiveness: true +body: | + bb.0: + ; GCN-LABEL: name: constant_s_xor_b32_uses_subreg + ; GCN: [[S_MOV_B32_:%[0-9]+]]:sgpr_32 = S_MOV_B32 47 + ; GCN-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sgpr_32 = S_MOV_B32 0 + ; GCN-NEXT: S_ENDPGM 0, implicit [[S_MOV_B32_]], implicit [[S_MOV_B32_1]] + %0:sreg_64 = S_MOV_B64 32 + %1:sreg_64 = S_MOV_B64 15 + %2:sgpr_32 = S_XOR_B32 %0.sub0, %1.sub0, implicit-def dead $scc + %3:sgpr_32 = S_XOR_B32 %0.sub1, %1.sub1, implicit-def dead $scc + S_ENDPGM 0, implicit %2, implicit %3 + +... 
+ +--- +name: constant_v_or_b32_uses_subreg +tracksRegLiveness: true +body: | + bb.0: + ; GCN-LABEL: name: constant_v_or_b32_uses_subreg + ; GCN: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 268435455, implicit $exec + ; GCN-NEXT: [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 -1, implicit $exec + ; GCN-NEXT: S_ENDPGM 0, implicit [[V_MOV_B32_e32_]], implicit [[V_MOV_B32_e32_1]] + %0:vreg_64 = V_MOV_B64_PSEUDO 18446744069683019775, implicit $exec + %1:vreg_64 = V_MOV_B64_PSEUDO 0, implicit $exec + %2:vgpr_32 = V_OR_B32_e32 %0.sub0, %1.sub0, implicit $exec + %3:vgpr_32 = V_OR_B32_e32 %0.sub1, %1.sub1, implicit $exec + S_ENDPGM 0, implicit %2, implicit %3 + +... diff --git a/llvm/test/CodeGen/AMDGPU/eliminate-frame-index-v-add-co-u32.mir b/llvm/test/CodeGen/AMDGPU/eliminate-frame-index-v-add-co-u32.mir index 12e8d24cb3675..ade7b4266e9e6 100644 --- a/llvm/test/CodeGen/AMDGPU/eliminate-frame-index-v-add-co-u32.mir +++ b/llvm/test/CodeGen/AMDGPU/eliminate-frame-index-v-add-co-u32.mir @@ -2162,8 +2162,7 @@ body: | ; GFX11-NEXT: $sgpr5 = frame-setup COPY $sgpr34 ; GFX11-NEXT: $sgpr34 = frame-setup COPY $sgpr32 ; GFX11-NEXT: $sgpr32 = frame-setup S_ADD_I32 $sgpr32, 32768, implicit-def dead $scc - ; GFX11-NEXT: $vgpr1 = V_MOV_B32_e32 $sgpr33, implicit $exec - ; GFX11-NEXT: renamable $vgpr0, dead renamable $sgpr8_sgpr9 = V_ADD_CO_U32_e64 12352, killed $vgpr1, 0, implicit $exec + ; GFX11-NEXT: renamable $vgpr0, dead renamable $sgpr8_sgpr9 = V_ADD_CO_U32_e64 $sgpr33, 12352, 0, implicit $exec ; GFX11-NEXT: $sgpr32 = frame-destroy COPY $sgpr34 ; GFX11-NEXT: $sgpr34 = frame-destroy COPY $sgpr5 ; GFX11-NEXT: $sgpr33 = frame-destroy COPY $sgpr4 @@ -2178,8 +2177,7 @@ body: | ; GFX12-NEXT: $sgpr5 = frame-setup COPY $sgpr34 ; GFX12-NEXT: $sgpr34 = frame-setup COPY $sgpr32 ; GFX12-NEXT: $sgpr32 = frame-setup S_ADD_I32 $sgpr32, 24576, implicit-def dead $scc - ; GFX12-NEXT: $vgpr1 = V_MOV_B32_e32 $sgpr33, implicit $exec - ; GFX12-NEXT: renamable $vgpr0, dead renamable $sgpr8_sgpr9 = V_ADD_CO_U32_e64 4160, killed $vgpr1, 0, implicit $exec + ; GFX12-NEXT: renamable $vgpr0, dead renamable $sgpr8_sgpr9 = V_ADD_CO_U32_e64 $sgpr33, 4160, 0, implicit $exec ; GFX12-NEXT: $sgpr32 = frame-destroy COPY $sgpr34 ; GFX12-NEXT: $sgpr34 = frame-destroy COPY $sgpr5 ; GFX12-NEXT: $sgpr33 = frame-destroy COPY $sgpr4 @@ -2315,8 +2313,7 @@ body: | ; GFX11-NEXT: $sgpr5 = frame-setup COPY $sgpr34 ; GFX11-NEXT: $sgpr34 = frame-setup COPY $sgpr32 ; GFX11-NEXT: $sgpr32 = frame-setup S_ADD_I32 $sgpr32, 32768, implicit-def dead $scc - ; GFX11-NEXT: $vgpr1 = V_MOV_B32_e32 $sgpr33, implicit $exec - ; GFX11-NEXT: renamable $vgpr0, renamable $sgpr8_sgpr9 = V_ADD_CO_U32_e64 12352, killed $vgpr1, 0, implicit $exec + ; GFX11-NEXT: renamable $vgpr0, renamable $sgpr8_sgpr9 = V_ADD_CO_U32_e64 $sgpr33, 12352, 0, implicit $exec ; GFX11-NEXT: renamable $vgpr0, renamable $sgpr8_sgpr9 = V_ADD_CO_U32_e64 killed $vgpr0, 0, 0, implicit $exec ; GFX11-NEXT: $sgpr32 = frame-destroy COPY $sgpr34 ; GFX11-NEXT: $sgpr34 = frame-destroy COPY $sgpr5 @@ -2332,8 +2329,7 @@ body: | ; GFX12-NEXT: $sgpr5 = frame-setup COPY $sgpr34 ; GFX12-NEXT: $sgpr34 = frame-setup COPY $sgpr32 ; GFX12-NEXT: $sgpr32 = frame-setup S_ADD_I32 $sgpr32, 24576, implicit-def dead $scc - ; GFX12-NEXT: $vgpr1 = V_MOV_B32_e32 $sgpr33, implicit $exec - ; GFX12-NEXT: renamable $vgpr0, renamable $sgpr8_sgpr9 = V_ADD_CO_U32_e64 4160, killed $vgpr1, 0, implicit $exec + ; GFX12-NEXT: renamable $vgpr0, renamable $sgpr8_sgpr9 = V_ADD_CO_U32_e64 $sgpr33, 4160, 0, implicit $exec ; GFX12-NEXT: 
renamable $vgpr0, renamable $sgpr8_sgpr9 = V_ADD_CO_U32_e64 killed $vgpr0, 0, 0, implicit $exec ; GFX12-NEXT: $sgpr32 = frame-destroy COPY $sgpr34 ; GFX12-NEXT: $sgpr34 = frame-destroy COPY $sgpr5 @@ -2469,8 +2465,7 @@ body: | ; GFX11-NEXT: $sgpr5 = frame-setup COPY $sgpr34 ; GFX11-NEXT: $sgpr34 = frame-setup COPY $sgpr32 ; GFX11-NEXT: $sgpr32 = frame-setup S_ADD_I32 $sgpr32, 32768, implicit-def dead $scc - ; GFX11-NEXT: $vgpr1 = V_MOV_B32_e32 $sgpr33, implicit $exec - ; GFX11-NEXT: renamable $vgpr0, dead renamable $vcc = V_ADD_CO_U32_e64 12352, killed $vgpr1, 0, implicit $exec + ; GFX11-NEXT: renamable $vgpr0, dead renamable $vcc = V_ADD_CO_U32_e64 $sgpr33, 12352, 0, implicit $exec ; GFX11-NEXT: $sgpr32 = frame-destroy COPY $sgpr34 ; GFX11-NEXT: $sgpr34 = frame-destroy COPY $sgpr5 ; GFX11-NEXT: $sgpr33 = frame-destroy COPY $sgpr4 @@ -2485,8 +2480,7 @@ body: | ; GFX12-NEXT: $sgpr5 = frame-setup COPY $sgpr34 ; GFX12-NEXT: $sgpr34 = frame-setup COPY $sgpr32 ; GFX12-NEXT: $sgpr32 = frame-setup S_ADD_I32 $sgpr32, 24576, implicit-def dead $scc - ; GFX12-NEXT: $vgpr1 = V_MOV_B32_e32 $sgpr33, implicit $exec - ; GFX12-NEXT: renamable $vgpr0, dead renamable $vcc = V_ADD_CO_U32_e64 4160, killed $vgpr1, 0, implicit $exec + ; GFX12-NEXT: renamable $vgpr0, dead renamable $vcc = V_ADD_CO_U32_e64 $sgpr33, 4160, 0, implicit $exec ; GFX12-NEXT: $sgpr32 = frame-destroy COPY $sgpr34 ; GFX12-NEXT: $sgpr34 = frame-destroy COPY $sgpr5 ; GFX12-NEXT: $sgpr33 = frame-destroy COPY $sgpr4 @@ -2622,8 +2616,7 @@ body: | ; GFX11-NEXT: $sgpr5 = frame-setup COPY $sgpr34 ; GFX11-NEXT: $sgpr34 = frame-setup COPY $sgpr32 ; GFX11-NEXT: $sgpr32 = frame-setup S_ADD_I32 $sgpr32, 32768, implicit-def dead $scc - ; GFX11-NEXT: $vgpr1 = V_MOV_B32_e32 $sgpr33, implicit $exec - ; GFX11-NEXT: renamable $vgpr0, renamable $vcc = V_ADD_CO_U32_e64 12352, killed $vgpr1, 0, implicit $exec + ; GFX11-NEXT: renamable $vgpr0, renamable $vcc = V_ADD_CO_U32_e64 $sgpr33, 12352, 0, implicit $exec ; GFX11-NEXT: renamable $vgpr0, renamable $vcc = V_ADD_CO_U32_e64 killed $vgpr0, 0, 0, implicit $exec ; GFX11-NEXT: $sgpr32 = frame-destroy COPY $sgpr34 ; GFX11-NEXT: $sgpr34 = frame-destroy COPY $sgpr5 @@ -2639,8 +2632,7 @@ body: | ; GFX12-NEXT: $sgpr5 = frame-setup COPY $sgpr34 ; GFX12-NEXT: $sgpr34 = frame-setup COPY $sgpr32 ; GFX12-NEXT: $sgpr32 = frame-setup S_ADD_I32 $sgpr32, 24576, implicit-def dead $scc - ; GFX12-NEXT: $vgpr1 = V_MOV_B32_e32 $sgpr33, implicit $exec - ; GFX12-NEXT: renamable $vgpr0, renamable $vcc = V_ADD_CO_U32_e64 4160, killed $vgpr1, 0, implicit $exec + ; GFX12-NEXT: renamable $vgpr0, renamable $vcc = V_ADD_CO_U32_e64 $sgpr33, 4160, 0, implicit $exec ; GFX12-NEXT: renamable $vgpr0, renamable $vcc = V_ADD_CO_U32_e64 killed $vgpr0, 0, 0, implicit $exec ; GFX12-NEXT: $sgpr32 = frame-destroy COPY $sgpr34 ; GFX12-NEXT: $sgpr34 = frame-destroy COPY $sgpr5 diff --git a/llvm/test/CodeGen/AMDGPU/flat-address-space.ll b/llvm/test/CodeGen/AMDGPU/flat-address-space.ll index 4c68b8d35260f..91f9aa1c5fe3b 100644 --- a/llvm/test/CodeGen/AMDGPU/flat-address-space.ll +++ b/llvm/test/CodeGen/AMDGPU/flat-address-space.ll @@ -2,8 +2,9 @@ ; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=tonga < %s | FileCheck -check-prefixes=GCN,CIVI %s ; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=fiji < %s | FileCheck -check-prefixes=GCN,CIVI,CIVI-HSA %s ; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck -check-prefixes=GCN,GFX9 %s -; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 < %s | FileCheck -check-prefixes=GCN,GFX10PLUS %s -; RUN: llc 
-mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -amdgpu-enable-vopd=0 < %s | FileCheck -check-prefixes=GCN,GFX10PLUS,GFX11 %s +; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 < %s | FileCheck -check-prefixes=GCN,GFX10,GFX10PLUS %s +; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -mattr=+real-true16 -amdgpu-enable-vopd=0 < %s | FileCheck -check-prefixes=GCN,GFX10PLUS,GFX11-TRUE16 %s +; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -mattr=-real-true16 -amdgpu-enable-vopd=0 < %s | FileCheck -check-prefixes=GCN,GFX10PLUS,GFX11-FAKE16 %s ; GCN-LABEL: {{^}}store_flat_i32: ; GCN-DAG: s_load_{{dwordx2|b64}} s[[[LO_SREG:[0-9]+]]:[[HI_SREG:[0-9]+]]], @@ -224,7 +225,8 @@ define amdgpu_kernel void @store_flat_i8_neg_offset(ptr %fptr, i8 %x) #0 { ; CIVI: flat_load_ubyte v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}} glc{{$}} ; GFX9: flat_load_ubyte v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}} offset:4095 glc{{$}} ; GFX10: flat_load_ubyte v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}} glc dlc{{$}} -; GFX11: flat_load_u8 v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}} offset:4095 glc dlc{{$}} +; GFX11-TRUE16: flat_load_d16_u8 v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}} offset:4095 glc dlc{{$}} +; GFX11-FAKE16: flat_load_u8 v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}} offset:4095 glc dlc{{$}} define amdgpu_kernel void @load_flat_i8_max_offset(ptr %fptr) #0 { %fptr.offset = getelementptr inbounds i8, ptr %fptr, i64 4095 %val = load volatile i8, ptr %fptr.offset @@ -234,7 +236,9 @@ define amdgpu_kernel void @load_flat_i8_max_offset(ptr %fptr) #0 { ; GCN-LABEL: {{^}}load_flat_i8_max_offset_p1: ; CIVI: flat_load_ubyte v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}} glc{{$}} ; GFX9: flat_load_ubyte v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}} glc{{$}} -; GFX10PLUS: flat_load_{{ubyte|u8}} v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}} glc dlc{{$}} +; GFX10: flat_load_ubyte v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}} glc dlc{{$}} +; GFX11-TRUE16: flat_load_d16_u8 v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}} glc dlc{{$}} +; GFX11-FAKE16: flat_load_u8 v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}} glc dlc{{$}} define amdgpu_kernel void @load_flat_i8_max_offset_p1(ptr %fptr) #0 { %fptr.offset = getelementptr inbounds i8, ptr %fptr, i64 4096 %val = load volatile i8, ptr %fptr.offset diff --git a/llvm/test/CodeGen/AMDGPU/fold-literal-multiple-gfx10.mir b/llvm/test/CodeGen/AMDGPU/fold-literal-multiple-gfx10.mir new file mode 100644 index 0000000000000..e71516e74f17e --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/fold-literal-multiple-gfx10.mir @@ -0,0 +1,66 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5 +# RUN: llc -mtriple=amdgcn -mcpu=gfx1030 -run-pass=si-fold-operands -o - %s | FileCheck %s + +# The same literal may be used multiple times in different operands, +# as long as it is the same value. + +--- +name: fold_multiple_same_literal_use_0 +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + + ; CHECK-LABEL: name: fold_multiple_same_literal_use_0 + ; CHECK: liveins: $vgpr0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; CHECK-NEXT: [[V_DIV_SCALE_F32_e64_:%[0-9]+]]:vgpr_32, [[V_DIV_SCALE_F32_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_DIV_SCALE_F32_e64 0, 1178657792, 0, 1178657792, 0, 1178657792, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: S_ENDPGM 0, implicit [[V_DIV_SCALE_F32_e64_]] + %0:vgpr_32 = COPY $vgpr0 + %1:sreg_32 = S_MOV_B32 1178657792 + %2:vgpr_32 = COPY %1 + %3:vgpr_32, %4:sreg_32_xm0_xexec = V_DIV_SCALE_F32_e64 0, %2, 0, %2, 0, %2, 0, 0, implicit $mode, implicit $exec + S_ENDPGM 0, implicit %3 +... 
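+# Hedged aside on the rule exercised above: a VOP3 encoding carries at most
+# one extra 32-bit literal dword, so the fold is only legal because every
+# operand reuses the identical literal 1178657792; the last test in this
+# file keeps the V_MOV_B32 because 1178657793 is a different value.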
+ +--- +name: fold_multiple_same_literal_use_1 +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + + ; CHECK-LABEL: name: fold_multiple_same_literal_use_1 + ; CHECK: liveins: $vgpr0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; CHECK-NEXT: [[V_DIV_SCALE_F32_e64_:%[0-9]+]]:vgpr_32, [[V_DIV_SCALE_F32_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_DIV_SCALE_F32_e64 0, 1178657792, 0, 1178657792, 0, 1178657792, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: S_ENDPGM 0, implicit [[V_DIV_SCALE_F32_e64_]] + %0:vgpr_32 = COPY $vgpr0 + %1:sreg_32 = S_MOV_B32 1178657792 + %2:vgpr_32 = COPY %1 + %3:vgpr_32, %4:sreg_32_xm0_xexec = V_DIV_SCALE_F32_e64 0, 1178657792, 0, 1178657792, 0, %2, 0, 0, implicit $mode, implicit $exec + S_ENDPGM 0, implicit %3 +... + +--- +name: no_fold_multiple_same_literal_different_value +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + + ; CHECK-LABEL: name: no_fold_multiple_same_literal_different_value + ; CHECK: liveins: $vgpr0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 1178657793, implicit $exec + ; CHECK-NEXT: [[V_DIV_SCALE_F32_e64_:%[0-9]+]]:vgpr_32, [[V_DIV_SCALE_F32_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_DIV_SCALE_F32_e64 0, 1178657792, 0, 1178657792, 0, [[V_MOV_B32_e32_]], 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: S_ENDPGM 0, implicit [[V_DIV_SCALE_F32_e64_]] + %0:vgpr_32 = COPY $vgpr0 + %1:sreg_32 = S_MOV_B32 1178657793 + %2:vgpr_32 = COPY %1 + %3:vgpr_32, %4:sreg_32_xm0_xexec = V_DIV_SCALE_F32_e64 0, 1178657792, 0, 1178657792, 0, %2, 0, 0, implicit $mode, implicit $exec + S_ENDPGM 0, implicit %3 +... diff --git a/llvm/test/CodeGen/AMDGPU/fold-vgpr-copy.mir b/llvm/test/CodeGen/AMDGPU/fold-vgpr-copy.mir index 268a8a4783d24..edd5d0a119e5f 100644 --- a/llvm/test/CodeGen/AMDGPU/fold-vgpr-copy.mir +++ b/llvm/test/CodeGen/AMDGPU/fold-vgpr-copy.mir @@ -55,8 +55,7 @@ body: | # GCN-LABEL: name: fma_sgpr_sgpr_use # GCN: %0:sgpr_32 = IMPLICIT_DEF -# GCN-NEXT: %2:vgpr_32 = V_MOV_B32_e32 1234567, implicit $exec -# GCN-NEXT: %3:vgpr_32 = V_FMAC_F32_e64 0, %0, 0, 1234567, 0, %2, 0, 0, implicit $mode, implicit $exec +# GCN: %3:vgpr_32 = V_FMA_F32_e64 0, %0, 0, 1234567, 0, 1234567, 0, 0, implicit $mode, implicit $exec --- name: fma_sgpr_sgpr_use body: | diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.make.buffer.rsrc.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.make.buffer.rsrc.ll index b4840bce53d2c..3aa5ea995559f 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.make.buffer.rsrc.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.make.buffer.rsrc.ll @@ -18,7 +18,7 @@ define amdgpu_ps ptr addrspace(8) @basic_raw_buffer(ptr inreg %p) { ; CHECK-NEXT: $sgpr2 = COPY [[S_MOV_B32_1]] ; CHECK-NEXT: $sgpr3 = COPY [[S_MOV_B32_2]] ; CHECK-NEXT: SI_RETURN_TO_EPILOG $sgpr0, $sgpr1, $sgpr2, $sgpr3 - %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p0(ptr %p, i16 0, i32 1234, i32 5678) + %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %p, i16 0, i32 1234, i32 5678) ret ptr addrspace(8) %rsrc } @@ -36,7 +36,7 @@ define amdgpu_ps float @read_raw_buffer(ptr addrspace(1) inreg %p) { ; CHECK-NEXT: [[BUFFER_LOAD_DWORD_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET killed [[REG_SEQUENCE]], [[S_MOV_B32_1]], 4, 0, 0, implicit $exec :: (dereferenceable load (s32) from %ir.rsrc, align 1, addrspace 8) ; CHECK-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFSET]] ; CHECK-NEXT: SI_RETURN_TO_EPILOG $vgpr0 - %rsrc = call 
ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p1(ptr addrspace(1) %p, i16 0, i32 0, i32 0) + %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) %p, i16 0, i32 0, i32 0) %loaded = call float @llvm.amdgcn.raw.ptr.buffer.load(ptr addrspace(8) %rsrc, i32 4, i32 0, i32 0) ret float %loaded } @@ -59,7 +59,7 @@ define amdgpu_ps ptr addrspace(8) @basic_struct_buffer(ptr inreg %p) { ; CHECK-NEXT: $sgpr2 = COPY [[S_MOV_B32_2]] ; CHECK-NEXT: $sgpr3 = COPY [[S_MOV_B32_3]] ; CHECK-NEXT: SI_RETURN_TO_EPILOG $sgpr0, $sgpr1, $sgpr2, $sgpr3 - %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p0(ptr %p, i16 4, i32 1234, i32 5678) + %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %p, i16 4, i32 1234, i32 5678) ret ptr addrspace(8) %rsrc } @@ -81,7 +81,7 @@ define amdgpu_ps ptr addrspace(8) @variable_top_half(ptr inreg %p, i32 inreg %nu ; CHECK-NEXT: $sgpr2 = COPY [[COPY1]] ; CHECK-NEXT: $sgpr3 = COPY [[COPY]] ; CHECK-NEXT: SI_RETURN_TO_EPILOG $sgpr0, $sgpr1, $sgpr2, $sgpr3 - %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p0(ptr %p, i16 4, i32 %numVals, i32 %flags) + %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %p, i16 4, i32 %numVals, i32 %flags) ret ptr addrspace(8) %rsrc } @@ -104,7 +104,7 @@ define amdgpu_ps ptr addrspace(8) @general_case(ptr inreg %p, i16 inreg %stride, ; CHECK-NEXT: $sgpr2 = COPY [[COPY1]] ; CHECK-NEXT: $sgpr3 = COPY [[COPY]] ; CHECK-NEXT: SI_RETURN_TO_EPILOG $sgpr0, $sgpr1, $sgpr2, $sgpr3 - %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p0(ptr %p, i16 %stride, i32 %numVals, i32 %flags) + %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %p, i16 %stride, i32 %numVals, i32 %flags) ret ptr addrspace(8) %rsrc } @@ -128,7 +128,7 @@ define amdgpu_ps float @general_case_load(ptr inreg %p, i16 inreg %stride, i32 i ; CHECK-NEXT: [[BUFFER_LOAD_DWORD_IDXEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_IDXEN [[COPY5]], killed [[REG_SEQUENCE]], [[S_MOV_B32_1]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32) from %ir.rsrc, align 1, addrspace 8) ; CHECK-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_IDXEN]] ; CHECK-NEXT: SI_RETURN_TO_EPILOG $vgpr0 - %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p0(ptr %p, i16 %stride, i32 %numVals, i32 %flags) + %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %p, i16 %stride, i32 %numVals, i32 %flags) %value = call float @llvm.amdgcn.struct.ptr.buffer.load(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0, i32 0) ret float %value } @@ -153,12 +153,52 @@ define amdgpu_ps float @general_case_load_with_waterfall(ptr %p, i16 %stride, i3 ; CHECK-NEXT: [[BUFFER_LOAD_DWORD_IDXEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_IDXEN [[COPY5]], killed [[REG_SEQUENCE]], [[S_MOV_B32_1]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32) from %ir.rsrc, align 1, addrspace 8) ; CHECK-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_IDXEN]] ; CHECK-NEXT: SI_RETURN_TO_EPILOG $vgpr0 - %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p0(ptr %p, i16 %stride, i32 %numVals, i32 %flags) + %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %p, i16 %stride, i32 %numVals, i32 %flags) %value = call float @llvm.amdgcn.struct.ptr.buffer.load(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0, i32 0) ret float %value } -declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p0(ptr nocapture readnone, i16, i32, i32) -declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p1(ptr addrspace(1) nocapture readnone, i16, i32, i32) 
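+; Naming note, inferred from the declarations in this file rather than from
+; normative documentation: the intrinsic is now overloaded on its result
+; pointer type as well as the base pointer type, and the suffix spells both
+; with the result first, so .p8.p0 builds a ptr addrspace(8) rsrc from a
+; flat pointer while .p7.p1 builds a buffer fat pointer (addrspace 7) from
+; a global (addrspace 1) pointer.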
+define amdgpu_ps float @read_buffer_fat_ptr_p0(ptr inreg %p) { + ; CHECK-LABEL: name: read_buffer_fat_ptr_p0 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK-NEXT: liveins: $sgpr0, $sgpr1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr1 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr0 + ; CHECK-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 65535 + ; CHECK-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[COPY]], killed [[S_MOV_B32_]], implicit-def dead $scc + ; CHECK-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, killed [[S_AND_B32_]], %subreg.sub1, [[S_MOV_B32_1]], %subreg.sub2, [[S_MOV_B32_1]], %subreg.sub3 + ; CHECK-NEXT: [[BUFFER_LOAD_DWORD_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET killed [[REG_SEQUENCE]], [[S_MOV_B32_1]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32) from %ir.ptr, align 1, addrspace 8) + ; CHECK-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFSET]] + ; CHECK-NEXT: SI_RETURN_TO_EPILOG $vgpr0 + %ptr = call ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p0(ptr %p, i16 0, i32 0, i32 0) + %loaded = load float, ptr addrspace(7) %ptr + ret float %loaded +} + +define amdgpu_ps float @read_buffer_fat_ptr_p1(ptr addrspace(1) inreg %p) { + ; CHECK-LABEL: name: read_buffer_fat_ptr_p1 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK-NEXT: liveins: $sgpr0, $sgpr1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr1 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr0 + ; CHECK-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 65535 + ; CHECK-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[COPY]], killed [[S_MOV_B32_]], implicit-def dead $scc + ; CHECK-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, killed [[S_AND_B32_]], %subreg.sub1, [[S_MOV_B32_1]], %subreg.sub2, [[S_MOV_B32_1]], %subreg.sub3 + ; CHECK-NEXT: [[BUFFER_LOAD_DWORD_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET killed [[REG_SEQUENCE]], [[S_MOV_B32_1]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32) from %ir.ptr, align 1, addrspace 8) + ; CHECK-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFSET]] + ; CHECK-NEXT: SI_RETURN_TO_EPILOG $vgpr0 + %ptr = call ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p1(ptr addrspace(1) %p, i16 0, i32 0, i32 0) + %loaded = load float, ptr addrspace(7) %ptr + ret float %loaded +} + +declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr nocapture readnone, i16, i32, i32) +declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) nocapture readnone, i16, i32, i32) +declare ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p0(ptr nocapture readnone, i16, i32, i32) +declare ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p1(ptr addrspace(1) nocapture readnone, i16, i32, i32) declare float @llvm.amdgcn.raw.ptr.buffer.load(ptr addrspace(8) nocapture readonly, i32, i32, i32 immarg) declare float @llvm.amdgcn.struct.ptr.buffer.load(ptr addrspace(8) nocapture readonly, i32, i32, i32, i32 immarg) diff --git a/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-pointer-ops.ll b/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-pointer-ops.ll index 99fcbc595ff7f..ea4117b418959 100644 --- a/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-pointer-ops.ll +++ b/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-pointer-ops.ll @@ -349,6 +349,20 @@ define <2 x ptr addrspace(7)> @addrspacecast_vec(<2 x 
ptr addrspace(8)> %buf) { ret <2 x ptr addrspace(7)> %ret } +declare ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p1(ptr addrspace(1), i16, i32, i32) + +define ptr addrspace(7) @make_buffer_rsrc(ptr addrspace(1) %buf, i16 %stride, i32 %numRecords, i32 %flags) { +; CHECK-LABEL: define { ptr addrspace(8), i32 } @make_buffer_rsrc +; CHECK-SAME: (ptr addrspace(1) [[BUF:%.*]], i16 [[STRIDE:%.*]], i32 [[NUMRECORDS:%.*]], i32 [[FLAGS:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[RET:%.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) [[BUF]], i16 [[STRIDE]], i32 [[NUMRECORDS]], i32 [[FLAGS]]) +; CHECK-NEXT: [[TMP1:%.*]] = insertvalue { ptr addrspace(8), i32 } poison, ptr addrspace(8) [[RET]], 0 +; CHECK-NEXT: [[TMP2:%.*]] = insertvalue { ptr addrspace(8), i32 } [[TMP1]], i32 0, 1 +; CHECK-NEXT: ret { ptr addrspace(8), i32 } [[TMP2]] +; + %ret = call ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p1(ptr addrspace(1) %buf, i16 %stride, i32 %numRecords, i32 %flags) + ret ptr addrspace(7) %ret +} + define i1 @icmp_eq(ptr addrspace(7) %a, ptr addrspace(7) %b) { ; CHECK-LABEL: define i1 @icmp_eq ; CHECK-SAME: ({ ptr addrspace(8), i32 } [[A:%.*]], { ptr addrspace(8), i32 } [[B:%.*]]) #[[ATTR0]] { diff --git a/llvm/test/CodeGen/AMDGPU/make-buffer-rsrc-lds-fails.ll b/llvm/test/CodeGen/AMDGPU/make-buffer-rsrc-lds-fails.ll index 0679686f77eef..4f88077e3b0ee 100644 --- a/llvm/test/CodeGen/AMDGPU/make-buffer-rsrc-lds-fails.ll +++ b/llvm/test/CodeGen/AMDGPU/make-buffer-rsrc-lds-fails.ll @@ -3,7 +3,7 @@ ; RUN: not --crash llc -global-isel -mtriple=amdgcn -mcpu=gfx900 < %s define amdgpu_ps ptr addrspace(8) @basic_raw_buffer(ptr addrspace(3) inreg %p) { - %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p3(ptr addrspace(3) %p, i16 0, i32 1234, i32 5678) + %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p3(ptr addrspace(3) %p, i16 0, i32 1234, i32 5678) ret ptr addrspace(8) %rsrc } -declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p3(ptr addrspace(3) nocapture readnone, i16, i32, i32) +declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p3(ptr addrspace(3) nocapture readnone, i16, i32, i32) diff --git a/llvm/test/CodeGen/AMDGPU/ptr-buffer-alias-scheduling.ll b/llvm/test/CodeGen/AMDGPU/ptr-buffer-alias-scheduling.ll index e2f4d1c6e57bc..0ac3d652050d3 100644 --- a/llvm/test/CodeGen/AMDGPU/ptr-buffer-alias-scheduling.ll +++ b/llvm/test/CodeGen/AMDGPU/ptr-buffer-alias-scheduling.ll @@ -85,8 +85,8 @@ define amdgpu_kernel void @buffers_from_flat_dont_alias(ptr noalias %a.flat, ptr ; GISEL-NEXT: v_mul_f32_e32 v3, v3, v3 ; GISEL-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0 ; GISEL-NEXT: s_endpgm - %a = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p0(ptr %a.flat, i16 0, i32 16, i32 0) - %b = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p0(ptr %b.flat, i16 0, i32 16, i32 0) + %a = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %a.flat, i16 0, i32 16, i32 0) + %b = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %b.flat, i16 0, i32 16, i32 0) %l0 = call float @llvm.amdgcn.raw.ptr.buffer.load.f32(ptr addrspace(8) %a, i32 0, i32 0, i32 0) %s0 = fmul float %l0, %l0 @@ -211,4 +211,4 @@ declare i32 @llvm.amdgcn.workitem.id.x() declare float @llvm.amdgcn.raw.ptr.buffer.load.f32(ptr addrspace(8), i32, i32, i32) declare void @llvm.amdgcn.raw.ptr.buffer.store.f32(float, ptr addrspace(8), i32, i32, i32 immarg) -declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p0(ptr readnone nocapture, i16, i32, i32) +declare ptr 
addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr readnone nocapture, i16, i32, i32) diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-Error-is-not-function.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-Error-is-not-function.ll new file mode 100644 index 0000000000000..ad2aa7997eba9 --- /dev/null +++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-Error-is-not-function.ll @@ -0,0 +1,26 @@ +; RUN: not opt -passes='print' %s -S -o - 2>&1 | FileCheck %s + +target triple = "dxil-unknown-shadermodel6.0-compute" + +; CHECK: error: First element of root signature is not a Function +; CHECK-NOT: Root Signature Definitions + +define void @main() #0 { +entry: + ret void +} + +define void @anotherMain() #0 { +entry: + ret void +} + +attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } + +!dx.rootsignatures = !{!2, !5} ; list of function/root signature pairs +!2 = !{ ptr @main, !3 } ; function, root signature +!3 = !{ !4 } ; list of root signature elements +!4 = !{ !"RootFlags", i32 1 } ; 1 = allow_input_assembler_input_layout +!5 = !{ i32 -1, !6 } ; function, root signature +!6 = !{ !7 } ; list of root signature elements +!7 = !{ !"RootFlags", i32 2 } ; 2 = deny_vertex_shader_root_access diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-Error-is-not-value.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-Error-is-not-value.ll new file mode 100644 index 0000000000000..4d881f96e4c3b --- /dev/null +++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-Error-is-not-value.ll @@ -0,0 +1,26 @@ +; RUN: not opt -passes='print' %s -S -o - 2>&1 | FileCheck %s + +target triple = "dxil-unknown-shadermodel6.0-compute" + +; CHECK: error: First element of root signature is not a Value +; CHECK-NOT: Root Signature Definitions + +define void @main() #0 { +entry: + ret void +} + +define void @anotherMain() #0 { +entry: + ret void +} + +attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } + +!dx.rootsignatures = !{!2, !5} ; list of function/root signature pairs +!2 = !{ ptr @main, !3 } ; function, root signature +!3 = !{ !4 } ; list of root signature elements +!4 = !{ !"RootFlags", i32 1 } ; 1 = allow_input_assembler_input_layout +!5 = !{ !3, !6 } ; function, root signature +!6 = !{ !7 } ; list of root signature elements +!7 = !{ !"RootFlags", i32 2 } ; 2 = deny_vertex_shader_root_access diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-Error-no-root-element-list.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-Error-no-root-element-list.ll new file mode 100644 index 0000000000000..b5109022b4b0d --- /dev/null +++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-Error-no-root-element-list.ll @@ -0,0 +1,26 @@ +; RUN: not opt -passes='print' %s -S -o - 2>&1 | FileCheck %s + +target triple = "dxil-unknown-shadermodel6.0-compute" + +; CHECK: error: Root Element mdnode is null.
+; CHECK-NOT: Root Signature Definitions + +define void @main() #0 { +entry: + ret void +} + +define void @anotherMain() #0 { +entry: + ret void +} + +attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } + +!dx.rootsignatures = !{!2, !5} ; list of function/root signature pairs +!2 = !{ ptr @main, null } ; function, root signature +!3 = !{ !4 } ; list of root signature elements +!4 = !{ !"RootFlags", i32 1 } ; 1 = allow_input_assembler_input_layout +!5 = !{ i32 -1, !6 } ; function, root signature +!6 = !{ !7 } ; list of root signature elements +!7 = !{ !"RootFlags", i32 2 } ; 2 = deny_vertex_shader_root_access diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-Error-root-element-not-mdnode.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-Error-root-element-not-mdnode.ll new file mode 100644 index 0000000000000..7e6bcdadd3862 --- /dev/null +++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-Error-root-element-not-mdnode.ll @@ -0,0 +1,26 @@ +; RUN: not opt -passes='print' %s -S -o - 2>&1 | FileCheck %s + +target triple = "dxil-unknown-shadermodel6.0-compute" + +; CHECK: error: Root Element is not a metadata node. +; CHECK-NOT: Root Signature Definitions + +define void @main() #0 { +entry: + ret void +} + +define void @anotherMain() #0 { +entry: + ret void +} + +attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } + +!dx.rootsignatures = !{!2, !5} ; list of function/root signature pairs +!2 = !{ ptr @main, i32 -1 } ; function, root signature +!3 = !{ !4 } ; list of root signature elements +!4 = !{ !"RootFlags", i32 1 } ; 1 = allow_input_assembler_input_layout +!5 = !{ i32 -1, !6 } ; function, root signature +!6 = !{ !7 } ; list of root signature elements +!7 = !{ !"RootFlags", i32 2 } ; 2 = deny_vertex_shader_root_access diff --git a/llvm/test/CodeGen/Mips/readcyclecounter.ll b/llvm/test/CodeGen/Mips/readcyclecounter.ll index 467dd92884b3d..23d3ea014f091 100644 --- a/llvm/test/CodeGen/Mips/readcyclecounter.ll +++ b/llvm/test/CodeGen/Mips/readcyclecounter.ll @@ -7,6 +7,8 @@ ;RUN: llc -mtriple=mipsel -mcpu=mips2 < %s | FileCheck %s --check-prefix=MIPSEL_NOT_SUPPORTED ;RUN: llc -mtriple=mips64el -mcpu=mips3 < %s | FileCheck %s --check-prefix=MIPS64EL_NOT_SUPPORTED + +; XFAIL: expensive_checks + declare i64 @llvm.readcyclecounter() nounwind readnone define i64 @test_readcyclecounter() nounwind { diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll index eb7be14abe431..0d1d75c1b2a75 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll @@ -894,18 +894,18 @@ define <2 x i16> @vwmul_v2i16_multiuse(ptr %x, ptr %y, ptr %z, ptr %w) { ; CHECK-LABEL: vwmul_v2i16_multiuse: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma -; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vle8.v v9, (a1) -; CHECK-NEXT: vle8.v v10, (a2) -; CHECK-NEXT: vle8.v v11, (a3) -; CHECK-NEXT: vsext.vf2 v12, v8 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vle8.v v9, (a2) +; CHECK-NEXT: vsext.vf2 v10, v8 ; CHECK-NEXT: vsext.vf2 v8, v9 -; CHECK-NEXT: vsext.vf2 v9, v10 -; CHECK-NEXT: vsext.vf2 v10, v11 -; CHECK-NEXT: vmul.vv v11, v12, v10 -; CHECK-NEXT: vmul.vv v10, v8, v10 -; CHECK-NEXT: vdivu.vv v8, v8, v9 -; CHECK-NEXT: vor.vv v9, v11, v10 +; CHECK-NEXT: vdivu.vv v8, v10, v8 +; CHECK-NEXT: vle8.v v9, (a0) +; CHECK-NEXT: vle8.v v11, (a3) +; CHECK-NEXT: vsext.vf2 v12, v9 +; CHECK-NEXT: vsext.vf2 v9, v11 +; CHECK-NEXT:
vmul.vv v11, v12, v9 +; CHECK-NEXT: vmul.vv v9, v10, v9 +; CHECK-NEXT: vor.vv v9, v11, v9 ; CHECK-NEXT: vor.vv v8, v9, v8 ; CHECK-NEXT: ret %a = load <2 x i8>, ptr %x diff --git a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll index 1948675ae9cf0..c46334fe556eb 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll @@ -1564,8 +1564,8 @@ define void @sink_splat_fdiv_scalable(ptr nocapture %a, float %x) { ; CHECK-NEXT: .LBB27_3: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vl1re32.v v8, (a5) -; CHECK-NEXT: sub a6, a6, a3 ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: sub a6, a6, a3 ; CHECK-NEXT: vs1r.v v8, (a5) ; CHECK-NEXT: add a5, a5, a1 ; CHECK-NEXT: bnez a6, .LBB27_3 @@ -1654,8 +1654,8 @@ define void @sink_splat_frdiv_scalable(ptr nocapture %a, float %x) { ; CHECK-NEXT: .LBB28_3: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vl1re32.v v8, (a5) -; CHECK-NEXT: sub a6, a6, a3 ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 +; CHECK-NEXT: sub a6, a6, a3 ; CHECK-NEXT: vs1r.v v8, (a5) ; CHECK-NEXT: add a5, a5, a1 ; CHECK-NEXT: bnez a6, .LBB28_3 @@ -2504,8 +2504,8 @@ define void @sink_splat_udiv_scalable(ptr nocapture %a, i32 signext %x) { ; CHECK-NEXT: .LBB42_3: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vl2re32.v v8, (a6) -; CHECK-NEXT: sub a7, a7, a3 ; CHECK-NEXT: vdivu.vx v8, v8, a1 +; CHECK-NEXT: sub a7, a7, a3 ; CHECK-NEXT: vs2r.v v8, (a6) ; CHECK-NEXT: add a6, a6, a5 ; CHECK-NEXT: bnez a7, .LBB42_3 @@ -2595,8 +2595,8 @@ define void @sink_splat_sdiv_scalable(ptr nocapture %a, i32 signext %x) { ; CHECK-NEXT: .LBB43_3: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vl2re32.v v8, (a6) -; CHECK-NEXT: sub a7, a7, a3 ; CHECK-NEXT: vdiv.vx v8, v8, a1 +; CHECK-NEXT: sub a7, a7, a3 ; CHECK-NEXT: vs2r.v v8, (a6) ; CHECK-NEXT: add a6, a6, a5 ; CHECK-NEXT: bnez a7, .LBB43_3 @@ -2686,8 +2686,8 @@ define void @sink_splat_urem_scalable(ptr nocapture %a, i32 signext %x) { ; CHECK-NEXT: .LBB44_3: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vl2re32.v v8, (a6) -; CHECK-NEXT: sub a7, a7, a3 ; CHECK-NEXT: vremu.vx v8, v8, a1 +; CHECK-NEXT: sub a7, a7, a3 ; CHECK-NEXT: vs2r.v v8, (a6) ; CHECK-NEXT: add a6, a6, a5 ; CHECK-NEXT: bnez a7, .LBB44_3 @@ -2777,8 +2777,8 @@ define void @sink_splat_srem_scalable(ptr nocapture %a, i32 signext %x) { ; CHECK-NEXT: .LBB45_3: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vl2re32.v v8, (a6) -; CHECK-NEXT: sub a7, a7, a3 ; CHECK-NEXT: vrem.vx v8, v8, a1 +; CHECK-NEXT: sub a7, a7, a3 ; CHECK-NEXT: vs2r.v v8, (a6) ; CHECK-NEXT: add a6, a6, a5 ; CHECK-NEXT: bnez a7, .LBB45_3 diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv-constrained-sdnode.ll index 07750623dd44b..217a02d08dead 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfdiv-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv-constrained-sdnode.ll @@ -221,16 +221,16 @@ define @vfdiv_vv_nxv32bf16( %va, @vfdiv_vf_nxv32bf16( %va, bf ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: sub sp, sp, a0 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb +; 
CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb ; CHECK-NEXT: fmv.x.h a0, fa0 ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8 ; CHECK-NEXT: addi a1, sp, 16 ; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill -; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v12 ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma -; CHECK-NEXT: vmv.v.x v8, a0 +; CHECK-NEXT: vmv.v.x v16, a0 ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma -; CHECK-NEXT: vfwcvtbf16.f.f.v v0, v8 -; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v12 +; CHECK-NEXT: vfwcvtbf16.f.f.v v0, v16 +; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v20 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: add a0, sp, a0 +; CHECK-NEXT: addi a0, a0, 16 +; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; CHECK-NEXT: vfdiv.vv v0, v8, v0 +; CHECK-NEXT: vfdiv.vv v24, v16, v0 ; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v8, v0 +; CHECK-NEXT: vfwcvtbf16.f.f.v v0, v12 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: add a0, sp, a0 +; CHECK-NEXT: addi a0, a0, 16 +; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; CHECK-NEXT: vfdiv.vv v16, v24, v16 +; CHECK-NEXT: vfdiv.vv v16, v0, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; CHECK-NEXT: vfncvtbf16.f.f.w v8, v24 ; CHECK-NEXT: vfncvtbf16.f.f.w v12, v16 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: .cfi_def_cfa sp, 16 ; CHECK-NEXT: addi sp, sp, 16 @@ -573,16 +583,16 @@ define @vfdiv_vv_nxv32f16( %va, @vfdiv_vf_nxv32f16( %va, half %b ; ZVFHMIN-NEXT: addi sp, sp, -16 ; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 ; ZVFHMIN-NEXT: csrr a0, vlenb -; ZVFHMIN-NEXT: slli a0, a0, 3 +; ZVFHMIN-NEXT: slli a0, a0, 4 ; ZVFHMIN-NEXT: sub sp, sp, a0 -; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb ; ZVFHMIN-NEXT: fmv.x.h a0, fa0 ; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8 ; ZVFHMIN-NEXT: addi a1, sp, 16 ; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill -; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12 ; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m8, ta, ma -; ZVFHMIN-NEXT: vmv.v.x v8, a0 +; ZVFHMIN-NEXT: vmv.v.x v16, a0 ; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma -; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v8 -; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v16 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 3 +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill ; ZVFHMIN-NEXT: addi a0, sp, 16 -; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; ZVFHMIN-NEXT: vfdiv.vv v0, v8, v0 +; ZVFHMIN-NEXT: vfdiv.vv v24, v16, v0 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, 
m4, ta, ma -; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v0 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v12 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 3 +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; ZVFHMIN-NEXT: vfdiv.vv v16, v24, v16 +; ZVFHMIN-NEXT: vfdiv.vv v16, v0, v8 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24 ; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16 ; ZVFHMIN-NEXT: csrr a0, vlenb -; ZVFHMIN-NEXT: slli a0, a0, 3 +; ZVFHMIN-NEXT: slli a0, a0, 4 ; ZVFHMIN-NEXT: add sp, sp, a0 ; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16 ; ZVFHMIN-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv-sdnode.ll index e671ba850415b..9aba6455f0fac 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfdiv-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv-sdnode.ll @@ -200,16 +200,16 @@ define @vfdiv_vv_nxv32bf16( %va, @vfdiv_vv_nxv32bf16( %va, @vfdiv_vf_nxv32bf16( %va, bfloat %b) { ; CHECK-LABEL: vfdiv_vf_nxv32bf16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 3 -; CHECK-NEXT: sub sp, sp, a0 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb ; CHECK-NEXT: fmv.x.h a0, fa0 ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8 -; CHECK-NEXT: addi a1, sp, 16 -; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v12 ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfwcvtbf16.f.f.v v0, v8 -; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v12 -; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; CHECK-NEXT: vfdiv.vv v0, v8, v0 +; CHECK-NEXT: vfdiv.vv v16, v16, v0 ; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v8, v0 +; CHECK-NEXT: vfwcvtbf16.f.f.v v0, v12 ; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; CHECK-NEXT: vfdiv.vv v16, v24, v16 +; CHECK-NEXT: vfdiv.vv v24, v24, v0 ; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v12, v16 -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 3 -; CHECK-NEXT: add sp, sp, a0 -; CHECK-NEXT: .cfi_def_cfa sp, 16 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: .cfi_def_cfa_offset 0 +; CHECK-NEXT: vfncvtbf16.f.f.w v8, v16 +; CHECK-NEXT: vfncvtbf16.f.f.w v12, v24 ; CHECK-NEXT: ret %head = insertelement poison, bfloat %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer @@ -528,16 +512,16 @@ define @vfdiv_vv_nxv32f16( %va, @vfdiv_vf_nxv32f16( %va, half %b ; ; ZVFHMIN-LABEL: vfdiv_vf_nxv32f16: ; ZVFHMIN: # %bb.0: -; ZVFHMIN-NEXT: addi sp, sp, -16 -; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 -; ZVFHMIN-NEXT: csrr a0, vlenb -; ZVFHMIN-NEXT: slli a0, a0, 3 -; ZVFHMIN-NEXT: sub sp, sp, a0 -; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb ; ZVFHMIN-NEXT: fmv.x.h a0, fa0 ; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8 -; ZVFHMIN-NEXT: addi a1, sp, 16 -; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill ; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12 
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; ZVFHMIN-NEXT: vmv.v.x v8, a0 ; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v8 -; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12 -; ZVFHMIN-NEXT: addi a0, sp, 16 -; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; ZVFHMIN-NEXT: vfdiv.vv v0, v8, v0 +; ZVFHMIN-NEXT: vfdiv.vv v16, v16, v0 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v0 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v12 ; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; ZVFHMIN-NEXT: vfdiv.vv v16, v24, v16 +; ZVFHMIN-NEXT: vfdiv.vv v24, v24, v0 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16 -; ZVFHMIN-NEXT: csrr a0, vlenb -; ZVFHMIN-NEXT: slli a0, a0, 3 -; ZVFHMIN-NEXT: add sp, sp, a0 -; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16 -; ZVFHMIN-NEXT: addi sp, sp, 16 -; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0 +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 +; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v24 ; ZVFHMIN-NEXT: ret %head = insertelement poison, half %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-constrained-sdnode.ll index d5e65e2c8fd3f..eeb5f3bc984d3 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-constrained-sdnode.ll @@ -92,15 +92,15 @@ define @vfsqrt_nxv32bf16( %v) stric ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8 -; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v12 ; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma ; CHECK-NEXT: vfsqrt.v v16, v16 ; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v8, v16 +; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v12 ; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; CHECK-NEXT: vfsqrt.v v16, v24 +; CHECK-NEXT: vfsqrt.v v24, v24 ; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v12, v16 +; CHECK-NEXT: vfncvtbf16.f.f.w v8, v16 +; CHECK-NEXT: vfncvtbf16.f.f.w v12, v24 ; CHECK-NEXT: ret %r = call @llvm.experimental.constrained.sqrt.nxv32bf16( %v, metadata !"round.dynamic", metadata !"fpexcept.strict") ret %r @@ -229,15 +229,15 @@ define @vfsqrt_nxv32f16( %v) strictfp { ; ZVFHMIN: # %bb.0: ; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8 -; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12 ; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma ; ZVFHMIN-NEXT: vfsqrt.v v16, v16 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12 ; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; ZVFHMIN-NEXT: vfsqrt.v v16, v24 +; ZVFHMIN-NEXT: vfsqrt.v v24, v24 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16 +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 +; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v24 ; ZVFHMIN-NEXT: ret %r = call @llvm.experimental.constrained.sqrt.nxv32f16( %v, metadata !"round.dynamic", metadata !"fpexcept.strict") ret %r diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-sdnode.ll index 4d761981aac97..6d7662db2b157 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-sdnode.ll @@ -87,15 +87,15 @@ define @vfsqrt_nxv32bf16( %v) { ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; 
CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8 -; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v12 ; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma ; CHECK-NEXT: vfsqrt.v v16, v16 ; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v8, v16 +; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v12 ; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; CHECK-NEXT: vfsqrt.v v16, v24 +; CHECK-NEXT: vfsqrt.v v24, v24 ; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v12, v16 +; CHECK-NEXT: vfncvtbf16.f.f.w v8, v16 +; CHECK-NEXT: vfncvtbf16.f.f.w v12, v24 ; CHECK-NEXT: ret %r = call @llvm.sqrt.nxv32bf16( %v) ret %r @@ -224,15 +224,15 @@ define @vfsqrt_nxv32f16( %v) { ; ZVFHMIN: # %bb.0: ; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8 -; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12 ; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma ; ZVFHMIN-NEXT: vfsqrt.v v16, v16 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12 ; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; ZVFHMIN-NEXT: vfsqrt.v v16, v24 +; ZVFHMIN-NEXT: vfsqrt.v v24, v24 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16 +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 +; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v24 ; ZVFHMIN-NEXT: ret %r = call @llvm.sqrt.nxv32f16( %v) ret %r diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir b/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir index d2906c4613295..c84f7735b66d4 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir +++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir @@ -1116,10 +1116,10 @@ body: | bb.0: ; CHECK-LABEL: name: vmop_vv_passthru_use ; CHECK: %x:vrnov0 = PseudoVMAND_MM_B8 $noreg, $noreg, 1, 0 /* e8 */ - ; CHECK-NEXT: %y:vrnov0 = PseudoVMSEQ_VV_M1_MASK %x, $noreg, $noreg, $noreg, 1, 3 /* e8 */ + ; CHECK-NEXT: %y:vrnov0 = PseudoVMSEQ_VV_M1_MASK %x, $noreg, $noreg, $noreg, 1, 3 /* e8 */, 1 ; CHECK-NEXT: %z:vr = PseudoVMAND_MM_B8 %y, $noreg, 1, 0 /* e8 */ %x:vrnov0 = PseudoVMAND_MM_B8 $noreg, $noreg, -1, 0 /* e1 */ - %y:vrnov0 = PseudoVMSEQ_VV_M1_MASK %x, $noreg, $noreg, $noreg, 1, 3 /* e8 */ + %y:vrnov0 = PseudoVMSEQ_VV_M1_MASK %x, $noreg, $noreg, $noreg, 1, 3 /* e8 */, 1 %z:vr = PseudoVMAND_MM_B8 %y, $noreg, 1, 0 /* e1 */ ... --- @@ -1128,10 +1128,10 @@ body: | bb.0: ; CHECK-LABEL: name: vmop_vv_passthru_use_incompatible_eew ; CHECK: %x:vrnov0 = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */ - ; CHECK-NEXT: %y:vrnov0 = PseudoVMSEQ_VV_M1_MASK %x, $noreg, $noreg, $noreg, 1, 3 /* e8 */ + ; CHECK-NEXT: %y:vrnov0 = PseudoVMSEQ_VV_M1_MASK %x, $noreg, $noreg, $noreg, 1, 3 /* e8 */, 1 ; CHECK-NEXT: %z:vr = PseudoVMAND_MM_B8 %y, $noreg, 1, 0 /* e8 */ %x:vrnov0 = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 - %y:vrnov0 = PseudoVMSEQ_VV_M1_MASK %x, $noreg, $noreg, $noreg, 1, 3 /* e8 */ + %y:vrnov0 = PseudoVMSEQ_VV_M1_MASK %x, $noreg, $noreg, $noreg, 1, 3 /* e8 */, 1 %z:vr = PseudoVMAND_MM_B8 %y, $noreg, 1, 0 /* e1 */ ... 
--- @@ -1140,10 +1140,10 @@ body: | bb.0: ; CHECK-LABEL: name: vmop_vv_passthru_use_incompatible_emul ; CHECK: %x:vrnov0 = PseudoVMAND_MM_B16 $noreg, $noreg, -1, 0 /* e8 */ - ; CHECK-NEXT: %y:vrnov0 = PseudoVMSEQ_VV_M1_MASK %x, $noreg, $noreg, $noreg, 1, 3 /* e8 */ + ; CHECK-NEXT: %y:vrnov0 = PseudoVMSEQ_VV_M1_MASK %x, $noreg, $noreg, $noreg, 1, 3 /* e8 */, 1 ; CHECK-NEXT: %z:vr = PseudoVMAND_MM_B8 %y, $noreg, 1, 0 /* e8 */ %x:vrnov0 = PseudoVMAND_MM_B16 $noreg, $noreg, -1, 0 /* e1 */ - %y:vrnov0 = PseudoVMSEQ_VV_M1_MASK %x, $noreg, $noreg, $noreg, 1, 3 /* e8 */ + %y:vrnov0 = PseudoVMSEQ_VV_M1_MASK %x, $noreg, $noreg, $noreg, 1, 3 /* e8 */, 1 %z:vr = PseudoVMAND_MM_B8 %y, $noreg, 1, 0 /* e1 */ ... --- diff --git a/llvm/test/CodeGen/SPIRV/builtin_vars-decorate.ll b/llvm/test/CodeGen/SPIRV/builtin_vars-decorate.ll index 59abd5dbee6a0..0c9b29de890d4 100644 --- a/llvm/test/CodeGen/SPIRV/builtin_vars-decorate.ll +++ b/llvm/test/CodeGen/SPIRV/builtin_vars-decorate.ll @@ -1,22 +1,23 @@ -; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s - -; CHECK: OpName %[[#WD:]] "__spirv_BuiltInWorkDim" -; CHECK: OpName %[[#GS:]] "__spirv_BuiltInGlobalSize" -; CHECK: OpName %[[#GII:]] "__spirv_BuiltInGlobalInvocationId" -; CHECK: OpName %[[#WS:]] "__spirv_BuiltInWorkgroupSize" -; CHECK: OpName %[[#EWS:]] "__spirv_BuiltInEnqueuedWorkgroupSize" -; CHECK: OpName %[[#LLI:]] "__spirv_BuiltInLocalInvocationId" -; CHECK: OpName %[[#NW:]] "__spirv_BuiltInNumWorkgroups" -; CHECK: OpName %[[#WI:]] "__spirv_BuiltInWorkgroupId" -; CHECK: OpName %[[#GO:]] "__spirv_BuiltInGlobalOffset" -; CHECK: OpName %[[#GLI:]] "__spirv_BuiltInGlobalLinearId" -; CHECK: OpName %[[#LLII:]] "__spirv_BuiltInLocalInvocationIndex" -; CHECK: OpName %[[#SS:]] "__spirv_BuiltInSubgroupSize" -; CHECK: OpName %[[#SMS:]] "__spirv_BuiltInSubgroupMaxSize" -; CHECK: OpName %[[#NS:]] "__spirv_BuiltInNumSubgroups" -; CHECK: OpName %[[#NES:]] "__spirv_BuiltInNumEnqueuedSubgroups" -; CHECK: OpName %[[#SI:]] "__spirv_BuiltInSubgroupId" -; CHECK: OpName %[[#SLII:]] "__spirv_BuiltInSubgroupLocalInvocationId" +; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; CHECK-DAG: OpName %[[#WD:]] "__spirv_BuiltInWorkDim" +; CHECK-DAG: OpName %[[#GS:]] "__spirv_BuiltInGlobalSize" +; CHECK-DAG: OpName %[[#GII:]] "__spirv_BuiltInGlobalInvocationId" +; CHECK-DAG: OpName %[[#WS:]] "__spirv_BuiltInWorkgroupSize" +; CHECK-DAG: OpName %[[#EWS:]] "__spirv_BuiltInEnqueuedWorkgroupSize" +; CHECK-DAG: OpName %[[#LLI:]] "__spirv_BuiltInLocalInvocationId" +; CHECK-DAG: OpName %[[#NW:]] "__spirv_BuiltInNumWorkgroups" +; CHECK-DAG: OpName %[[#WI:]] "__spirv_BuiltInWorkgroupId" +; CHECK-DAG: OpName %[[#GO:]] "__spirv_BuiltInGlobalOffset" +; CHECK-DAG: OpName %[[#GLI:]] "__spirv_BuiltInGlobalLinearId" +; CHECK-DAG: OpName %[[#LLII:]] "__spirv_BuiltInLocalInvocationIndex" +; CHECK-DAG: OpName %[[#SS:]] "__spirv_BuiltInSubgroupSize" +; CHECK-DAG: OpName %[[#SMS:]] "__spirv_BuiltInSubgroupMaxSize" +; CHECK-DAG: OpName %[[#NS:]] "__spirv_BuiltInNumSubgroups" +; CHECK-DAG: OpName %[[#NES:]] "__spirv_BuiltInNumEnqueuedSubgroups" +; CHECK-DAG: OpName %[[#SI:]] "__spirv_BuiltInSubgroupId" +; CHECK-DAG: OpName %[[#SLII:]] "__spirv_BuiltInSubgroupLocalInvocationId" ; CHECK-DAG: OpDecorate %[[#NW]] BuiltIn NumWorkgroups ; CHECK-DAG: OpDecorate %[[#WS]] BuiltIn WorkgroupSize @@ -35,6 +36,33 @@ ; CHECK-DAG: OpDecorate %[[#NES]] 
BuiltIn NumEnqueuedSubgroups ; CHECK-DAG: OpDecorate %[[#SI]] BuiltIn SubgroupId ; CHECK-DAG: OpDecorate %[[#SLII]] BuiltIn SubgroupLocalInvocationId + +; CHECK-DAG: %[[#SizeT:]] = OpTypeInt 64 0 +; CHECK-DAG: %[[#Int32:]] = OpTypeInt 32 0 +; CHECK-DAG: %[[#SizeTPtr:]] = OpTypePointer Input %[[#SizeT]] +; CHECK-DAG: %[[#Int32Ptr:]] = OpTypePointer Input %[[#Int32]] + +; CHECK-DAG: %[[#GLI]] = OpVariable %[[#SizeTPtr]] Input +; CHECK-DAG: %[[#LLII]] = OpVariable %[[#SizeTPtr]] Input +; CHECK-DAG: %[[#WD]] = OpVariable %[[#Int32Ptr]] Input +; CHECK-DAG: %[[#SS]] = OpVariable %[[#Int32Ptr]] Input +; CHECK-DAG: %[[#SMS]] = OpVariable %[[#Int32Ptr]] Input +; CHECK-DAG: %[[#NS]] = OpVariable %[[#Int32Ptr]] Input +; CHECK-DAG: %[[#NES]] = OpVariable %[[#Int32Ptr]] Input +; CHECK-DAG: %[[#SI]] = OpVariable %[[#Int32Ptr]] Input +; CHECK-DAG: %[[#SLII]] = OpVariable %[[#Int32Ptr]] Input + +; CHECK: OpFunction +; CHECK: %[[#]] = OpLoad %[[#SizeT]] %[[#GLI]] +; CHECK: %[[#]] = OpLoad %[[#SizeT]] %[[#LLII]] +; CHECK: %[[#]] = OpLoad %[[#Int32]] %[[#WD]] +; CHECK: %[[#]] = OpLoad %[[#Int32]] %[[#SS]] +; CHECK: %[[#]] = OpLoad %[[#Int32]] %[[#SMS]] +; CHECK: %[[#]] = OpLoad %[[#Int32]] %[[#NS]] +; CHECK: %[[#]] = OpLoad %[[#Int32]] %[[#NES]] +; CHECK: %[[#]] = OpLoad %[[#Int32]] %[[#SI]] +; CHECK: %[[#]] = OpLoad %[[#Int32]] %[[#SLII]] + @__spirv_BuiltInWorkDim = external addrspace(1) global i32 @__spirv_BuiltInGlobalSize = external addrspace(1) global <3 x i32> @__spirv_BuiltInGlobalInvocationId = external addrspace(1) global <3 x i32> @@ -55,5 +83,24 @@ define spir_kernel void @_Z1wv() { entry: + %r1 = tail call spir_func i64 @get_global_linear_id() + %r2 = tail call spir_func i64 @get_local_linear_id() + %r3 = tail call spir_func i32 @get_work_dim() + %r4 = tail call spir_func i32 @get_sub_group_size() + %r5 = tail call spir_func i32 @get_max_sub_group_size() + %r6 = tail call spir_func i32 @get_num_sub_groups() + %r7 = tail call spir_func i32 @get_enqueued_num_sub_groups() + %r8 = tail call spir_func i32 @get_sub_group_id() + %r9 = tail call spir_func i32 @get_sub_group_local_id() ret void } + +declare spir_func i64 @get_global_linear_id() +declare spir_func i64 @get_local_linear_id() +declare spir_func i32 @get_work_dim() +declare spir_func i32 @get_sub_group_size() +declare spir_func i32 @get_max_sub_group_size() +declare spir_func i32 @get_num_sub_groups() +declare spir_func i32 @get_enqueued_num_sub_groups() +declare spir_func i32 @get_sub_group_id() +declare spir_func i32 @get_sub_group_local_id() diff --git a/llvm/test/CodeGen/SPIRV/opencl/get_num_groups.ll b/llvm/test/CodeGen/SPIRV/opencl/get_num_groups.ll new file mode 100644 index 0000000000000..3f1d1dc248fc4 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/opencl/get_num_groups.ll @@ -0,0 +1,55 @@ +; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s + +;; The set of valid inputs for get_num_groups depends on the runtime NDRange, +;; but inputs outside of [0, 2] always return 1. +;; Here we assume Itanium mangling for function name. 
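+;; Condensed reading of the checks that follow (a sketch of the lowering,
+;; not additional checked output): a constant dim in [0, 2] becomes an
+;; OpCompositeExtract from the NumWorkgroups vector, a constant dim outside
+;; that range folds straight to the constant 1, and a dynamic dim lowers to
+;; select(dim < 3, NumWorkgroups[dim], 1) via OpULessThan plus OpSelect.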
+declare i64 @_Z14get_num_groupsj(i32) + +define i64 @foo(i32 %dim) { + %x = call i64 @_Z14get_num_groupsj(i32 0) + %y = call i64 @_Z14get_num_groupsj(i32 5) + %acc = add i64 %x, %y + %unknown = call i64 @_Z14get_num_groupsj(i32 %dim) + %ret = add i64 %acc, %unknown + ret i64 %ret +} + +;; Capabilities: +; CHECK-DAG: OpCapability Kernel +; CHECK-DAG: OpCapability Int64 + +; CHECK-NOT: DAG-FENCE + +;; Decorations: +; CHECK-DAG: OpDecorate %[[#GET_NUM_GROUPS:]] BuiltIn NumWorkgroups +; CHECK-DAG: OpDecorate %[[#GET_NUM_GROUPS]] Constant + +; CHECK-NOT: DAG-FENCE + +;; Types, Constants and Variables: +; CHECK-DAG: %[[#BOOL:]] = OpTypeBool +; CHECK-DAG: %[[#I32:]] = OpTypeInt 32 0 +; CHECK-DAG: %[[#I64:]] = OpTypeInt 64 0 +; CHECK-DAG: %[[#VEC:]] = OpTypeVector %[[#I64]] 3 +; CHECK-DAG: %[[#PTR:]] = OpTypePointer Input %[[#VEC]] +; CHECK-DAG: %[[#FN:]] = OpTypeFunction %[[#I64]] %[[#I32]] +; CHECK-DAG: %[[#GET_NUM_GROUPS]] = OpVariable %[[#PTR]] Input +; CHECK-DAG: %[[#ONE:]] = OpConstant %[[#I64]] 1 +; CHECK-DAG: %[[#THREE:]] = OpConstant %[[#I32]] 3 + +;; Functions: +; CHECK: OpFunction %[[#I64]] None %[[#FN]] +; CHECK: %[[#DIM:]] = OpFunctionParameter %[[#I32]] + +;; get_num_groups(0): OpLoad + OpCompositeExtract. +; CHECK: %[[#TMP1:]] = OpLoad %[[#VEC]] %[[#GET_NUM_GROUPS]] +; CHECK: %[[#X:]] = OpCompositeExtract %[[#I64]] %[[#TMP1]] 0 + +;; get_num_groups(5): OpConstant of one. +; CHECK: OpIAdd %[[#I64]] %[[#X]] %[[#ONE]] + +;; get_num_groups(dim): Implementation using OpSelect. +; CHECK-DAG: %[[#TMP2:]] = OpLoad %[[#VEC]] %[[#GET_NUM_GROUPS]] +; CHECK-DAG: %[[#TMP3:]] = OpVectorExtractDynamic %[[#I64]] %[[#TMP2]] %[[#DIM]] +; CHECK-DAG: %[[#COND:]] = OpULessThan %[[#BOOL]] %[[#DIM]] %[[#THREE]] +; CHECK: %[[#UNKNOWN:]] = OpSelect %[[#I64]] %[[#COND]] %[[#TMP3]] %[[#ONE]] diff --git a/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars.ll b/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars.ll index 5074893163565..44d2f5e24f59d 100644 --- a/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars.ll +++ b/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars.ll @@ -1,8 +1,11 @@ -; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV +; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s ; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %} -; CHECK-SPIRV: OpDecorate %[[#Id:]] BuiltIn GlobalLinearId -; CHECK-SPIRV: %[[#Id:]] = OpVariable %[[#]] +; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; CHECK: OpDecorate %[[#Id:]] BuiltIn GlobalLinearId +; CHECK: %[[#Id:]] = OpVariable %[[#]] @__spirv_BuiltInGlobalLinearId = external addrspace(1) global i32 diff --git a/llvm/test/CodeGen/SPIRV/transcoding/spirv-types.ll b/llvm/test/CodeGen/SPIRV/transcoding/spirv-types.ll index 8d99a0c6cd1ce..36ae6bf478127 100644 --- a/llvm/test/CodeGen/SPIRV/transcoding/spirv-types.ll +++ b/llvm/test/CodeGen/SPIRV/transcoding/spirv-types.ll @@ -85,5 +85,61 @@ define spir_func void @test_sampler(target("spirv.Image", float, 1, 1, 0, 0, 0, } declare spir_func target("spirv.Image", float, 1, 1, 0, 0, 0, 0, 0) @_Z20__spirv_SampledImagePU3AS1K34__spirv_Image__float_1_1_0_0_0_0_0PU3AS1K15__spirv_Sampler(target("spirv.Image", float, 1, 1, 0, 0, 0, 0, 0), target("spirv.Sampler")) - declare spir_func <4 x float> 
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars.ll b/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars.ll
index 5074893163565..44d2f5e24f59d 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars.ll
@@ -1,8 +1,11 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
 ; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
-; CHECK-SPIRV: OpDecorate %[[#Id:]] BuiltIn GlobalLinearId
-; CHECK-SPIRV: %[[#Id:]] = OpVariable %[[#]]
+; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK: OpDecorate %[[#Id:]] BuiltIn GlobalLinearId
+; CHECK: %[[#Id:]] = OpVariable %[[#]]
 
 @__spirv_BuiltInGlobalLinearId = external addrspace(1) global i32
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/spirv-types.ll b/llvm/test/CodeGen/SPIRV/transcoding/spirv-types.ll
index 8d99a0c6cd1ce..36ae6bf478127 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/spirv-types.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/spirv-types.ll
@@ -85,5 +85,61 @@ define spir_func void @test_sampler(target("spirv.Image", float, 1, 1, 0, 0, 0,
 }
 declare spir_func target("spirv.Image", float, 1, 1, 0, 0, 0, 0, 0) @_Z20__spirv_SampledImagePU3AS1K34__spirv_Image__float_1_1_0_0_0_0_0PU3AS1K15__spirv_Sampler(target("spirv.Image", float, 1, 1, 0, 0, 0, 0, 0), target("spirv.Sampler"))
-
 declare spir_func <4 x float> @_Z38__spirv_ImageSampleExplicitLod_Rfloat4PU3AS120__spirv_SampledImageDv4_iif(target("spirv.Image", float, 1, 1, 0, 0, 0, 0, 0), <4 x i32>, i32, float)
+
+; CHECK-SPIRV: %[[#]] = OpImageRead
+; CHECK-SPIRV: %[[#]] = OpImageRead
+; CHECK-SPIRV: %[[#]] = OpImageRead
+; CHECK-SPIRV: %[[#]] = OpImageRead
+; CHECK-SPIRV: %[[#]] = OpImageRead
+; CHECK-SPIRV: %[[#]] = OpImageRead
+; CHECK-SPIRV: %[[#]] = OpImageRead
+; CHECK-SPIRV: %[[#]] = OpImageSampleExplicitLod
+
+define dso_local spir_kernel void @reads() {
+  %1 = tail call spir_func i32 @_Z17__spirv_ImageReadIi14ocl_image3d_roDv4_iET_T0_T1_(target("spirv.Image", void, 2, 0, 0, 0, 0, 0, 0) poison, <4 x i32> zeroinitializer)
+  %2 = tail call spir_func <2 x i32> @_Z17__spirv_ImageReadIDv2_i14ocl_image2d_roS0_ET_T0_T1_(target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 0) poison, <2 x i32> zeroinitializer)
+  %3 = tail call spir_func <4 x i32> @_Z17__spirv_ImageReadIDv4_j14ocl_image3d_roDv4_iET_T0_T1_(target("spirv.Image", void, 2, 0, 0, 0, 0, 0, 0) poison, <4 x i32> zeroinitializer)
+  %4 = tail call spir_func signext i16 @_Z17__spirv_ImageReadIs14ocl_image1d_roiET_T0_T1_(target("spirv.Image", void, 0, 0, 0, 0, 0, 0, 0) poison, i32 0)
+  %5 = tail call spir_func zeroext i16 @_Z17__spirv_ImageReadIt14ocl_image3d_roDv4_iET_T0_T1_(target("spirv.Image", void, 2, 0, 0, 0, 0, 0, 0) poison, <4 x i32> zeroinitializer)
+  %6 = tail call spir_func <2 x float> @_Z17__spirv_ImageReadIDv2_f14ocl_image1d_roiET_T0_T1_(target("spirv.Image", void, 0, 0, 0, 0, 0, 0, 0) poison, i32 0)
+  %7 = tail call spir_func half @_Z17__spirv_ImageReadIDF16_14ocl_image2d_roDv2_iET_T0_T1_(target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 0) poison, <2 x i32> zeroinitializer)
+  %8 = tail call spir_func <4 x i32> @_Z30__spirv_ImageSampleExplicitLodI32__spirv_SampledImage__image1d_roDv4_jfET0_T_T1_if(target("spirv.SampledImage", void, 0, 0, 0, 0, 0, 0, 0) poison, float 0.000000e+00, i32 2, float 0.000000e+00)
+  ret void
+}
+
+declare dso_local spir_func i32 @_Z17__spirv_ImageReadIi14ocl_image3d_roDv4_iET_T0_T1_(target("spirv.Image", void, 2, 0, 0, 0, 0, 0, 0), <4 x i32>)
+declare dso_local spir_func <2 x i32> @_Z17__spirv_ImageReadIDv2_i14ocl_image2d_roS0_ET_T0_T1_(target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 0), <2 x i32>)
+declare dso_local spir_func <4 x i32> @_Z17__spirv_ImageReadIDv4_j14ocl_image3d_roDv4_iET_T0_T1_(target("spirv.Image", void, 2, 0, 0, 0, 0, 0, 0), <4 x i32>)
+declare dso_local spir_func signext i16 @_Z17__spirv_ImageReadIs14ocl_image1d_roiET_T0_T1_(target("spirv.Image", void, 0, 0, 0, 0, 0, 0, 0), i32)
+declare dso_local spir_func zeroext i16 @_Z17__spirv_ImageReadIt14ocl_image3d_roDv4_iET_T0_T1_(target("spirv.Image", void, 2, 0, 0, 0, 0, 0, 0), <4 x i32>)
+declare dso_local spir_func <2 x float> @_Z17__spirv_ImageReadIDv2_f14ocl_image1d_roiET_T0_T1_(target("spirv.Image", void, 0, 0, 0, 0, 0, 0, 0), i32)
+declare dso_local spir_func half @_Z17__spirv_ImageReadIDF16_14ocl_image2d_roDv2_iET_T0_T1_(target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 0), <2 x i32>)
+declare dso_local spir_func <4 x i32> @_Z30__spirv_ImageSampleExplicitLodI32__spirv_SampledImage__image1d_roDv4_jfET0_T_T1_if(target("spirv.SampledImage", void, 0, 0, 0, 0, 0, 0, 0), float noundef, i32 noundef, float noundef)
+
+; CHECK-SPIRV: OpImageWrite
+; CHECK-SPIRV: OpImageWrite
+; CHECK-SPIRV: OpImageWrite
+; CHECK-SPIRV: OpImageWrite
+; CHECK-SPIRV: OpImageWrite
+; CHECK-SPIRV: OpImageWrite
+; CHECK-SPIRV: OpImageWrite
+
+define dso_local spir_kernel void @writes() {
+  call spir_func void @_Z18__spirv_ImageWriteI14ocl_image3d_woDv4_iiEvT_T0_T1_(target("spirv.Image", void, 2, 0, 0, 0, 0, 0, 1) poison, <4 x i32> zeroinitializer, i32 zeroinitializer)
+  call spir_func void @_Z18__spirv_ImageWriteI14ocl_image2d_woDv2_iS1_EvT_T0_T1_(target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 1) poison, <2 x i32> zeroinitializer, <2 x i32> zeroinitializer)
+  call spir_func void @_Z18__spirv_ImageWriteI14ocl_image3d_woDv4_iDv4_jEvT_T0_T1_(target("spirv.Image", void, 2, 0, 0, 0, 0, 0, 1) poison, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer)
+  call spir_func void @_Z18__spirv_ImageWriteI14ocl_image1d_woisEvT_T0_T1_(target("spirv.Image", void, 0, 0, 0, 0, 0, 0, 1) poison, i32 0, i16 signext 0)
+  call spir_func void @_Z18__spirv_ImageWriteI14ocl_image3d_woDv4_itEvT_T0_T1_(target("spirv.Image", void, 2, 0, 0, 0, 0, 0, 1) poison, <4 x i32> zeroinitializer, i16 zeroext 0)
+  call spir_func void @_Z18__spirv_ImageWriteI14ocl_image1d_woiDv2_fEvT_T0_T1_(target("spirv.Image", void, 0, 0, 0, 0, 0, 0, 1) poison, i32 0, <2 x float> zeroinitializer)
+  call spir_func void @_Z18__spirv_ImageWriteI14ocl_image2d_woDv2_iDF16_EvT_T0_T1_(target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 1) poison, <2 x i32> zeroinitializer, half zeroinitializer)
+  ret void
+}
+
+declare dso_local spir_func void @_Z18__spirv_ImageWriteI14ocl_image3d_woDv4_iiEvT_T0_T1_(target("spirv.Image", void, 2, 0, 0, 0, 0, 0, 1), <4 x i32>, i32)
+declare dso_local spir_func void @_Z18__spirv_ImageWriteI14ocl_image2d_woDv2_iS1_EvT_T0_T1_(target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 1), <2 x i32>, <2 x i32>)
+declare dso_local spir_func void @_Z18__spirv_ImageWriteI14ocl_image3d_woDv4_iDv4_jEvT_T0_T1_(target("spirv.Image", void, 2, 0, 0, 0, 0, 0, 1), <4 x i32>, <4 x i32>)
+declare dso_local spir_func void @_Z18__spirv_ImageWriteI14ocl_image1d_woisEvT_T0_T1_(target("spirv.Image", void, 0, 0, 0, 0, 0, 0, 1), i32, i16 signext)
+declare dso_local spir_func void @_Z18__spirv_ImageWriteI14ocl_image3d_woDv4_itEvT_T0_T1_(target("spirv.Image", void, 2, 0, 0, 0, 0, 0, 1), <4 x i32>, i16 zeroext)
+declare dso_local spir_func void @_Z18__spirv_ImageWriteI14ocl_image1d_woiDv2_fEvT_T0_T1_(target("spirv.Image", void, 0, 0, 0, 0, 0, 0, 1), i32, <2 x float>)
+declare dso_local spir_func void @_Z18__spirv_ImageWriteI14ocl_image2d_woDv2_iDF16_EvT_T0_T1_(target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 1), <2 x i32>, half)
diff --git a/llvm/test/CodeGen/X86/bfloat.ll b/llvm/test/CodeGen/X86/bfloat.ll
index 4d269cfff2afe..8449107f39e91 100644
--- a/llvm/test/CodeGen/X86/bfloat.ll
+++ b/llvm/test/CodeGen/X86/bfloat.ll
@@ -510,6 +510,103 @@ define bfloat @fold_ext_trunc2(bfloat %a) nounwind {
   ret bfloat %trunc
 }
 
+define bfloat @fold_from_half(half %a) nounwind {
+; X86-LABEL: fold_from_half:
+; X86: # %bb.0:
+; X86-NEXT: vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
+; X86-NEXT: vcvtsh2ss %xmm0, %xmm0, %xmm0
+; X86-NEXT: vcvtneps2bf16 %xmm0, %xmm0
+; X86-NEXT: retl
+;
+; SSE2-LABEL: fold_from_half:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pushq %rax
+; SSE2-NEXT: callq __extendhfsf2@PLT
+; SSE2-NEXT: callq __truncsfbf2@PLT
+; SSE2-NEXT: popq %rax
+; SSE2-NEXT: retq
+;
+; FP16-LABEL: fold_from_half:
+; FP16: # %bb.0:
+; FP16-NEXT: vcvtsh2ss %xmm0, %xmm0, %xmm0
+; FP16-NEXT: vcvtneps2bf16 %xmm0, %xmm0
+; FP16-NEXT: retq
+;
+; AVXNC-LABEL: fold_from_half:
+; AVXNC: # %bb.0:
+; AVXNC-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVXNC-NEXT: {vex} vcvtneps2bf16 %xmm0, %xmm0
+; AVXNC-NEXT: retq
+  %ext = fpext half %a to float
+  %trunc = fptrunc float %ext to bfloat
+  ret bfloat %trunc
+}
+
+define half @fold_to_half(bfloat %a) nounwind {
+; X86-LABEL: fold_to_half:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shll $16, %eax
+; X86-NEXT: vmovd %eax, %xmm0
+; X86-NEXT: vcvtss2sh %xmm0, %xmm0, %xmm0
+; X86-NEXT: retl
+;
+; SSE2-LABEL: fold_to_half:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pushq %rax
+; SSE2-NEXT: pextrw $0, %xmm0, %eax
+; SSE2-NEXT: shll $16, %eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: callq __truncsfhf2@PLT
+; SSE2-NEXT: popq %rax
+; SSE2-NEXT: retq
+;
+; BF16-LABEL: fold_to_half:
+; BF16: # %bb.0:
+; BF16-NEXT: vpextrw $0, %xmm0, %eax
+; BF16-NEXT: shll $16, %eax
+; BF16-NEXT: vmovd %eax, %xmm0
+; BF16-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; BF16-NEXT: retq
+;
+; FP16-LABEL: fold_to_half:
+; FP16: # %bb.0:
+; FP16-NEXT: vmovw %xmm0, %eax
+; FP16-NEXT: shll $16, %eax
+; FP16-NEXT: vmovd %eax, %xmm0
+; FP16-NEXT: vcvtss2sh %xmm0, %xmm0, %xmm0
+; FP16-NEXT: retq
+  %ext = fpext bfloat %a to float
+  %trunc = fptrunc float %ext to half
+  ret half %trunc
+}
+
+define bfloat @bitcast_from_half(half %a) nounwind {
+; X86-LABEL: bitcast_from_half:
+; X86: # %bb.0:
+; X86-NEXT: vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
+; X86-NEXT: retl
+;
+; CHECK-LABEL: bitcast_from_half:
+; CHECK: # %bb.0:
+; CHECK-NEXT: retq
+  %bc = bitcast half %a to bfloat
+  ret bfloat %bc
+}
+
+define half @bitcast_to_half(bfloat %a) nounwind {
+; X86-LABEL: bitcast_to_half:
+; X86: # %bb.0:
+; X86-NEXT: vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
+; X86-NEXT: retl
+;
+; CHECK-LABEL: bitcast_to_half:
+; CHECK: # %bb.0:
+; CHECK-NEXT: retq
+  %bc = bitcast bfloat %a to half
+  ret half %bc
+}
+
 define <8 x bfloat> @addv(<8 x bfloat> %a, <8 x bfloat> %b) nounwind {
 ; X86-LABEL: addv:
 ; X86: # %bb.0:
@@ -747,15 +844,15 @@ define <32 x bfloat> @pr63017_2() nounwind {
 ; SSE2: # %bb.0:
 ; SSE2-NEXT: xorl %eax, %eax
 ; SSE2-NEXT: testb %al, %al
-; SSE2-NEXT: jne .LBB12_1
+; SSE2-NEXT: jne .LBB16_1
 ; SSE2-NEXT: # %bb.2: # %cond.load
 ; SSE2-NEXT: movzwl (%rax), %eax
 ; SSE2-NEXT: shll $16, %eax
 ; SSE2-NEXT: movd %eax, %xmm0
-; SSE2-NEXT: jmp .LBB12_3
-; SSE2-NEXT: .LBB12_1:
+; SSE2-NEXT: jmp .LBB16_3
+; SSE2-NEXT: .LBB16_1:
 ; SSE2-NEXT: movd {{.*#+}} xmm0 = [-1.0E+0,0.0E+0,0.0E+0,0.0E+0]
-; SSE2-NEXT: .LBB12_3:
+; SSE2-NEXT: .LBB16_3:
 ; SSE2-NEXT: pushq %r14
 ; SSE2-NEXT: pushq %rbx
 ; SSE2-NEXT: subq $88, %rsp
@@ -992,10 +1089,10 @@ define <32 x bfloat> @pr63017_2() nounwind {
 ; AVXNC-NEXT: vbroadcastss {{.*#+}} ymm0 = [49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024]
 ; AVXNC-NEXT: xorl %eax, %eax
 ; AVXNC-NEXT: testb %al, %al
-; AVXNC-NEXT: jne .LBB12_2
+; AVXNC-NEXT: jne .LBB16_2
 ; AVXNC-NEXT: # %bb.1: # %cond.load
 ; AVXNC-NEXT: vmovups (%rax), %ymm0
-; AVXNC-NEXT: .LBB12_2:
+; AVXNC-NEXT: .LBB16_2:
 ; AVXNC-NEXT: vmovaps %ymm0, %ymm1
 ; AVXNC-NEXT: retq
   %1 = call <32 x bfloat> @llvm.masked.load.v32bf16.p0(ptr poison, i32 2, <32 x i1> poison, <32 x bfloat> )
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-8.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-8.ll
index 6fee9377d261a..ba51c65ccab13 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-8.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-8.ll
@@ -6721,7 +6721,7 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r10
 ; AVX512-NEXT: vmovdqa (%rcx), %xmm3
 ; AVX512-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512-NEXT: vmovdqa 32(%rcx), %xmm11
+; AVX512-NEXT: vmovdqa 32(%rcx), %xmm12
 ; AVX512-NEXT: vmovdqa 48(%rcx), %xmm0
 ; AVX512-NEXT: vmovdqa (%rdx), %xmm2
 ; AVX512-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -6729,11 +6729,11 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15]
 ; AVX512-NEXT: vpshuflw {{.*#+}} xmm3 = xmm2[0,0,2,1,4,5,6,7]
 ; AVX512-NEXT: vpshuflw {{.*#+}} xmm4 = xmm2[0,2,2,3,4,5,6,7]
-; AVX512-NEXT: vinserti128 $1, %xmm4, %ymm3, %ymm3
-; AVX512-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-NEXT: vinserti128 $1, %xmm4, %ymm3, %ymm10
 ; AVX512-NEXT: vpshufhw {{.*#+}} xmm3 = xmm2[0,1,2,3,4,4,6,5]
 ; AVX512-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
-; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm3, %ymm6
+; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm3, %ymm2
+; AVX512-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512-NEXT: vmovdqa (%r10), %xmm5
 ; AVX512-NEXT: vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512-NEXT: vmovdqa 48(%r10), %xmm3
@@ -6742,8 +6742,8 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512-NEXT: vmovdqa 48(%rax), %xmm4
 ; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm2[8],xmm5[8],xmm2[9],xmm5[9],xmm2[10],xmm5[10],xmm2[11],xmm5[11],xmm2[12],xmm5[12],xmm2[13],xmm5[13],xmm2[14],xmm5[14],xmm2[15],xmm5[15]
 ; AVX512-NEXT: vpshuflw {{.*#+}} xmm5 = xmm2[0,0,2,1,4,5,6,7]
-; AVX512-NEXT: vpshuflw {{.*#+}} xmm7 = xmm2[0,2,2,3,4,5,6,7]
-; AVX512-NEXT: vinserti128 $1, %xmm7, %ymm5, %ymm5
+; AVX512-NEXT: vpshuflw {{.*#+}} xmm6 = xmm2[0,2,2,3,4,5,6,7]
+; AVX512-NEXT: vinserti128 $1, %xmm6, %ymm5, %ymm5
 ; AVX512-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512-NEXT: vpshufhw {{.*#+}} xmm5 = xmm2[0,1,2,3,4,4,6,5]
 ; AVX512-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
@@ -6751,18 +6751,19 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512-NEXT: vmovdqa (%r9), %xmm5
 ; AVX512-NEXT: vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512-NEXT: vmovdqa 48(%r9), %xmm7
+; AVX512-NEXT: vmovdqa 48(%r9), %xmm6
 ; AVX512-NEXT: vmovdqa (%r8), %xmm2
 ; AVX512-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512-NEXT: vmovdqa 48(%r8), %xmm12
+; AVX512-NEXT: vmovdqa 48(%r8), %xmm8
 ; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm2[8],xmm5[8],xmm2[9],xmm5[9],xmm2[10],xmm5[10],xmm2[11],xmm5[11],xmm2[12],xmm5[12],xmm2[13],xmm5[13],xmm2[14],xmm5[14],xmm2[15],xmm5[15]
 ; AVX512-NEXT: vpshuflw {{.*#+}} xmm5 = xmm2[0,1,1,3,4,5,6,7]
-; AVX512-NEXT: vpshuflw {{.*#+}} xmm8 = xmm2[2,1,3,3,4,5,6,7]
-; AVX512-NEXT: vinserti128 $1, %xmm8, %ymm5, %ymm5
+; AVX512-NEXT: vpshuflw {{.*#+}} xmm7 = xmm2[2,1,3,3,4,5,6,7]
+; AVX512-NEXT: vinserti128 $1, %xmm7, %ymm5, %ymm5
 ; AVX512-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512-NEXT: vpshufhw {{.*#+}} xmm5 = xmm2[0,1,2,3,4,5,5,7]
 ; AVX512-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,5,7,7]
-; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm5, %ymm8
+; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm5, %ymm2
+; AVX512-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
 ; AVX512-NEXT: vpshuflw {{.*#+}} xmm5 = xmm2[0,0,2,1,4,5,6,7]
 ; AVX512-NEXT: vpshuflw {{.*#+}} xmm9 = xmm2[0,2,2,3,4,5,6,7]
@@ -6770,80 +6771,80 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512-NEXT: vpshufhw {{.*#+}} xmm5 = xmm2[0,1,2,3,4,4,6,5]
 ; AVX512-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
-; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm5, %ymm9
+; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm5, %ymm2
+; AVX512-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
 ; AVX512-NEXT: vpshuflw {{.*#+}} xmm5 = xmm2[0,0,2,1,4,5,6,7]
-; AVX512-NEXT: vpshuflw {{.*#+}} xmm10 = xmm2[0,2,2,3,4,5,6,7]
-; AVX512-NEXT: vinserti128 $1, %xmm10, %ymm5, %ymm5
+; AVX512-NEXT: vpshuflw {{.*#+}} xmm9 = xmm2[0,2,2,3,4,5,6,7]
+; AVX512-NEXT: vinserti128 $1, %xmm9, %ymm5, %ymm5
 ; AVX512-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512-NEXT: vpshufhw {{.*#+}} xmm5 = xmm2[0,1,2,3,4,4,6,5]
 ; AVX512-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
 ; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm5, %ymm2
 ; AVX512-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm12[0],xmm7[0],xmm12[1],xmm7[1],xmm12[2],xmm7[2],xmm12[3],xmm7[3],xmm12[4],xmm7[4],xmm12[5],xmm7[5],xmm12[6],xmm7[6],xmm12[7],xmm7[7]
+; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm8[0],xmm6[0],xmm8[1],xmm6[1],xmm8[2],xmm6[2],xmm8[3],xmm6[3],xmm8[4],xmm6[4],xmm8[5],xmm6[5],xmm8[6],xmm6[6],xmm8[7],xmm6[7]
 ; AVX512-NEXT: vpshuflw {{.*#+}} xmm5 = xmm2[0,1,1,3,4,5,6,7]
-; AVX512-NEXT: vpshuflw {{.*#+}} xmm13 = xmm2[2,1,3,3,4,5,6,7]
-; AVX512-NEXT: vinserti128 $1, %xmm13, %ymm5, %ymm5
+; AVX512-NEXT: vpshuflw {{.*#+}} xmm9 = xmm2[2,1,3,3,4,5,6,7]
+; AVX512-NEXT: vinserti128 $1, %xmm9, %ymm5, %ymm5
 ; AVX512-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512-NEXT: vmovdqa 32(%rdx), %xmm5
-; AVX512-NEXT: vpshufhw {{.*#+}} xmm13 = xmm2[0,1,2,3,4,5,5,7]
+; AVX512-NEXT: vpshufhw {{.*#+}} xmm9 = xmm2[0,1,2,3,4,5,5,7]
 ; AVX512-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,5,7,7]
-; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm13, %ymm2
-; AVX512-NEXT: vmovdqu %ymm2, (%rsp) # 32-byte Spill
+; AVX512-NEXT: vinserti32x4 $1, %xmm2, %ymm9, %ymm26
 ; AVX512-NEXT: vmovdqa 32(%r10), %xmm2
 ; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
 ; AVX512-NEXT: vpshuflw {{.*#+}} xmm0 = xmm1[0,0,2,1,4,5,6,7]
-; AVX512-NEXT: vpshuflw {{.*#+}} xmm13 = xmm1[0,2,2,3,4,5,6,7]
-; AVX512-NEXT: vinserti128 $1, %xmm13, %ymm0, %ymm10
+; AVX512-NEXT: vpshuflw {{.*#+}} xmm9 = xmm1[0,2,2,3,4,5,6,7]
+; AVX512-NEXT: vinserti32x4 $1, %xmm9, %ymm0, %ymm28
 ; AVX512-NEXT: vmovdqa 32(%rax), %xmm0
-; AVX512-NEXT: vpshufhw {{.*#+}} xmm13 = xmm1[0,1,2,3,4,4,6,5]
+; AVX512-NEXT: vpshufhw {{.*#+}} xmm9 = xmm1[0,1,2,3,4,4,6,5]
 ; AVX512-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
-; AVX512-NEXT: vinserti32x4 $1, %xmm1, %ymm13, %ymm28
+; AVX512-NEXT: vinserti32x4 $1, %xmm1, %ymm9, %ymm29
 ; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm4[8],xmm3[8],xmm4[9],xmm3[9],xmm4[10],xmm3[10],xmm4[11],xmm3[11],xmm4[12],xmm3[12],xmm4[13],xmm3[13],xmm4[14],xmm3[14],xmm4[15],xmm3[15]
 ; AVX512-NEXT: vpshuflw {{.*#+}} xmm3 = xmm1[0,0,2,1,4,5,6,7]
 ; AVX512-NEXT: vpshuflw {{.*#+}} xmm4 = xmm1[0,2,2,3,4,5,6,7]
-; AVX512-NEXT: vinserti32x4 $1, %xmm4, %ymm3, %ymm30
+; AVX512-NEXT: vinserti32x4 $1, %xmm4, %ymm3, %ymm25
 ; AVX512-NEXT: vpshufhw {{.*#+}} xmm3 = xmm1[0,1,2,3,4,4,6,5]
 ; AVX512-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
-; AVX512-NEXT: vinserti32x4 $1, %xmm1, %ymm3, %ymm27
-; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm12[8],xmm7[8],xmm12[9],xmm7[9],xmm12[10],xmm7[10],xmm12[11],xmm7[11],xmm12[12],xmm7[12],xmm12[13],xmm7[13],xmm12[14],xmm7[14],xmm12[15],xmm7[15]
+; AVX512-NEXT: vinserti32x4 $1, %xmm1, %ymm3, %ymm23
+; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm8[8],xmm6[8],xmm8[9],xmm6[9],xmm8[10],xmm6[10],xmm8[11],xmm6[11],xmm8[12],xmm6[12],xmm8[13],xmm6[13],xmm8[14],xmm6[14],xmm8[15],xmm6[15]
 ; AVX512-NEXT: vpshuflw {{.*#+}} xmm3 = xmm1[0,1,1,3,4,5,6,7]
 ; AVX512-NEXT: vpshuflw {{.*#+}} xmm4 = xmm1[2,1,3,3,4,5,6,7]
-; AVX512-NEXT: vinserti32x4 $1, %xmm4, %ymm3, %ymm26
+; AVX512-NEXT: vinserti32x4 $1, %xmm4, %ymm3, %ymm19
 ; AVX512-NEXT: vpshufhw {{.*#+}} xmm3 = xmm1[0,1,2,3,4,5,5,7]
 ; AVX512-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,7,7]
-; AVX512-NEXT: vinserti32x4 $1, %xmm1, %ymm3, %ymm22
-; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm5[0],xmm11[0],xmm5[1],xmm11[1],xmm5[2],xmm11[2],xmm5[3],xmm11[3],xmm5[4],xmm11[4],xmm5[5],xmm11[5],xmm5[6],xmm11[6],xmm5[7],xmm11[7]
+; AVX512-NEXT: vinserti32x4 $1, %xmm1, %ymm3, %ymm17
+; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm5[0],xmm12[0],xmm5[1],xmm12[1],xmm5[2],xmm12[2],xmm5[3],xmm12[3],xmm5[4],xmm12[4],xmm5[5],xmm12[5],xmm5[6],xmm12[6],xmm5[7],xmm12[7]
 ; AVX512-NEXT: vpshuflw {{.*#+}} xmm3 = xmm1[0,0,2,1,4,5,6,7]
 ; AVX512-NEXT: vpshuflw {{.*#+}} xmm4 = xmm1[0,2,2,3,4,5,6,7]
-; AVX512-NEXT: vinserti32x4 $1, %xmm4, %ymm3, %ymm20
+; AVX512-NEXT: vinserti32x4 $1, %xmm4, %ymm3, %ymm16
 ; AVX512-NEXT: vpshufhw {{.*#+}} xmm3 = xmm1[0,1,2,3,4,4,6,5]
 ; AVX512-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
-; AVX512-NEXT: vinserti32x4 $1, %xmm1, %ymm3, %ymm18
+; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm3, %ymm14
 ; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
 ; AVX512-NEXT: vpshuflw {{.*#+}} xmm3 = xmm1[0,0,2,1,4,5,6,7]
-; AVX512-NEXT: vpshuflw {{.*#+}} xmm7 = xmm1[0,2,2,3,4,5,6,7]
-; AVX512-NEXT: vinserti32x4 $1, %xmm7, %ymm3, %ymm25
+; AVX512-NEXT: vpshuflw {{.*#+}} xmm6 = xmm1[0,2,2,3,4,5,6,7]
+; AVX512-NEXT: vinserti32x4 $1, %xmm6, %ymm3, %ymm20
 ; AVX512-NEXT: vpshufhw {{.*#+}} xmm3 = xmm1[0,1,2,3,4,4,6,5]
 ; AVX512-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
-; AVX512-NEXT: vinserti32x4 $1, %xmm1, %ymm3, %ymm21
+; AVX512-NEXT: vinserti32x4 $1, %xmm1, %ymm3, %ymm18
 ; AVX512-NEXT: vmovdqa 32(%r9), %xmm1
 ; AVX512-NEXT: vmovdqa 32(%r8), %xmm3
-; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
-; AVX512-NEXT: vpshuflw {{.*#+}} xmm12 = xmm7[0,1,1,3,4,5,6,7]
-; AVX512-NEXT: vpshuflw {{.*#+}} xmm13 = xmm7[2,1,3,3,4,5,6,7]
-; AVX512-NEXT: vinserti32x4 $1, %xmm13, %ymm12, %ymm19
-; AVX512-NEXT: vpshufhw {{.*#+}} xmm12 = xmm7[0,1,2,3,4,5,5,7]
-; AVX512-NEXT: vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,6,5,7,7]
-; AVX512-NEXT: vinserti32x4 $1, %xmm7, %ymm12, %ymm17
-; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm5[8],xmm11[8],xmm5[9],xmm11[9],xmm5[10],xmm11[10],xmm5[11],xmm11[11],xmm5[12],xmm11[12],xmm5[13],xmm11[13],xmm5[14],xmm11[14],xmm5[15],xmm11[15]
-; AVX512-NEXT: vpshuflw {{.*#+}} xmm7 = xmm5[0,0,2,1,4,5,6,7]
-; AVX512-NEXT: vpshuflw {{.*#+}} xmm11 = xmm5[0,2,2,3,4,5,6,7]
-; AVX512-NEXT: vinserti128 $1, %xmm11, %ymm7, %ymm4
+; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
+; AVX512-NEXT: vpshuflw {{.*#+}} xmm8 = xmm6[0,1,1,3,4,5,6,7]
+; AVX512-NEXT: vpshuflw {{.*#+}} xmm9 = xmm6[2,1,3,3,4,5,6,7]
+; AVX512-NEXT: vinserti128 $1, %xmm9, %ymm8, %ymm15
+; AVX512-NEXT: vpshufhw {{.*#+}} xmm8 = xmm6[0,1,2,3,4,5,5,7]
+; AVX512-NEXT: vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,6,5,7,7]
+; AVX512-NEXT: vinserti128 $1, %xmm6, %ymm8, %ymm13
+; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm5[8],xmm12[8],xmm5[9],xmm12[9],xmm5[10],xmm12[10],xmm5[11],xmm12[11],xmm5[12],xmm12[12],xmm5[13],xmm12[13],xmm5[14],xmm12[14],xmm5[15],xmm12[15]
+; AVX512-NEXT: vpshuflw {{.*#+}} xmm6 = xmm5[0,0,2,1,4,5,6,7]
+; AVX512-NEXT: vpshuflw {{.*#+}} xmm8 = xmm5[0,2,2,3,4,5,6,7]
+; AVX512-NEXT: vinserti128 $1, %xmm8, %ymm6, %ymm4
 ; AVX512-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vpshufhw {{.*#+}} xmm7 = xmm5[0,1,2,3,4,4,6,5]
+; AVX512-NEXT: vpshufhw {{.*#+}} xmm6 = xmm5[0,1,2,3,4,4,6,5]
 ; AVX512-NEXT: vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,6,6,7]
-; AVX512-NEXT: vinserti128 $1, %xmm5, %ymm7, %ymm4
+; AVX512-NEXT: vinserti128 $1, %xmm5, %ymm6, %ymm4
 ; AVX512-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
 ; AVX512-NEXT: vpshuflw {{.*#+}} xmm2 = xmm0[0,0,2,1,4,5,6,7]
@@ -6863,273 +6864,244 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,7,7]
 ; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
 ; AVX512-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vmovdqa 16(%rcx), %xmm11
-; AVX512-NEXT: vmovdqa 16(%rdx), %xmm7
-; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm7[0],xmm11[0],xmm7[1],xmm11[1],xmm7[2],xmm11[2],xmm7[3],xmm11[3],xmm7[4],xmm11[4],xmm7[5],xmm11[5],xmm7[6],xmm11[6],xmm7[7],xmm11[7]
+; AVX512-NEXT: vmovdqa 16(%rcx), %xmm4
+; AVX512-NEXT: vmovdqa 16(%rdx), %xmm3
+; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
 ; AVX512-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[0,0,2,1,4,5,6,7]
 ; AVX512-NEXT: vpshuflw {{.*#+}} xmm5 = xmm0[0,2,2,3,4,5,6,7]
 ; AVX512-NEXT: vinserti128 $1, %xmm5, %ymm1, %ymm1
-; AVX512-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-NEXT: vmovdqu %ymm1, (%rsp) # 32-byte Spill
 ; AVX512-NEXT: vpshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,4,4,6,5]
 ; AVX512-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
 ; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
 ; AVX512-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT: vmovdqa 16(%r10), %xmm1
-; AVX512-NEXT: vmovdqa 16(%rax), %xmm15
-; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm15[0],xmm1[0],xmm15[1],xmm1[1],xmm15[2],xmm1[2],xmm15[3],xmm1[3],xmm15[4],xmm1[4],xmm15[5],xmm1[5],xmm15[6],xmm1[6],xmm15[7],xmm1[7]
-; AVX512-NEXT: vmovdqa64 %xmm1, %xmm16
+; AVX512-NEXT: vmovdqa 16(%r10), %xmm9
+; AVX512-NEXT: vmovdqa 16(%rax), %xmm8
+; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm8[0],xmm9[0],xmm8[1],xmm9[1],xmm8[2],xmm9[2],xmm8[3],xmm9[3],xmm8[4],xmm9[4],xmm8[5],xmm9[5],xmm8[6],xmm9[6],xmm8[7],xmm9[7]
 ; AVX512-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[0,0,2,1,4,5,6,7]
 ; AVX512-NEXT: vpshuflw {{.*#+}} xmm5 = xmm0[0,2,2,3,4,5,6,7]
-; AVX512-NEXT: vinserti128 $1, %xmm5, %ymm1, %ymm1
-; AVX512-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-NEXT: vinserti32x4 $1, %xmm5, %ymm1, %ymm30
 ; AVX512-NEXT: vpshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,4,4,6,5]
 ; AVX512-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
-; AVX512-NEXT: vinserti32x4 $1, %xmm0, %ymm1, %ymm29
-; AVX512-NEXT: vmovdqa 16(%r9), %xmm14
-; AVX512-NEXT: vmovdqa 16(%r8), %xmm12
-; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm12[0],xmm14[0],xmm12[1],xmm14[1],xmm12[2],xmm14[2],xmm12[3],xmm14[3],xmm12[4],xmm14[4],xmm12[5],xmm14[5],xmm12[6],xmm14[6],xmm12[7],xmm14[7]
-; AVX512-NEXT: vpshuflw {{.*#+}} xmm1 = xmm4[0,1,1,3,4,5,6,7]
-; AVX512-NEXT: vpshuflw {{.*#+}} xmm13 = xmm4[2,1,3,3,4,5,6,7]
-; AVX512-NEXT: vinserti32x4 $1, %xmm13, %ymm1, %ymm31
-; AVX512-NEXT: vmovdqa (%rsi), %xmm0
-; AVX512-NEXT: vmovdqa (%rdi), %xmm2
-; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
-; AVX512-NEXT: vmovdqa64 %xmm2, %xmm23
-; AVX512-NEXT: vmovdqa64 %xmm0, %xmm24
-; AVX512-NEXT: vpmovzxwq {{.*#+}} ymm13 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
-; AVX512-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
+; AVX512-NEXT: vinserti32x4 $1, %xmm0, %ymm1, %ymm31
+; AVX512-NEXT: vmovdqa 16(%r9), %xmm6
+; AVX512-NEXT: vmovdqa 16(%r8), %xmm5
+; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3],xmm5[4],xmm6[4],xmm5[5],xmm6[5],xmm5[6],xmm6[6],xmm5[7],xmm6[7]
+; AVX512-NEXT: vpshuflw {{.*#+}} xmm1 = xmm2[0,1,1,3,4,5,6,7]
+; AVX512-NEXT: vpshuflw {{.*#+}} xmm11 = xmm2[2,1,3,3,4,5,6,7]
+; AVX512-NEXT: vinserti32x4 $1, %xmm11, %ymm1, %ymm27
 ; AVX512-NEXT: vmovdqa 48(%rsi), %xmm1
-; AVX512-NEXT: vmovdqa 48(%rdi), %xmm0
-; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; AVX512-NEXT: vpmovzxwq {{.*#+}} ymm5 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
-; AVX512-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
-; AVX512-NEXT: vpshufd $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
-; AVX512-NEXT: # ymm0 = mem[0,1,1,3,4,5,5,7]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm6 = ymm6[2,1,3,3,6,5,7,7]
-; AVX512-NEXT: vinserti64x4 $1, %ymm6, %zmm0, %zmm0
-; AVX512-NEXT: vpmovzxwq {{.*#+}} ymm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
-; AVX512-NEXT: vinserti64x4 $1, %ymm3, %zmm13, %zmm6
-; AVX512-NEXT: vpbroadcastq {{.*#+}} zmm13 = [65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535]
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm6 = zmm0 ^ (zmm13 & (zmm6 ^ zmm0))
+; AVX512-NEXT: vmovdqa 48(%rdi), %xmm11
+; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm11[0],xmm1[0],xmm11[1],xmm1[1],xmm11[2],xmm1[2],xmm11[3],xmm1[3],xmm11[4],xmm1[4],xmm11[5],xmm1[5],xmm11[6],xmm1[6],xmm11[7],xmm1[7]
+; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm11[8],xmm1[8],xmm11[9],xmm1[9],xmm11[10],xmm1[10],xmm11[11],xmm1[11],xmm11[12],xmm1[12],xmm11[13],xmm1[13],xmm11[14],xmm1[14],xmm11[15],xmm1[15]
+; AVX512-NEXT: vmovdqa (%rsi), %xmm7
+; AVX512-NEXT: vmovdqa (%rdi), %xmm12
+; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm11 = xmm12[8],xmm7[8],xmm12[9],xmm7[9],xmm12[10],xmm7[10],xmm12[11],xmm7[11],xmm12[12],xmm7[12],xmm12[13],xmm7[13],xmm12[14],xmm7[14],xmm12[15],xmm7[15]
+; AVX512-NEXT: vmovdqa64 %xmm7, %xmm21
+; AVX512-NEXT: vpmovzxwq {{.*#+}} zmm22 = xmm11[0],zero,zero,zero,xmm11[1],zero,zero,zero,xmm11[2],zero,zero,zero,xmm11[3],zero,zero,zero,xmm11[4],zero,zero,zero,xmm11[5],zero,zero,zero,xmm11[6],zero,zero,zero,xmm11[7],zero,zero,zero
+; AVX512-NEXT: vpmovzxwq {{.*#+}} zmm24 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
+; AVX512-NEXT: vpshufd {{.*#+}} ymm0 = ymm10[0,1,1,3,4,5,5,7]
+; AVX512-NEXT: vpshufd $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Folded Reload
+; AVX512-NEXT: # ymm11 = mem[2,1,3,3,6,5,7,7]
+; AVX512-NEXT: vinserti64x4 $1, %ymm11, %zmm0, %zmm0
+; AVX512-NEXT: vpbroadcastq {{.*#+}} zmm11 = [65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535]
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm22 = zmm0 ^ (zmm11 & (zmm22 ^ zmm0))
 ; AVX512-NEXT: vpshufd $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
 ; AVX512-NEXT: # ymm0 = mem[0,0,2,1,4,4,6,5]
-; AVX512-NEXT: vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
-; AVX512-NEXT: # ymm3 = mem[0,2,2,3,4,6,6,7]
-; AVX512-NEXT: vinserti64x4 $1, %ymm3, %zmm0, %zmm0
-; AVX512-NEXT: vpshufd $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
-; AVX512-NEXT: # ymm3 = mem[0,0,2,1,4,4,6,5]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm8 = ymm8[0,2,2,3,4,6,6,7]
-; AVX512-NEXT: vinserti64x4 $1, %ymm8, %zmm3, %zmm3
-; AVX512-NEXT: vpbroadcastq {{.*#+}} zmm8 = [65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0]
-; AVX512-NEXT: vpandnq %zmm0, %zmm8, %zmm0
-; AVX512-NEXT: vpandq %zmm8, %zmm3, %zmm3
+; AVX512-NEXT: vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Folded Reload
+; AVX512-NEXT: # ymm7 = mem[0,2,2,3,4,6,6,7]
+; AVX512-NEXT: vinserti64x4 $1, %ymm7, %zmm0, %zmm0
+; AVX512-NEXT: vpshufd $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Folded Reload
+; AVX512-NEXT: # ymm7 = mem[0,0,2,1,4,4,6,5]
+; AVX512-NEXT: vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Folded Reload
+; AVX512-NEXT: # ymm10 = mem[0,2,2,3,4,6,6,7]
+; AVX512-NEXT: vinserti64x4 $1, %ymm10, %zmm7, %zmm10
+; AVX512-NEXT: vpbroadcastq {{.*#+}} zmm7 = [65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0]
+; AVX512-NEXT: vpandnq %zmm0, %zmm7, %zmm0
+; AVX512-NEXT: vpandq %zmm7, %zmm10, %zmm10
 ; AVX512-NEXT: movw $-21846, %ax # imm = 0xAAAA
 ; AVX512-NEXT: kmovw %eax, %k1
-; AVX512-NEXT: vpord %zmm0, %zmm3, %zmm6 {%k1}
+; AVX512-NEXT: vpord %zmm0, %zmm10, %zmm22 {%k1}
 ; AVX512-NEXT: vpshufd $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
 ; AVX512-NEXT: # ymm0 = mem[0,1,1,3,4,5,5,7]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm3 = ymm9[2,1,3,3,6,5,7,7]
-; AVX512-NEXT: vinserti64x4 $1, %ymm3, %zmm0, %zmm0
-; AVX512-NEXT: vpmovzxwq {{.*#+}} ymm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
-; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm5, %zmm9
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm9 = zmm0 ^ (zmm13 & (zmm9 ^ zmm0))
+; AVX512-NEXT: vpshufd $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Folded Reload
+; AVX512-NEXT: # ymm10 = mem[2,1,3,3,6,5,7,7]
+; AVX512-NEXT: vinserti64x4 $1, %ymm10, %zmm0, %zmm0
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm24 = zmm0 ^ (zmm11 & (zmm24 ^ zmm0))
 ; AVX512-NEXT: vpshufd $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
 ; AVX512-NEXT: # ymm0 = mem[0,0,2,1,4,4,6,5]
-; AVX512-NEXT: vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
-; AVX512-NEXT: # ymm2 = mem[0,2,2,3,4,6,6,7]
-; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
-; AVX512-NEXT: vpshufd $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
-; AVX512-NEXT: # ymm2 = mem[0,0,2,1,4,4,6,5]
-; AVX512-NEXT: vpshufd $232, (%rsp), %ymm3 # 32-byte Folded Reload
-; AVX512-NEXT: # ymm3 = mem[0,2,2,3,4,6,6,7]
-; AVX512-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm2
-; AVX512-NEXT: vpandnq %zmm0, %zmm8, %zmm0
-; AVX512-NEXT: vpandq %zmm8, %zmm2, %zmm2
-; AVX512-NEXT: vpord %zmm0, %zmm2, %zmm9 {%k1}
-; AVX512-NEXT: vpshufd {{.*#+}} ymm0 = ymm10[0,1,1,3,4,5,5,7]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm2 = ymm28[2,1,3,3,6,5,7,7]
-; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
-; AVX512-NEXT: vpmovzxwq {{.*#+}} ymm2 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; AVX512-NEXT: vpmovzxwq {{.*#+}} ymm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
-; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm10
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm10 = zmm0 ^ (zmm13 & (zmm10 ^ zmm0))
-; AVX512-NEXT: vpshufd {{.*#+}} ymm0 = ymm30[0,0,2,1,4,4,6,5]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm1 = ymm27[0,2,2,3,4,6,6,7]
+; AVX512-NEXT: vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Folded Reload
+; AVX512-NEXT: # ymm10 = mem[0,2,2,3,4,6,6,7]
+; AVX512-NEXT: vinserti64x4 $1, %ymm10, %zmm0, %zmm0
+; AVX512-NEXT: vpshufd $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Folded Reload
+; AVX512-NEXT: # ymm10 = mem[0,0,2,1,4,4,6,5]
+; AVX512-NEXT: vpshufd {{.*#+}} ymm26 = ymm26[0,2,2,3,4,6,6,7]
+; AVX512-NEXT: vinserti64x4 $1, %ymm26, %zmm10, %zmm10
+; AVX512-NEXT: vpandnq %zmm0, %zmm7, %zmm0
+; AVX512-NEXT: vpandq %zmm7, %zmm10, %zmm10
+; AVX512-NEXT: vpord %zmm0, %zmm10, %zmm24 {%k1}
+; AVX512-NEXT: vpshufd {{.*#+}} ymm0 = ymm28[0,1,1,3,4,5,5,7]
+; AVX512-NEXT: vpshufd {{.*#+}} ymm10 = ymm29[2,1,3,3,6,5,7,7]
+; AVX512-NEXT: vinserti64x4 $1, %ymm10, %zmm0, %zmm0
+; AVX512-NEXT: vpmovzxwq {{.*#+}} zmm10 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm10 = zmm0 ^ (zmm11 & (zmm10 ^ zmm0))
+; AVX512-NEXT: vpshufd {{.*#+}} ymm0 = ymm25[0,0,2,1,4,4,6,5]
+; AVX512-NEXT: vpshufd {{.*#+}} ymm1 = ymm23[0,2,2,3,4,6,6,7]
 ; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; AVX512-NEXT: vpshufd {{.*#+}} ymm1 = ymm26[0,0,2,1,4,4,6,5]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm2 = ymm22[0,2,2,3,4,6,6,7]
-; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
-; AVX512-NEXT: vpandnq %zmm0, %zmm8, %zmm0
-; AVX512-NEXT: vpandq %zmm8, %zmm1, %zmm1
+; AVX512-NEXT: vpshufd {{.*#+}} ymm1 = ymm19[0,0,2,1,4,4,6,5]
+; AVX512-NEXT: vpshufd {{.*#+}} ymm17 = ymm17[0,2,2,3,4,6,6,7]
+; AVX512-NEXT: vinserti64x4 $1, %ymm17, %zmm1, %zmm1
+; AVX512-NEXT: vpandnq %zmm0, %zmm7, %zmm0
+; AVX512-NEXT: vpandq %zmm7, %zmm1, %zmm1
 ; AVX512-NEXT: vpord %zmm0, %zmm1, %zmm10 {%k1}
-; AVX512-NEXT: vpshufd {{.*#+}} ymm0 = ymm20[0,1,1,3,4,5,5,7]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm1 = ymm18[2,1,3,3,6,5,7,7]
-; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm2
-; AVX512-NEXT: vpshufhw {{.*#+}} xmm0 = xmm4[0,1,2,3,4,5,5,7]
-; AVX512-NEXT: vpshufhw {{.*#+}} xmm1 = xmm4[0,1,2,3,6,5,7,7]
-; AVX512-NEXT: vinserti32x4 $1, %xmm1, %ymm0, %ymm27
-; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm7[8],xmm11[8],xmm7[9],xmm11[9],xmm7[10],xmm11[10],xmm7[11],xmm11[11],xmm7[12],xmm11[12],xmm7[13],xmm11[13],xmm7[14],xmm11[14],xmm7[15],xmm11[15]
-; AVX512-NEXT: vpshuflw {{.*#+}} xmm0 = xmm3[0,0,2,1,4,5,6,7]
-; AVX512-NEXT: vpshuflw {{.*#+}} xmm1 = xmm3[0,2,2,3,4,5,6,7]
-; AVX512-NEXT: vinserti32x4 $1, %xmm1, %ymm0, %ymm18
-; AVX512-NEXT: vmovdqa 32(%rsi), %xmm1
+; AVX512-NEXT: vpshufd {{.*#+}} ymm0 = ymm16[0,1,1,3,4,5,5,7]
+; AVX512-NEXT: vpshufd {{.*#+}} ymm1 = ymm14[2,1,3,3,6,5,7,7]
+; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm1
+; AVX512-NEXT: vpshufhw {{.*#+}} xmm0 = xmm2[0,1,2,3,4,5,5,7]
+; AVX512-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,5,7,7]
+; AVX512-NEXT: vinserti32x4 $1, %xmm2, %ymm0, %ymm17
+; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15]
+; AVX512-NEXT: vpshuflw {{.*#+}} xmm0 = xmm2[0,0,2,1,4,5,6,7]
+; AVX512-NEXT: vpshuflw {{.*#+}} xmm3 = xmm2[0,2,2,3,4,5,6,7]
+; AVX512-NEXT: vinserti32x4 $1, %xmm3, %ymm0, %ymm19
+; AVX512-NEXT: vmovdqa 32(%rsi), %xmm14
 ; AVX512-NEXT: vmovdqa 32(%rdi), %xmm0
-; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; AVX512-NEXT: vpmovzxwq {{.*#+}} ymm5 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero,xmm4[2],zero,zero,zero,xmm4[3],zero,zero,zero
-; AVX512-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[2,3,2,3]
-; AVX512-NEXT: vpmovzxwq {{.*#+}} ymm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero,xmm4[2],zero,zero,zero,xmm4[3],zero,zero,zero
-; AVX512-NEXT: vinserti64x4 $1, %ymm4, %zmm5, %zmm4
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm4 = zmm2 ^ (zmm13 & (zmm4 ^ zmm2))
-; AVX512-NEXT: vpshufd {{.*#+}} ymm2 = ymm25[0,0,2,1,4,4,6,5]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm5 = ymm21[0,2,2,3,4,6,6,7]
-; AVX512-NEXT: vinserti64x4 $1, %ymm5, %zmm2, %zmm2
-; AVX512-NEXT: vpshufd {{.*#+}} ymm5 = ymm19[0,0,2,1,4,4,6,5]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm7 = ymm17[0,2,2,3,4,6,6,7]
-; AVX512-NEXT: vinserti64x4 $1, %ymm7, %zmm5, %zmm5
-; AVX512-NEXT: vpandnq %zmm2, %zmm8, %zmm2
-; AVX512-NEXT: vpandq %zmm8, %zmm5, %zmm5
-; AVX512-NEXT: vpord %zmm2, %zmm5, %zmm4 {%k1}
-; AVX512-NEXT: vpshufhw {{.*#+}} xmm2 = xmm3[0,1,2,3,4,4,6,5]
-; AVX512-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,6,7]
-; AVX512-NEXT: vinserti32x4 $1, %xmm3, %ymm2, %ymm17
-; AVX512-NEXT: vmovdqa64 %xmm16, %xmm2
-; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm15[8],xmm2[8],xmm15[9],xmm2[9],xmm15[10],xmm2[10],xmm15[11],xmm2[11],xmm15[12],xmm2[12],xmm15[13],xmm2[13],xmm15[14],xmm2[14],xmm15[15],xmm2[15]
+; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3],xmm0[4],xmm14[4],xmm0[5],xmm14[5],xmm0[6],xmm14[6],xmm0[7],xmm14[7]
+; AVX512-NEXT: vpmovzxwq {{.*#+}} zmm4 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero,xmm3[4],zero,zero,zero,xmm3[5],zero,zero,zero,xmm3[6],zero,zero,zero,xmm3[7],zero,zero,zero
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm4 = zmm1 ^ (zmm11 & (zmm4 ^ zmm1))
+; AVX512-NEXT: vpshufd {{.*#+}} ymm1 = ymm20[0,0,2,1,4,4,6,5]
+; AVX512-NEXT: vpshufd {{.*#+}} ymm3 = ymm18[0,2,2,3,4,6,6,7]
+; AVX512-NEXT: vinserti64x4 $1, %ymm3, %zmm1, %zmm1
+; AVX512-NEXT: vpshufd {{.*#+}} ymm3 = ymm15[0,0,2,1,4,4,6,5]
+; AVX512-NEXT: vpshufd {{.*#+}} ymm13 = ymm13[0,2,2,3,4,6,6,7]
+; AVX512-NEXT: vinserti64x4 $1, %ymm13, %zmm3, %zmm3
+; AVX512-NEXT: vpandnq %zmm1, %zmm7, %zmm1
+; AVX512-NEXT: vpandq %zmm7, %zmm3, %zmm3
+; AVX512-NEXT: vpord %zmm1, %zmm3, %zmm4 {%k1}
+; AVX512-NEXT: vpshufhw {{.*#+}} xmm1 = xmm2[0,1,2,3,4,4,6,5]
+; AVX512-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
+; AVX512-NEXT: vinserti32x4 $1, %xmm2, %ymm1, %ymm20
+; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm8[8],xmm9[8],xmm8[9],xmm9[9],xmm8[10],xmm9[10],xmm8[11],xmm9[11],xmm8[12],xmm9[12],xmm8[13],xmm9[13],xmm8[14],xmm9[14],xmm8[15],xmm9[15]
 ; AVX512-NEXT: vpshuflw {{.*#+}} xmm3 = xmm2[0,0,2,1,4,5,6,7]
-; AVX512-NEXT: vpshuflw {{.*#+}} xmm5 = xmm2[0,2,2,3,4,5,6,7]
-; AVX512-NEXT: vinserti32x4 $1, %xmm5, %ymm3, %ymm21
+; AVX512-NEXT: vpshuflw {{.*#+}} xmm8 = xmm2[0,2,2,3,4,5,6,7]
+; AVX512-NEXT: vinserti32x4 $1, %xmm8, %ymm3, %ymm23
 ; AVX512-NEXT: vpshufhw {{.*#+}} xmm3 = xmm2[0,1,2,3,4,4,6,5]
 ; AVX512-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
-; AVX512-NEXT: vinserti32x4 $1, %xmm2, %ymm3, %ymm22
-; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm12[8],xmm14[8],xmm12[9],xmm14[9],xmm12[10],xmm14[10],xmm12[11],xmm14[11],xmm12[12],xmm14[12],xmm12[13],xmm14[13],xmm12[14],xmm14[14],xmm12[15],xmm14[15]
+; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm3, %ymm9
+; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm5[8],xmm6[8],xmm5[9],xmm6[9],xmm5[10],xmm6[10],xmm5[11],xmm6[11],xmm5[12],xmm6[12],xmm5[13],xmm6[13],xmm5[14],xmm6[14],xmm5[15],xmm6[15]
 ; AVX512-NEXT: vpshuflw {{.*#+}} xmm2 = xmm3[0,1,1,3,4,5,6,7]
 ; AVX512-NEXT: vpshuflw {{.*#+}} xmm5 = xmm3[2,1,3,3,4,5,6,7]
-; AVX512-NEXT: vinserti32x4 $1, %xmm5, %ymm2, %ymm30
+; AVX512-NEXT: vinserti128 $1, %xmm5, %ymm2, %ymm2
 ; AVX512-NEXT: vpshufhw {{.*#+}} xmm5 = xmm3[0,1,2,3,4,5,5,7]
 ; AVX512-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,5,7,7]
 ; AVX512-NEXT: vinserti128 $1, %xmm3, %ymm5, %ymm3
-; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; AVX512-NEXT: vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm5 # 16-byte Folded Reload
-; AVX512-NEXT: # xmm5 = xmm5[0],mem[0],xmm5[1],mem[1],xmm5[2],mem[2],xmm5[3],mem[3],xmm5[4],mem[4],xmm5[5],mem[5],xmm5[6],mem[6],xmm5[7],mem[7]
-; AVX512-NEXT: vpshuflw {{.*#+}} xmm12 = xmm5[0,0,2,1,4,5,6,7]
-; AVX512-NEXT: vpshuflw {{.*#+}} xmm14 = xmm5[0,2,2,3,4,5,6,7]
-; AVX512-NEXT: vinserti128 $1, %xmm14, %ymm12, %ymm14
-; AVX512-NEXT: vpshufhw {{.*#+}} xmm12 = xmm5[0,1,2,3,4,4,6,5]
+; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512-NEXT: vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm5 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm5 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3],xmm1[4],mem[4],xmm1[5],mem[5],xmm1[6],mem[6],xmm1[7],mem[7]
+; AVX512-NEXT: vpshuflw {{.*#+}} xmm6 = xmm5[0,0,2,1,4,5,6,7]
+; AVX512-NEXT: vpshuflw {{.*#+}} xmm13 = xmm5[0,2,2,3,4,5,6,7]
+; AVX512-NEXT: vinserti128 $1, %xmm13, %ymm6, %ymm13
+; AVX512-NEXT: vpshufhw {{.*#+}} xmm6 = xmm5[0,1,2,3,4,4,6,5]
 ; AVX512-NEXT: vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,6,6,7]
-; AVX512-NEXT: vinserti128 $1, %xmm5, %ymm12, %ymm15
-; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; AVX512-NEXT: vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm5 # 16-byte Folded Reload
-; AVX512-NEXT: # xmm5 = xmm5[0],mem[0],xmm5[1],mem[1],xmm5[2],mem[2],xmm5[3],mem[3],xmm5[4],mem[4],xmm5[5],mem[5],xmm5[6],mem[6],xmm5[7],mem[7]
-; AVX512-NEXT: vpshuflw {{.*#+}} xmm12 = xmm5[0,0,2,1,4,5,6,7]
-; AVX512-NEXT: vpshuflw {{.*#+}} xmm7 = xmm5[0,2,2,3,4,5,6,7]
-; AVX512-NEXT: vinserti32x4 $1, %xmm7, %ymm12, %ymm19
-; AVX512-NEXT: vpshufhw {{.*#+}} xmm7 = xmm5[0,1,2,3,4,4,6,5]
+; AVX512-NEXT: vinserti128 $1, %xmm5, %ymm6, %ymm15
+; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512-NEXT: vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm5 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm5 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3],xmm1[4],mem[4],xmm1[5],mem[5],xmm1[6],mem[6],xmm1[7],mem[7]
+; AVX512-NEXT: vpshuflw {{.*#+}} xmm6 = xmm5[0,0,2,1,4,5,6,7]
+; AVX512-NEXT: vpshuflw {{.*#+}} xmm1 = xmm5[0,2,2,3,4,5,6,7]
+; AVX512-NEXT: vinserti32x4 $1, %xmm1, %ymm6, %ymm16
+; AVX512-NEXT: vpshufhw {{.*#+}} xmm1 = xmm5[0,1,2,3,4,4,6,5]
 ; AVX512-NEXT: vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,6,6,7]
-; AVX512-NEXT: vinserti32x4 $1, %xmm5, %ymm7, %ymm20
-; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX512-NEXT: vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm1 # 16-byte Folded Reload
-; AVX512-NEXT: # xmm1 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3],xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7]
-; AVX512-NEXT: vpshuflw {{.*#+}} xmm0 = xmm1[0,1,1,3,4,5,6,7]
-; AVX512-NEXT: vpshuflw {{.*#+}} xmm7 = xmm1[2,1,3,3,4,5,6,7]
-; AVX512-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm0
-; AVX512-NEXT: vpshufhw {{.*#+}} xmm7 = xmm1[0,1,2,3,4,5,5,7]
+; AVX512-NEXT: vinserti32x4 $1, %xmm5, %ymm1, %ymm18
+; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm14[8],xmm0[9],xmm14[9],xmm0[10],xmm14[10],xmm0[11],xmm14[11],xmm0[12],xmm14[12],xmm0[13],xmm14[13],xmm0[14],xmm14[14],xmm0[15],xmm14[15]
+; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512-NEXT: vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3],xmm1[4],mem[4],xmm1[5],mem[5],xmm1[6],mem[6],xmm1[7],mem[7]
+; AVX512-NEXT: vpshuflw {{.*#+}} xmm5 = xmm1[0,1,1,3,4,5,6,7]
+; AVX512-NEXT: vpshuflw {{.*#+}} xmm6 = xmm1[2,1,3,3,4,5,6,7]
+; AVX512-NEXT: vinserti128 $1, %xmm6, %ymm5, %ymm5
+; AVX512-NEXT: vpshufhw {{.*#+}} xmm6 = xmm1[0,1,2,3,4,5,5,7]
 ; AVX512-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,7,7]
-; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm7, %ymm1
-; AVX512-NEXT: vmovdqa 16(%rsi), %xmm7
-; AVX512-NEXT: vmovdqa 16(%rdi), %xmm12
-; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm11 = xmm12[0],xmm7[0],xmm12[1],xmm7[1],xmm12[2],xmm7[2],xmm12[3],xmm7[3],xmm12[4],xmm7[4],xmm12[5],xmm7[5],xmm12[6],xmm7[6],xmm12[7],xmm7[7]
-; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm7 = xmm12[8],xmm7[8],xmm12[9],xmm7[9],xmm12[10],xmm7[10],xmm12[11],xmm7[11],xmm12[12],xmm7[12],xmm12[13],xmm7[13],xmm12[14],xmm7[14],xmm12[15],xmm7[15]
-; AVX512-NEXT: vmovdqa64 %xmm23, %xmm12
-; AVX512-NEXT: vmovdqa64 %xmm24, %xmm2
-; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm12 = xmm12[0],xmm2[0],xmm12[1],xmm2[1],xmm12[2],xmm2[2],xmm12[3],xmm2[3],xmm12[4],xmm2[4],xmm12[5],xmm2[5],xmm12[6],xmm2[6],xmm12[7],xmm2[7]
-; AVX512-NEXT: vpshufd $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm23 # 32-byte Folded Reload
-; AVX512-NEXT: # ymm23 = mem[0,1,1,3,4,5,5,7]
-; AVX512-NEXT: vpshufd $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm24 # 32-byte Folded Reload
-; AVX512-NEXT: # ymm24 = mem[2,1,3,3,6,5,7,7]
-; AVX512-NEXT: vinserti64x4 $1, %ymm24, %zmm23, %zmm23
-; AVX512-NEXT: vpmovzxwq {{.*#+}} ymm24 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero,xmm5[2],zero,zero,zero,xmm5[3],zero,zero,zero
-; AVX512-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[2,3,2,3]
-; AVX512-NEXT: vpmovzxwq {{.*#+}} ymm5 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero,xmm5[2],zero,zero,zero,xmm5[3],zero,zero,zero
-; AVX512-NEXT: vinserti64x4 $1, %ymm5, %zmm24, %zmm5
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm5 = zmm23 ^ (zmm13 & (zmm5 ^ zmm23))
-; AVX512-NEXT: vpshufd $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm23 # 32-byte Folded Reload
-; AVX512-NEXT: # ymm23 = mem[0,0,2,1,4,4,6,5]
-; AVX512-NEXT: vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm24 # 32-byte Folded Reload
-; AVX512-NEXT: # ymm24 = mem[0,2,2,3,4,6,6,7]
-; AVX512-NEXT: vinserti64x4 $1, %ymm24, %zmm23, %zmm23
-; AVX512-NEXT: vpmovzxwq {{.*#+}} ymm24 = xmm11[0],zero,zero,zero,xmm11[1],zero,zero,zero,xmm11[2],zero,zero,zero,xmm11[3],zero,zero,zero
-; AVX512-NEXT: vpshufd {{.*#+}} xmm11 = xmm11[2,3,2,3]
+; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm6, %ymm1
+; AVX512-NEXT: vmovdqa 16(%rsi), %xmm6
+; AVX512-NEXT: vmovdqa 16(%rdi), %xmm14
+; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm14[0],xmm6[0],xmm14[1],xmm6[1],xmm14[2],xmm6[2],xmm14[3],xmm6[3],xmm14[4],xmm6[4],xmm14[5],xmm6[5],xmm14[6],xmm6[6],xmm14[7],xmm6[7]
+; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm14[8],xmm6[8],xmm14[9],xmm6[9],xmm14[10],xmm6[10],xmm14[11],xmm6[11],xmm14[12],xmm6[12],xmm14[13],xmm6[13],xmm14[14],xmm6[14],xmm14[15],xmm6[15]
+; AVX512-NEXT: vmovdqa64 %xmm21, %xmm14
+; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm12 = xmm12[0],xmm14[0],xmm12[1],xmm14[1],xmm12[2],xmm14[2],xmm12[3],xmm14[3],xmm12[4],xmm14[4],xmm12[5],xmm14[5],xmm12[6],xmm14[6],xmm12[7],xmm14[7]
+; AVX512-NEXT: vpshufd $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
+; AVX512-NEXT: # ymm14 = mem[0,1,1,3,4,5,5,7]
+; AVX512-NEXT: vpshufd $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm25 # 32-byte Folded Reload
+; AVX512-NEXT: # ymm25 = mem[2,1,3,3,6,5,7,7]
+; AVX512-NEXT: vinserti64x4 $1, %ymm25, %zmm14, %zmm14
+; AVX512-NEXT: vpmovzxwq {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm0 = zmm14 ^ (zmm11 & (zmm0 ^ zmm14))
+; AVX512-NEXT: vpmovzxwq {{.*#+}} zmm8 = xmm8[0],zero,zero,zero,xmm8[1],zero,zero,zero,xmm8[2],zero,zero,zero,xmm8[3],zero,zero,zero,xmm8[4],zero,zero,zero,xmm8[5],zero,zero,zero,xmm8[6],zero,zero,zero,xmm8[7],zero,zero,zero
+; AVX512-NEXT: vpmovzxwq {{.*#+}} zmm6 = xmm6[0],zero,zero,zero,xmm6[1],zero,zero,zero,xmm6[2],zero,zero,zero,xmm6[3],zero,zero,zero,xmm6[4],zero,zero,zero,xmm6[5],zero,zero,zero,xmm6[6],zero,zero,zero,xmm6[7],zero,zero,zero
+; AVX512-NEXT: vpmovzxwq {{.*#+}} zmm12 = xmm12[0],zero,zero,zero,xmm12[1],zero,zero,zero,xmm12[2],zero,zero,zero,xmm12[3],zero,zero,zero,xmm12[4],zero,zero,zero,xmm12[5],zero,zero,zero,xmm12[6],zero,zero,zero,xmm12[7],zero,zero,zero
+; AVX512-NEXT: vpshufd $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
+; AVX512-NEXT: # ymm14 = mem[0,0,2,1,4,4,6,5]
+; AVX512-NEXT: vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm25 # 32-byte Folded Reload
+; AVX512-NEXT: # ymm25 = mem[0,2,2,3,4,6,6,7]
+; AVX512-NEXT: vinserti64x4 $1, %ymm25, %zmm14, %zmm14
 ; AVX512-NEXT: vpshufd $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm25 # 32-byte Folded Reload
 ; AVX512-NEXT: # ymm25 = mem[0,0,2,1,4,4,6,5]
 ; AVX512-NEXT: vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm26 # 32-byte Folded Reload
 ; AVX512-NEXT: # ymm26 = mem[0,2,2,3,4,6,6,7]
 ; AVX512-NEXT: vinserti64x4 $1, %ymm26, %zmm25, %zmm25
-; AVX512-NEXT: vpmovzxwq {{.*#+}} ymm26 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
-; AVX512-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[2,3,2,3]
-; AVX512-NEXT: vpandnq %zmm23, %zmm8, %zmm23
-; AVX512-NEXT: vpandq %zmm8, %zmm25, %zmm25
-; AVX512-NEXT: vpord %zmm23, %zmm25, %zmm5 {%k1}
-; AVX512-NEXT: vpmovzxwq {{.*#+}} ymm23 = xmm12[0],zero,zero,zero,xmm12[1],zero,zero,zero,xmm12[2],zero,zero,zero,xmm12[3],zero,zero,zero
-; AVX512-NEXT: vpshufd {{.*#+}} xmm12 = xmm12[2,3,2,3]
-; AVX512-NEXT: vpshufd $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm25 # 32-byte Folded Reload
-; AVX512-NEXT: # ymm25 = mem[0,1,1,3,4,5,5,7]
-; AVX512-NEXT: vpshufd $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm28 # 32-byte Folded Reload
-; AVX512-NEXT: # ymm28 = mem[2,1,3,3,6,5,7,7]
-; AVX512-NEXT: vinserti64x4 $1, %ymm28, %zmm25, %zmm25
-; AVX512-NEXT: vpmovzxwq {{.*#+}} ymm11 = xmm11[0],zero,zero,zero,xmm11[1],zero,zero,zero,xmm11[2],zero,zero,zero,xmm11[3],zero,zero,zero
-; AVX512-NEXT: vinserti64x4 $1, %ymm11, %zmm24, %zmm11
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm11 = zmm25 ^ (zmm13 & (zmm11 ^ zmm25))
-; AVX512-NEXT: vpshufd $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm24 # 32-byte Folded Reload
-; AVX512-NEXT: # ymm24 = mem[0,0,2,1,4,4,6,5]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm25 = ymm29[0,2,2,3,4,6,6,7]
-; AVX512-NEXT: vinserti64x4 $1, %ymm25, %zmm24, %zmm24
-; AVX512-NEXT: vpshufd {{.*#+}} ymm25 = ymm31[0,0,2,1,4,4,6,5]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm16 = ymm27[0,2,2,3,4,6,6,7]
-; AVX512-NEXT: vinserti64x4 $1, %ymm16, %zmm25, %zmm16
-; AVX512-NEXT: vpandnq %zmm24, %zmm8, %zmm24
-; AVX512-NEXT: vpandq %zmm8, %zmm16, %zmm16
-; AVX512-NEXT: vpord %zmm24, %zmm16, %zmm11 {%k1}
-; AVX512-NEXT: vpshufd {{.*#+}} ymm16 = ymm18[0,1,1,3,4,5,5,7]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm17 = ymm17[2,1,3,3,6,5,7,7]
-; AVX512-NEXT: vinserti64x4 $1, %ymm17, %zmm16, %zmm16
-; AVX512-NEXT: vpmovzxwq {{.*#+}} ymm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
-; AVX512-NEXT: vinserti64x4 $1, %ymm7, %zmm26, %zmm7
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm7 = zmm16 ^ (zmm13 & (zmm7 ^ zmm16))
-; AVX512-NEXT: vpshufd {{.*#+}} ymm16 = ymm21[0,0,2,1,4,4,6,5]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm17 = ymm22[0,2,2,3,4,6,6,7]
-; AVX512-NEXT: vinserti64x4 $1, %ymm17, %zmm16, %zmm16
-; AVX512-NEXT: vpshufd {{.*#+}} ymm2 = ymm30[0,0,2,1,4,4,6,5]
+; AVX512-NEXT: vpandnq %zmm14, %zmm7, %zmm14
+; AVX512-NEXT: vpandq %zmm7, %zmm25, %zmm25
+; AVX512-NEXT: vpord %zmm14, %zmm25, %zmm0 {%k1}
+; AVX512-NEXT: vpshufd $212, (%rsp), %ymm14 # 32-byte Folded Reload
+; AVX512-NEXT: # ymm14 = mem[0,1,1,3,4,5,5,7]
+; AVX512-NEXT: vpshufd $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm25 # 32-byte Folded Reload
+; AVX512-NEXT: # ymm25 = mem[2,1,3,3,6,5,7,7]
+; AVX512-NEXT: vinserti64x4 $1, %ymm25, %zmm14, %zmm14
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm8 = zmm14 ^ (zmm11 & (zmm8 ^ zmm14))
+; AVX512-NEXT: vpshufd {{.*#+}} ymm14 = ymm30[0,0,2,1,4,4,6,5]
+; AVX512-NEXT: vpshufd {{.*#+}} ymm25 = ymm31[0,2,2,3,4,6,6,7]
+; AVX512-NEXT: vinserti64x4 $1, %ymm25, %zmm14, %zmm14
+; AVX512-NEXT: vpshufd {{.*#+}} ymm21 = ymm27[0,0,2,1,4,4,6,5]
+; AVX512-NEXT: vpshufd {{.*#+}} ymm17 = ymm17[0,2,2,3,4,6,6,7]
+; AVX512-NEXT: vinserti64x4 $1, %ymm17, %zmm21, %zmm17
+; AVX512-NEXT: vpandnq %zmm14, %zmm7, %zmm14
+; AVX512-NEXT: vpandq %zmm7, %zmm17, %zmm17
+; AVX512-NEXT: vpord %zmm14, %zmm17, %zmm8 {%k1}
+; AVX512-NEXT: vpshufd {{.*#+}} ymm14 = ymm19[0,1,1,3,4,5,5,7]
+; AVX512-NEXT: vpshufd {{.*#+}} ymm17 = ymm20[2,1,3,3,6,5,7,7]
+; AVX512-NEXT: vinserti64x4 $1, %ymm17, %zmm14, %zmm14
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm6 = zmm14 ^ (zmm11 & (zmm6 ^ zmm14))
+; AVX512-NEXT: vpshufd {{.*#+}} ymm14 = ymm23[0,0,2,1,4,4,6,5]
+; AVX512-NEXT: vpshufd {{.*#+}} ymm9 = ymm9[0,2,2,3,4,6,6,7]
+; AVX512-NEXT: vinserti64x4 $1, %ymm9, %zmm14, %zmm9
+; AVX512-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[0,0,2,1,4,4,6,5]
 ; AVX512-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[0,2,2,3,4,6,6,7]
 ; AVX512-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm2
-; AVX512-NEXT: vpandnq %zmm16, %zmm8, %zmm3
-; AVX512-NEXT: vpandq %zmm8, %zmm2, %zmm2
-; AVX512-NEXT: vpord %zmm3, %zmm2, %zmm7 {%k1}
-; AVX512-NEXT: vpshufd {{.*#+}} ymm2 = ymm14[0,1,1,3,4,5,5,7]
+; AVX512-NEXT: vpandnq %zmm9, %zmm7, %zmm3
+; AVX512-NEXT: vpandq %zmm7, %zmm2, %zmm2
+; AVX512-NEXT: vpord %zmm3, %zmm2, %zmm6 {%k1}
+; AVX512-NEXT: vpshufd {{.*#+}} ymm2 = ymm13[0,1,1,3,4,5,5,7]
 ; AVX512-NEXT: vpshufd {{.*#+}} ymm3 = ymm15[2,1,3,3,6,5,7,7]
 ; AVX512-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm2
-; AVX512-NEXT: vpmovzxwq {{.*#+}} ymm3 = xmm12[0],zero,zero,zero,xmm12[1],zero,zero,zero,xmm12[2],zero,zero,zero,xmm12[3],zero,zero,zero
-; AVX512-NEXT: vinserti64x4 $1, %ymm3, %zmm23, %zmm3
-; AVX512-NEXT: vpternlogq {{.*#+}} zmm3 = zmm2 ^ (zmm13 & (zmm3 ^ zmm2))
-; AVX512-NEXT: vpshufd {{.*#+}} ymm2 = ymm19[0,0,2,1,4,4,6,5]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm12 = ymm20[0,2,2,3,4,6,6,7]
-; AVX512-NEXT: vinserti64x4 $1, %ymm12, %zmm2, %zmm2
-; AVX512-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,0,2,1,4,4,6,5]
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm12 = zmm2 ^ (zmm11 & (zmm12 ^ zmm2))
+; AVX512-NEXT: vpshufd {{.*#+}} ymm2 = ymm16[0,0,2,1,4,4,6,5]
+; AVX512-NEXT: vpshufd {{.*#+}} ymm3 = ymm18[0,2,2,3,4,6,6,7]
+; AVX512-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm2
+; AVX512-NEXT: vpshufd {{.*#+}} ymm3 = ymm5[0,0,2,1,4,4,6,5]
 ; AVX512-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
-; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; AVX512-NEXT: vpandnq %zmm2, %zmm8, %zmm1
-; AVX512-NEXT: vpandq %zmm8, %zmm0, %zmm0
-; AVX512-NEXT: vpord %zmm1, %zmm0, %zmm3 {%k1}
+; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm3, %zmm1
+; AVX512-NEXT: vpandnq %zmm2, %zmm7, %zmm2
+; AVX512-NEXT: vpandq %zmm7, %zmm1, %zmm1
+; AVX512-NEXT: vpord %zmm2, %zmm1, %zmm12 {%k1}
 ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512-NEXT: vmovdqa64 %zmm3, (%rax)
-; AVX512-NEXT: vmovdqa64 %zmm7, 192(%rax)
-; AVX512-NEXT: vmovdqa64 %zmm11, 128(%rax)
-; AVX512-NEXT: vmovdqa64 %zmm5, 320(%rax)
+; AVX512-NEXT: vmovdqa64 %zmm12, (%rax)
+; AVX512-NEXT: vmovdqa64 %zmm6, 192(%rax)
+; AVX512-NEXT: vmovdqa64 %zmm8, 128(%rax)
+; AVX512-NEXT: vmovdqa64 %zmm0, 320(%rax)
 ; AVX512-NEXT: vmovdqa64 %zmm4, 256(%rax)
 ; AVX512-NEXT: vmovdqa64 %zmm10, 448(%rax)
-; AVX512-NEXT: vmovdqa64 %zmm9, 384(%rax)
-; AVX512-NEXT: vmovdqa64 %zmm6, 64(%rax)
+; AVX512-NEXT: vmovdqa64 %zmm24, 384(%rax)
+; AVX512-NEXT: vmovdqa64 %zmm22, 64(%rax)
 ; AVX512-NEXT: addq $552, %rsp # imm = 0x228
 ; AVX512-NEXT: vzeroupper
 ; AVX512-NEXT: retq
@@ -7433,7 +7405,7 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512DQ-NEXT: movq {{[0-9]+}}(%rsp), %r10
 ; AVX512DQ-NEXT: vmovdqa (%rcx), %xmm3
 ; AVX512DQ-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-NEXT: vmovdqa 32(%rcx), %xmm11
+; AVX512DQ-NEXT: vmovdqa 32(%rcx), %xmm12
 ; AVX512DQ-NEXT: vmovdqa 48(%rcx), %xmm0
 ; AVX512DQ-NEXT: vmovdqa (%rdx), %xmm2
 ; AVX512DQ-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -7441,11 +7413,11 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15]
 ; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm3 = xmm2[0,0,2,1,4,5,6,7]
 ; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm4 = xmm2[0,2,2,3,4,5,6,7]
-; AVX512DQ-NEXT: vinserti128 $1, %xmm4, %ymm3, %ymm3
-; AVX512DQ-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-NEXT: vinserti128 $1, %xmm4, %ymm3, %ymm10
 ; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm3 = xmm2[0,1,2,3,4,4,6,5]
 ; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
-; AVX512DQ-NEXT: vinserti128 $1, %xmm2, %ymm3, %ymm6
+; AVX512DQ-NEXT: vinserti128 $1, %xmm2, %ymm3, %ymm2
+; AVX512DQ-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-NEXT: vmovdqa (%r10), %xmm5
 ; AVX512DQ-NEXT: vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512DQ-NEXT: vmovdqa 48(%r10), %xmm3
@@ -7454,8 +7426,8 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512DQ-NEXT: vmovdqa 48(%rax), %xmm4
 ; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm2[8],xmm5[8],xmm2[9],xmm5[9],xmm2[10],xmm5[10],xmm2[11],xmm5[11],xmm2[12],xmm5[12],xmm2[13],xmm5[13],xmm2[14],xmm5[14],xmm2[15],xmm5[15]
 ; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm5 = xmm2[0,0,2,1,4,5,6,7]
-; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm7 = xmm2[0,2,2,3,4,5,6,7]
-; AVX512DQ-NEXT: vinserti128 $1, %xmm7, %ymm5, %ymm5
+; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm6 = xmm2[0,2,2,3,4,5,6,7]
+; AVX512DQ-NEXT: vinserti128 $1, %xmm6, %ymm5, %ymm5
 ; AVX512DQ-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm5 = xmm2[0,1,2,3,4,4,6,5]
 ; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
@@ -7463,18 +7435,19 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512DQ-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-NEXT: vmovdqa (%r9), %xmm5
 ; AVX512DQ-NEXT: vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-NEXT: vmovdqa 48(%r9), %xmm7
+; AVX512DQ-NEXT: vmovdqa 48(%r9), %xmm6
 ; AVX512DQ-NEXT: vmovdqa (%r8), %xmm2
 ; AVX512DQ-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-NEXT: vmovdqa 48(%r8), %xmm12
+; AVX512DQ-NEXT: vmovdqa 48(%r8), %xmm8
 ; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm2[8],xmm5[8],xmm2[9],xmm5[9],xmm2[10],xmm5[10],xmm2[11],xmm5[11],xmm2[12],xmm5[12],xmm2[13],xmm5[13],xmm2[14],xmm5[14],xmm2[15],xmm5[15]
 ; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm5 = xmm2[0,1,1,3,4,5,6,7]
-; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm8 = xmm2[2,1,3,3,4,5,6,7]
-; AVX512DQ-NEXT: vinserti128 $1, %xmm8, %ymm5, %ymm5
+; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm7 = xmm2[2,1,3,3,4,5,6,7]
+; AVX512DQ-NEXT: vinserti128 $1, %xmm7, %ymm5, %ymm5
 ; AVX512DQ-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm5 = xmm2[0,1,2,3,4,5,5,7]
 ; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,5,7,7]
-; AVX512DQ-NEXT: vinserti128 $1, %xmm2, %ymm5, %ymm8
+; AVX512DQ-NEXT: vinserti128 $1, %xmm2, %ymm5, %ymm2
+; AVX512DQ-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
 ; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm5 = xmm2[0,0,2,1,4,5,6,7]
 ; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm9 = xmm2[0,2,2,3,4,5,6,7]
@@ -7482,80 +7455,80 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512DQ-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm5 = xmm2[0,1,2,3,4,4,6,5]
 ; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
-; AVX512DQ-NEXT: vinserti128 $1, %xmm2, %ymm5, %ymm9
+; AVX512DQ-NEXT: vinserti128 $1, %xmm2, %ymm5, %ymm2
+; AVX512DQ-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
 ; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm5 = xmm2[0,0,2,1,4,5,6,7]
-; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm10 = xmm2[0,2,2,3,4,5,6,7]
-; AVX512DQ-NEXT: vinserti128 $1, %xmm10, %ymm5, %ymm5
+; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm9 = xmm2[0,2,2,3,4,5,6,7]
+; AVX512DQ-NEXT: vinserti128 $1, %xmm9, %ymm5, %ymm5
 ; AVX512DQ-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm5 = xmm2[0,1,2,3,4,4,6,5]
 ; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
 ; AVX512DQ-NEXT: vinserti128 $1, %xmm2, %ymm5, %ymm2
 ; AVX512DQ-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm12[0],xmm7[0],xmm12[1],xmm7[1],xmm12[2],xmm7[2],xmm12[3],xmm7[3],xmm12[4],xmm7[4],xmm12[5],xmm7[5],xmm12[6],xmm7[6],xmm12[7],xmm7[7]
+; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm8[0],xmm6[0],xmm8[1],xmm6[1],xmm8[2],xmm6[2],xmm8[3],xmm6[3],xmm8[4],xmm6[4],xmm8[5],xmm6[5],xmm8[6],xmm6[6],xmm8[7],xmm6[7]
 ; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm5 = xmm2[0,1,1,3,4,5,6,7]
-; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm13 = xmm2[2,1,3,3,4,5,6,7]
-; AVX512DQ-NEXT: vinserti128 $1, %xmm13, %ymm5, %ymm5
+; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm9 = xmm2[2,1,3,3,4,5,6,7]
+; AVX512DQ-NEXT: vinserti128 $1, %xmm9, %ymm5, %ymm5
 ; AVX512DQ-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-NEXT: vmovdqa 32(%rdx), %xmm5
-; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm13 = xmm2[0,1,2,3,4,5,5,7]
+; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm9 = xmm2[0,1,2,3,4,5,5,7]
 ; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,5,7,7]
-; AVX512DQ-NEXT: vinserti128 $1, %xmm2, %ymm13, %ymm2
-; AVX512DQ-NEXT: vmovdqu %ymm2, (%rsp) # 32-byte Spill
+; AVX512DQ-NEXT: vinserti32x4 $1, %xmm2, %ymm9, %ymm26
 ; AVX512DQ-NEXT: vmovdqa 32(%r10), %xmm2
 ; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
 ; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm0 = xmm1[0,0,2,1,4,5,6,7]
-; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm13 = xmm1[0,2,2,3,4,5,6,7]
-; AVX512DQ-NEXT: vinserti128 $1, %xmm13, %ymm0, %ymm10
+; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm9 = xmm1[0,2,2,3,4,5,6,7]
+; AVX512DQ-NEXT: vinserti32x4 $1, %xmm9, %ymm0, %ymm28
 ; AVX512DQ-NEXT: vmovdqa 32(%rax), %xmm0
-; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm13 = xmm1[0,1,2,3,4,4,6,5]
+; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm9 = xmm1[0,1,2,3,4,4,6,5]
 ; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
-; AVX512DQ-NEXT: vinserti32x4 $1, %xmm1, %ymm13, %ymm28
+; AVX512DQ-NEXT: vinserti32x4 $1, %xmm1, %ymm9, %ymm29
 ; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm4[8],xmm3[8],xmm4[9],xmm3[9],xmm4[10],xmm3[10],xmm4[11],xmm3[11],xmm4[12],xmm3[12],xmm4[13],xmm3[13],xmm4[14],xmm3[14],xmm4[15],xmm3[15]
 ; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm3 = xmm1[0,0,2,1,4,5,6,7]
 ; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm4 = xmm1[0,2,2,3,4,5,6,7]
-; AVX512DQ-NEXT: vinserti32x4 $1, %xmm4, %ymm3, %ymm30
+; AVX512DQ-NEXT: vinserti32x4 $1, %xmm4, %ymm3, %ymm25
 ; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm3 = xmm1[0,1,2,3,4,4,6,5]
 ; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
-; AVX512DQ-NEXT: vinserti32x4 $1, %xmm1, %ymm3, %ymm27
-; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm12[8],xmm7[8],xmm12[9],xmm7[9],xmm12[10],xmm7[10],xmm12[11],xmm7[11],xmm12[12],xmm7[12],xmm12[13],xmm7[13],xmm12[14],xmm7[14],xmm12[15],xmm7[15]
+; AVX512DQ-NEXT: vinserti32x4 $1, %xmm1, %ymm3, %ymm23
+; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm8[8],xmm6[8],xmm8[9],xmm6[9],xmm8[10],xmm6[10],xmm8[11],xmm6[11],xmm8[12],xmm6[12],xmm8[13],xmm6[13],xmm8[14],xmm6[14],xmm8[15],xmm6[15]
 ; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm3 = xmm1[0,1,1,3,4,5,6,7]
 ; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm4 = xmm1[2,1,3,3,4,5,6,7]
-; AVX512DQ-NEXT: vinserti32x4 $1, %xmm4, %ymm3, %ymm26
+; AVX512DQ-NEXT: vinserti32x4 $1, %xmm4, %ymm3, %ymm19
 ; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm3 = xmm1[0,1,2,3,4,5,5,7]
 ; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,7,7]
-; AVX512DQ-NEXT: vinserti32x4 $1, %xmm1, %ymm3, %ymm22
-; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm5[0],xmm11[0],xmm5[1],xmm11[1],xmm5[2],xmm11[2],xmm5[3],xmm11[3],xmm5[4],xmm11[4],xmm5[5],xmm11[5],xmm5[6],xmm11[6],xmm5[7],xmm11[7]
+; AVX512DQ-NEXT: vinserti32x4 $1, %xmm1, %ymm3, %ymm17
+; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm5[0],xmm12[0],xmm5[1],xmm12[1],xmm5[2],xmm12[2],xmm5[3],xmm12[3],xmm5[4],xmm12[4],xmm5[5],xmm12[5],xmm5[6],xmm12[6],xmm5[7],xmm12[7]
 ; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm3 = xmm1[0,0,2,1,4,5,6,7]
 ; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm4 = xmm1[0,2,2,3,4,5,6,7]
-; AVX512DQ-NEXT: vinserti32x4 $1, %xmm4, %ymm3, %ymm20
+; AVX512DQ-NEXT: vinserti32x4 $1, %xmm4, %ymm3, %ymm16
 ; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm3 = xmm1[0,1,2,3,4,4,6,5]
 ; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
-; AVX512DQ-NEXT: vinserti32x4 $1, %xmm1, %ymm3, %ymm18
+; AVX512DQ-NEXT: vinserti128 $1, %xmm1, %ymm3, %ymm14
 ; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm1 =
xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] ; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm3 = xmm1[0,0,2,1,4,5,6,7] -; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm7 = xmm1[0,2,2,3,4,5,6,7] -; AVX512DQ-NEXT: vinserti32x4 $1, %xmm7, %ymm3, %ymm25 +; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm6 = xmm1[0,2,2,3,4,5,6,7] +; AVX512DQ-NEXT: vinserti32x4 $1, %xmm6, %ymm3, %ymm20 ; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm3 = xmm1[0,1,2,3,4,4,6,5] ; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7] -; AVX512DQ-NEXT: vinserti32x4 $1, %xmm1, %ymm3, %ymm21 +; AVX512DQ-NEXT: vinserti32x4 $1, %xmm1, %ymm3, %ymm18 ; AVX512DQ-NEXT: vmovdqa 32(%r9), %xmm1 ; AVX512DQ-NEXT: vmovdqa 32(%r8), %xmm3 -; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7] -; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm12 = xmm7[0,1,1,3,4,5,6,7] -; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm13 = xmm7[2,1,3,3,4,5,6,7] -; AVX512DQ-NEXT: vinserti32x4 $1, %xmm13, %ymm12, %ymm19 -; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm12 = xmm7[0,1,2,3,4,5,5,7] -; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,6,5,7,7] -; AVX512DQ-NEXT: vinserti32x4 $1, %xmm7, %ymm12, %ymm17 -; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm5[8],xmm11[8],xmm5[9],xmm11[9],xmm5[10],xmm11[10],xmm5[11],xmm11[11],xmm5[12],xmm11[12],xmm5[13],xmm11[13],xmm5[14],xmm11[14],xmm5[15],xmm11[15] -; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm7 = xmm5[0,0,2,1,4,5,6,7] -; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm11 = xmm5[0,2,2,3,4,5,6,7] -; AVX512DQ-NEXT: vinserti128 $1, %xmm11, %ymm7, %ymm4 +; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7] +; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm8 = xmm6[0,1,1,3,4,5,6,7] +; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm9 = xmm6[2,1,3,3,4,5,6,7] +; AVX512DQ-NEXT: vinserti128 $1, %xmm9, %ymm8, %ymm15 +; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm8 = xmm6[0,1,2,3,4,5,5,7] +; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,6,5,7,7] +; AVX512DQ-NEXT: vinserti128 $1, %xmm6, %ymm8, %ymm13 +; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm5[8],xmm12[8],xmm5[9],xmm12[9],xmm5[10],xmm12[10],xmm5[11],xmm12[11],xmm5[12],xmm12[12],xmm5[13],xmm12[13],xmm5[14],xmm12[14],xmm5[15],xmm12[15] +; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm6 = xmm5[0,0,2,1,4,5,6,7] +; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm8 = xmm5[0,2,2,3,4,5,6,7] +; AVX512DQ-NEXT: vinserti128 $1, %xmm8, %ymm6, %ymm4 ; AVX512DQ-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm7 = xmm5[0,1,2,3,4,4,6,5] +; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm6 = xmm5[0,1,2,3,4,4,6,5] ; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,6,6,7] -; AVX512DQ-NEXT: vinserti128 $1, %xmm5, %ymm7, %ymm4 +; AVX512DQ-NEXT: vinserti128 $1, %xmm5, %ymm6, %ymm4 ; AVX512DQ-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill ; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15] ; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm2 = xmm0[0,0,2,1,4,5,6,7] @@ -7575,273 +7548,244 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec ; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,7,7] ; AVX512DQ-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 ; AVX512DQ-NEXT: vmovdqu 
%ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX512DQ-NEXT: vmovdqa 16(%rcx), %xmm11 -; AVX512DQ-NEXT: vmovdqa 16(%rdx), %xmm7 -; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm7[0],xmm11[0],xmm7[1],xmm11[1],xmm7[2],xmm11[2],xmm7[3],xmm11[3],xmm7[4],xmm11[4],xmm7[5],xmm11[5],xmm7[6],xmm11[6],xmm7[7],xmm11[7] +; AVX512DQ-NEXT: vmovdqa 16(%rcx), %xmm4 +; AVX512DQ-NEXT: vmovdqa 16(%rdx), %xmm3 +; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7] ; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[0,0,2,1,4,5,6,7] ; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm5 = xmm0[0,2,2,3,4,5,6,7] ; AVX512DQ-NEXT: vinserti128 $1, %xmm5, %ymm1, %ymm1 -; AVX512DQ-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill +; AVX512DQ-NEXT: vmovdqu %ymm1, (%rsp) # 32-byte Spill ; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,4,4,6,5] ; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7] ; AVX512DQ-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 ; AVX512DQ-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX512DQ-NEXT: vmovdqa 16(%r10), %xmm1 -; AVX512DQ-NEXT: vmovdqa 16(%rax), %xmm15 -; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm15[0],xmm1[0],xmm15[1],xmm1[1],xmm15[2],xmm1[2],xmm15[3],xmm1[3],xmm15[4],xmm1[4],xmm15[5],xmm1[5],xmm15[6],xmm1[6],xmm15[7],xmm1[7] -; AVX512DQ-NEXT: vmovdqa64 %xmm1, %xmm16 +; AVX512DQ-NEXT: vmovdqa 16(%r10), %xmm9 +; AVX512DQ-NEXT: vmovdqa 16(%rax), %xmm8 +; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm8[0],xmm9[0],xmm8[1],xmm9[1],xmm8[2],xmm9[2],xmm8[3],xmm9[3],xmm8[4],xmm9[4],xmm8[5],xmm9[5],xmm8[6],xmm9[6],xmm8[7],xmm9[7] ; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[0,0,2,1,4,5,6,7] ; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm5 = xmm0[0,2,2,3,4,5,6,7] -; AVX512DQ-NEXT: vinserti128 $1, %xmm5, %ymm1, %ymm1 -; AVX512DQ-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill +; AVX512DQ-NEXT: vinserti32x4 $1, %xmm5, %ymm1, %ymm30 ; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,4,4,6,5] ; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7] -; AVX512DQ-NEXT: vinserti32x4 $1, %xmm0, %ymm1, %ymm29 -; AVX512DQ-NEXT: vmovdqa 16(%r9), %xmm14 -; AVX512DQ-NEXT: vmovdqa 16(%r8), %xmm12 -; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm12[0],xmm14[0],xmm12[1],xmm14[1],xmm12[2],xmm14[2],xmm12[3],xmm14[3],xmm12[4],xmm14[4],xmm12[5],xmm14[5],xmm12[6],xmm14[6],xmm12[7],xmm14[7] -; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm1 = xmm4[0,1,1,3,4,5,6,7] -; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm13 = xmm4[2,1,3,3,4,5,6,7] -; AVX512DQ-NEXT: vinserti32x4 $1, %xmm13, %ymm1, %ymm31 -; AVX512DQ-NEXT: vmovdqa (%rsi), %xmm0 -; AVX512DQ-NEXT: vmovdqa (%rdi), %xmm2 -; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15] -; AVX512DQ-NEXT: vmovdqa64 %xmm2, %xmm23 -; AVX512DQ-NEXT: vmovdqa64 %xmm0, %xmm24 -; AVX512DQ-NEXT: vpmovzxwq {{.*#+}} ymm13 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero -; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[2,3,2,3] +; AVX512DQ-NEXT: vinserti32x4 $1, %xmm0, %ymm1, %ymm31 +; AVX512DQ-NEXT: vmovdqa 16(%r9), %xmm6 +; AVX512DQ-NEXT: vmovdqa 16(%r8), %xmm5 +; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3],xmm5[4],xmm6[4],xmm5[5],xmm6[5],xmm5[6],xmm6[6],xmm5[7],xmm6[7] +; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm1 = 
xmm2[0,1,1,3,4,5,6,7] +; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm11 = xmm2[2,1,3,3,4,5,6,7] +; AVX512DQ-NEXT: vinserti32x4 $1, %xmm11, %ymm1, %ymm27 ; AVX512DQ-NEXT: vmovdqa 48(%rsi), %xmm1 -; AVX512DQ-NEXT: vmovdqa 48(%rdi), %xmm0 -; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] -; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15] -; AVX512DQ-NEXT: vpmovzxwq {{.*#+}} ymm5 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero -; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,2,3] -; AVX512DQ-NEXT: vpshufd $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload -; AVX512DQ-NEXT: # ymm0 = mem[0,1,1,3,4,5,5,7] -; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm6 = ymm6[2,1,3,3,6,5,7,7] -; AVX512DQ-NEXT: vinserti64x4 $1, %ymm6, %zmm0, %zmm0 -; AVX512DQ-NEXT: vpmovzxwq {{.*#+}} ymm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero -; AVX512DQ-NEXT: vinserti64x4 $1, %ymm3, %zmm13, %zmm6 -; AVX512DQ-NEXT: vpbroadcastq {{.*#+}} zmm13 = [65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535] -; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm6 = zmm0 ^ (zmm13 & (zmm6 ^ zmm0)) +; AVX512DQ-NEXT: vmovdqa 48(%rdi), %xmm11 +; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm11[0],xmm1[0],xmm11[1],xmm1[1],xmm11[2],xmm1[2],xmm11[3],xmm1[3],xmm11[4],xmm1[4],xmm11[5],xmm1[5],xmm11[6],xmm1[6],xmm11[7],xmm1[7] +; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm11[8],xmm1[8],xmm11[9],xmm1[9],xmm11[10],xmm1[10],xmm11[11],xmm1[11],xmm11[12],xmm1[12],xmm11[13],xmm1[13],xmm11[14],xmm1[14],xmm11[15],xmm1[15] +; AVX512DQ-NEXT: vmovdqa (%rsi), %xmm7 +; AVX512DQ-NEXT: vmovdqa (%rdi), %xmm12 +; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm11 = xmm12[8],xmm7[8],xmm12[9],xmm7[9],xmm12[10],xmm7[10],xmm12[11],xmm7[11],xmm12[12],xmm7[12],xmm12[13],xmm7[13],xmm12[14],xmm7[14],xmm12[15],xmm7[15] +; AVX512DQ-NEXT: vmovdqa64 %xmm7, %xmm21 +; AVX512DQ-NEXT: vpmovzxwq {{.*#+}} zmm22 = xmm11[0],zero,zero,zero,xmm11[1],zero,zero,zero,xmm11[2],zero,zero,zero,xmm11[3],zero,zero,zero,xmm11[4],zero,zero,zero,xmm11[5],zero,zero,zero,xmm11[6],zero,zero,zero,xmm11[7],zero,zero,zero +; AVX512DQ-NEXT: vpmovzxwq {{.*#+}} zmm24 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero +; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm0 = ymm10[0,1,1,3,4,5,5,7] +; AVX512DQ-NEXT: vpshufd $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Folded Reload +; AVX512DQ-NEXT: # ymm11 = mem[2,1,3,3,6,5,7,7] +; AVX512DQ-NEXT: vinserti64x4 $1, %ymm11, %zmm0, %zmm0 +; AVX512DQ-NEXT: vpbroadcastq {{.*#+}} zmm11 = [65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535] +; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm22 = zmm0 ^ (zmm11 & (zmm22 ^ zmm0)) ; AVX512DQ-NEXT: vpshufd $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload ; AVX512DQ-NEXT: # ymm0 = mem[0,0,2,1,4,4,6,5] -; AVX512DQ-NEXT: vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload -; AVX512DQ-NEXT: # ymm3 = mem[0,2,2,3,4,6,6,7] -; AVX512DQ-NEXT: 
vinserti64x4 $1, %ymm3, %zmm0, %zmm0 -; AVX512DQ-NEXT: vpshufd $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload -; AVX512DQ-NEXT: # ymm3 = mem[0,0,2,1,4,4,6,5] -; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm8 = ymm8[0,2,2,3,4,6,6,7] -; AVX512DQ-NEXT: vinserti64x4 $1, %ymm8, %zmm3, %zmm3 -; AVX512DQ-NEXT: vpbroadcastq {{.*#+}} zmm8 = [65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0] -; AVX512DQ-NEXT: vpandnq %zmm0, %zmm8, %zmm0 -; AVX512DQ-NEXT: vpandq %zmm8, %zmm3, %zmm3 +; AVX512DQ-NEXT: vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Folded Reload +; AVX512DQ-NEXT: # ymm7 = mem[0,2,2,3,4,6,6,7] +; AVX512DQ-NEXT: vinserti64x4 $1, %ymm7, %zmm0, %zmm0 +; AVX512DQ-NEXT: vpshufd $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Folded Reload +; AVX512DQ-NEXT: # ymm7 = mem[0,0,2,1,4,4,6,5] +; AVX512DQ-NEXT: vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Folded Reload +; AVX512DQ-NEXT: # ymm10 = mem[0,2,2,3,4,6,6,7] +; AVX512DQ-NEXT: vinserti64x4 $1, %ymm10, %zmm7, %zmm10 +; AVX512DQ-NEXT: vpbroadcastq {{.*#+}} zmm7 = [65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0] +; AVX512DQ-NEXT: vpandnq %zmm0, %zmm7, %zmm0 +; AVX512DQ-NEXT: vpandq %zmm7, %zmm10, %zmm10 ; AVX512DQ-NEXT: movw $-21846, %ax # imm = 0xAAAA ; AVX512DQ-NEXT: kmovw %eax, %k1 -; AVX512DQ-NEXT: vpord %zmm0, %zmm3, %zmm6 {%k1} +; AVX512DQ-NEXT: vpord %zmm0, %zmm10, %zmm22 {%k1} ; AVX512DQ-NEXT: vpshufd $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload ; AVX512DQ-NEXT: # ymm0 = mem[0,1,1,3,4,5,5,7] -; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm3 = ymm9[2,1,3,3,6,5,7,7] -; AVX512DQ-NEXT: vinserti64x4 $1, %ymm3, %zmm0, %zmm0 -; AVX512DQ-NEXT: vpmovzxwq {{.*#+}} ymm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero -; AVX512DQ-NEXT: vinserti64x4 $1, %ymm2, %zmm5, %zmm9 -; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm9 = zmm0 ^ (zmm13 & (zmm9 ^ zmm0)) +; AVX512DQ-NEXT: vpshufd $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Folded Reload +; AVX512DQ-NEXT: # ymm10 = mem[2,1,3,3,6,5,7,7] +; AVX512DQ-NEXT: vinserti64x4 $1, %ymm10, %zmm0, %zmm0 +; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm24 = zmm0 ^ (zmm11 & (zmm24 ^ zmm0)) ; AVX512DQ-NEXT: vpshufd $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload ; AVX512DQ-NEXT: # ymm0 = mem[0,0,2,1,4,4,6,5] -; AVX512DQ-NEXT: vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload -; AVX512DQ-NEXT: # ymm2 = mem[0,2,2,3,4,6,6,7] -; AVX512DQ-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0 -; AVX512DQ-NEXT: vpshufd $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload -; AVX512DQ-NEXT: # ymm2 = mem[0,0,2,1,4,4,6,5] -; AVX512DQ-NEXT: vpshufd $232, (%rsp), %ymm3 # 32-byte Folded Reload -; AVX512DQ-NEXT: # ymm3 = mem[0,2,2,3,4,6,6,7] -; AVX512DQ-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm2 -; AVX512DQ-NEXT: vpandnq %zmm0, %zmm8, %zmm0 -; AVX512DQ-NEXT: vpandq %zmm8, %zmm2, %zmm2 -; AVX512DQ-NEXT: vpord %zmm0, %zmm2, %zmm9 {%k1} -; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm0 = ymm10[0,1,1,3,4,5,5,7] -; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm2 = ymm28[2,1,3,3,6,5,7,7] -; AVX512DQ-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0 -; AVX512DQ-NEXT: vpmovzxwq {{.*#+}} ymm2 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero -; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3] -; 
AVX512DQ-NEXT: vpmovzxwq {{.*#+}} ymm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero -; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm10 -; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm10 = zmm0 ^ (zmm13 & (zmm10 ^ zmm0)) -; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm0 = ymm30[0,0,2,1,4,4,6,5] -; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm1 = ymm27[0,2,2,3,4,6,6,7] +; AVX512DQ-NEXT: vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Folded Reload +; AVX512DQ-NEXT: # ymm10 = mem[0,2,2,3,4,6,6,7] +; AVX512DQ-NEXT: vinserti64x4 $1, %ymm10, %zmm0, %zmm0 +; AVX512DQ-NEXT: vpshufd $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Folded Reload +; AVX512DQ-NEXT: # ymm10 = mem[0,0,2,1,4,4,6,5] +; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm26 = ymm26[0,2,2,3,4,6,6,7] +; AVX512DQ-NEXT: vinserti64x4 $1, %ymm26, %zmm10, %zmm10 +; AVX512DQ-NEXT: vpandnq %zmm0, %zmm7, %zmm0 +; AVX512DQ-NEXT: vpandq %zmm7, %zmm10, %zmm10 +; AVX512DQ-NEXT: vpord %zmm0, %zmm10, %zmm24 {%k1} +; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm0 = ymm28[0,1,1,3,4,5,5,7] +; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm10 = ymm29[2,1,3,3,6,5,7,7] +; AVX512DQ-NEXT: vinserti64x4 $1, %ymm10, %zmm0, %zmm0 +; AVX512DQ-NEXT: vpmovzxwq {{.*#+}} zmm10 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero +; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm10 = zmm0 ^ (zmm11 & (zmm10 ^ zmm0)) +; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm0 = ymm25[0,0,2,1,4,4,6,5] +; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm1 = ymm23[0,2,2,3,4,6,6,7] ; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 -; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm1 = ymm26[0,0,2,1,4,4,6,5] -; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm2 = ymm22[0,2,2,3,4,6,6,7] -; AVX512DQ-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1 -; AVX512DQ-NEXT: vpandnq %zmm0, %zmm8, %zmm0 -; AVX512DQ-NEXT: vpandq %zmm8, %zmm1, %zmm1 +; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm1 = ymm19[0,0,2,1,4,4,6,5] +; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm17 = ymm17[0,2,2,3,4,6,6,7] +; AVX512DQ-NEXT: vinserti64x4 $1, %ymm17, %zmm1, %zmm1 +; AVX512DQ-NEXT: vpandnq %zmm0, %zmm7, %zmm0 +; AVX512DQ-NEXT: vpandq %zmm7, %zmm1, %zmm1 ; AVX512DQ-NEXT: vpord %zmm0, %zmm1, %zmm10 {%k1} -; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm0 = ymm20[0,1,1,3,4,5,5,7] -; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm1 = ymm18[2,1,3,3,6,5,7,7] -; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm2 -; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm0 = xmm4[0,1,2,3,4,5,5,7] -; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm1 = xmm4[0,1,2,3,6,5,7,7] -; AVX512DQ-NEXT: vinserti32x4 $1, %xmm1, %ymm0, %ymm27 -; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm7[8],xmm11[8],xmm7[9],xmm11[9],xmm7[10],xmm11[10],xmm7[11],xmm11[11],xmm7[12],xmm11[12],xmm7[13],xmm11[13],xmm7[14],xmm11[14],xmm7[15],xmm11[15] -; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm0 = xmm3[0,0,2,1,4,5,6,7] -; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm1 = xmm3[0,2,2,3,4,5,6,7] -; AVX512DQ-NEXT: vinserti32x4 $1, %xmm1, %ymm0, %ymm18 -; AVX512DQ-NEXT: vmovdqa 32(%rsi), %xmm1 +; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm0 = ymm16[0,1,1,3,4,5,5,7] +; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm1 = ymm14[2,1,3,3,6,5,7,7] +; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm1 +; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm0 = xmm2[0,1,2,3,4,5,5,7] +; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,5,7,7] +; AVX512DQ-NEXT: vinserti32x4 $1, %xmm2, %ymm0, %ymm17 +; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm2 = 
xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15] +; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm0 = xmm2[0,0,2,1,4,5,6,7] +; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm3 = xmm2[0,2,2,3,4,5,6,7] +; AVX512DQ-NEXT: vinserti32x4 $1, %xmm3, %ymm0, %ymm19 +; AVX512DQ-NEXT: vmovdqa 32(%rsi), %xmm14 ; AVX512DQ-NEXT: vmovdqa 32(%rdi), %xmm0 -; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] -; AVX512DQ-NEXT: vpmovzxwq {{.*#+}} ymm5 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero,xmm4[2],zero,zero,zero,xmm4[3],zero,zero,zero -; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[2,3,2,3] -; AVX512DQ-NEXT: vpmovzxwq {{.*#+}} ymm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero,xmm4[2],zero,zero,zero,xmm4[3],zero,zero,zero -; AVX512DQ-NEXT: vinserti64x4 $1, %ymm4, %zmm5, %zmm4 -; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm4 = zmm2 ^ (zmm13 & (zmm4 ^ zmm2)) -; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm2 = ymm25[0,0,2,1,4,4,6,5] -; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm5 = ymm21[0,2,2,3,4,6,6,7] -; AVX512DQ-NEXT: vinserti64x4 $1, %ymm5, %zmm2, %zmm2 -; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm5 = ymm19[0,0,2,1,4,4,6,5] -; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm7 = ymm17[0,2,2,3,4,6,6,7] -; AVX512DQ-NEXT: vinserti64x4 $1, %ymm7, %zmm5, %zmm5 -; AVX512DQ-NEXT: vpandnq %zmm2, %zmm8, %zmm2 -; AVX512DQ-NEXT: vpandq %zmm8, %zmm5, %zmm5 -; AVX512DQ-NEXT: vpord %zmm2, %zmm5, %zmm4 {%k1} -; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm2 = xmm3[0,1,2,3,4,4,6,5] -; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,6,7] -; AVX512DQ-NEXT: vinserti32x4 $1, %xmm3, %ymm2, %ymm17 -; AVX512DQ-NEXT: vmovdqa64 %xmm16, %xmm2 -; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm15[8],xmm2[8],xmm15[9],xmm2[9],xmm15[10],xmm2[10],xmm15[11],xmm2[11],xmm15[12],xmm2[12],xmm15[13],xmm2[13],xmm15[14],xmm2[14],xmm15[15],xmm2[15] +; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3],xmm0[4],xmm14[4],xmm0[5],xmm14[5],xmm0[6],xmm14[6],xmm0[7],xmm14[7] +; AVX512DQ-NEXT: vpmovzxwq {{.*#+}} zmm4 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero,xmm3[4],zero,zero,zero,xmm3[5],zero,zero,zero,xmm3[6],zero,zero,zero,xmm3[7],zero,zero,zero +; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm4 = zmm1 ^ (zmm11 & (zmm4 ^ zmm1)) +; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm1 = ymm20[0,0,2,1,4,4,6,5] +; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm3 = ymm18[0,2,2,3,4,6,6,7] +; AVX512DQ-NEXT: vinserti64x4 $1, %ymm3, %zmm1, %zmm1 +; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm3 = ymm15[0,0,2,1,4,4,6,5] +; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm13 = ymm13[0,2,2,3,4,6,6,7] +; AVX512DQ-NEXT: vinserti64x4 $1, %ymm13, %zmm3, %zmm3 +; AVX512DQ-NEXT: vpandnq %zmm1, %zmm7, %zmm1 +; AVX512DQ-NEXT: vpandq %zmm7, %zmm3, %zmm3 +; AVX512DQ-NEXT: vpord %zmm1, %zmm3, %zmm4 {%k1} +; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm1 = xmm2[0,1,2,3,4,4,6,5] +; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7] +; AVX512DQ-NEXT: vinserti32x4 $1, %xmm2, %ymm1, %ymm20 +; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm8[8],xmm9[8],xmm8[9],xmm9[9],xmm8[10],xmm9[10],xmm8[11],xmm9[11],xmm8[12],xmm9[12],xmm8[13],xmm9[13],xmm8[14],xmm9[14],xmm8[15],xmm9[15] ; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm3 = xmm2[0,0,2,1,4,5,6,7] -; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm5 = xmm2[0,2,2,3,4,5,6,7] -; AVX512DQ-NEXT: vinserti32x4 $1, %xmm5, %ymm3, %ymm21 +; AVX512DQ-NEXT: vpshuflw 
{{.*#+}} xmm8 = xmm2[0,2,2,3,4,5,6,7] +; AVX512DQ-NEXT: vinserti32x4 $1, %xmm8, %ymm3, %ymm23 ; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm3 = xmm2[0,1,2,3,4,4,6,5] ; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7] -; AVX512DQ-NEXT: vinserti32x4 $1, %xmm2, %ymm3, %ymm22 -; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm12[8],xmm14[8],xmm12[9],xmm14[9],xmm12[10],xmm14[10],xmm12[11],xmm14[11],xmm12[12],xmm14[12],xmm12[13],xmm14[13],xmm12[14],xmm14[14],xmm12[15],xmm14[15] +; AVX512DQ-NEXT: vinserti128 $1, %xmm2, %ymm3, %ymm9 +; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm5[8],xmm6[8],xmm5[9],xmm6[9],xmm5[10],xmm6[10],xmm5[11],xmm6[11],xmm5[12],xmm6[12],xmm5[13],xmm6[13],xmm5[14],xmm6[14],xmm5[15],xmm6[15] ; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm2 = xmm3[0,1,1,3,4,5,6,7] ; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm5 = xmm3[2,1,3,3,4,5,6,7] -; AVX512DQ-NEXT: vinserti32x4 $1, %xmm5, %ymm2, %ymm30 +; AVX512DQ-NEXT: vinserti128 $1, %xmm5, %ymm2, %ymm2 ; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm5 = xmm3[0,1,2,3,4,5,5,7] ; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,5,7,7] ; AVX512DQ-NEXT: vinserti128 $1, %xmm3, %ymm5, %ymm3 -; AVX512DQ-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload -; AVX512DQ-NEXT: vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm5 # 16-byte Folded Reload -; AVX512DQ-NEXT: # xmm5 = xmm5[0],mem[0],xmm5[1],mem[1],xmm5[2],mem[2],xmm5[3],mem[3],xmm5[4],mem[4],xmm5[5],mem[5],xmm5[6],mem[6],xmm5[7],mem[7] -; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm12 = xmm5[0,0,2,1,4,5,6,7] -; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm14 = xmm5[0,2,2,3,4,5,6,7] -; AVX512DQ-NEXT: vinserti128 $1, %xmm14, %ymm12, %ymm14 -; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm12 = xmm5[0,1,2,3,4,4,6,5] +; AVX512DQ-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload +; AVX512DQ-NEXT: vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm5 # 16-byte Folded Reload +; AVX512DQ-NEXT: # xmm5 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3],xmm1[4],mem[4],xmm1[5],mem[5],xmm1[6],mem[6],xmm1[7],mem[7] +; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm6 = xmm5[0,0,2,1,4,5,6,7] +; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm13 = xmm5[0,2,2,3,4,5,6,7] +; AVX512DQ-NEXT: vinserti128 $1, %xmm13, %ymm6, %ymm13 +; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm6 = xmm5[0,1,2,3,4,4,6,5] ; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,6,6,7] -; AVX512DQ-NEXT: vinserti128 $1, %xmm5, %ymm12, %ymm15 -; AVX512DQ-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload -; AVX512DQ-NEXT: vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm5 # 16-byte Folded Reload -; AVX512DQ-NEXT: # xmm5 = xmm5[0],mem[0],xmm5[1],mem[1],xmm5[2],mem[2],xmm5[3],mem[3],xmm5[4],mem[4],xmm5[5],mem[5],xmm5[6],mem[6],xmm5[7],mem[7] -; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm12 = xmm5[0,0,2,1,4,5,6,7] -; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm7 = xmm5[0,2,2,3,4,5,6,7] -; AVX512DQ-NEXT: vinserti32x4 $1, %xmm7, %ymm12, %ymm19 -; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm7 = xmm5[0,1,2,3,4,4,6,5] +; AVX512DQ-NEXT: vinserti128 $1, %xmm5, %ymm6, %ymm15 +; AVX512DQ-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload +; AVX512DQ-NEXT: vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm5 # 16-byte Folded Reload +; AVX512DQ-NEXT: # xmm5 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3],xmm1[4],mem[4],xmm1[5],mem[5],xmm1[6],mem[6],xmm1[7],mem[7] +; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm6 = xmm5[0,0,2,1,4,5,6,7] +; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm1 = xmm5[0,2,2,3,4,5,6,7] +; AVX512DQ-NEXT: vinserti32x4 $1, %xmm1, %ymm6, %ymm16 +; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm1 
= xmm5[0,1,2,3,4,4,6,5] ; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,6,6,7] -; AVX512DQ-NEXT: vinserti32x4 $1, %xmm5, %ymm7, %ymm20 -; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15] -; AVX512DQ-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; AVX512DQ-NEXT: vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm1 # 16-byte Folded Reload -; AVX512DQ-NEXT: # xmm1 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3],xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7] -; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm0 = xmm1[0,1,1,3,4,5,6,7] -; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm7 = xmm1[2,1,3,3,4,5,6,7] -; AVX512DQ-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm0 -; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm7 = xmm1[0,1,2,3,4,5,5,7] +; AVX512DQ-NEXT: vinserti32x4 $1, %xmm5, %ymm1, %ymm18 +; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm14[8],xmm0[9],xmm14[9],xmm0[10],xmm14[10],xmm0[11],xmm14[11],xmm0[12],xmm14[12],xmm0[13],xmm14[13],xmm0[14],xmm14[14],xmm0[15],xmm14[15] +; AVX512DQ-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload +; AVX512DQ-NEXT: vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload +; AVX512DQ-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3],xmm1[4],mem[4],xmm1[5],mem[5],xmm1[6],mem[6],xmm1[7],mem[7] +; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm5 = xmm1[0,1,1,3,4,5,6,7] +; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm6 = xmm1[2,1,3,3,4,5,6,7] +; AVX512DQ-NEXT: vinserti128 $1, %xmm6, %ymm5, %ymm5 +; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm6 = xmm1[0,1,2,3,4,5,5,7] ; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,7,7] -; AVX512DQ-NEXT: vinserti128 $1, %xmm1, %ymm7, %ymm1 -; AVX512DQ-NEXT: vmovdqa 16(%rsi), %xmm7 -; AVX512DQ-NEXT: vmovdqa 16(%rdi), %xmm12 -; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm11 = xmm12[0],xmm7[0],xmm12[1],xmm7[1],xmm12[2],xmm7[2],xmm12[3],xmm7[3],xmm12[4],xmm7[4],xmm12[5],xmm7[5],xmm12[6],xmm7[6],xmm12[7],xmm7[7] -; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm7 = xmm12[8],xmm7[8],xmm12[9],xmm7[9],xmm12[10],xmm7[10],xmm12[11],xmm7[11],xmm12[12],xmm7[12],xmm12[13],xmm7[13],xmm12[14],xmm7[14],xmm12[15],xmm7[15] -; AVX512DQ-NEXT: vmovdqa64 %xmm23, %xmm12 -; AVX512DQ-NEXT: vmovdqa64 %xmm24, %xmm2 -; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm12 = xmm12[0],xmm2[0],xmm12[1],xmm2[1],xmm12[2],xmm2[2],xmm12[3],xmm2[3],xmm12[4],xmm2[4],xmm12[5],xmm2[5],xmm12[6],xmm2[6],xmm12[7],xmm2[7] -; AVX512DQ-NEXT: vpshufd $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm23 # 32-byte Folded Reload -; AVX512DQ-NEXT: # ymm23 = mem[0,1,1,3,4,5,5,7] -; AVX512DQ-NEXT: vpshufd $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm24 # 32-byte Folded Reload -; AVX512DQ-NEXT: # ymm24 = mem[2,1,3,3,6,5,7,7] -; AVX512DQ-NEXT: vinserti64x4 $1, %ymm24, %zmm23, %zmm23 -; AVX512DQ-NEXT: vpmovzxwq {{.*#+}} ymm24 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero,xmm5[2],zero,zero,zero,xmm5[3],zero,zero,zero -; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[2,3,2,3] -; AVX512DQ-NEXT: vpmovzxwq {{.*#+}} ymm5 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero,xmm5[2],zero,zero,zero,xmm5[3],zero,zero,zero -; AVX512DQ-NEXT: vinserti64x4 $1, %ymm5, %zmm24, %zmm5 -; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm5 = zmm23 ^ (zmm13 & (zmm5 ^ zmm23)) -; AVX512DQ-NEXT: vpshufd $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm23 # 32-byte Folded Reload -; AVX512DQ-NEXT: # ymm23 = mem[0,0,2,1,4,4,6,5] -; AVX512DQ-NEXT: vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm24 # 
32-byte Folded Reload -; AVX512DQ-NEXT: # ymm24 = mem[0,2,2,3,4,6,6,7] -; AVX512DQ-NEXT: vinserti64x4 $1, %ymm24, %zmm23, %zmm23 -; AVX512DQ-NEXT: vpmovzxwq {{.*#+}} ymm24 = xmm11[0],zero,zero,zero,xmm11[1],zero,zero,zero,xmm11[2],zero,zero,zero,xmm11[3],zero,zero,zero -; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm11 = xmm11[2,3,2,3] +; AVX512DQ-NEXT: vinserti128 $1, %xmm1, %ymm6, %ymm1 +; AVX512DQ-NEXT: vmovdqa 16(%rsi), %xmm6 +; AVX512DQ-NEXT: vmovdqa 16(%rdi), %xmm14 +; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm14[0],xmm6[0],xmm14[1],xmm6[1],xmm14[2],xmm6[2],xmm14[3],xmm6[3],xmm14[4],xmm6[4],xmm14[5],xmm6[5],xmm14[6],xmm6[6],xmm14[7],xmm6[7] +; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm14[8],xmm6[8],xmm14[9],xmm6[9],xmm14[10],xmm6[10],xmm14[11],xmm6[11],xmm14[12],xmm6[12],xmm14[13],xmm6[13],xmm14[14],xmm6[14],xmm14[15],xmm6[15] +; AVX512DQ-NEXT: vmovdqa64 %xmm21, %xmm14 +; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm12 = xmm12[0],xmm14[0],xmm12[1],xmm14[1],xmm12[2],xmm14[2],xmm12[3],xmm14[3],xmm12[4],xmm14[4],xmm12[5],xmm14[5],xmm12[6],xmm14[6],xmm12[7],xmm14[7] +; AVX512DQ-NEXT: vpshufd $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload +; AVX512DQ-NEXT: # ymm14 = mem[0,1,1,3,4,5,5,7] +; AVX512DQ-NEXT: vpshufd $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm25 # 32-byte Folded Reload +; AVX512DQ-NEXT: # ymm25 = mem[2,1,3,3,6,5,7,7] +; AVX512DQ-NEXT: vinserti64x4 $1, %ymm25, %zmm14, %zmm14 +; AVX512DQ-NEXT: vpmovzxwq {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero +; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm0 = zmm14 ^ (zmm11 & (zmm0 ^ zmm14)) +; AVX512DQ-NEXT: vpmovzxwq {{.*#+}} zmm8 = xmm8[0],zero,zero,zero,xmm8[1],zero,zero,zero,xmm8[2],zero,zero,zero,xmm8[3],zero,zero,zero,xmm8[4],zero,zero,zero,xmm8[5],zero,zero,zero,xmm8[6],zero,zero,zero,xmm8[7],zero,zero,zero +; AVX512DQ-NEXT: vpmovzxwq {{.*#+}} zmm6 = xmm6[0],zero,zero,zero,xmm6[1],zero,zero,zero,xmm6[2],zero,zero,zero,xmm6[3],zero,zero,zero,xmm6[4],zero,zero,zero,xmm6[5],zero,zero,zero,xmm6[6],zero,zero,zero,xmm6[7],zero,zero,zero +; AVX512DQ-NEXT: vpmovzxwq {{.*#+}} zmm12 = xmm12[0],zero,zero,zero,xmm12[1],zero,zero,zero,xmm12[2],zero,zero,zero,xmm12[3],zero,zero,zero,xmm12[4],zero,zero,zero,xmm12[5],zero,zero,zero,xmm12[6],zero,zero,zero,xmm12[7],zero,zero,zero +; AVX512DQ-NEXT: vpshufd $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload +; AVX512DQ-NEXT: # ymm14 = mem[0,0,2,1,4,4,6,5] +; AVX512DQ-NEXT: vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm25 # 32-byte Folded Reload +; AVX512DQ-NEXT: # ymm25 = mem[0,2,2,3,4,6,6,7] +; AVX512DQ-NEXT: vinserti64x4 $1, %ymm25, %zmm14, %zmm14 ; AVX512DQ-NEXT: vpshufd $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm25 # 32-byte Folded Reload ; AVX512DQ-NEXT: # ymm25 = mem[0,0,2,1,4,4,6,5] ; AVX512DQ-NEXT: vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm26 # 32-byte Folded Reload ; AVX512DQ-NEXT: # ymm26 = mem[0,2,2,3,4,6,6,7] ; AVX512DQ-NEXT: vinserti64x4 $1, %ymm26, %zmm25, %zmm25 -; AVX512DQ-NEXT: vpmovzxwq {{.*#+}} ymm26 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero -; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[2,3,2,3] -; AVX512DQ-NEXT: vpandnq %zmm23, %zmm8, %zmm23 -; AVX512DQ-NEXT: vpandq %zmm8, %zmm25, %zmm25 -; AVX512DQ-NEXT: vpord %zmm23, %zmm25, %zmm5 {%k1} -; AVX512DQ-NEXT: vpmovzxwq {{.*#+}} ymm23 = xmm12[0],zero,zero,zero,xmm12[1],zero,zero,zero,xmm12[2],zero,zero,zero,xmm12[3],zero,zero,zero -; 
AVX512DQ-NEXT: vpshufd {{.*#+}} xmm12 = xmm12[2,3,2,3] -; AVX512DQ-NEXT: vpshufd $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm25 # 32-byte Folded Reload -; AVX512DQ-NEXT: # ymm25 = mem[0,1,1,3,4,5,5,7] -; AVX512DQ-NEXT: vpshufd $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm28 # 32-byte Folded Reload -; AVX512DQ-NEXT: # ymm28 = mem[2,1,3,3,6,5,7,7] -; AVX512DQ-NEXT: vinserti64x4 $1, %ymm28, %zmm25, %zmm25 -; AVX512DQ-NEXT: vpmovzxwq {{.*#+}} ymm11 = xmm11[0],zero,zero,zero,xmm11[1],zero,zero,zero,xmm11[2],zero,zero,zero,xmm11[3],zero,zero,zero -; AVX512DQ-NEXT: vinserti64x4 $1, %ymm11, %zmm24, %zmm11 -; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm11 = zmm25 ^ (zmm13 & (zmm11 ^ zmm25)) -; AVX512DQ-NEXT: vpshufd $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm24 # 32-byte Folded Reload -; AVX512DQ-NEXT: # ymm24 = mem[0,0,2,1,4,4,6,5] -; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm25 = ymm29[0,2,2,3,4,6,6,7] -; AVX512DQ-NEXT: vinserti64x4 $1, %ymm25, %zmm24, %zmm24 -; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm25 = ymm31[0,0,2,1,4,4,6,5] -; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm16 = ymm27[0,2,2,3,4,6,6,7] -; AVX512DQ-NEXT: vinserti64x4 $1, %ymm16, %zmm25, %zmm16 -; AVX512DQ-NEXT: vpandnq %zmm24, %zmm8, %zmm24 -; AVX512DQ-NEXT: vpandq %zmm8, %zmm16, %zmm16 -; AVX512DQ-NEXT: vpord %zmm24, %zmm16, %zmm11 {%k1} -; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm16 = ymm18[0,1,1,3,4,5,5,7] -; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm17 = ymm17[2,1,3,3,6,5,7,7] -; AVX512DQ-NEXT: vinserti64x4 $1, %ymm17, %zmm16, %zmm16 -; AVX512DQ-NEXT: vpmovzxwq {{.*#+}} ymm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero -; AVX512DQ-NEXT: vinserti64x4 $1, %ymm7, %zmm26, %zmm7 -; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm7 = zmm16 ^ (zmm13 & (zmm7 ^ zmm16)) -; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm16 = ymm21[0,0,2,1,4,4,6,5] -; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm17 = ymm22[0,2,2,3,4,6,6,7] -; AVX512DQ-NEXT: vinserti64x4 $1, %ymm17, %zmm16, %zmm16 -; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm2 = ymm30[0,0,2,1,4,4,6,5] +; AVX512DQ-NEXT: vpandnq %zmm14, %zmm7, %zmm14 +; AVX512DQ-NEXT: vpandq %zmm7, %zmm25, %zmm25 +; AVX512DQ-NEXT: vpord %zmm14, %zmm25, %zmm0 {%k1} +; AVX512DQ-NEXT: vpshufd $212, (%rsp), %ymm14 # 32-byte Folded Reload +; AVX512DQ-NEXT: # ymm14 = mem[0,1,1,3,4,5,5,7] +; AVX512DQ-NEXT: vpshufd $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm25 # 32-byte Folded Reload +; AVX512DQ-NEXT: # ymm25 = mem[2,1,3,3,6,5,7,7] +; AVX512DQ-NEXT: vinserti64x4 $1, %ymm25, %zmm14, %zmm14 +; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm8 = zmm14 ^ (zmm11 & (zmm8 ^ zmm14)) +; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm14 = ymm30[0,0,2,1,4,4,6,5] +; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm25 = ymm31[0,2,2,3,4,6,6,7] +; AVX512DQ-NEXT: vinserti64x4 $1, %ymm25, %zmm14, %zmm14 +; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm21 = ymm27[0,0,2,1,4,4,6,5] +; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm17 = ymm17[0,2,2,3,4,6,6,7] +; AVX512DQ-NEXT: vinserti64x4 $1, %ymm17, %zmm21, %zmm17 +; AVX512DQ-NEXT: vpandnq %zmm14, %zmm7, %zmm14 +; AVX512DQ-NEXT: vpandq %zmm7, %zmm17, %zmm17 +; AVX512DQ-NEXT: vpord %zmm14, %zmm17, %zmm8 {%k1} +; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm14 = ymm19[0,1,1,3,4,5,5,7] +; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm17 = ymm20[2,1,3,3,6,5,7,7] +; AVX512DQ-NEXT: vinserti64x4 $1, %ymm17, %zmm14, %zmm14 +; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm6 = zmm14 ^ (zmm11 & (zmm6 ^ zmm14)) +; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm14 = ymm23[0,0,2,1,4,4,6,5] +; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm9 = ymm9[0,2,2,3,4,6,6,7] +; AVX512DQ-NEXT: vinserti64x4 $1, %ymm9, %zmm14, %zmm9 +; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm2 = 
ymm2[0,0,2,1,4,4,6,5] ; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[0,2,2,3,4,6,6,7] ; AVX512DQ-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm2 -; AVX512DQ-NEXT: vpandnq %zmm16, %zmm8, %zmm3 -; AVX512DQ-NEXT: vpandq %zmm8, %zmm2, %zmm2 -; AVX512DQ-NEXT: vpord %zmm3, %zmm2, %zmm7 {%k1} -; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm2 = ymm14[0,1,1,3,4,5,5,7] +; AVX512DQ-NEXT: vpandnq %zmm9, %zmm7, %zmm3 +; AVX512DQ-NEXT: vpandq %zmm7, %zmm2, %zmm2 +; AVX512DQ-NEXT: vpord %zmm3, %zmm2, %zmm6 {%k1} +; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm2 = ymm13[0,1,1,3,4,5,5,7] ; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm3 = ymm15[2,1,3,3,6,5,7,7] ; AVX512DQ-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm2 -; AVX512DQ-NEXT: vpmovzxwq {{.*#+}} ymm3 = xmm12[0],zero,zero,zero,xmm12[1],zero,zero,zero,xmm12[2],zero,zero,zero,xmm12[3],zero,zero,zero -; AVX512DQ-NEXT: vinserti64x4 $1, %ymm3, %zmm23, %zmm3 -; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm3 = zmm2 ^ (zmm13 & (zmm3 ^ zmm2)) -; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm2 = ymm19[0,0,2,1,4,4,6,5] -; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm12 = ymm20[0,2,2,3,4,6,6,7] -; AVX512DQ-NEXT: vinserti64x4 $1, %ymm12, %zmm2, %zmm2 -; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,0,2,1,4,4,6,5] +; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm12 = zmm2 ^ (zmm11 & (zmm12 ^ zmm2)) +; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm2 = ymm16[0,0,2,1,4,4,6,5] +; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm3 = ymm18[0,2,2,3,4,6,6,7] +; AVX512DQ-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm2 +; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm3 = ymm5[0,0,2,1,4,4,6,5] ; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7] -; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 -; AVX512DQ-NEXT: vpandnq %zmm2, %zmm8, %zmm1 -; AVX512DQ-NEXT: vpandq %zmm8, %zmm0, %zmm0 -; AVX512DQ-NEXT: vpord %zmm1, %zmm0, %zmm3 {%k1} +; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm3, %zmm1 +; AVX512DQ-NEXT: vpandnq %zmm2, %zmm7, %zmm2 +; AVX512DQ-NEXT: vpandq %zmm7, %zmm1, %zmm1 +; AVX512DQ-NEXT: vpord %zmm2, %zmm1, %zmm12 {%k1} ; AVX512DQ-NEXT: movq {{[0-9]+}}(%rsp), %rax -; AVX512DQ-NEXT: vmovdqa64 %zmm3, (%rax) -; AVX512DQ-NEXT: vmovdqa64 %zmm7, 192(%rax) -; AVX512DQ-NEXT: vmovdqa64 %zmm11, 128(%rax) -; AVX512DQ-NEXT: vmovdqa64 %zmm5, 320(%rax) +; AVX512DQ-NEXT: vmovdqa64 %zmm12, (%rax) +; AVX512DQ-NEXT: vmovdqa64 %zmm6, 192(%rax) +; AVX512DQ-NEXT: vmovdqa64 %zmm8, 128(%rax) +; AVX512DQ-NEXT: vmovdqa64 %zmm0, 320(%rax) ; AVX512DQ-NEXT: vmovdqa64 %zmm4, 256(%rax) ; AVX512DQ-NEXT: vmovdqa64 %zmm10, 448(%rax) -; AVX512DQ-NEXT: vmovdqa64 %zmm9, 384(%rax) -; AVX512DQ-NEXT: vmovdqa64 %zmm6, 64(%rax) +; AVX512DQ-NEXT: vmovdqa64 %zmm24, 384(%rax) +; AVX512DQ-NEXT: vmovdqa64 %zmm22, 64(%rax) ; AVX512DQ-NEXT: addq $552, %rsp # imm = 0x228 ; AVX512DQ-NEXT: vzeroupper ; AVX512DQ-NEXT: retq @@ -8146,284 +8090,260 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec ; AVX512BW-NEXT: vmovdqa 16(%r10), %xmm12 ; AVX512BW-NEXT: vmovdqa64 32(%r10), %xmm16 ; AVX512BW-NEXT: vmovdqa 48(%r10), %xmm15 -; AVX512BW-NEXT: vmovdqa (%rax), %xmm2 +; AVX512BW-NEXT: vmovdqa (%rax), %xmm1 ; AVX512BW-NEXT: vmovdqa 16(%rax), %xmm13 ; AVX512BW-NEXT: vmovdqa64 32(%rax), %xmm17 ; AVX512BW-NEXT: vmovdqa64 48(%rax), %xmm18 -; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15] -; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm3 = xmm1[0,1,2,3,4,4,6,5] -; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm4 = xmm1[0,1,2,3,4,6,6,7] -; AVX512BW-NEXT: vinserti128 $1, %xmm4, %ymm3, 
%ymm3 -; AVX512BW-NEXT: vpshufd {{.*#+}} ymm4 = ymm3[0,2,2,3,4,6,6,7] -; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm3 = [0,0,0,0,4,5,2,1,0,2,0,2,4,5,2,3] -; AVX512BW-NEXT: vpermw %ymm1, %ymm3, %ymm1 -; AVX512BW-NEXT: vinserti64x4 $1, %ymm4, %zmm1, %zmm1 -; AVX512BW-NEXT: vmovdqa (%r9), %xmm4 +; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15] +; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm2 = xmm3[0,1,2,3,4,4,6,5] +; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm4 = xmm3[0,1,2,3,4,6,6,7] +; AVX512BW-NEXT: vinserti128 $1, %xmm4, %ymm2, %ymm2 +; AVX512BW-NEXT: vpshufd {{.*#+}} ymm4 = ymm2[0,2,2,3,4,6,6,7] +; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm2 = [0,0,0,0,4,5,2,1,0,2,0,2,4,5,2,3] +; AVX512BW-NEXT: vpermw %ymm3, %ymm2, %ymm3 +; AVX512BW-NEXT: vinserti64x4 $1, %ymm4, %zmm3, %zmm6 +; AVX512BW-NEXT: vmovdqa (%r9), %xmm3 ; AVX512BW-NEXT: vmovdqa64 48(%r9), %xmm19 -; AVX512BW-NEXT: vmovdqa (%r8), %xmm5 -; AVX512BW-NEXT: vmovdqa64 48(%r8), %xmm21 -; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm7 = xmm5[8],xmm4[8],xmm5[9],xmm4[9],xmm5[10],xmm4[10],xmm5[11],xmm4[11],xmm5[12],xmm4[12],xmm5[13],xmm4[13],xmm5[14],xmm4[14],xmm5[15],xmm4[15] -; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm6 = xmm7[0,1,2,3,4,5,5,7] +; AVX512BW-NEXT: vmovdqa (%r8), %xmm4 +; AVX512BW-NEXT: vmovdqa64 48(%r8), %xmm20 +; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm7 = xmm4[8],xmm3[8],xmm4[9],xmm3[9],xmm4[10],xmm3[10],xmm4[11],xmm3[11],xmm4[12],xmm3[12],xmm4[13],xmm3[13],xmm4[14],xmm3[14],xmm4[15],xmm3[15] +; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm5 = xmm7[0,1,2,3,4,5,5,7] ; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm8 = xmm7[0,1,2,3,6,5,7,7] -; AVX512BW-NEXT: vinserti128 $1, %xmm8, %ymm6, %ymm6 -; AVX512BW-NEXT: vpshufd {{.*#+}} ymm8 = ymm6[0,2,2,3,4,6,6,7] -; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm6 = [0,1,0,1,4,5,1,3,2,1,2,1,4,5,3,3] -; AVX512BW-NEXT: vpermw %ymm7, %ymm6, %ymm7 +; AVX512BW-NEXT: vinserti128 $1, %xmm8, %ymm5, %ymm5 +; AVX512BW-NEXT: vpshufd {{.*#+}} ymm8 = ymm5[0,2,2,3,4,6,6,7] +; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm5 = [0,1,0,1,4,5,1,3,2,1,2,1,4,5,3,3] +; AVX512BW-NEXT: vpermw %ymm7, %ymm5, %ymm7 ; AVX512BW-NEXT: vinserti64x4 $1, %ymm8, %zmm7, %zmm14 ; AVX512BW-NEXT: movl $-2004318072, %eax # imm = 0x88888888 ; AVX512BW-NEXT: kmovd %eax, %k1 -; AVX512BW-NEXT: vmovdqu16 %zmm1, %zmm14 {%k1} -; AVX512BW-NEXT: vmovdqa (%rsi), %xmm7 +; AVX512BW-NEXT: vmovdqu16 %zmm6, %zmm14 {%k1} +; AVX512BW-NEXT: vmovdqa (%rcx), %xmm6 +; AVX512BW-NEXT: vmovdqa64 48(%rcx), %xmm21 +; AVX512BW-NEXT: vmovdqa (%rdx), %xmm8 +; AVX512BW-NEXT: vmovdqa64 48(%rdx), %xmm23 +; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm7 = xmm8[8],xmm6[8],xmm8[9],xmm6[9],xmm8[10],xmm6[10],xmm8[11],xmm6[11],xmm8[12],xmm6[12],xmm8[13],xmm6[13],xmm8[14],xmm6[14],xmm8[15],xmm6[15] +; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm9 = xmm7[0,1,2,3,4,4,6,5] +; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm10 = xmm7[0,1,2,3,4,6,6,7] +; AVX512BW-NEXT: vinserti128 $1, %xmm10, %ymm9, %ymm9 +; AVX512BW-NEXT: vpshufd {{.*#+}} ymm10 = ymm9[2,1,3,3,6,5,7,7] +; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm9 = [0,0,2,1,2,1,6,7,0,2,2,3,2,3,6,7] +; AVX512BW-NEXT: vpermw %ymm7, %ymm9, %ymm7 +; AVX512BW-NEXT: vinserti64x4 $1, %ymm10, %zmm7, %zmm22 +; AVX512BW-NEXT: vmovdqa (%rsi), %xmm10 ; AVX512BW-NEXT: vmovdqa64 48(%rsi), %xmm24 -; AVX512BW-NEXT: vmovdqa (%rdi), %xmm8 -; AVX512BW-NEXT: vmovdqa64 48(%rdi), %xmm26 -; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm1 = 
xmm8[8],xmm7[8],xmm8[9],xmm7[9],xmm8[10],xmm7[10],xmm8[11],xmm7[11],xmm8[12],xmm7[12],xmm8[13],xmm7[13],xmm8[14],xmm7[14],xmm8[15],xmm7[15] -; AVX512BW-NEXT: vpmovzxwq {{.*#+}} ymm9 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero -; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3] -; AVX512BW-NEXT: vpmovzxwq {{.*#+}} ymm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero -; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm9, %zmm1 -; AVX512BW-NEXT: vmovdqa (%rcx), %xmm9 -; AVX512BW-NEXT: vmovdqa64 48(%rcx), %xmm28 -; AVX512BW-NEXT: vmovdqa (%rdx), %xmm10 -; AVX512BW-NEXT: vmovdqa64 48(%rdx), %xmm30 -; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm20 = xmm10[8],xmm9[8],xmm10[9],xmm9[9],xmm10[10],xmm9[10],xmm10[11],xmm9[11],xmm10[12],xmm9[12],xmm10[13],xmm9[13],xmm10[14],xmm9[14],xmm10[15],xmm9[15] -; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm11 = xmm20[0,1,2,3,4,4,6,5] -; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm22 = xmm20[0,1,2,3,4,6,6,7] -; AVX512BW-NEXT: vinserti32x4 $1, %xmm22, %ymm11, %ymm11 -; AVX512BW-NEXT: vpshufd {{.*#+}} ymm22 = ymm11[2,1,3,3,6,5,7,7] -; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm11 = [0,0,2,1,2,1,6,7,0,2,2,3,2,3,6,7] -; AVX512BW-NEXT: vpermw %ymm20, %ymm11, %ymm20 -; AVX512BW-NEXT: vinserti64x4 $1, %ymm22, %zmm20, %zmm20 +; AVX512BW-NEXT: vmovdqa (%rdi), %xmm11 +; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm7 = xmm11[8],xmm10[8],xmm11[9],xmm10[9],xmm11[10],xmm10[10],xmm11[11],xmm10[11],xmm11[12],xmm10[12],xmm11[13],xmm10[13],xmm11[14],xmm10[14],xmm11[15],xmm10[15] +; AVX512BW-NEXT: vpmovzxwq {{.*#+}} zmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero,xmm7[4],zero,zero,zero,xmm7[5],zero,zero,zero,xmm7[6],zero,zero,zero,xmm7[7],zero,zero,zero ; AVX512BW-NEXT: movl $572662306, %eax # imm = 0x22222222 ; AVX512BW-NEXT: kmovd %eax, %k2 -; AVX512BW-NEXT: vmovdqu16 %zmm20, %zmm1 {%k2} +; AVX512BW-NEXT: vmovdqu16 %zmm22, %zmm7 {%k2} ; AVX512BW-NEXT: movw $-21846, %ax # imm = 0xAAAA ; AVX512BW-NEXT: kmovd %eax, %k3 -; AVX512BW-NEXT: vmovdqa32 %zmm14, %zmm1 {%k3} +; AVX512BW-NEXT: vmovdqa32 %zmm14, %zmm7 {%k3} ; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm14 = xmm18[0],xmm15[0],xmm18[1],xmm15[1],xmm18[2],xmm15[2],xmm18[3],xmm15[3],xmm18[4],xmm15[4],xmm18[5],xmm15[5],xmm18[6],xmm15[6],xmm18[7],xmm15[7] -; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm20 = xmm14[0,1,2,3,4,4,6,5] -; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm22 = xmm14[0,1,2,3,4,6,6,7] -; AVX512BW-NEXT: vinserti32x4 $1, %xmm22, %ymm20, %ymm20 -; AVX512BW-NEXT: vpshufd {{.*#+}} ymm20 = ymm20[0,2,2,3,4,6,6,7] -; AVX512BW-NEXT: vpermw %ymm14, %ymm3, %ymm14 -; AVX512BW-NEXT: vinserti64x4 $1, %ymm20, %zmm14, %zmm14 -; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm20 = xmm21[0],xmm19[0],xmm21[1],xmm19[1],xmm21[2],xmm19[2],xmm21[3],xmm19[3],xmm21[4],xmm19[4],xmm21[5],xmm19[5],xmm21[6],xmm19[6],xmm21[7],xmm19[7] -; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm22 = xmm20[0,1,2,3,4,5,5,7] -; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm23 = xmm20[0,1,2,3,6,5,7,7] -; AVX512BW-NEXT: vinserti32x4 $1, %xmm23, %ymm22, %ymm22 +; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm22 = xmm14[0,1,2,3,4,4,6,5] +; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm25 = xmm14[0,1,2,3,4,6,6,7] +; AVX512BW-NEXT: vinserti32x4 $1, %xmm25, %ymm22, %ymm22 ; AVX512BW-NEXT: vpshufd {{.*#+}} ymm22 = ymm22[0,2,2,3,4,6,6,7] -; AVX512BW-NEXT: vpermw %ymm20, %ymm6, %ymm20 -; AVX512BW-NEXT: vinserti64x4 $1, %ymm22, %zmm20, %zmm23 -; AVX512BW-NEXT: vmovdqu16 %zmm14, %zmm23 {%k1} -; AVX512BW-NEXT: vpunpcklbw {{.*#+}} 
xmm14 = xmm26[0],xmm24[0],xmm26[1],xmm24[1],xmm26[2],xmm24[2],xmm26[3],xmm24[3],xmm26[4],xmm24[4],xmm26[5],xmm24[5],xmm26[6],xmm24[6],xmm26[7],xmm24[7] -; AVX512BW-NEXT: vpmovzxwq {{.*#+}} ymm20 = xmm14[0],zero,zero,zero,xmm14[1],zero,zero,zero,xmm14[2],zero,zero,zero,xmm14[3],zero,zero,zero -; AVX512BW-NEXT: vpshufd {{.*#+}} xmm14 = xmm14[2,3,2,3] -; AVX512BW-NEXT: vpmovzxwq {{.*#+}} ymm14 = xmm14[0],zero,zero,zero,xmm14[1],zero,zero,zero,xmm14[2],zero,zero,zero,xmm14[3],zero,zero,zero -; AVX512BW-NEXT: vinserti64x4 $1, %ymm14, %zmm20, %zmm14 -; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm20 = xmm30[0],xmm28[0],xmm30[1],xmm28[1],xmm30[2],xmm28[2],xmm30[3],xmm28[3],xmm30[4],xmm28[4],xmm30[5],xmm28[5],xmm30[6],xmm28[6],xmm30[7],xmm28[7] -; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm22 = xmm20[0,1,2,3,4,4,6,5] -; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm25 = xmm20[0,1,2,3,4,6,6,7] -; AVX512BW-NEXT: vinserti32x4 $1, %xmm25, %ymm22, %ymm25 +; AVX512BW-NEXT: vpermw %ymm14, %ymm2, %ymm14 +; AVX512BW-NEXT: vinserti64x4 $1, %ymm22, %zmm14, %zmm14 +; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm22 = xmm20[0],xmm19[0],xmm20[1],xmm19[1],xmm20[2],xmm19[2],xmm20[3],xmm19[3],xmm20[4],xmm19[4],xmm20[5],xmm19[5],xmm20[6],xmm19[6],xmm20[7],xmm19[7] +; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm25 = xmm22[0,1,2,3,4,5,5,7] +; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm26 = xmm22[0,1,2,3,6,5,7,7] +; AVX512BW-NEXT: vinserti32x4 $1, %xmm26, %ymm25, %ymm25 +; AVX512BW-NEXT: vpshufd {{.*#+}} ymm25 = ymm25[0,2,2,3,4,6,6,7] +; AVX512BW-NEXT: vpermw %ymm22, %ymm5, %ymm22 +; AVX512BW-NEXT: vinserti64x4 $1, %ymm25, %zmm22, %zmm25 +; AVX512BW-NEXT: vmovdqu16 %zmm14, %zmm25 {%k1} +; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm14 = xmm23[0],xmm21[0],xmm23[1],xmm21[1],xmm23[2],xmm21[2],xmm23[3],xmm21[3],xmm23[4],xmm21[4],xmm23[5],xmm21[5],xmm23[6],xmm21[6],xmm23[7],xmm21[7] +; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm22 = xmm14[0,1,2,3,4,4,6,5] +; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm26 = xmm14[0,1,2,3,4,6,6,7] +; AVX512BW-NEXT: vinserti32x4 $1, %xmm26, %ymm22, %ymm22 +; AVX512BW-NEXT: vmovdqa64 48(%rdi), %xmm27 +; AVX512BW-NEXT: vpshufd {{.*#+}} ymm22 = ymm22[2,1,3,3,6,5,7,7] +; AVX512BW-NEXT: vpermw %ymm14, %ymm9, %ymm14 +; AVX512BW-NEXT: vinserti64x4 $1, %ymm22, %zmm14, %zmm22 +; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm14 = xmm27[0],xmm24[0],xmm27[1],xmm24[1],xmm27[2],xmm24[2],xmm27[3],xmm24[3],xmm27[4],xmm24[4],xmm27[5],xmm24[5],xmm27[6],xmm24[6],xmm27[7],xmm24[7] +; AVX512BW-NEXT: vpmovzxwq {{.*#+}} zmm14 = xmm14[0],zero,zero,zero,xmm14[1],zero,zero,zero,xmm14[2],zero,zero,zero,xmm14[3],zero,zero,zero,xmm14[4],zero,zero,zero,xmm14[5],zero,zero,zero,xmm14[6],zero,zero,zero,xmm14[7],zero,zero,zero +; AVX512BW-NEXT: vmovdqu16 %zmm22, %zmm14 {%k2} ; AVX512BW-NEXT: vmovdqa64 32(%r9), %xmm22 -; AVX512BW-NEXT: vpshufd {{.*#+}} ymm25 = ymm25[2,1,3,3,6,5,7,7] -; AVX512BW-NEXT: vpermw %ymm20, %ymm11, %ymm20 -; AVX512BW-NEXT: vinserti64x4 $1, %ymm25, %zmm20, %zmm20 -; AVX512BW-NEXT: vmovdqa64 32(%r8), %xmm25 -; AVX512BW-NEXT: vmovdqu16 %zmm20, %zmm14 {%k2} -; AVX512BW-NEXT: vmovdqa64 32(%rsi), %xmm20 -; AVX512BW-NEXT: vmovdqa32 %zmm23, %zmm14 {%k3} +; AVX512BW-NEXT: vmovdqa32 %zmm25, %zmm14 {%k3} ; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm15 = xmm18[8],xmm15[8],xmm18[9],xmm15[9],xmm18[10],xmm15[10],xmm18[11],xmm15[11],xmm18[12],xmm15[12],xmm18[13],xmm15[13],xmm18[14],xmm15[14],xmm18[15],xmm15[15] ; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm18 = xmm15[0,1,2,3,4,4,6,5] -; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm23 = xmm15[0,1,2,3,4,6,6,7] -; AVX512BW-NEXT: vinserti32x4 $1, %xmm23, %ymm18, 
%ymm18 -; AVX512BW-NEXT: vmovdqa64 32(%rdi), %xmm23 +; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm25 = xmm15[0,1,2,3,4,6,6,7] +; AVX512BW-NEXT: vinserti32x4 $1, %xmm25, %ymm18, %ymm18 +; AVX512BW-NEXT: vmovdqa64 32(%r8), %xmm25 ; AVX512BW-NEXT: vpshufd {{.*#+}} ymm18 = ymm18[0,2,2,3,4,6,6,7] -; AVX512BW-NEXT: vpermw %ymm15, %ymm3, %ymm15 +; AVX512BW-NEXT: vpermw %ymm15, %ymm2, %ymm15 ; AVX512BW-NEXT: vinserti64x4 $1, %ymm18, %zmm15, %zmm15 -; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm18 = xmm21[8],xmm19[8],xmm21[9],xmm19[9],xmm21[10],xmm19[10],xmm21[11],xmm19[11],xmm21[12],xmm19[12],xmm21[13],xmm19[13],xmm21[14],xmm19[14],xmm21[15],xmm19[15] +; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm18 = xmm20[8],xmm19[8],xmm20[9],xmm19[9],xmm20[10],xmm19[10],xmm20[11],xmm19[11],xmm20[12],xmm19[12],xmm20[13],xmm19[13],xmm20[14],xmm19[14],xmm20[15],xmm19[15] ; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm19 = xmm18[0,1,2,3,4,5,5,7] -; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm21 = xmm18[0,1,2,3,6,5,7,7] -; AVX512BW-NEXT: vinserti32x4 $1, %xmm21, %ymm19, %ymm19 -; AVX512BW-NEXT: vmovdqa64 32(%rcx), %xmm27 +; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm20 = xmm18[0,1,2,3,6,5,7,7] +; AVX512BW-NEXT: vinserti32x4 $1, %xmm20, %ymm19, %ymm19 +; AVX512BW-NEXT: vmovdqa64 32(%rcx), %xmm20 ; AVX512BW-NEXT: vpshufd {{.*#+}} ymm19 = ymm19[0,2,2,3,4,6,6,7] -; AVX512BW-NEXT: vpermw %ymm18, %ymm6, %ymm18 +; AVX512BW-NEXT: vpermw %ymm18, %ymm5, %ymm18 ; AVX512BW-NEXT: vinserti64x4 $1, %ymm19, %zmm18, %zmm18 -; AVX512BW-NEXT: vmovdqa64 32(%rdx), %xmm29 +; AVX512BW-NEXT: vmovdqa64 32(%rdx), %xmm26 ; AVX512BW-NEXT: vmovdqu16 %zmm15, %zmm18 {%k1} -; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm15 = xmm26[8],xmm24[8],xmm26[9],xmm24[9],xmm26[10],xmm24[10],xmm26[11],xmm24[11],xmm26[12],xmm24[12],xmm26[13],xmm24[13],xmm26[14],xmm24[14],xmm26[15],xmm24[15] -; AVX512BW-NEXT: vpmovzxwq {{.*#+}} ymm19 = xmm15[0],zero,zero,zero,xmm15[1],zero,zero,zero,xmm15[2],zero,zero,zero,xmm15[3],zero,zero,zero -; AVX512BW-NEXT: vpshufd {{.*#+}} xmm15 = xmm15[2,3,2,3] -; AVX512BW-NEXT: vpmovzxwq {{.*#+}} ymm15 = xmm15[0],zero,zero,zero,xmm15[1],zero,zero,zero,xmm15[2],zero,zero,zero,xmm15[3],zero,zero,zero -; AVX512BW-NEXT: vinserti64x4 $1, %ymm15, %zmm19, %zmm15 -; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm19 = xmm30[8],xmm28[8],xmm30[9],xmm28[9],xmm30[10],xmm28[10],xmm30[11],xmm28[11],xmm30[12],xmm28[12],xmm30[13],xmm28[13],xmm30[14],xmm28[14],xmm30[15],xmm28[15] -; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm21 = xmm19[0,1,2,3,4,4,6,5] -; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm24 = xmm19[0,1,2,3,4,6,6,7] -; AVX512BW-NEXT: vinserti32x4 $1, %xmm24, %ymm21, %ymm21 -; AVX512BW-NEXT: vpshufd {{.*#+}} ymm21 = ymm21[2,1,3,3,6,5,7,7] -; AVX512BW-NEXT: vpermw %ymm19, %ymm11, %ymm19 -; AVX512BW-NEXT: vinserti64x4 $1, %ymm21, %zmm19, %zmm19 +; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm15 = xmm23[8],xmm21[8],xmm23[9],xmm21[9],xmm23[10],xmm21[10],xmm23[11],xmm21[11],xmm23[12],xmm21[12],xmm23[13],xmm21[13],xmm23[14],xmm21[14],xmm23[15],xmm21[15] +; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm19 = xmm15[0,1,2,3,4,4,6,5] +; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm21 = xmm15[0,1,2,3,4,6,6,7] +; AVX512BW-NEXT: vinserti32x4 $1, %xmm21, %ymm19, %ymm19 +; AVX512BW-NEXT: vmovdqa64 32(%rsi), %xmm21 +; AVX512BW-NEXT: vpshufd {{.*#+}} ymm19 = ymm19[2,1,3,3,6,5,7,7] +; AVX512BW-NEXT: vpermw %ymm15, %ymm9, %ymm15 +; AVX512BW-NEXT: vinserti64x4 $1, %ymm19, %zmm15, %zmm19 +; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm15 = 
xmm27[8],xmm24[8],xmm27[9],xmm24[9],xmm27[10],xmm24[10],xmm27[11],xmm24[11],xmm27[12],xmm24[12],xmm27[13],xmm24[13],xmm27[14],xmm24[14],xmm27[15],xmm24[15] +; AVX512BW-NEXT: vpmovzxwq {{.*#+}} zmm15 = xmm15[0],zero,zero,zero,xmm15[1],zero,zero,zero,xmm15[2],zero,zero,zero,xmm15[3],zero,zero,zero,xmm15[4],zero,zero,zero,xmm15[5],zero,zero,zero,xmm15[6],zero,zero,zero,xmm15[7],zero,zero,zero ; AVX512BW-NEXT: vmovdqu16 %zmm19, %zmm15 {%k2} ; AVX512BW-NEXT: vmovdqa32 %zmm18, %zmm15 {%k3} ; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm18 = xmm17[0],xmm16[0],xmm17[1],xmm16[1],xmm17[2],xmm16[2],xmm17[3],xmm16[3],xmm17[4],xmm16[4],xmm17[5],xmm16[5],xmm17[6],xmm16[6],xmm17[7],xmm16[7] ; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm19 = xmm18[0,1,2,3,4,4,6,5] -; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm21 = xmm18[0,1,2,3,4,6,6,7] -; AVX512BW-NEXT: vinserti32x4 $1, %xmm21, %ymm19, %ymm19 +; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm23 = xmm18[0,1,2,3,4,6,6,7] +; AVX512BW-NEXT: vinserti32x4 $1, %xmm23, %ymm19, %ymm19 ; AVX512BW-NEXT: vpshufd {{.*#+}} ymm19 = ymm19[0,2,2,3,4,6,6,7] -; AVX512BW-NEXT: vpermw %ymm18, %ymm3, %ymm18 +; AVX512BW-NEXT: vpermw %ymm18, %ymm2, %ymm18 ; AVX512BW-NEXT: vinserti64x4 $1, %ymm19, %zmm18, %zmm18 ; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm19 = xmm25[0],xmm22[0],xmm25[1],xmm22[1],xmm25[2],xmm22[2],xmm25[3],xmm22[3],xmm25[4],xmm22[4],xmm25[5],xmm22[5],xmm25[6],xmm22[6],xmm25[7],xmm22[7] -; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm21 = xmm19[0,1,2,3,4,5,5,7] +; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm23 = xmm19[0,1,2,3,4,5,5,7] ; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm24 = xmm19[0,1,2,3,6,5,7,7] -; AVX512BW-NEXT: vinserti32x4 $1, %xmm24, %ymm21, %ymm21 -; AVX512BW-NEXT: vpshufd {{.*#+}} ymm21 = ymm21[0,2,2,3,4,6,6,7] -; AVX512BW-NEXT: vpermw %ymm19, %ymm6, %ymm19 -; AVX512BW-NEXT: vinserti64x4 $1, %ymm21, %zmm19, %zmm24 -; AVX512BW-NEXT: vmovdqu16 %zmm18, %zmm24 {%k1} -; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm18 = xmm23[0],xmm20[0],xmm23[1],xmm20[1],xmm23[2],xmm20[2],xmm23[3],xmm20[3],xmm23[4],xmm20[4],xmm23[5],xmm20[5],xmm23[6],xmm20[6],xmm23[7],xmm20[7] -; AVX512BW-NEXT: vpmovzxwq {{.*#+}} ymm19 = xmm18[0],zero,zero,zero,xmm18[1],zero,zero,zero,xmm18[2],zero,zero,zero,xmm18[3],zero,zero,zero -; AVX512BW-NEXT: vpshufd {{.*#+}} xmm18 = xmm18[2,3,2,3] -; AVX512BW-NEXT: vpmovzxwq {{.*#+}} ymm18 = xmm18[0],zero,zero,zero,xmm18[1],zero,zero,zero,xmm18[2],zero,zero,zero,xmm18[3],zero,zero,zero -; AVX512BW-NEXT: vinserti64x4 $1, %ymm18, %zmm19, %zmm18 -; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm19 = xmm29[0],xmm27[0],xmm29[1],xmm27[1],xmm29[2],xmm27[2],xmm29[3],xmm27[3],xmm29[4],xmm27[4],xmm29[5],xmm27[5],xmm29[6],xmm27[6],xmm29[7],xmm27[7] -; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm21 = xmm19[0,1,2,3,4,4,6,5] -; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm26 = xmm19[0,1,2,3,4,6,6,7] -; AVX512BW-NEXT: vinserti32x4 $1, %xmm26, %ymm21, %ymm26 -; AVX512BW-NEXT: vmovdqa64 16(%r9), %xmm21 -; AVX512BW-NEXT: vpshufd {{.*#+}} ymm26 = ymm26[2,1,3,3,6,5,7,7] -; AVX512BW-NEXT: vpermw %ymm19, %ymm11, %ymm19 -; AVX512BW-NEXT: vinserti64x4 $1, %ymm26, %zmm19, %zmm19 -; AVX512BW-NEXT: vmovdqa64 16(%r8), %xmm26 +; AVX512BW-NEXT: vinserti32x4 $1, %xmm24, %ymm23, %ymm23 +; AVX512BW-NEXT: vpshufd {{.*#+}} ymm23 = ymm23[0,2,2,3,4,6,6,7] +; AVX512BW-NEXT: vpermw %ymm19, %ymm5, %ymm19 +; AVX512BW-NEXT: vinserti64x4 $1, %ymm23, %zmm19, %zmm23 +; AVX512BW-NEXT: vmovdqu16 %zmm18, %zmm23 {%k1} +; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm18 = 
xmm26[0],xmm20[0],xmm26[1],xmm20[1],xmm26[2],xmm20[2],xmm26[3],xmm20[3],xmm26[4],xmm20[4],xmm26[5],xmm20[5],xmm26[6],xmm20[6],xmm26[7],xmm20[7] +; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm19 = xmm18[0,1,2,3,4,4,6,5] +; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm24 = xmm18[0,1,2,3,4,6,6,7] +; AVX512BW-NEXT: vinserti32x4 $1, %xmm24, %ymm19, %ymm19 +; AVX512BW-NEXT: vmovdqa64 32(%rdi), %xmm27 +; AVX512BW-NEXT: vpshufd {{.*#+}} ymm19 = ymm19[2,1,3,3,6,5,7,7] +; AVX512BW-NEXT: vpermw %ymm18, %ymm9, %ymm18 +; AVX512BW-NEXT: vinserti64x4 $1, %ymm19, %zmm18, %zmm19 +; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm18 = xmm27[0],xmm21[0],xmm27[1],xmm21[1],xmm27[2],xmm21[2],xmm27[3],xmm21[3],xmm27[4],xmm21[4],xmm27[5],xmm21[5],xmm27[6],xmm21[6],xmm27[7],xmm21[7] +; AVX512BW-NEXT: vpmovzxwq {{.*#+}} zmm18 = xmm18[0],zero,zero,zero,xmm18[1],zero,zero,zero,xmm18[2],zero,zero,zero,xmm18[3],zero,zero,zero,xmm18[4],zero,zero,zero,xmm18[5],zero,zero,zero,xmm18[6],zero,zero,zero,xmm18[7],zero,zero,zero ; AVX512BW-NEXT: vmovdqu16 %zmm19, %zmm18 {%k2} -; AVX512BW-NEXT: vmovdqa64 16(%rsi), %xmm19 -; AVX512BW-NEXT: vmovdqa32 %zmm24, %zmm18 {%k3} +; AVX512BW-NEXT: vmovdqa64 16(%r9), %xmm19 +; AVX512BW-NEXT: vmovdqa32 %zmm23, %zmm18 {%k3} ; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm16 = xmm17[8],xmm16[8],xmm17[9],xmm16[9],xmm17[10],xmm16[10],xmm17[11],xmm16[11],xmm17[12],xmm16[12],xmm17[13],xmm16[13],xmm17[14],xmm16[14],xmm17[15],xmm16[15] ; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm17 = xmm16[0,1,2,3,4,4,6,5] -; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm24 = xmm16[0,1,2,3,4,6,6,7] -; AVX512BW-NEXT: vinserti32x4 $1, %xmm24, %ymm17, %ymm17 -; AVX512BW-NEXT: vmovdqa64 16(%rdi), %xmm24 +; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm23 = xmm16[0,1,2,3,4,6,6,7] +; AVX512BW-NEXT: vinserti32x4 $1, %xmm23, %ymm17, %ymm17 +; AVX512BW-NEXT: vmovdqa64 16(%r8), %xmm23 ; AVX512BW-NEXT: vpshufd {{.*#+}} ymm17 = ymm17[0,2,2,3,4,6,6,7] -; AVX512BW-NEXT: vpermw %ymm16, %ymm3, %ymm16 +; AVX512BW-NEXT: vpermw %ymm16, %ymm2, %ymm16 ; AVX512BW-NEXT: vinserti64x4 $1, %ymm17, %zmm16, %zmm16 -; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm17 = xmm25[8],xmm22[8],xmm25[9],xmm22[9],xmm25[10],xmm22[10],xmm25[11],xmm22[11],xmm25[12],xmm22[12],xmm25[13],xmm22[13],xmm25[14],xmm22[14],xmm25[15],xmm22[15] -; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm22 = xmm17[0,1,2,3,4,5,5,7] -; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm25 = xmm17[0,1,2,3,6,5,7,7] -; AVX512BW-NEXT: vinserti32x4 $1, %xmm25, %ymm22, %ymm25 -; AVX512BW-NEXT: vmovdqa64 16(%rcx), %xmm22 -; AVX512BW-NEXT: vpshufd {{.*#+}} ymm25 = ymm25[0,2,2,3,4,6,6,7] -; AVX512BW-NEXT: vpermw %ymm17, %ymm6, %ymm17 -; AVX512BW-NEXT: vinserti64x4 $1, %ymm25, %zmm17, %zmm17 -; AVX512BW-NEXT: vmovdqa64 16(%rdx), %xmm25 -; AVX512BW-NEXT: vmovdqu16 %zmm16, %zmm17 {%k1} -; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm16 = xmm23[8],xmm20[8],xmm23[9],xmm20[9],xmm23[10],xmm20[10],xmm23[11],xmm20[11],xmm23[12],xmm20[12],xmm23[13],xmm20[13],xmm23[14],xmm20[14],xmm23[15],xmm20[15] -; AVX512BW-NEXT: vpmovzxwq {{.*#+}} ymm20 = xmm16[0],zero,zero,zero,xmm16[1],zero,zero,zero,xmm16[2],zero,zero,zero,xmm16[3],zero,zero,zero -; AVX512BW-NEXT: vpshufd {{.*#+}} xmm16 = xmm16[2,3,2,3] -; AVX512BW-NEXT: vpmovzxwq {{.*#+}} ymm16 = xmm16[0],zero,zero,zero,xmm16[1],zero,zero,zero,xmm16[2],zero,zero,zero,xmm16[3],zero,zero,zero -; AVX512BW-NEXT: vinserti64x4 $1, %ymm16, %zmm20, %zmm16 -; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm20 = xmm29[8],xmm27[8],xmm29[9],xmm27[9],xmm29[10],xmm27[10],xmm29[11],xmm27[11],xmm29[12],xmm27[12],xmm29[13],xmm27[13],xmm29[14],xmm27[14],xmm29[15],xmm27[15] -; 
AVX512BW-NEXT: vpshufhw {{.*#+}} xmm23 = xmm20[0,1,2,3,4,4,6,5] -; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm27 = xmm20[0,1,2,3,4,6,6,7] -; AVX512BW-NEXT: vinserti32x4 $1, %xmm27, %ymm23, %ymm23 -; AVX512BW-NEXT: vpshufd {{.*#+}} ymm23 = ymm23[2,1,3,3,6,5,7,7] -; AVX512BW-NEXT: vpermw %ymm20, %ymm11, %ymm20 -; AVX512BW-NEXT: vinserti64x4 $1, %ymm23, %zmm20, %zmm20 +; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm22 = xmm25[8],xmm22[8],xmm25[9],xmm22[9],xmm25[10],xmm22[10],xmm25[11],xmm22[11],xmm25[12],xmm22[12],xmm25[13],xmm22[13],xmm25[14],xmm22[14],xmm25[15],xmm22[15] +; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm17 = xmm22[0,1,2,3,4,5,5,7] +; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm24 = xmm22[0,1,2,3,6,5,7,7] +; AVX512BW-NEXT: vinserti32x4 $1, %xmm24, %ymm17, %ymm24 +; AVX512BW-NEXT: vmovdqa64 16(%rcx), %xmm17 +; AVX512BW-NEXT: vpshufd {{.*#+}} ymm24 = ymm24[0,2,2,3,4,6,6,7] +; AVX512BW-NEXT: vpermw %ymm22, %ymm5, %ymm22 +; AVX512BW-NEXT: vinserti64x4 $1, %ymm24, %zmm22, %zmm25 +; AVX512BW-NEXT: vmovdqa64 16(%rdx), %xmm24 +; AVX512BW-NEXT: vmovdqu16 %zmm16, %zmm25 {%k1} +; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm16 = xmm26[8],xmm20[8],xmm26[9],xmm20[9],xmm26[10],xmm20[10],xmm26[11],xmm20[11],xmm26[12],xmm20[12],xmm26[13],xmm20[13],xmm26[14],xmm20[14],xmm26[15],xmm20[15] +; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm20 = xmm16[0,1,2,3,4,4,6,5] +; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm22 = xmm16[0,1,2,3,4,6,6,7] +; AVX512BW-NEXT: vinserti32x4 $1, %xmm22, %ymm20, %ymm20 +; AVX512BW-NEXT: vmovdqa64 16(%rsi), %xmm22 +; AVX512BW-NEXT: vpshufd {{.*#+}} ymm20 = ymm20[2,1,3,3,6,5,7,7] +; AVX512BW-NEXT: vpermw %ymm16, %ymm9, %ymm16 +; AVX512BW-NEXT: vinserti64x4 $1, %ymm20, %zmm16, %zmm20 +; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm16 = xmm27[8],xmm21[8],xmm27[9],xmm21[9],xmm27[10],xmm21[10],xmm27[11],xmm21[11],xmm27[12],xmm21[12],xmm27[13],xmm21[13],xmm27[14],xmm21[14],xmm27[15],xmm21[15] +; AVX512BW-NEXT: vpmovzxwq {{.*#+}} zmm16 = xmm16[0],zero,zero,zero,xmm16[1],zero,zero,zero,xmm16[2],zero,zero,zero,xmm16[3],zero,zero,zero,xmm16[4],zero,zero,zero,xmm16[5],zero,zero,zero,xmm16[6],zero,zero,zero,xmm16[7],zero,zero,zero ; AVX512BW-NEXT: vmovdqu16 %zmm20, %zmm16 {%k2} -; AVX512BW-NEXT: vmovdqa32 %zmm17, %zmm16 {%k3} -; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm17 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3],xmm13[4],xmm12[4],xmm13[5],xmm12[5],xmm13[6],xmm12[6],xmm13[7],xmm12[7] -; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm20 = xmm17[0,1,2,3,4,4,6,5] -; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm23 = xmm17[0,1,2,3,4,6,6,7] -; AVX512BW-NEXT: vinserti32x4 $1, %xmm23, %ymm20, %ymm20 -; AVX512BW-NEXT: vpshufd {{.*#+}} ymm20 = ymm20[0,2,2,3,4,6,6,7] -; AVX512BW-NEXT: vpermw %ymm17, %ymm3, %ymm17 -; AVX512BW-NEXT: vinserti64x4 $1, %ymm20, %zmm17, %zmm17 -; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm20 = xmm26[0],xmm21[0],xmm26[1],xmm21[1],xmm26[2],xmm21[2],xmm26[3],xmm21[3],xmm26[4],xmm21[4],xmm26[5],xmm21[5],xmm26[6],xmm21[6],xmm26[7],xmm21[7] -; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm23 = xmm20[0,1,2,3,4,5,5,7] -; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm27 = xmm20[0,1,2,3,6,5,7,7] -; AVX512BW-NEXT: vinserti32x4 $1, %xmm27, %ymm23, %ymm23 -; AVX512BW-NEXT: vpshufd {{.*#+}} ymm23 = ymm23[0,2,2,3,4,6,6,7] -; AVX512BW-NEXT: vpermw %ymm20, %ymm6, %ymm20 -; AVX512BW-NEXT: vinserti64x4 $1, %ymm23, %zmm20, %zmm20 -; AVX512BW-NEXT: vmovdqu16 %zmm17, %zmm20 {%k1} -; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm17 = 
xmm24[0],xmm19[0],xmm24[1],xmm19[1],xmm24[2],xmm19[2],xmm24[3],xmm19[3],xmm24[4],xmm19[4],xmm24[5],xmm19[5],xmm24[6],xmm19[6],xmm24[7],xmm19[7] -; AVX512BW-NEXT: vpmovzxwq {{.*#+}} ymm23 = xmm17[0],zero,zero,zero,xmm17[1],zero,zero,zero,xmm17[2],zero,zero,zero,xmm17[3],zero,zero,zero -; AVX512BW-NEXT: vpshufd {{.*#+}} xmm17 = xmm17[2,3,2,3] -; AVX512BW-NEXT: vpmovzxwq {{.*#+}} ymm17 = xmm17[0],zero,zero,zero,xmm17[1],zero,zero,zero,xmm17[2],zero,zero,zero,xmm17[3],zero,zero,zero -; AVX512BW-NEXT: vinserti64x4 $1, %ymm17, %zmm23, %zmm17 -; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm23 = xmm25[0],xmm22[0],xmm25[1],xmm22[1],xmm25[2],xmm22[2],xmm25[3],xmm22[3],xmm25[4],xmm22[4],xmm25[5],xmm22[5],xmm25[6],xmm22[6],xmm25[7],xmm22[7] -; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm27 = xmm23[0,1,2,3,4,4,6,5] -; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm28 = xmm23[0,1,2,3,4,6,6,7] -; AVX512BW-NEXT: vinserti32x4 $1, %xmm28, %ymm27, %ymm27 -; AVX512BW-NEXT: vpshufd {{.*#+}} ymm27 = ymm27[2,1,3,3,6,5,7,7] -; AVX512BW-NEXT: vpermw %ymm23, %ymm11, %ymm23 -; AVX512BW-NEXT: vinserti64x4 $1, %ymm27, %zmm23, %zmm23 -; AVX512BW-NEXT: vmovdqu16 %zmm23, %zmm17 {%k2} -; AVX512BW-NEXT: vmovdqa32 %zmm20, %zmm17 {%k3} +; AVX512BW-NEXT: vmovdqa32 %zmm25, %zmm16 {%k3} +; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm20 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3],xmm13[4],xmm12[4],xmm13[5],xmm12[5],xmm13[6],xmm12[6],xmm13[7],xmm12[7] +; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm21 = xmm20[0,1,2,3,4,4,6,5] +; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm25 = xmm20[0,1,2,3,4,6,6,7] +; AVX512BW-NEXT: vinserti32x4 $1, %xmm25, %ymm21, %ymm21 +; AVX512BW-NEXT: vpshufd {{.*#+}} ymm21 = ymm21[0,2,2,3,4,6,6,7] +; AVX512BW-NEXT: vpermw %ymm20, %ymm2, %ymm20 +; AVX512BW-NEXT: vinserti64x4 $1, %ymm21, %zmm20, %zmm20 +; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm21 = xmm23[0],xmm19[0],xmm23[1],xmm19[1],xmm23[2],xmm19[2],xmm23[3],xmm19[3],xmm23[4],xmm19[4],xmm23[5],xmm19[5],xmm23[6],xmm19[6],xmm23[7],xmm19[7] +; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm25 = xmm21[0,1,2,3,4,5,5,7] +; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm26 = xmm21[0,1,2,3,6,5,7,7] +; AVX512BW-NEXT: vinserti32x4 $1, %xmm26, %ymm25, %ymm25 +; AVX512BW-NEXT: vpshufd {{.*#+}} ymm25 = ymm25[0,2,2,3,4,6,6,7] +; AVX512BW-NEXT: vpermw %ymm21, %ymm5, %ymm21 +; AVX512BW-NEXT: vinserti64x4 $1, %ymm25, %zmm21, %zmm21 +; AVX512BW-NEXT: vmovdqu16 %zmm20, %zmm21 {%k1} +; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm20 = xmm24[0],xmm17[0],xmm24[1],xmm17[1],xmm24[2],xmm17[2],xmm24[3],xmm17[3],xmm24[4],xmm17[4],xmm24[5],xmm17[5],xmm24[6],xmm17[6],xmm24[7],xmm17[7] +; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm25 = xmm20[0,1,2,3,4,4,6,5] +; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm26 = xmm20[0,1,2,3,4,6,6,7] +; AVX512BW-NEXT: vinserti32x4 $1, %xmm26, %ymm25, %ymm25 +; AVX512BW-NEXT: vmovdqa64 16(%rdi), %xmm26 +; AVX512BW-NEXT: vpshufd {{.*#+}} ymm25 = ymm25[2,1,3,3,6,5,7,7] +; AVX512BW-NEXT: vpermw %ymm20, %ymm9, %ymm20 +; AVX512BW-NEXT: vinserti64x4 $1, %ymm25, %zmm20, %zmm25 +; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm20 = xmm26[0],xmm22[0],xmm26[1],xmm22[1],xmm26[2],xmm22[2],xmm26[3],xmm22[3],xmm26[4],xmm22[4],xmm26[5],xmm22[5],xmm26[6],xmm22[6],xmm26[7],xmm22[7] +; AVX512BW-NEXT: vpmovzxwq {{.*#+}} zmm20 = xmm20[0],zero,zero,zero,xmm20[1],zero,zero,zero,xmm20[2],zero,zero,zero,xmm20[3],zero,zero,zero,xmm20[4],zero,zero,zero,xmm20[5],zero,zero,zero,xmm20[6],zero,zero,zero,xmm20[7],zero,zero,zero +; AVX512BW-NEXT: vmovdqu16 %zmm25, %zmm20 {%k2} +; AVX512BW-NEXT: vmovdqa32 %zmm21, %zmm20 {%k3} ; AVX512BW-NEXT: vpunpckhbw 
{{.*#+}} xmm12 = xmm13[8],xmm12[8],xmm13[9],xmm12[9],xmm13[10],xmm12[10],xmm13[11],xmm12[11],xmm13[12],xmm12[12],xmm13[13],xmm12[13],xmm13[14],xmm12[14],xmm13[15],xmm12[15] ; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm13 = xmm12[0,1,2,3,4,4,6,5] -; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm20 = xmm12[0,1,2,3,4,6,6,7] -; AVX512BW-NEXT: vinserti32x4 $1, %xmm20, %ymm13, %ymm13 +; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm21 = xmm12[0,1,2,3,4,6,6,7] +; AVX512BW-NEXT: vinserti32x4 $1, %xmm21, %ymm13, %ymm13 ; AVX512BW-NEXT: vpshufd {{.*#+}} ymm13 = ymm13[0,2,2,3,4,6,6,7] -; AVX512BW-NEXT: vpermw %ymm12, %ymm3, %ymm12 +; AVX512BW-NEXT: vpermw %ymm12, %ymm2, %ymm12 ; AVX512BW-NEXT: vinserti64x4 $1, %ymm13, %zmm12, %zmm12 -; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm13 = xmm26[8],xmm21[8],xmm26[9],xmm21[9],xmm26[10],xmm21[10],xmm26[11],xmm21[11],xmm26[12],xmm21[12],xmm26[13],xmm21[13],xmm26[14],xmm21[14],xmm26[15],xmm21[15] -; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm20 = xmm13[0,1,2,3,4,5,5,7] +; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm13 = xmm23[8],xmm19[8],xmm23[9],xmm19[9],xmm23[10],xmm19[10],xmm23[11],xmm19[11],xmm23[12],xmm19[12],xmm23[13],xmm19[13],xmm23[14],xmm19[14],xmm23[15],xmm19[15] +; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm19 = xmm13[0,1,2,3,4,5,5,7] ; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm21 = xmm13[0,1,2,3,6,5,7,7] -; AVX512BW-NEXT: vinserti32x4 $1, %xmm21, %ymm20, %ymm20 -; AVX512BW-NEXT: vpshufd {{.*#+}} ymm20 = ymm20[0,2,2,3,4,6,6,7] -; AVX512BW-NEXT: vpermw %ymm13, %ymm6, %ymm13 -; AVX512BW-NEXT: vinserti64x4 $1, %ymm20, %zmm13, %zmm13 +; AVX512BW-NEXT: vinserti32x4 $1, %xmm21, %ymm19, %ymm19 +; AVX512BW-NEXT: vpshufd {{.*#+}} ymm19 = ymm19[0,2,2,3,4,6,6,7] +; AVX512BW-NEXT: vpermw %ymm13, %ymm5, %ymm13 +; AVX512BW-NEXT: vinserti64x4 $1, %ymm19, %zmm13, %zmm13 ; AVX512BW-NEXT: vmovdqu16 %zmm12, %zmm13 {%k1} -; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm12 = xmm24[8],xmm19[8],xmm24[9],xmm19[9],xmm24[10],xmm19[10],xmm24[11],xmm19[11],xmm24[12],xmm19[12],xmm24[13],xmm19[13],xmm24[14],xmm19[14],xmm24[15],xmm19[15] -; AVX512BW-NEXT: vpmovzxwq {{.*#+}} ymm19 = xmm12[0],zero,zero,zero,xmm12[1],zero,zero,zero,xmm12[2],zero,zero,zero,xmm12[3],zero,zero,zero -; AVX512BW-NEXT: vpshufd {{.*#+}} xmm12 = xmm12[2,3,2,3] -; AVX512BW-NEXT: vpmovzxwq {{.*#+}} ymm12 = xmm12[0],zero,zero,zero,xmm12[1],zero,zero,zero,xmm12[2],zero,zero,zero,xmm12[3],zero,zero,zero -; AVX512BW-NEXT: vinserti64x4 $1, %ymm12, %zmm19, %zmm12 -; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm19 = xmm25[8],xmm22[8],xmm25[9],xmm22[9],xmm25[10],xmm22[10],xmm25[11],xmm22[11],xmm25[12],xmm22[12],xmm25[13],xmm22[13],xmm25[14],xmm22[14],xmm25[15],xmm22[15] -; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm20 = xmm19[0,1,2,3,4,4,6,5] -; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm21 = xmm19[0,1,2,3,4,6,6,7] -; AVX512BW-NEXT: vinserti32x4 $1, %xmm21, %ymm20, %ymm20 -; AVX512BW-NEXT: vpshufd {{.*#+}} ymm20 = ymm20[2,1,3,3,6,5,7,7] -; AVX512BW-NEXT: vpermw %ymm19, %ymm11, %ymm19 -; AVX512BW-NEXT: vinserti64x4 $1, %ymm20, %zmm19, %zmm19 -; AVX512BW-NEXT: vmovdqu16 %zmm19, %zmm12 {%k2} -; AVX512BW-NEXT: vmovdqa32 %zmm13, %zmm12 {%k3} -; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] -; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm2 = xmm0[0,1,2,3,4,4,6,5] -; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm13 = xmm0[0,1,2,3,4,6,6,7] -; AVX512BW-NEXT: vinserti128 $1, %xmm13, %ymm2, %ymm2 -; AVX512BW-NEXT: vpermw %ymm0, %ymm3, %ymm0 +; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm12 = 
xmm24[8],xmm17[8],xmm24[9],xmm17[9],xmm24[10],xmm17[10],xmm24[11],xmm17[11],xmm24[12],xmm17[12],xmm24[13],xmm17[13],xmm24[14],xmm17[14],xmm24[15],xmm17[15] +; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm17 = xmm12[0,1,2,3,4,4,6,5] +; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm19 = xmm12[0,1,2,3,4,6,6,7] +; AVX512BW-NEXT: vinserti32x4 $1, %xmm19, %ymm17, %ymm17 +; AVX512BW-NEXT: vpshufd {{.*#+}} ymm17 = ymm17[2,1,3,3,6,5,7,7] +; AVX512BW-NEXT: vpermw %ymm12, %ymm9, %ymm12 +; AVX512BW-NEXT: vinserti64x4 $1, %ymm17, %zmm12, %zmm12 +; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm17 = xmm26[8],xmm22[8],xmm26[9],xmm22[9],xmm26[10],xmm22[10],xmm26[11],xmm22[11],xmm26[12],xmm22[12],xmm26[13],xmm22[13],xmm26[14],xmm22[14],xmm26[15],xmm22[15] +; AVX512BW-NEXT: vpmovzxwq {{.*#+}} zmm17 = xmm17[0],zero,zero,zero,xmm17[1],zero,zero,zero,xmm17[2],zero,zero,zero,xmm17[3],zero,zero,zero,xmm17[4],zero,zero,zero,xmm17[5],zero,zero,zero,xmm17[6],zero,zero,zero,xmm17[7],zero,zero,zero +; AVX512BW-NEXT: vmovdqu16 %zmm12, %zmm17 {%k2} +; AVX512BW-NEXT: vmovdqa32 %zmm13, %zmm17 {%k3} +; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,4,4,6,5] +; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm12 = xmm0[0,1,2,3,4,6,6,7] +; AVX512BW-NEXT: vinserti128 $1, %xmm12, %ymm1, %ymm1 +; AVX512BW-NEXT: vpermw %ymm0, %ymm2, %ymm0 +; AVX512BW-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7] +; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7] +; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm2 = xmm1[0,1,2,3,4,5,5,7] +; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm3 = xmm1[0,1,2,3,6,5,7,7] +; AVX512BW-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2 +; AVX512BW-NEXT: vpermw %ymm1, %ymm5, %ymm1 ; AVX512BW-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[0,2,2,3,4,6,6,7] +; AVX512BW-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1 +; AVX512BW-NEXT: vmovdqu16 %zmm0, %zmm1 {%k1} +; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm8[0],xmm6[0],xmm8[1],xmm6[1],xmm8[2],xmm6[2],xmm8[3],xmm6[3],xmm8[4],xmm6[4],xmm8[5],xmm6[5],xmm8[6],xmm6[6],xmm8[7],xmm6[7] +; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm2 = xmm0[0,1,2,3,4,4,6,5] +; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm3 = xmm0[0,1,2,3,4,6,6,7] +; AVX512BW-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2 +; AVX512BW-NEXT: vpermw %ymm0, %ymm9, %ymm0 +; AVX512BW-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[2,1,3,3,6,5,7,7] ; AVX512BW-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0 -; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3],xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7] -; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm3 = xmm2[0,1,2,3,4,5,5,7] -; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm4 = xmm2[0,1,2,3,6,5,7,7] -; AVX512BW-NEXT: vinserti128 $1, %xmm4, %ymm3, %ymm3 -; AVX512BW-NEXT: vpermw %ymm2, %ymm6, %ymm2 -; AVX512BW-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[0,2,2,3,4,6,6,7] -; AVX512BW-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm2 -; AVX512BW-NEXT: vmovdqu16 %zmm0, %zmm2 {%k1} -; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3],xmm8[4],xmm7[4],xmm8[5],xmm7[5],xmm8[6],xmm7[6],xmm8[7],xmm7[7] -; AVX512BW-NEXT: vpmovzxwq {{.*#+}} ymm3 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero -; 
AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3] -; AVX512BW-NEXT: vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero -; AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm3, %zmm0 -; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3],xmm10[4],xmm9[4],xmm10[5],xmm9[5],xmm10[6],xmm9[6],xmm10[7],xmm9[7] -; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm4 = xmm3[0,1,2,3,4,4,6,5] -; AVX512BW-NEXT: vpshufhw {{.*#+}} xmm5 = xmm3[0,1,2,3,4,6,6,7] -; AVX512BW-NEXT: vinserti128 $1, %xmm5, %ymm4, %ymm4 -; AVX512BW-NEXT: vpermw %ymm3, %ymm11, %ymm3 -; AVX512BW-NEXT: vpshufd {{.*#+}} ymm4 = ymm4[2,1,3,3,6,5,7,7] -; AVX512BW-NEXT: vinserti64x4 $1, %ymm4, %zmm3, %zmm3 -; AVX512BW-NEXT: vmovdqu16 %zmm3, %zmm0 {%k2} -; AVX512BW-NEXT: vmovdqa32 %zmm2, %zmm0 {%k3} +; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm11[0],xmm10[0],xmm11[1],xmm10[1],xmm11[2],xmm10[2],xmm11[3],xmm10[3],xmm11[4],xmm10[4],xmm11[5],xmm10[5],xmm11[6],xmm10[6],xmm11[7],xmm10[7] +; AVX512BW-NEXT: vpmovzxwq {{.*#+}} zmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero +; AVX512BW-NEXT: vmovdqu16 %zmm0, %zmm2 {%k2} +; AVX512BW-NEXT: vmovdqa32 %zmm1, %zmm2 {%k3} ; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax -; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rax) -; AVX512BW-NEXT: vmovdqa64 %zmm12, 192(%rax) -; AVX512BW-NEXT: vmovdqa64 %zmm17, 128(%rax) +; AVX512BW-NEXT: vmovdqa64 %zmm2, (%rax) +; AVX512BW-NEXT: vmovdqa64 %zmm17, 192(%rax) +; AVX512BW-NEXT: vmovdqa64 %zmm20, 128(%rax) ; AVX512BW-NEXT: vmovdqa64 %zmm16, 320(%rax) ; AVX512BW-NEXT: vmovdqa64 %zmm18, 256(%rax) ; AVX512BW-NEXT: vmovdqa64 %zmm15, 448(%rax) ; AVX512BW-NEXT: vmovdqa64 %zmm14, 384(%rax) -; AVX512BW-NEXT: vmovdqa64 %zmm1, 64(%rax) +; AVX512BW-NEXT: vmovdqa64 %zmm7, 64(%rax) ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq ; @@ -8608,284 +8528,260 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec ; AVX512DQ-BW-NEXT: vmovdqa 16(%r10), %xmm12 ; AVX512DQ-BW-NEXT: vmovdqa64 32(%r10), %xmm16 ; AVX512DQ-BW-NEXT: vmovdqa 48(%r10), %xmm15 -; AVX512DQ-BW-NEXT: vmovdqa (%rax), %xmm2 +; AVX512DQ-BW-NEXT: vmovdqa (%rax), %xmm1 ; AVX512DQ-BW-NEXT: vmovdqa 16(%rax), %xmm13 ; AVX512DQ-BW-NEXT: vmovdqa64 32(%rax), %xmm17 ; AVX512DQ-BW-NEXT: vmovdqa64 48(%rax), %xmm18 -; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15] -; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm3 = xmm1[0,1,2,3,4,4,6,5] -; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm4 = xmm1[0,1,2,3,4,6,6,7] -; AVX512DQ-BW-NEXT: vinserti128 $1, %xmm4, %ymm3, %ymm3 -; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm4 = ymm3[0,2,2,3,4,6,6,7] -; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm3 = [0,0,0,0,4,5,2,1,0,2,0,2,4,5,2,3] -; AVX512DQ-BW-NEXT: vpermw %ymm1, %ymm3, %ymm1 -; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm4, %zmm1, %zmm1 -; AVX512DQ-BW-NEXT: vmovdqa (%r9), %xmm4 +; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15] +; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm2 = xmm3[0,1,2,3,4,4,6,5] +; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm4 = xmm3[0,1,2,3,4,6,6,7] +; AVX512DQ-BW-NEXT: vinserti128 $1, %xmm4, %ymm2, %ymm2 +; 
AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm4 = ymm2[0,2,2,3,4,6,6,7] +; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm2 = [0,0,0,0,4,5,2,1,0,2,0,2,4,5,2,3] +; AVX512DQ-BW-NEXT: vpermw %ymm3, %ymm2, %ymm3 +; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm4, %zmm3, %zmm6 +; AVX512DQ-BW-NEXT: vmovdqa (%r9), %xmm3 ; AVX512DQ-BW-NEXT: vmovdqa64 48(%r9), %xmm19 -; AVX512DQ-BW-NEXT: vmovdqa (%r8), %xmm5 -; AVX512DQ-BW-NEXT: vmovdqa64 48(%r8), %xmm21 -; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm7 = xmm5[8],xmm4[8],xmm5[9],xmm4[9],xmm5[10],xmm4[10],xmm5[11],xmm4[11],xmm5[12],xmm4[12],xmm5[13],xmm4[13],xmm5[14],xmm4[14],xmm5[15],xmm4[15] -; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm6 = xmm7[0,1,2,3,4,5,5,7] +; AVX512DQ-BW-NEXT: vmovdqa (%r8), %xmm4 +; AVX512DQ-BW-NEXT: vmovdqa64 48(%r8), %xmm20 +; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm7 = xmm4[8],xmm3[8],xmm4[9],xmm3[9],xmm4[10],xmm3[10],xmm4[11],xmm3[11],xmm4[12],xmm3[12],xmm4[13],xmm3[13],xmm4[14],xmm3[14],xmm4[15],xmm3[15] +; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm5 = xmm7[0,1,2,3,4,5,5,7] ; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm8 = xmm7[0,1,2,3,6,5,7,7] -; AVX512DQ-BW-NEXT: vinserti128 $1, %xmm8, %ymm6, %ymm6 -; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm8 = ymm6[0,2,2,3,4,6,6,7] -; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm6 = [0,1,0,1,4,5,1,3,2,1,2,1,4,5,3,3] -; AVX512DQ-BW-NEXT: vpermw %ymm7, %ymm6, %ymm7 +; AVX512DQ-BW-NEXT: vinserti128 $1, %xmm8, %ymm5, %ymm5 +; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm8 = ymm5[0,2,2,3,4,6,6,7] +; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm5 = [0,1,0,1,4,5,1,3,2,1,2,1,4,5,3,3] +; AVX512DQ-BW-NEXT: vpermw %ymm7, %ymm5, %ymm7 ; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm8, %zmm7, %zmm14 ; AVX512DQ-BW-NEXT: movl $-2004318072, %eax # imm = 0x88888888 ; AVX512DQ-BW-NEXT: kmovd %eax, %k1 -; AVX512DQ-BW-NEXT: vmovdqu16 %zmm1, %zmm14 {%k1} -; AVX512DQ-BW-NEXT: vmovdqa (%rsi), %xmm7 +; AVX512DQ-BW-NEXT: vmovdqu16 %zmm6, %zmm14 {%k1} +; AVX512DQ-BW-NEXT: vmovdqa (%rcx), %xmm6 +; AVX512DQ-BW-NEXT: vmovdqa64 48(%rcx), %xmm21 +; AVX512DQ-BW-NEXT: vmovdqa (%rdx), %xmm8 +; AVX512DQ-BW-NEXT: vmovdqa64 48(%rdx), %xmm23 +; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm7 = xmm8[8],xmm6[8],xmm8[9],xmm6[9],xmm8[10],xmm6[10],xmm8[11],xmm6[11],xmm8[12],xmm6[12],xmm8[13],xmm6[13],xmm8[14],xmm6[14],xmm8[15],xmm6[15] +; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm9 = xmm7[0,1,2,3,4,4,6,5] +; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm10 = xmm7[0,1,2,3,4,6,6,7] +; AVX512DQ-BW-NEXT: vinserti128 $1, %xmm10, %ymm9, %ymm9 +; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm10 = ymm9[2,1,3,3,6,5,7,7] +; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm9 = [0,0,2,1,2,1,6,7,0,2,2,3,2,3,6,7] +; AVX512DQ-BW-NEXT: vpermw %ymm7, %ymm9, %ymm7 +; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm10, %zmm7, %zmm22 +; AVX512DQ-BW-NEXT: vmovdqa (%rsi), %xmm10 ; AVX512DQ-BW-NEXT: vmovdqa64 48(%rsi), %xmm24 -; AVX512DQ-BW-NEXT: vmovdqa (%rdi), %xmm8 -; AVX512DQ-BW-NEXT: vmovdqa64 48(%rdi), %xmm26 -; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm8[8],xmm7[8],xmm8[9],xmm7[9],xmm8[10],xmm7[10],xmm8[11],xmm7[11],xmm8[12],xmm7[12],xmm8[13],xmm7[13],xmm8[14],xmm7[14],xmm8[15],xmm7[15] -; AVX512DQ-BW-NEXT: vpmovzxwq {{.*#+}} ymm9 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero -; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3] -; AVX512DQ-BW-NEXT: vpmovzxwq {{.*#+}} ymm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero -; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm1, %zmm9, %zmm1 -; AVX512DQ-BW-NEXT: vmovdqa (%rcx), %xmm9 -; AVX512DQ-BW-NEXT: 
vmovdqa64 48(%rcx), %xmm28 -; AVX512DQ-BW-NEXT: vmovdqa (%rdx), %xmm10 -; AVX512DQ-BW-NEXT: vmovdqa64 48(%rdx), %xmm30 -; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm20 = xmm10[8],xmm9[8],xmm10[9],xmm9[9],xmm10[10],xmm9[10],xmm10[11],xmm9[11],xmm10[12],xmm9[12],xmm10[13],xmm9[13],xmm10[14],xmm9[14],xmm10[15],xmm9[15] -; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm11 = xmm20[0,1,2,3,4,4,6,5] -; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm22 = xmm20[0,1,2,3,4,6,6,7] -; AVX512DQ-BW-NEXT: vinserti32x4 $1, %xmm22, %ymm11, %ymm11 -; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm22 = ymm11[2,1,3,3,6,5,7,7] -; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm11 = [0,0,2,1,2,1,6,7,0,2,2,3,2,3,6,7] -; AVX512DQ-BW-NEXT: vpermw %ymm20, %ymm11, %ymm20 -; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm22, %zmm20, %zmm20 +; AVX512DQ-BW-NEXT: vmovdqa (%rdi), %xmm11 +; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm7 = xmm11[8],xmm10[8],xmm11[9],xmm10[9],xmm11[10],xmm10[10],xmm11[11],xmm10[11],xmm11[12],xmm10[12],xmm11[13],xmm10[13],xmm11[14],xmm10[14],xmm11[15],xmm10[15] +; AVX512DQ-BW-NEXT: vpmovzxwq {{.*#+}} zmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero,xmm7[4],zero,zero,zero,xmm7[5],zero,zero,zero,xmm7[6],zero,zero,zero,xmm7[7],zero,zero,zero ; AVX512DQ-BW-NEXT: movl $572662306, %eax # imm = 0x22222222 ; AVX512DQ-BW-NEXT: kmovd %eax, %k2 -; AVX512DQ-BW-NEXT: vmovdqu16 %zmm20, %zmm1 {%k2} +; AVX512DQ-BW-NEXT: vmovdqu16 %zmm22, %zmm7 {%k2} ; AVX512DQ-BW-NEXT: movw $-21846, %ax # imm = 0xAAAA ; AVX512DQ-BW-NEXT: kmovd %eax, %k3 -; AVX512DQ-BW-NEXT: vmovdqa32 %zmm14, %zmm1 {%k3} +; AVX512DQ-BW-NEXT: vmovdqa32 %zmm14, %zmm7 {%k3} ; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm14 = xmm18[0],xmm15[0],xmm18[1],xmm15[1],xmm18[2],xmm15[2],xmm18[3],xmm15[3],xmm18[4],xmm15[4],xmm18[5],xmm15[5],xmm18[6],xmm15[6],xmm18[7],xmm15[7] -; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm20 = xmm14[0,1,2,3,4,4,6,5] -; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm22 = xmm14[0,1,2,3,4,6,6,7] -; AVX512DQ-BW-NEXT: vinserti32x4 $1, %xmm22, %ymm20, %ymm20 -; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm20 = ymm20[0,2,2,3,4,6,6,7] -; AVX512DQ-BW-NEXT: vpermw %ymm14, %ymm3, %ymm14 -; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm20, %zmm14, %zmm14 -; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm20 = xmm21[0],xmm19[0],xmm21[1],xmm19[1],xmm21[2],xmm19[2],xmm21[3],xmm19[3],xmm21[4],xmm19[4],xmm21[5],xmm19[5],xmm21[6],xmm19[6],xmm21[7],xmm19[7] -; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm22 = xmm20[0,1,2,3,4,5,5,7] -; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm23 = xmm20[0,1,2,3,6,5,7,7] -; AVX512DQ-BW-NEXT: vinserti32x4 $1, %xmm23, %ymm22, %ymm22 +; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm22 = xmm14[0,1,2,3,4,4,6,5] +; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm25 = xmm14[0,1,2,3,4,6,6,7] +; AVX512DQ-BW-NEXT: vinserti32x4 $1, %xmm25, %ymm22, %ymm22 ; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm22 = ymm22[0,2,2,3,4,6,6,7] -; AVX512DQ-BW-NEXT: vpermw %ymm20, %ymm6, %ymm20 -; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm22, %zmm20, %zmm23 -; AVX512DQ-BW-NEXT: vmovdqu16 %zmm14, %zmm23 {%k1} -; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm14 = xmm26[0],xmm24[0],xmm26[1],xmm24[1],xmm26[2],xmm24[2],xmm26[3],xmm24[3],xmm26[4],xmm24[4],xmm26[5],xmm24[5],xmm26[6],xmm24[6],xmm26[7],xmm24[7] -; AVX512DQ-BW-NEXT: vpmovzxwq {{.*#+}} ymm20 = xmm14[0],zero,zero,zero,xmm14[1],zero,zero,zero,xmm14[2],zero,zero,zero,xmm14[3],zero,zero,zero -; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} xmm14 = xmm14[2,3,2,3] -; AVX512DQ-BW-NEXT: vpmovzxwq {{.*#+}} ymm14 = 
xmm14[0],zero,zero,zero,xmm14[1],zero,zero,zero,xmm14[2],zero,zero,zero,xmm14[3],zero,zero,zero -; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm14, %zmm20, %zmm14 -; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm20 = xmm30[0],xmm28[0],xmm30[1],xmm28[1],xmm30[2],xmm28[2],xmm30[3],xmm28[3],xmm30[4],xmm28[4],xmm30[5],xmm28[5],xmm30[6],xmm28[6],xmm30[7],xmm28[7] -; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm22 = xmm20[0,1,2,3,4,4,6,5] -; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm25 = xmm20[0,1,2,3,4,6,6,7] -; AVX512DQ-BW-NEXT: vinserti32x4 $1, %xmm25, %ymm22, %ymm25 +; AVX512DQ-BW-NEXT: vpermw %ymm14, %ymm2, %ymm14 +; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm22, %zmm14, %zmm14 +; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm22 = xmm20[0],xmm19[0],xmm20[1],xmm19[1],xmm20[2],xmm19[2],xmm20[3],xmm19[3],xmm20[4],xmm19[4],xmm20[5],xmm19[5],xmm20[6],xmm19[6],xmm20[7],xmm19[7] +; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm25 = xmm22[0,1,2,3,4,5,5,7] +; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm26 = xmm22[0,1,2,3,6,5,7,7] +; AVX512DQ-BW-NEXT: vinserti32x4 $1, %xmm26, %ymm25, %ymm25 +; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm25 = ymm25[0,2,2,3,4,6,6,7] +; AVX512DQ-BW-NEXT: vpermw %ymm22, %ymm5, %ymm22 +; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm25, %zmm22, %zmm25 +; AVX512DQ-BW-NEXT: vmovdqu16 %zmm14, %zmm25 {%k1} +; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm14 = xmm23[0],xmm21[0],xmm23[1],xmm21[1],xmm23[2],xmm21[2],xmm23[3],xmm21[3],xmm23[4],xmm21[4],xmm23[5],xmm21[5],xmm23[6],xmm21[6],xmm23[7],xmm21[7] +; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm22 = xmm14[0,1,2,3,4,4,6,5] +; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm26 = xmm14[0,1,2,3,4,6,6,7] +; AVX512DQ-BW-NEXT: vinserti32x4 $1, %xmm26, %ymm22, %ymm22 +; AVX512DQ-BW-NEXT: vmovdqa64 48(%rdi), %xmm27 +; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm22 = ymm22[2,1,3,3,6,5,7,7] +; AVX512DQ-BW-NEXT: vpermw %ymm14, %ymm9, %ymm14 +; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm22, %zmm14, %zmm22 +; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm14 = xmm27[0],xmm24[0],xmm27[1],xmm24[1],xmm27[2],xmm24[2],xmm27[3],xmm24[3],xmm27[4],xmm24[4],xmm27[5],xmm24[5],xmm27[6],xmm24[6],xmm27[7],xmm24[7] +; AVX512DQ-BW-NEXT: vpmovzxwq {{.*#+}} zmm14 = xmm14[0],zero,zero,zero,xmm14[1],zero,zero,zero,xmm14[2],zero,zero,zero,xmm14[3],zero,zero,zero,xmm14[4],zero,zero,zero,xmm14[5],zero,zero,zero,xmm14[6],zero,zero,zero,xmm14[7],zero,zero,zero +; AVX512DQ-BW-NEXT: vmovdqu16 %zmm22, %zmm14 {%k2} ; AVX512DQ-BW-NEXT: vmovdqa64 32(%r9), %xmm22 -; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm25 = ymm25[2,1,3,3,6,5,7,7] -; AVX512DQ-BW-NEXT: vpermw %ymm20, %ymm11, %ymm20 -; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm25, %zmm20, %zmm20 -; AVX512DQ-BW-NEXT: vmovdqa64 32(%r8), %xmm25 -; AVX512DQ-BW-NEXT: vmovdqu16 %zmm20, %zmm14 {%k2} -; AVX512DQ-BW-NEXT: vmovdqa64 32(%rsi), %xmm20 -; AVX512DQ-BW-NEXT: vmovdqa32 %zmm23, %zmm14 {%k3} +; AVX512DQ-BW-NEXT: vmovdqa32 %zmm25, %zmm14 {%k3} ; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm15 = xmm18[8],xmm15[8],xmm18[9],xmm15[9],xmm18[10],xmm15[10],xmm18[11],xmm15[11],xmm18[12],xmm15[12],xmm18[13],xmm15[13],xmm18[14],xmm15[14],xmm18[15],xmm15[15] ; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm18 = xmm15[0,1,2,3,4,4,6,5] -; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm23 = xmm15[0,1,2,3,4,6,6,7] -; AVX512DQ-BW-NEXT: vinserti32x4 $1, %xmm23, %ymm18, %ymm18 -; AVX512DQ-BW-NEXT: vmovdqa64 32(%rdi), %xmm23 +; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm25 = xmm15[0,1,2,3,4,6,6,7] +; AVX512DQ-BW-NEXT: vinserti32x4 $1, %xmm25, %ymm18, %ymm18 +; AVX512DQ-BW-NEXT: vmovdqa64 32(%r8), %xmm25 ; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm18 = 
ymm18[0,2,2,3,4,6,6,7] -; AVX512DQ-BW-NEXT: vpermw %ymm15, %ymm3, %ymm15 +; AVX512DQ-BW-NEXT: vpermw %ymm15, %ymm2, %ymm15 ; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm18, %zmm15, %zmm15 -; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm18 = xmm21[8],xmm19[8],xmm21[9],xmm19[9],xmm21[10],xmm19[10],xmm21[11],xmm19[11],xmm21[12],xmm19[12],xmm21[13],xmm19[13],xmm21[14],xmm19[14],xmm21[15],xmm19[15] +; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm18 = xmm20[8],xmm19[8],xmm20[9],xmm19[9],xmm20[10],xmm19[10],xmm20[11],xmm19[11],xmm20[12],xmm19[12],xmm20[13],xmm19[13],xmm20[14],xmm19[14],xmm20[15],xmm19[15] ; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm19 = xmm18[0,1,2,3,4,5,5,7] -; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm21 = xmm18[0,1,2,3,6,5,7,7] -; AVX512DQ-BW-NEXT: vinserti32x4 $1, %xmm21, %ymm19, %ymm19 -; AVX512DQ-BW-NEXT: vmovdqa64 32(%rcx), %xmm27 +; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm20 = xmm18[0,1,2,3,6,5,7,7] +; AVX512DQ-BW-NEXT: vinserti32x4 $1, %xmm20, %ymm19, %ymm19 +; AVX512DQ-BW-NEXT: vmovdqa64 32(%rcx), %xmm20 ; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm19 = ymm19[0,2,2,3,4,6,6,7] -; AVX512DQ-BW-NEXT: vpermw %ymm18, %ymm6, %ymm18 +; AVX512DQ-BW-NEXT: vpermw %ymm18, %ymm5, %ymm18 ; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm19, %zmm18, %zmm18 -; AVX512DQ-BW-NEXT: vmovdqa64 32(%rdx), %xmm29 +; AVX512DQ-BW-NEXT: vmovdqa64 32(%rdx), %xmm26 ; AVX512DQ-BW-NEXT: vmovdqu16 %zmm15, %zmm18 {%k1} -; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm15 = xmm26[8],xmm24[8],xmm26[9],xmm24[9],xmm26[10],xmm24[10],xmm26[11],xmm24[11],xmm26[12],xmm24[12],xmm26[13],xmm24[13],xmm26[14],xmm24[14],xmm26[15],xmm24[15] -; AVX512DQ-BW-NEXT: vpmovzxwq {{.*#+}} ymm19 = xmm15[0],zero,zero,zero,xmm15[1],zero,zero,zero,xmm15[2],zero,zero,zero,xmm15[3],zero,zero,zero -; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} xmm15 = xmm15[2,3,2,3] -; AVX512DQ-BW-NEXT: vpmovzxwq {{.*#+}} ymm15 = xmm15[0],zero,zero,zero,xmm15[1],zero,zero,zero,xmm15[2],zero,zero,zero,xmm15[3],zero,zero,zero -; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm15, %zmm19, %zmm15 -; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm19 = xmm30[8],xmm28[8],xmm30[9],xmm28[9],xmm30[10],xmm28[10],xmm30[11],xmm28[11],xmm30[12],xmm28[12],xmm30[13],xmm28[13],xmm30[14],xmm28[14],xmm30[15],xmm28[15] -; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm21 = xmm19[0,1,2,3,4,4,6,5] -; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm24 = xmm19[0,1,2,3,4,6,6,7] -; AVX512DQ-BW-NEXT: vinserti32x4 $1, %xmm24, %ymm21, %ymm21 -; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm21 = ymm21[2,1,3,3,6,5,7,7] -; AVX512DQ-BW-NEXT: vpermw %ymm19, %ymm11, %ymm19 -; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm21, %zmm19, %zmm19 +; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm15 = xmm23[8],xmm21[8],xmm23[9],xmm21[9],xmm23[10],xmm21[10],xmm23[11],xmm21[11],xmm23[12],xmm21[12],xmm23[13],xmm21[13],xmm23[14],xmm21[14],xmm23[15],xmm21[15] +; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm19 = xmm15[0,1,2,3,4,4,6,5] +; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm21 = xmm15[0,1,2,3,4,6,6,7] +; AVX512DQ-BW-NEXT: vinserti32x4 $1, %xmm21, %ymm19, %ymm19 +; AVX512DQ-BW-NEXT: vmovdqa64 32(%rsi), %xmm21 +; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm19 = ymm19[2,1,3,3,6,5,7,7] +; AVX512DQ-BW-NEXT: vpermw %ymm15, %ymm9, %ymm15 +; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm19, %zmm15, %zmm19 +; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm15 = xmm27[8],xmm24[8],xmm27[9],xmm24[9],xmm27[10],xmm24[10],xmm27[11],xmm24[11],xmm27[12],xmm24[12],xmm27[13],xmm24[13],xmm27[14],xmm24[14],xmm27[15],xmm24[15] +; AVX512DQ-BW-NEXT: vpmovzxwq {{.*#+}} zmm15 = 
xmm15[0],zero,zero,zero,xmm15[1],zero,zero,zero,xmm15[2],zero,zero,zero,xmm15[3],zero,zero,zero,xmm15[4],zero,zero,zero,xmm15[5],zero,zero,zero,xmm15[6],zero,zero,zero,xmm15[7],zero,zero,zero ; AVX512DQ-BW-NEXT: vmovdqu16 %zmm19, %zmm15 {%k2} ; AVX512DQ-BW-NEXT: vmovdqa32 %zmm18, %zmm15 {%k3} ; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm18 = xmm17[0],xmm16[0],xmm17[1],xmm16[1],xmm17[2],xmm16[2],xmm17[3],xmm16[3],xmm17[4],xmm16[4],xmm17[5],xmm16[5],xmm17[6],xmm16[6],xmm17[7],xmm16[7] ; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm19 = xmm18[0,1,2,3,4,4,6,5] -; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm21 = xmm18[0,1,2,3,4,6,6,7] -; AVX512DQ-BW-NEXT: vinserti32x4 $1, %xmm21, %ymm19, %ymm19 +; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm23 = xmm18[0,1,2,3,4,6,6,7] +; AVX512DQ-BW-NEXT: vinserti32x4 $1, %xmm23, %ymm19, %ymm19 ; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm19 = ymm19[0,2,2,3,4,6,6,7] -; AVX512DQ-BW-NEXT: vpermw %ymm18, %ymm3, %ymm18 +; AVX512DQ-BW-NEXT: vpermw %ymm18, %ymm2, %ymm18 ; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm19, %zmm18, %zmm18 ; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm19 = xmm25[0],xmm22[0],xmm25[1],xmm22[1],xmm25[2],xmm22[2],xmm25[3],xmm22[3],xmm25[4],xmm22[4],xmm25[5],xmm22[5],xmm25[6],xmm22[6],xmm25[7],xmm22[7] -; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm21 = xmm19[0,1,2,3,4,5,5,7] +; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm23 = xmm19[0,1,2,3,4,5,5,7] ; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm24 = xmm19[0,1,2,3,6,5,7,7] -; AVX512DQ-BW-NEXT: vinserti32x4 $1, %xmm24, %ymm21, %ymm21 -; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm21 = ymm21[0,2,2,3,4,6,6,7] -; AVX512DQ-BW-NEXT: vpermw %ymm19, %ymm6, %ymm19 -; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm21, %zmm19, %zmm24 -; AVX512DQ-BW-NEXT: vmovdqu16 %zmm18, %zmm24 {%k1} -; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm18 = xmm23[0],xmm20[0],xmm23[1],xmm20[1],xmm23[2],xmm20[2],xmm23[3],xmm20[3],xmm23[4],xmm20[4],xmm23[5],xmm20[5],xmm23[6],xmm20[6],xmm23[7],xmm20[7] -; AVX512DQ-BW-NEXT: vpmovzxwq {{.*#+}} ymm19 = xmm18[0],zero,zero,zero,xmm18[1],zero,zero,zero,xmm18[2],zero,zero,zero,xmm18[3],zero,zero,zero -; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} xmm18 = xmm18[2,3,2,3] -; AVX512DQ-BW-NEXT: vpmovzxwq {{.*#+}} ymm18 = xmm18[0],zero,zero,zero,xmm18[1],zero,zero,zero,xmm18[2],zero,zero,zero,xmm18[3],zero,zero,zero -; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm18, %zmm19, %zmm18 -; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm19 = xmm29[0],xmm27[0],xmm29[1],xmm27[1],xmm29[2],xmm27[2],xmm29[3],xmm27[3],xmm29[4],xmm27[4],xmm29[5],xmm27[5],xmm29[6],xmm27[6],xmm29[7],xmm27[7] -; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm21 = xmm19[0,1,2,3,4,4,6,5] -; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm26 = xmm19[0,1,2,3,4,6,6,7] -; AVX512DQ-BW-NEXT: vinserti32x4 $1, %xmm26, %ymm21, %ymm26 -; AVX512DQ-BW-NEXT: vmovdqa64 16(%r9), %xmm21 -; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm26 = ymm26[2,1,3,3,6,5,7,7] -; AVX512DQ-BW-NEXT: vpermw %ymm19, %ymm11, %ymm19 -; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm26, %zmm19, %zmm19 -; AVX512DQ-BW-NEXT: vmovdqa64 16(%r8), %xmm26 +; AVX512DQ-BW-NEXT: vinserti32x4 $1, %xmm24, %ymm23, %ymm23 +; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm23 = ymm23[0,2,2,3,4,6,6,7] +; AVX512DQ-BW-NEXT: vpermw %ymm19, %ymm5, %ymm19 +; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm23, %zmm19, %zmm23 +; AVX512DQ-BW-NEXT: vmovdqu16 %zmm18, %zmm23 {%k1} +; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm18 = xmm26[0],xmm20[0],xmm26[1],xmm20[1],xmm26[2],xmm20[2],xmm26[3],xmm20[3],xmm26[4],xmm20[4],xmm26[5],xmm20[5],xmm26[6],xmm20[6],xmm26[7],xmm20[7] +; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm19 = 
xmm18[0,1,2,3,4,4,6,5] +; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm24 = xmm18[0,1,2,3,4,6,6,7] +; AVX512DQ-BW-NEXT: vinserti32x4 $1, %xmm24, %ymm19, %ymm19 +; AVX512DQ-BW-NEXT: vmovdqa64 32(%rdi), %xmm27 +; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm19 = ymm19[2,1,3,3,6,5,7,7] +; AVX512DQ-BW-NEXT: vpermw %ymm18, %ymm9, %ymm18 +; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm19, %zmm18, %zmm19 +; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm18 = xmm27[0],xmm21[0],xmm27[1],xmm21[1],xmm27[2],xmm21[2],xmm27[3],xmm21[3],xmm27[4],xmm21[4],xmm27[5],xmm21[5],xmm27[6],xmm21[6],xmm27[7],xmm21[7] +; AVX512DQ-BW-NEXT: vpmovzxwq {{.*#+}} zmm18 = xmm18[0],zero,zero,zero,xmm18[1],zero,zero,zero,xmm18[2],zero,zero,zero,xmm18[3],zero,zero,zero,xmm18[4],zero,zero,zero,xmm18[5],zero,zero,zero,xmm18[6],zero,zero,zero,xmm18[7],zero,zero,zero ; AVX512DQ-BW-NEXT: vmovdqu16 %zmm19, %zmm18 {%k2} -; AVX512DQ-BW-NEXT: vmovdqa64 16(%rsi), %xmm19 -; AVX512DQ-BW-NEXT: vmovdqa32 %zmm24, %zmm18 {%k3} +; AVX512DQ-BW-NEXT: vmovdqa64 16(%r9), %xmm19 +; AVX512DQ-BW-NEXT: vmovdqa32 %zmm23, %zmm18 {%k3} ; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm16 = xmm17[8],xmm16[8],xmm17[9],xmm16[9],xmm17[10],xmm16[10],xmm17[11],xmm16[11],xmm17[12],xmm16[12],xmm17[13],xmm16[13],xmm17[14],xmm16[14],xmm17[15],xmm16[15] ; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm17 = xmm16[0,1,2,3,4,4,6,5] -; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm24 = xmm16[0,1,2,3,4,6,6,7] -; AVX512DQ-BW-NEXT: vinserti32x4 $1, %xmm24, %ymm17, %ymm17 -; AVX512DQ-BW-NEXT: vmovdqa64 16(%rdi), %xmm24 +; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm23 = xmm16[0,1,2,3,4,6,6,7] +; AVX512DQ-BW-NEXT: vinserti32x4 $1, %xmm23, %ymm17, %ymm17 +; AVX512DQ-BW-NEXT: vmovdqa64 16(%r8), %xmm23 ; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm17 = ymm17[0,2,2,3,4,6,6,7] -; AVX512DQ-BW-NEXT: vpermw %ymm16, %ymm3, %ymm16 +; AVX512DQ-BW-NEXT: vpermw %ymm16, %ymm2, %ymm16 ; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm17, %zmm16, %zmm16 -; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm17 = xmm25[8],xmm22[8],xmm25[9],xmm22[9],xmm25[10],xmm22[10],xmm25[11],xmm22[11],xmm25[12],xmm22[12],xmm25[13],xmm22[13],xmm25[14],xmm22[14],xmm25[15],xmm22[15] -; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm22 = xmm17[0,1,2,3,4,5,5,7] -; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm25 = xmm17[0,1,2,3,6,5,7,7] -; AVX512DQ-BW-NEXT: vinserti32x4 $1, %xmm25, %ymm22, %ymm25 -; AVX512DQ-BW-NEXT: vmovdqa64 16(%rcx), %xmm22 -; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm25 = ymm25[0,2,2,3,4,6,6,7] -; AVX512DQ-BW-NEXT: vpermw %ymm17, %ymm6, %ymm17 -; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm25, %zmm17, %zmm17 -; AVX512DQ-BW-NEXT: vmovdqa64 16(%rdx), %xmm25 -; AVX512DQ-BW-NEXT: vmovdqu16 %zmm16, %zmm17 {%k1} -; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm16 = xmm23[8],xmm20[8],xmm23[9],xmm20[9],xmm23[10],xmm20[10],xmm23[11],xmm20[11],xmm23[12],xmm20[12],xmm23[13],xmm20[13],xmm23[14],xmm20[14],xmm23[15],xmm20[15] -; AVX512DQ-BW-NEXT: vpmovzxwq {{.*#+}} ymm20 = xmm16[0],zero,zero,zero,xmm16[1],zero,zero,zero,xmm16[2],zero,zero,zero,xmm16[3],zero,zero,zero -; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} xmm16 = xmm16[2,3,2,3] -; AVX512DQ-BW-NEXT: vpmovzxwq {{.*#+}} ymm16 = xmm16[0],zero,zero,zero,xmm16[1],zero,zero,zero,xmm16[2],zero,zero,zero,xmm16[3],zero,zero,zero -; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm16, %zmm20, %zmm16 -; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm20 = xmm29[8],xmm27[8],xmm29[9],xmm27[9],xmm29[10],xmm27[10],xmm29[11],xmm27[11],xmm29[12],xmm27[12],xmm29[13],xmm27[13],xmm29[14],xmm27[14],xmm29[15],xmm27[15] -; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm23 = xmm20[0,1,2,3,4,4,6,5] -; 
AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm27 = xmm20[0,1,2,3,4,6,6,7] -; AVX512DQ-BW-NEXT: vinserti32x4 $1, %xmm27, %ymm23, %ymm23 -; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm23 = ymm23[2,1,3,3,6,5,7,7] -; AVX512DQ-BW-NEXT: vpermw %ymm20, %ymm11, %ymm20 -; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm23, %zmm20, %zmm20 +; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm22 = xmm25[8],xmm22[8],xmm25[9],xmm22[9],xmm25[10],xmm22[10],xmm25[11],xmm22[11],xmm25[12],xmm22[12],xmm25[13],xmm22[13],xmm25[14],xmm22[14],xmm25[15],xmm22[15] +; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm17 = xmm22[0,1,2,3,4,5,5,7] +; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm24 = xmm22[0,1,2,3,6,5,7,7] +; AVX512DQ-BW-NEXT: vinserti32x4 $1, %xmm24, %ymm17, %ymm24 +; AVX512DQ-BW-NEXT: vmovdqa64 16(%rcx), %xmm17 +; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm24 = ymm24[0,2,2,3,4,6,6,7] +; AVX512DQ-BW-NEXT: vpermw %ymm22, %ymm5, %ymm22 +; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm24, %zmm22, %zmm25 +; AVX512DQ-BW-NEXT: vmovdqa64 16(%rdx), %xmm24 +; AVX512DQ-BW-NEXT: vmovdqu16 %zmm16, %zmm25 {%k1} +; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm16 = xmm26[8],xmm20[8],xmm26[9],xmm20[9],xmm26[10],xmm20[10],xmm26[11],xmm20[11],xmm26[12],xmm20[12],xmm26[13],xmm20[13],xmm26[14],xmm20[14],xmm26[15],xmm20[15] +; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm20 = xmm16[0,1,2,3,4,4,6,5] +; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm22 = xmm16[0,1,2,3,4,6,6,7] +; AVX512DQ-BW-NEXT: vinserti32x4 $1, %xmm22, %ymm20, %ymm20 +; AVX512DQ-BW-NEXT: vmovdqa64 16(%rsi), %xmm22 +; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm20 = ymm20[2,1,3,3,6,5,7,7] +; AVX512DQ-BW-NEXT: vpermw %ymm16, %ymm9, %ymm16 +; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm20, %zmm16, %zmm20 +; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm16 = xmm27[8],xmm21[8],xmm27[9],xmm21[9],xmm27[10],xmm21[10],xmm27[11],xmm21[11],xmm27[12],xmm21[12],xmm27[13],xmm21[13],xmm27[14],xmm21[14],xmm27[15],xmm21[15] +; AVX512DQ-BW-NEXT: vpmovzxwq {{.*#+}} zmm16 = xmm16[0],zero,zero,zero,xmm16[1],zero,zero,zero,xmm16[2],zero,zero,zero,xmm16[3],zero,zero,zero,xmm16[4],zero,zero,zero,xmm16[5],zero,zero,zero,xmm16[6],zero,zero,zero,xmm16[7],zero,zero,zero ; AVX512DQ-BW-NEXT: vmovdqu16 %zmm20, %zmm16 {%k2} -; AVX512DQ-BW-NEXT: vmovdqa32 %zmm17, %zmm16 {%k3} -; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm17 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3],xmm13[4],xmm12[4],xmm13[5],xmm12[5],xmm13[6],xmm12[6],xmm13[7],xmm12[7] -; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm20 = xmm17[0,1,2,3,4,4,6,5] -; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm23 = xmm17[0,1,2,3,4,6,6,7] -; AVX512DQ-BW-NEXT: vinserti32x4 $1, %xmm23, %ymm20, %ymm20 -; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm20 = ymm20[0,2,2,3,4,6,6,7] -; AVX512DQ-BW-NEXT: vpermw %ymm17, %ymm3, %ymm17 -; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm20, %zmm17, %zmm17 -; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm20 = xmm26[0],xmm21[0],xmm26[1],xmm21[1],xmm26[2],xmm21[2],xmm26[3],xmm21[3],xmm26[4],xmm21[4],xmm26[5],xmm21[5],xmm26[6],xmm21[6],xmm26[7],xmm21[7] -; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm23 = xmm20[0,1,2,3,4,5,5,7] -; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm27 = xmm20[0,1,2,3,6,5,7,7] -; AVX512DQ-BW-NEXT: vinserti32x4 $1, %xmm27, %ymm23, %ymm23 -; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm23 = ymm23[0,2,2,3,4,6,6,7] -; AVX512DQ-BW-NEXT: vpermw %ymm20, %ymm6, %ymm20 -; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm23, %zmm20, %zmm20 -; AVX512DQ-BW-NEXT: vmovdqu16 %zmm17, %zmm20 {%k1} -; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm17 = 
xmm24[0],xmm19[0],xmm24[1],xmm19[1],xmm24[2],xmm19[2],xmm24[3],xmm19[3],xmm24[4],xmm19[4],xmm24[5],xmm19[5],xmm24[6],xmm19[6],xmm24[7],xmm19[7] -; AVX512DQ-BW-NEXT: vpmovzxwq {{.*#+}} ymm23 = xmm17[0],zero,zero,zero,xmm17[1],zero,zero,zero,xmm17[2],zero,zero,zero,xmm17[3],zero,zero,zero -; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} xmm17 = xmm17[2,3,2,3] -; AVX512DQ-BW-NEXT: vpmovzxwq {{.*#+}} ymm17 = xmm17[0],zero,zero,zero,xmm17[1],zero,zero,zero,xmm17[2],zero,zero,zero,xmm17[3],zero,zero,zero -; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm17, %zmm23, %zmm17 -; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm23 = xmm25[0],xmm22[0],xmm25[1],xmm22[1],xmm25[2],xmm22[2],xmm25[3],xmm22[3],xmm25[4],xmm22[4],xmm25[5],xmm22[5],xmm25[6],xmm22[6],xmm25[7],xmm22[7] -; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm27 = xmm23[0,1,2,3,4,4,6,5] -; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm28 = xmm23[0,1,2,3,4,6,6,7] -; AVX512DQ-BW-NEXT: vinserti32x4 $1, %xmm28, %ymm27, %ymm27 -; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm27 = ymm27[2,1,3,3,6,5,7,7] -; AVX512DQ-BW-NEXT: vpermw %ymm23, %ymm11, %ymm23 -; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm27, %zmm23, %zmm23 -; AVX512DQ-BW-NEXT: vmovdqu16 %zmm23, %zmm17 {%k2} -; AVX512DQ-BW-NEXT: vmovdqa32 %zmm20, %zmm17 {%k3} +; AVX512DQ-BW-NEXT: vmovdqa32 %zmm25, %zmm16 {%k3} +; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm20 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3],xmm13[4],xmm12[4],xmm13[5],xmm12[5],xmm13[6],xmm12[6],xmm13[7],xmm12[7] +; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm21 = xmm20[0,1,2,3,4,4,6,5] +; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm25 = xmm20[0,1,2,3,4,6,6,7] +; AVX512DQ-BW-NEXT: vinserti32x4 $1, %xmm25, %ymm21, %ymm21 +; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm21 = ymm21[0,2,2,3,4,6,6,7] +; AVX512DQ-BW-NEXT: vpermw %ymm20, %ymm2, %ymm20 +; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm21, %zmm20, %zmm20 +; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm21 = xmm23[0],xmm19[0],xmm23[1],xmm19[1],xmm23[2],xmm19[2],xmm23[3],xmm19[3],xmm23[4],xmm19[4],xmm23[5],xmm19[5],xmm23[6],xmm19[6],xmm23[7],xmm19[7] +; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm25 = xmm21[0,1,2,3,4,5,5,7] +; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm26 = xmm21[0,1,2,3,6,5,7,7] +; AVX512DQ-BW-NEXT: vinserti32x4 $1, %xmm26, %ymm25, %ymm25 +; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm25 = ymm25[0,2,2,3,4,6,6,7] +; AVX512DQ-BW-NEXT: vpermw %ymm21, %ymm5, %ymm21 +; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm25, %zmm21, %zmm21 +; AVX512DQ-BW-NEXT: vmovdqu16 %zmm20, %zmm21 {%k1} +; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm20 = xmm24[0],xmm17[0],xmm24[1],xmm17[1],xmm24[2],xmm17[2],xmm24[3],xmm17[3],xmm24[4],xmm17[4],xmm24[5],xmm17[5],xmm24[6],xmm17[6],xmm24[7],xmm17[7] +; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm25 = xmm20[0,1,2,3,4,4,6,5] +; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm26 = xmm20[0,1,2,3,4,6,6,7] +; AVX512DQ-BW-NEXT: vinserti32x4 $1, %xmm26, %ymm25, %ymm25 +; AVX512DQ-BW-NEXT: vmovdqa64 16(%rdi), %xmm26 +; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm25 = ymm25[2,1,3,3,6,5,7,7] +; AVX512DQ-BW-NEXT: vpermw %ymm20, %ymm9, %ymm20 +; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm25, %zmm20, %zmm25 +; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm20 = xmm26[0],xmm22[0],xmm26[1],xmm22[1],xmm26[2],xmm22[2],xmm26[3],xmm22[3],xmm26[4],xmm22[4],xmm26[5],xmm22[5],xmm26[6],xmm22[6],xmm26[7],xmm22[7] +; AVX512DQ-BW-NEXT: vpmovzxwq {{.*#+}} zmm20 = xmm20[0],zero,zero,zero,xmm20[1],zero,zero,zero,xmm20[2],zero,zero,zero,xmm20[3],zero,zero,zero,xmm20[4],zero,zero,zero,xmm20[5],zero,zero,zero,xmm20[6],zero,zero,zero,xmm20[7],zero,zero,zero +; 
AVX512DQ-BW-NEXT: vmovdqu16 %zmm25, %zmm20 {%k2} +; AVX512DQ-BW-NEXT: vmovdqa32 %zmm21, %zmm20 {%k3} ; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm12 = xmm13[8],xmm12[8],xmm13[9],xmm12[9],xmm13[10],xmm12[10],xmm13[11],xmm12[11],xmm13[12],xmm12[12],xmm13[13],xmm12[13],xmm13[14],xmm12[14],xmm13[15],xmm12[15] ; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm13 = xmm12[0,1,2,3,4,4,6,5] -; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm20 = xmm12[0,1,2,3,4,6,6,7] -; AVX512DQ-BW-NEXT: vinserti32x4 $1, %xmm20, %ymm13, %ymm13 +; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm21 = xmm12[0,1,2,3,4,6,6,7] +; AVX512DQ-BW-NEXT: vinserti32x4 $1, %xmm21, %ymm13, %ymm13 ; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm13 = ymm13[0,2,2,3,4,6,6,7] -; AVX512DQ-BW-NEXT: vpermw %ymm12, %ymm3, %ymm12 +; AVX512DQ-BW-NEXT: vpermw %ymm12, %ymm2, %ymm12 ; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm13, %zmm12, %zmm12 -; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm13 = xmm26[8],xmm21[8],xmm26[9],xmm21[9],xmm26[10],xmm21[10],xmm26[11],xmm21[11],xmm26[12],xmm21[12],xmm26[13],xmm21[13],xmm26[14],xmm21[14],xmm26[15],xmm21[15] -; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm20 = xmm13[0,1,2,3,4,5,5,7] +; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm13 = xmm23[8],xmm19[8],xmm23[9],xmm19[9],xmm23[10],xmm19[10],xmm23[11],xmm19[11],xmm23[12],xmm19[12],xmm23[13],xmm19[13],xmm23[14],xmm19[14],xmm23[15],xmm19[15] +; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm19 = xmm13[0,1,2,3,4,5,5,7] ; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm21 = xmm13[0,1,2,3,6,5,7,7] -; AVX512DQ-BW-NEXT: vinserti32x4 $1, %xmm21, %ymm20, %ymm20 -; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm20 = ymm20[0,2,2,3,4,6,6,7] -; AVX512DQ-BW-NEXT: vpermw %ymm13, %ymm6, %ymm13 -; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm20, %zmm13, %zmm13 +; AVX512DQ-BW-NEXT: vinserti32x4 $1, %xmm21, %ymm19, %ymm19 +; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm19 = ymm19[0,2,2,3,4,6,6,7] +; AVX512DQ-BW-NEXT: vpermw %ymm13, %ymm5, %ymm13 +; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm19, %zmm13, %zmm13 ; AVX512DQ-BW-NEXT: vmovdqu16 %zmm12, %zmm13 {%k1} -; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm12 = xmm24[8],xmm19[8],xmm24[9],xmm19[9],xmm24[10],xmm19[10],xmm24[11],xmm19[11],xmm24[12],xmm19[12],xmm24[13],xmm19[13],xmm24[14],xmm19[14],xmm24[15],xmm19[15] -; AVX512DQ-BW-NEXT: vpmovzxwq {{.*#+}} ymm19 = xmm12[0],zero,zero,zero,xmm12[1],zero,zero,zero,xmm12[2],zero,zero,zero,xmm12[3],zero,zero,zero -; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} xmm12 = xmm12[2,3,2,3] -; AVX512DQ-BW-NEXT: vpmovzxwq {{.*#+}} ymm12 = xmm12[0],zero,zero,zero,xmm12[1],zero,zero,zero,xmm12[2],zero,zero,zero,xmm12[3],zero,zero,zero -; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm12, %zmm19, %zmm12 -; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm19 = xmm25[8],xmm22[8],xmm25[9],xmm22[9],xmm25[10],xmm22[10],xmm25[11],xmm22[11],xmm25[12],xmm22[12],xmm25[13],xmm22[13],xmm25[14],xmm22[14],xmm25[15],xmm22[15] -; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm20 = xmm19[0,1,2,3,4,4,6,5] -; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm21 = xmm19[0,1,2,3,4,6,6,7] -; AVX512DQ-BW-NEXT: vinserti32x4 $1, %xmm21, %ymm20, %ymm20 -; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm20 = ymm20[2,1,3,3,6,5,7,7] -; AVX512DQ-BW-NEXT: vpermw %ymm19, %ymm11, %ymm19 -; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm20, %zmm19, %zmm19 -; AVX512DQ-BW-NEXT: vmovdqu16 %zmm19, %zmm12 {%k2} -; AVX512DQ-BW-NEXT: vmovdqa32 %zmm13, %zmm12 {%k3} -; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] -; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm2 = 
xmm0[0,1,2,3,4,4,6,5] -; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm13 = xmm0[0,1,2,3,4,6,6,7] -; AVX512DQ-BW-NEXT: vinserti128 $1, %xmm13, %ymm2, %ymm2 -; AVX512DQ-BW-NEXT: vpermw %ymm0, %ymm3, %ymm0 +; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm12 = xmm24[8],xmm17[8],xmm24[9],xmm17[9],xmm24[10],xmm17[10],xmm24[11],xmm17[11],xmm24[12],xmm17[12],xmm24[13],xmm17[13],xmm24[14],xmm17[14],xmm24[15],xmm17[15] +; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm17 = xmm12[0,1,2,3,4,4,6,5] +; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm19 = xmm12[0,1,2,3,4,6,6,7] +; AVX512DQ-BW-NEXT: vinserti32x4 $1, %xmm19, %ymm17, %ymm17 +; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm17 = ymm17[2,1,3,3,6,5,7,7] +; AVX512DQ-BW-NEXT: vpermw %ymm12, %ymm9, %ymm12 +; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm17, %zmm12, %zmm12 +; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm17 = xmm26[8],xmm22[8],xmm26[9],xmm22[9],xmm26[10],xmm22[10],xmm26[11],xmm22[11],xmm26[12],xmm22[12],xmm26[13],xmm22[13],xmm26[14],xmm22[14],xmm26[15],xmm22[15] +; AVX512DQ-BW-NEXT: vpmovzxwq {{.*#+}} zmm17 = xmm17[0],zero,zero,zero,xmm17[1],zero,zero,zero,xmm17[2],zero,zero,zero,xmm17[3],zero,zero,zero,xmm17[4],zero,zero,zero,xmm17[5],zero,zero,zero,xmm17[6],zero,zero,zero,xmm17[7],zero,zero,zero +; AVX512DQ-BW-NEXT: vmovdqu16 %zmm12, %zmm17 {%k2} +; AVX512DQ-BW-NEXT: vmovdqa32 %zmm13, %zmm17 {%k3} +; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,4,4,6,5] +; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm12 = xmm0[0,1,2,3,4,6,6,7] +; AVX512DQ-BW-NEXT: vinserti128 $1, %xmm12, %ymm1, %ymm1 +; AVX512DQ-BW-NEXT: vpermw %ymm0, %ymm2, %ymm0 +; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7] +; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 +; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7] +; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm2 = xmm1[0,1,2,3,4,5,5,7] +; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm3 = xmm1[0,1,2,3,6,5,7,7] +; AVX512DQ-BW-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2 +; AVX512DQ-BW-NEXT: vpermw %ymm1, %ymm5, %ymm1 ; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[0,2,2,3,4,6,6,7] +; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1 +; AVX512DQ-BW-NEXT: vmovdqu16 %zmm0, %zmm1 {%k1} +; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm8[0],xmm6[0],xmm8[1],xmm6[1],xmm8[2],xmm6[2],xmm8[3],xmm6[3],xmm8[4],xmm6[4],xmm8[5],xmm6[5],xmm8[6],xmm6[6],xmm8[7],xmm6[7] +; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm2 = xmm0[0,1,2,3,4,4,6,5] +; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm3 = xmm0[0,1,2,3,4,6,6,7] +; AVX512DQ-BW-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2 +; AVX512DQ-BW-NEXT: vpermw %ymm0, %ymm9, %ymm0 +; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[2,1,3,3,6,5,7,7] ; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0 -; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3],xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7] -; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm3 = xmm2[0,1,2,3,4,5,5,7] -; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm4 = xmm2[0,1,2,3,6,5,7,7] -; AVX512DQ-BW-NEXT: vinserti128 $1, %xmm4, %ymm3, %ymm3 -; AVX512DQ-BW-NEXT: vpermw %ymm2, %ymm6, %ymm2 -; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[0,2,2,3,4,6,6,7] -; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm2 -; 
AVX512DQ-BW-NEXT: vmovdqu16 %zmm0, %zmm2 {%k1} -; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3],xmm8[4],xmm7[4],xmm8[5],xmm7[5],xmm8[6],xmm7[6],xmm8[7],xmm7[7] -; AVX512DQ-BW-NEXT: vpmovzxwq {{.*#+}} ymm3 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero -; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3] -; AVX512DQ-BW-NEXT: vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero -; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm0, %zmm3, %zmm0 -; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3],xmm10[4],xmm9[4],xmm10[5],xmm9[5],xmm10[6],xmm9[6],xmm10[7],xmm9[7] -; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm4 = xmm3[0,1,2,3,4,4,6,5] -; AVX512DQ-BW-NEXT: vpshufhw {{.*#+}} xmm5 = xmm3[0,1,2,3,4,6,6,7] -; AVX512DQ-BW-NEXT: vinserti128 $1, %xmm5, %ymm4, %ymm4 -; AVX512DQ-BW-NEXT: vpermw %ymm3, %ymm11, %ymm3 -; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm4 = ymm4[2,1,3,3,6,5,7,7] -; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm4, %zmm3, %zmm3 -; AVX512DQ-BW-NEXT: vmovdqu16 %zmm3, %zmm0 {%k2} -; AVX512DQ-BW-NEXT: vmovdqa32 %zmm2, %zmm0 {%k3} +; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm11[0],xmm10[0],xmm11[1],xmm10[1],xmm11[2],xmm10[2],xmm11[3],xmm10[3],xmm11[4],xmm10[4],xmm11[5],xmm10[5],xmm11[6],xmm10[6],xmm11[7],xmm10[7] +; AVX512DQ-BW-NEXT: vpmovzxwq {{.*#+}} zmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero +; AVX512DQ-BW-NEXT: vmovdqu16 %zmm0, %zmm2 {%k2} +; AVX512DQ-BW-NEXT: vmovdqa32 %zmm1, %zmm2 {%k3} ; AVX512DQ-BW-NEXT: movq {{[0-9]+}}(%rsp), %rax -; AVX512DQ-BW-NEXT: vmovdqa64 %zmm0, (%rax) -; AVX512DQ-BW-NEXT: vmovdqa64 %zmm12, 192(%rax) -; AVX512DQ-BW-NEXT: vmovdqa64 %zmm17, 128(%rax) +; AVX512DQ-BW-NEXT: vmovdqa64 %zmm2, (%rax) +; AVX512DQ-BW-NEXT: vmovdqa64 %zmm17, 192(%rax) +; AVX512DQ-BW-NEXT: vmovdqa64 %zmm20, 128(%rax) ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm16, 320(%rax) ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm18, 256(%rax) ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm15, 448(%rax) ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm14, 384(%rax) -; AVX512DQ-BW-NEXT: vmovdqa64 %zmm1, 64(%rax) +; AVX512DQ-BW-NEXT: vmovdqa64 %zmm7, 64(%rax) ; AVX512DQ-BW-NEXT: vzeroupper ; AVX512DQ-BW-NEXT: retq ; diff --git a/llvm/test/CodeGen/X86/vector-partial-undef.ll b/llvm/test/CodeGen/X86/vector-partial-undef.ll index fd41fd53e3be1..4753dba2d468f 100644 --- a/llvm/test/CodeGen/X86/vector-partial-undef.ll +++ b/llvm/test/CodeGen/X86/vector-partial-undef.ll @@ -151,9 +151,9 @@ define <8 x i32> @xor_undef_elts_alt(<4 x i32> %x) { ; AVX: # %bb.0: ; AVX-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 -; AVX-NEXT: vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 ; AVX-NEXT: vmovaps {{.*#+}} ymm1 = [6,1,5,4,3,2,0,7] ; AVX-NEXT: vpermps %ymm0, %ymm1, %ymm0 +; AVX-NEXT: vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 ; AVX-NEXT: retq %extend = shufflevector <4 x i32> %x, <4 x i32> undef, <8 x i32> %bogus_bo = xor <8 x i32> %extend, diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining.ll index 07c770abc65d6..05b0a7c10b4e1 100644 --- a/llvm/test/CodeGen/X86/vector-shuffle-combining.ll +++ b/llvm/test/CodeGen/X86/vector-shuffle-combining.ll @@ -2469,10 +2469,10 @@ define 
<8 x i32> @combine_unneeded_subvector1(<8 x i32> %a) { ; ; AVX2-FAST-ALL-LABEL: combine_unneeded_subvector1: ; AVX2-FAST-ALL: # %bb.0: -; AVX2-FAST-ALL-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 ; AVX2-FAST-ALL-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [7,6,5,4,7,6,5,4] ; AVX2-FAST-ALL-NEXT: # ymm1 = mem[0,1,0,1] ; AVX2-FAST-ALL-NEXT: vpermd %ymm0, %ymm1, %ymm0 +; AVX2-FAST-ALL-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 ; AVX2-FAST-ALL-NEXT: retq ; ; AVX2-FAST-PERLANE-LABEL: combine_unneeded_subvector1: diff --git a/llvm/test/Transforms/DeadStoreElimination/OverwriteStoreBegin.ll b/llvm/test/Transforms/DeadStoreElimination/OverwriteStoreBegin.ll index bc1756f6ca9d1..43fbcfcc600c6 100644 --- a/llvm/test/Transforms/DeadStoreElimination/OverwriteStoreBegin.ll +++ b/llvm/test/Transforms/DeadStoreElimination/OverwriteStoreBegin.ll @@ -402,3 +402,33 @@ entry: store i64 1, ptr %p, align 1 ret void } + +; Verify that we adjust/drop the dereferenceable attribute. +define void @dereferenceable(ptr nocapture %p) { +; CHECK-LABEL: @dereferenceable( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[P:%.*]], i64 4 +; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[TMP0]], i8 0, i64 24, i1 false) +; CHECK-NEXT: store i32 1, ptr [[P]], align 4 +; CHECK-NEXT: ret void +; +entry: + call void @llvm.memset.p0.i64(ptr dereferenceable(28) align 4 %p, i8 0, i64 28, i1 false) + store i32 1, ptr %p, align 4 + ret void +} + +; Verify that we adjust/drop the dereferenceable_or_null attribute. +define void @dereferenceable_or_null(ptr nocapture %p) { +; CHECK-LABEL: @dereferenceable_or_null( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[P:%.*]], i64 8 +; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[TMP0]], i8 0, i64 20, i1 false) +; CHECK-NEXT: store i64 1, ptr [[P]], align 4 +; CHECK-NEXT: ret void +; +entry: + call void @llvm.memset.p0.i64(ptr dereferenceable_or_null(28) align 4 %p, i8 0, i64 28, i1 false) + store i64 1, ptr %p, align 4 + ret void +} diff --git a/llvm/test/Transforms/FunctionAttrs/make-buffer-rsrc.ll b/llvm/test/Transforms/FunctionAttrs/make-buffer-rsrc.ll index 59ec2d47bc72c..9ef153183cc9e 100644 --- a/llvm/test/Transforms/FunctionAttrs/make-buffer-rsrc.ll +++ b/llvm/test/Transforms/FunctionAttrs/make-buffer-rsrc.ll @@ -9,8 +9,8 @@ define amdgpu_kernel void @test_make_buffer_rsrc(ptr %p, ptr %q) { ; FNATTRS: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite) ; FNATTRS-LABEL: define {{[^@]+}}@test_make_buffer_rsrc ; FNATTRS-SAME: (ptr readonly captures(none) [[P:%.*]], ptr writeonly captures(none) [[Q:%.*]]) #[[ATTR0:[0-9]+]] { -; FNATTRS-NEXT: [[P_RSRC:%.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p0(ptr [[P]], i16 0, i32 4, i32 822243328) -; FNATTRS-NEXT: [[Q_RSRC:%.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p0(ptr [[Q]], i16 0, i32 4, i32 822243328) +; FNATTRS-NEXT: [[P_RSRC:%.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr [[P]], i16 0, i32 4, i32 822243328) +; FNATTRS-NEXT: [[Q_RSRC:%.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr [[Q]], i16 0, i32 4, i32 822243328) ; FNATTRS-NEXT: [[V:%.*]] = call i8 @llvm.amdgcn.raw.ptr.buffer.load.i8(ptr addrspace(8) [[P_RSRC]], i32 0, i32 0, i32 0) ; FNATTRS-NEXT: call void @llvm.amdgcn.raw.ptr.buffer.store.i8(i8 [[V]], ptr addrspace(8) [[Q_RSRC]], i32 0, i32 0, i32 0) ; FNATTRS-NEXT: ret void @@ -18,21 +18,21 @@ define 
amdgpu_kernel void @test_make_buffer_rsrc(ptr %p, ptr %q) { ; ATTRIBUTOR: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite) ; ATTRIBUTOR-LABEL: define {{[^@]+}}@test_make_buffer_rsrc ; ATTRIBUTOR-SAME: (ptr nofree readonly captures(none) [[P:%.*]], ptr nofree writeonly captures(none) [[Q:%.*]]) #[[ATTR0:[0-9]+]] { -; ATTRIBUTOR-NEXT: [[P_RSRC:%.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p0(ptr [[P]], i16 0, i32 4, i32 822243328) #[[ATTR4:[0-9]+]] -; ATTRIBUTOR-NEXT: [[Q_RSRC:%.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p0(ptr [[Q]], i16 0, i32 4, i32 822243328) #[[ATTR4]] +; ATTRIBUTOR-NEXT: [[P_RSRC:%.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr [[P]], i16 0, i32 4, i32 822243328) #[[ATTR4:[0-9]+]] +; ATTRIBUTOR-NEXT: [[Q_RSRC:%.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr [[Q]], i16 0, i32 4, i32 822243328) #[[ATTR4]] ; ATTRIBUTOR-NEXT: [[V:%.*]] = call i8 @llvm.amdgcn.raw.ptr.buffer.load.i8(ptr addrspace(8) readonly captures(none) [[P_RSRC]], i32 0, i32 0, i32 0) #[[ATTR5:[0-9]+]] ; ATTRIBUTOR-NEXT: call void @llvm.amdgcn.raw.ptr.buffer.store.i8(i8 [[V]], ptr addrspace(8) writeonly captures(none) [[Q_RSRC]], i32 0, i32 0, i32 0) #[[ATTR6:[0-9]+]] ; ATTRIBUTOR-NEXT: ret void ; - %p.rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p0(ptr %p, i16 0, i32 4, i32 822243328) - %q.rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p0(ptr %q, i16 0, i32 4, i32 822243328) + %p.rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %p, i16 0, i32 4, i32 822243328) + %q.rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %q, i16 0, i32 4, i32 822243328) %v = call i8 @llvm.amdgcn.raw.ptr.buffer.load.i8(ptr addrspace(8) %p.rsrc, i32 0, i32 0, i32 0) call void @llvm.amdgcn.raw.ptr.buffer.store.i8(i8 %v, ptr addrspace(8) %q.rsrc, i32 0, i32 0, i32 0) ret void } ; Function Attrs: mustprogress nocallback nofree nosync nounwind speculatable willreturn memory(none) -declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p0(ptr readnone, i16, i32, i32) #0 +declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr readnone, i16, i32, i32) #0 ; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(argmem: read) declare i8 @llvm.amdgcn.raw.ptr.buffer.load.i8(ptr addrspace(8) nocapture readonly, i32, i32, i32 immarg) #1 diff --git a/llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-opts-counting-elems.ll b/llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-opts-counting-elems.ll index 4e7e9eeb7250b..46ca99f4bb27b 100644 --- a/llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-opts-counting-elems.ll +++ b/llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-opts-counting-elems.ll @@ -240,6 +240,23 @@ define i64 @cntd_all() { } +define i64 @udiv() vscale_range(1, 16) { +; CHECK-LABEL: @udiv( +; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[A:%.*]] = shl nuw nsw i64 [[TMP1]], 4 +; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[B:%.*]] = shl nuw nsw i64 [[TMP2]], 2 +; CHECK-NEXT: [[TMP3:%.*]] = call range(i64 2, 65) i64 @llvm.cttz.i64(i64 [[B]], i1 true) +; CHECK-NEXT: [[C1:%.*]] = lshr i64 [[A]], [[TMP3]] +; CHECK-NEXT: ret i64 [[C1]] +; + %a = call i64 @llvm.aarch64.sve.cntb(i32 31) + %b = call i64 @llvm.aarch64.sve.cntw(i32 31) + %c = udiv i64 %a, %b + ret i64 %c +} + + declare i64 @llvm.aarch64.sve.cntb(i32 %pattern) declare i64 
@llvm.aarch64.sve.cnth(i32 %pattern) declare i64 @llvm.aarch64.sve.cntw(i32 %pattern) diff --git a/llvm/test/Transforms/InstCombine/select-icmp-and.ll b/llvm/test/Transforms/InstCombine/select-icmp-and.ll index 516a1e8496b43..1218799ab3dc5 100644 --- a/llvm/test/Transforms/InstCombine/select-icmp-and.ll +++ b/llvm/test/Transforms/InstCombine/select-icmp-and.ll @@ -794,3 +794,112 @@ define i32 @select_bittest_to_shl_negative_test(i32 %x) { %res = add nuw nsw i32 %y, 2 ret i32 %res } + +define i8 @select_bittest_to_xor(i8 %x) { +; CHECK-LABEL: @select_bittest_to_xor( +; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i8 [[X:%.*]], -1 +; CHECK-NEXT: call void @use1(i1 [[CMP]]) +; CHECK-NEXT: [[MASKSEL:%.*]] = xor i8 [[X]], -128 +; CHECK-NEXT: ret i8 [[MASKSEL]] +; + %cmp = icmp sgt i8 %x, -1 + call void @use1(i1 %cmp) + %and = and i8 %x, 127 + %or = or i8 %x, -128 + %masksel = select i1 %cmp, i8 %or, i8 %and + ret i8 %masksel +} + +define i8 @select_trunc_bittest_to_sub(i8 %x) { +; CHECK-LABEL: @select_trunc_bittest_to_sub( +; CHECK-NEXT: [[TRUNC:%.*]] = trunc i8 [[X:%.*]] to i1 +; CHECK-NEXT: [[RET:%.*]] = select i1 [[TRUNC]], i8 3, i8 4 +; CHECK-NEXT: ret i8 [[RET]] +; + %trunc = trunc i8 %x to i1 + %ret = select i1 %trunc, i8 3, i8 4 + ret i8 %ret +} + +define i8 @select_trunc_nuw_bittest_to_sub(i8 %x) { +; CHECK-LABEL: @select_trunc_nuw_bittest_to_sub( +; CHECK-NEXT: [[TRUNC:%.*]] = trunc nuw i8 [[X:%.*]] to i1 +; CHECK-NEXT: [[RET:%.*]] = select i1 [[TRUNC]], i8 3, i8 4 +; CHECK-NEXT: ret i8 [[RET]] +; + %trunc = trunc nuw i8 %x to i1 + %ret = select i1 %trunc, i8 3, i8 4 + ret i8 %ret +} + +define i8 @select_trunc_nsw_bittest_to_sub(i8 %x) { +; CHECK-LABEL: @select_trunc_nsw_bittest_to_sub( +; CHECK-NEXT: [[TRUNC:%.*]] = trunc nsw i8 [[X:%.*]] to i1 +; CHECK-NEXT: [[RET:%.*]] = select i1 [[TRUNC]], i8 3, i8 4 +; CHECK-NEXT: ret i8 [[RET]] +; + %trunc = trunc nsw i8 %x to i1 + %ret = select i1 %trunc, i8 3, i8 4 + ret i8 %ret +} + +define i8 @select_trunc_nuw_bittest_to_sub_extra_use(i8 %x) { +; CHECK-LABEL: @select_trunc_nuw_bittest_to_sub_extra_use( +; CHECK-NEXT: [[TRUNC:%.*]] = trunc nuw i8 [[X:%.*]] to i1 +; CHECK-NEXT: call void @use1(i1 [[TRUNC]]) +; CHECK-NEXT: [[RET:%.*]] = select i1 [[TRUNC]], i8 3, i8 4 +; CHECK-NEXT: ret i8 [[RET]] +; + %trunc = trunc nuw i8 %x to i1 + call void @use1(i1 %trunc) + %ret = select i1 %trunc, i8 3, i8 4 + ret i8 %ret +} + +define i8 @neg_select_trunc_bittest_to_sub_extra_use(i8 %x) { +; CHECK-LABEL: @neg_select_trunc_bittest_to_sub_extra_use( +; CHECK-NEXT: [[TRUNC:%.*]] = trunc i8 [[X:%.*]] to i1 +; CHECK-NEXT: call void @use1(i1 [[TRUNC]]) +; CHECK-NEXT: [[RET:%.*]] = select i1 [[TRUNC]], i8 3, i8 4 +; CHECK-NEXT: ret i8 [[RET]] +; + %trunc = trunc i8 %x to i1 + call void @use1(i1 %trunc) + %ret = select i1 %trunc, i8 3, i8 4 + ret i8 %ret +} + +define i8 @select_trunc_nuw_bittest_to_shl_not(i8 %x) { +; CHECK-LABEL: @select_trunc_nuw_bittest_to_shl_not( +; CHECK-NEXT: [[TRUNC:%.*]] = trunc nuw i8 [[X:%.*]] to i1 +; CHECK-NEXT: [[RET:%.*]] = select i1 [[TRUNC]], i8 0, i8 4 +; CHECK-NEXT: ret i8 [[RET]] +; + %trunc = trunc nuw i8 %x to i1 + %ret = select i1 %trunc, i8 0, i8 4 + ret i8 %ret +} + +define i8 @select_trunc_bittest_to_shl(i8 %x) { +; CHECK-LABEL: @select_trunc_bittest_to_shl( +; CHECK-NEXT: [[TRUNC:%.*]] = trunc i8 [[X:%.*]] to i1 +; CHECK-NEXT: [[RET:%.*]] = select i1 [[TRUNC]], i8 4, i8 0 +; CHECK-NEXT: ret i8 [[RET]] +; + %trunc = trunc i8 %x to i1 + %ret = select i1 %trunc, i8 4, i8 0 + ret i8 %ret +} + +define i8 
@neg_select_trunc_bittest_to_shl_extra_use(i8 %x) { +; CHECK-LABEL: @neg_select_trunc_bittest_to_shl_extra_use( +; CHECK-NEXT: [[TRUNC:%.*]] = trunc i8 [[X:%.*]] to i1 +; CHECK-NEXT: call void @use1(i1 [[TRUNC]]) +; CHECK-NEXT: [[RET:%.*]] = select i1 [[TRUNC]], i8 4, i8 0 +; CHECK-NEXT: ret i8 [[RET]] +; + %trunc = trunc i8 %x to i1 + call void @use1(i1 %trunc) + %ret = select i1 %trunc, i8 4, i8 0 + ret i8 %ret +} diff --git a/llvm/test/Transforms/InstCombine/shift-cttz-ctlz.ll b/llvm/test/Transforms/InstCombine/shift-cttz-ctlz.ll index 63caec9501325..e82e33e9d7f04 100644 --- a/llvm/test/Transforms/InstCombine/shift-cttz-ctlz.ll +++ b/llvm/test/Transforms/InstCombine/shift-cttz-ctlz.ll @@ -103,4 +103,34 @@ entry: ret i32 %res } +define i64 @fold_cttz_64() vscale_range(1,16) { +; CHECK-LABEL: define i64 @fold_cttz_64( +; CHECK-SAME: ) #[[ATTR0:[0-9]+]] { +; CHECK-NEXT: entry: +; CHECK-NEXT: ret i64 4 +; +entry: + %vscale = tail call i64 @llvm.vscale.i64() + %shl0 = shl nuw nsw i64 %vscale, 4 + %shl1 = shl nuw nsw i64 %vscale, 2 + %cttz = tail call range(i64 2, 65) i64 @llvm.cttz.i64(i64 %shl1, i1 true) + %div1 = lshr i64 %shl0, %cttz + ret i64 %div1 +} + +define i32 @fold_cttz_32() vscale_range(1,16) { +; CHECK-LABEL: define i32 @fold_cttz_32( +; CHECK-SAME: ) #[[ATTR0]] { +; CHECK-NEXT: entry: +; CHECK-NEXT: ret i32 4 +; +entry: + %vscale = tail call i32 @llvm.vscale.i32() + %shl0 = shl nuw nsw i32 %vscale, 4 + %shl1 = shl nuw nsw i32 %vscale, 2 + %cttz = tail call range(i32 2, 65) i32 @llvm.cttz.i32(i32 %shl1, i1 true) + %div1 = lshr i32 %shl0, %cttz + ret i32 %div1 +} + declare void @use(i32) diff --git a/llvm/test/Transforms/LICM/AMDGPU/buffer-rsrc-ptrs.ll b/llvm/test/Transforms/LICM/AMDGPU/buffer-rsrc-ptrs.ll index 2e539d03afc1c..e69da434c0caf 100644 --- a/llvm/test/Transforms/LICM/AMDGPU/buffer-rsrc-ptrs.ll +++ b/llvm/test/Transforms/LICM/AMDGPU/buffer-rsrc-ptrs.ll @@ -165,8 +165,8 @@ define void @hoistable_buffer_construction_intrinsic(ptr addrspace(1) noalias %p ; CHECK-LABEL: define void @hoistable_buffer_construction_intrinsic ; CHECK-SAME: (ptr addrspace(1) noalias [[P_GLOBAL:%.*]], ptr addrspace(1) noalias [[Q_GLOBAL:%.*]], i32 [[BOUND:%.*]]) { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[P:%.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p1(ptr addrspace(1) [[P_GLOBAL]], i16 0, i32 0, i32 0) -; CHECK-NEXT: [[Q:%.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p1(ptr addrspace(1) [[Q_GLOBAL]], i16 0, i32 0, i32 0) +; CHECK-NEXT: [[P:%.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) [[P_GLOBAL]], i16 0, i32 0, i32 0) +; CHECK-NEXT: [[Q:%.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) [[Q_GLOBAL]], i16 0, i32 0, i32 0) ; CHECK-NEXT: [[HOISTABLE:%.*]] = call i32 @llvm.amdgcn.struct.ptr.buffer.load.i32(ptr addrspace(8) [[Q]], i32 0, i32 0, i32 0, i32 0) ; CHECK-NEXT: br label [[LOOP:%.*]] ; CHECK: loop: @@ -181,8 +181,8 @@ define void @hoistable_buffer_construction_intrinsic(ptr addrspace(1) noalias %p ; CHECK-NEXT: ret void ; entry: - %p = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p1(ptr addrspace(1) %p.global, i16 0, i32 0, i32 0) - %q = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p1(ptr addrspace(1) %q.global, i16 0, i32 0, i32 0) + %p = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) %p.global, i16 0, i32 0, i32 0) + %q = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) %q.global, i16 0, i32 0, i32 0) br label %loop loop: %i = 
phi i32 [0, %entry], [%next, %loop] @@ -256,8 +256,8 @@ declare i32 @llvm.amdgcn.raw.ptr.buffer.load.i32(ptr addrspace(8) nocapture read declare i32 @llvm.amdgcn.struct.ptr.buffer.load.i32(ptr addrspace(8) nocapture readonly, i32, i32, i32, i32 immarg) #0 ; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: write) declare void @llvm.amdgcn.raw.ptr.buffer.store.i32(i32, ptr addrspace(8) nocapture writeonly, i32, i32, i32 immarg) #1 -; Function Attrs: nocallback nofree nosync nounwind speculatable willreturn memory(none)declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p1(ptr addrspace(1) nocapture readnone, i16, i32, i32) #2 -declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p1(ptr addrspace(1) readnone nocapture, i16, i32, i32) +; Function Attrs: nocallback nofree nosync nounwind speculatable willreturn memory(none)declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) nocapture readnone, i16, i32, i32) #2 +declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) readnone nocapture, i16, i32, i32) attributes #0 = { nocallback nofree nosync nounwind willreturn memory(argmem: read) } attributes #1 = { nocallback nofree nosync nounwind willreturn memory(argmem: write) } attributes #2 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) } diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll b/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll index ebb5d46cd8438..4e862bf2f7480 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll @@ -46,7 +46,7 @@ define void @vector_reverse_i64(ptr nocapture noundef writeonly %A, ptr nocaptur ; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit, !llvm.loop !0 ; CHECK-NEXT: LV: Using user VF vscale x 4. ; CHECK-NEXT: LV: Loop does not require scalar epilogue -; CHECK-NEXT: LV: Scalarizing: %i.0 = add nsw i32 %i.0.in8, -1 +; CHECK: LV: Scalarizing: %i.0 = add nsw i32 %i.0.in8, -1 ; CHECK-NEXT: LV: Scalarizing: %idxprom = zext i32 %i.0 to i64 ; CHECK-NEXT: LV: Scalarizing: %arrayidx = getelementptr inbounds i32, ptr %B, i64 %idxprom ; CHECK-NEXT: LV: Scalarizing: %arrayidx3 = getelementptr inbounds i32, ptr %A, i64 %idxprom @@ -295,7 +295,7 @@ define void @vector_reverse_f32(ptr nocapture noundef writeonly %A, ptr nocaptur ; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit, !llvm.loop !0 ; CHECK-NEXT: LV: Using user VF vscale x 4. 
; CHECK-NEXT: LV: Loop does not require scalar epilogue -; CHECK-NEXT: LV: Scalarizing: %i.0 = add nsw i32 %i.0.in8, -1 +; CHECK: LV: Scalarizing: %i.0 = add nsw i32 %i.0.in8, -1 ; CHECK-NEXT: LV: Scalarizing: %idxprom = zext i32 %i.0 to i64 ; CHECK-NEXT: LV: Scalarizing: %arrayidx = getelementptr inbounds float, ptr %B, i64 %idxprom ; CHECK-NEXT: LV: Scalarizing: %arrayidx3 = getelementptr inbounds float, ptr %A, i64 %idxprom diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/vscale.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/vscale.ll new file mode 100644 index 0000000000000..7aa50ddf61468 --- /dev/null +++ b/llvm/test/Transforms/PhaseOrdering/AArch64/vscale.ll @@ -0,0 +1,15 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt -passes="default" -mattr=+sve -S -o - %s | FileCheck %s + +target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128" +target triple = "aarch64" + +define i64 @udiv() vscale_range(1, 16) { +; CHECK-LABEL: @udiv( +; CHECK-NEXT: ret i64 4 +; + %a = call i64 @llvm.aarch64.sve.cntb(i32 31) + %b = call i64 @llvm.aarch64.sve.cntw(i32 31) + %c = udiv i64 %a, %b + ret i64 %c +} diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/unreachable-blocks-with-phis.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/unreachable-blocks-with-phis.ll new file mode 100644 index 0000000000000..aeb82d800a2f7 --- /dev/null +++ b/llvm/test/Transforms/SLPVectorizer/AArch64/unreachable-blocks-with-phis.ll @@ -0,0 +1,43 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt -S --passes=slp-vectorizer -mtriple=aarch64 < %s | FileCheck %s + +define void @test() { +; CHECK-LABEL: define void @test() { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i32>, ptr null, align 1 +; CHECK-NEXT: br label %[[IF_END:.*]] +; CHECK: [[IF_THEN:.*]]: +; CHECK-NEXT: br label %[[IF_END]] +; CHECK: [[IF_END]]: +; CHECK-NEXT: [[TMP1:%.*]] = phi <2 x i32> [ [[TMP0]], %[[ENTRY]] ], [ poison, %[[IF_THEN]] ] +; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x i32> [[TMP1]], i32 0 +; CHECK-NEXT: store i32 [[TMP2]], ptr null, align 1 +; CHECK-NEXT: br label %[[TRAP:.*]] +; CHECK: [[BB3:.*:]] +; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i32> [[TMP1]], i32 1 +; CHECK-NEXT: store i32 [[TMP4]], ptr null, align 1 +; CHECK-NEXT: ret void +; CHECK: [[TRAP]]: +; CHECK-NEXT: unreachable +; +entry: + %g_2197.real32.pre = load i32, ptr null, align 1 + %g_2197.imag33.pre = load i32, ptr getelementptr inbounds nuw ({ i32, i32 }, ptr null, i32 0, i32 1), align 1 + br label %if.end + +if.then: + br label %if.end + +if.end: + %g_2197.imag33 = phi i32 [ %g_2197.imag33.pre, %entry ], [ 0, %if.then ] + %g_2197.real32 = phi i32 [ %g_2197.real32.pre, %entry ], [ 0, %if.then ] + store i32 %g_2197.real32, ptr null, align 1 + br label %trap + +0: + store i32 %g_2197.imag33, ptr null, align 1 + ret void + +trap: + unreachable +} diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/small-phi-tree.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/small-phi-tree.ll new file mode 100644 index 0000000000000..c4f35d8dfc219 --- /dev/null +++ b/llvm/test/Transforms/SLPVectorizer/RISCV/small-phi-tree.ll @@ -0,0 +1,40 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt -mtriple=riscv64 -mcpu=sifive-x280 -passes=slp-vectorizer -S -slp-threshold=-11 < %s | FileCheck %s + +define float @test(ptr %call78) { +; CHECK-LABEL: @test( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x 
ptr> , ptr [[CALL78:%.*]], i32 1 +; CHECK-NEXT: br label [[FOR_BODY194:%.*]] +; CHECK: for.body194: +; CHECK-NEXT: [[INDVARS_IV132:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ 0, [[FOR_BODY194]] ] +; CHECK-NEXT: [[CURRENTW_031:%.*]] = phi ptr [ [[CALL78]], [[ENTRY]] ], [ [[PREVIOUSW_030:%.*]], [[FOR_BODY194]] ] +; CHECK-NEXT: [[PREVIOUSW_030]] = phi ptr [ null, [[ENTRY]] ], [ [[CURRENTW_031]], [[FOR_BODY194]] ] +; CHECK-NEXT: [[TMP1:%.*]] = phi <2 x ptr> [ [[TMP0]], [[ENTRY]] ], [ [[TMP3:%.*]], [[FOR_BODY194]] ] +; CHECK-NEXT: store float 0.000000e+00, ptr [[CURRENTW_031]], align 4 +; CHECK-NEXT: tail call void null(ptr [[PREVIOUSW_030]], ptr null, ptr null, i32 0, i32 0, ptr null, ptr null, i32 0) +; CHECK-NEXT: [[TMP2:%.*]] = insertelement <2 x ptr> poison, ptr [[CURRENTW_031]], i32 0 +; CHECK-NEXT: [[TMP3]] = insertelement <2 x ptr> [[TMP2]], ptr [[PREVIOUSW_030]], i32 1 +; CHECK-NEXT: br i1 false, label [[FOR_END286_LOOPEXIT:%.*]], label [[FOR_BODY194]] +; CHECK: for.end286.loopexit: +; CHECK-NEXT: [[TMP4:%.*]] = phi <2 x ptr> [ [[TMP1]], [[FOR_BODY194]] ] +; CHECK-NEXT: ret float 0.000000e+00 +; +entry: + br label %for.body194 + +for.body194: + %indvars.iv132 = phi i64 [ 0, %entry ], [ 0, %for.body194 ] + %currentw.031 = phi ptr [ %call78, %entry ], [ %previousw.030, %for.body194 ] + %previousw.030 = phi ptr [ null, %entry ], [ %currentw.031, %for.body194 ] + store float 0.000000e+00, ptr %currentw.031, align 4 + tail call void null(ptr %previousw.030, ptr null, ptr null, i32 0, i32 0, ptr null, ptr null, i32 0) + br i1 false, label %for.end286.loopexit, label %for.body194 + +for.end286.loopexit: + %currentw.031.lcssa = phi ptr [ %currentw.031, %for.body194 ] + %previousw.030.lcssa = phi ptr [ %previousw.030, %for.body194 ] + ret float 0.000000e+00 +} + + diff --git a/llvm/test/Transforms/SandboxVectorizer/bottomup_basic.ll b/llvm/test/Transforms/SandboxVectorizer/bottomup_basic.ll index 6b18d4069e0ae..c076c0e849fa9 100644 --- a/llvm/test/Transforms/SandboxVectorizer/bottomup_basic.ll +++ b/llvm/test/Transforms/SandboxVectorizer/bottomup_basic.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 -; RUN: opt -passes=sandbox-vectorizer -sbvec-vec-reg-bits=1024 -sbvec-allow-non-pow2 -sbvec-passes="bottom-up-vec" %s -S | FileCheck %s +; RUN: opt -passes=sandbox-vectorizer -sbvec-vec-reg-bits=1024 -sbvec-allow-non-pow2 -sbvec-passes="seed-collection" %s -S | FileCheck %s define void @store_load(ptr %ptr) { ; CHECK-LABEL: define void @store_load( diff --git a/llvm/test/Transforms/SandboxVectorizer/bottomup_seed_slice.ll b/llvm/test/Transforms/SandboxVectorizer/bottomup_seed_slice.ll index 202b5a6fbd6c9..38c0816504481 100644 --- a/llvm/test/Transforms/SandboxVectorizer/bottomup_seed_slice.ll +++ b/llvm/test/Transforms/SandboxVectorizer/bottomup_seed_slice.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 -; RUN: opt -passes=sandbox-vectorizer -sbvec-vec-reg-bits=1024 -sbvec-allow-non-pow2 -sbvec-passes="bottom-up-vec" %s -S | FileCheck %s +; RUN: opt -passes=sandbox-vectorizer -sbvec-vec-reg-bits=1024 -sbvec-allow-non-pow2 -sbvec-passes="seed-collection" %s -S | FileCheck %s declare void @foo() diff --git a/llvm/test/Transforms/SandboxVectorizer/bottomup_seed_slice_pow2.ll b/llvm/test/Transforms/SandboxVectorizer/bottomup_seed_slice_pow2.ll index 1b189831569f5..f2eb124494b5e 100644 --- a/llvm/test/Transforms/SandboxVectorizer/bottomup_seed_slice_pow2.ll +++ 
b/llvm/test/Transforms/SandboxVectorizer/bottomup_seed_slice_pow2.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 -; RUN: opt -passes=sandbox-vectorizer -sbvec-vec-reg-bits=1024 -sbvec-allow-non-pow2=false -sbvec-passes="bottom-up-vec" %s -S | FileCheck %s --check-prefix=POW2 -; RUN: opt -passes=sandbox-vectorizer -sbvec-vec-reg-bits=1024 -sbvec-allow-non-pow2=true -sbvec-passes="bottom-up-vec" %s -S | FileCheck %s --check-prefix=NON-POW2 +; RUN: opt -passes=sandbox-vectorizer -sbvec-vec-reg-bits=1024 -sbvec-allow-non-pow2=false -sbvec-passes="seed-collection" %s -S | FileCheck %s --check-prefix=POW2 +; RUN: opt -passes=sandbox-vectorizer -sbvec-vec-reg-bits=1024 -sbvec-allow-non-pow2=true -sbvec-passes="seed-collection" %s -S | FileCheck %s --check-prefix=NON-POW2 define void @pow2(ptr %ptr, float %val) { ; POW2-LABEL: define void @pow2( diff --git a/llvm/test/Transforms/SandboxVectorizer/cross_bbs.ll b/llvm/test/Transforms/SandboxVectorizer/cross_bbs.ll index ff1604173c317..c559f294f9695 100644 --- a/llvm/test/Transforms/SandboxVectorizer/cross_bbs.ll +++ b/llvm/test/Transforms/SandboxVectorizer/cross_bbs.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 -; RUN: opt -passes=sandbox-vectorizer -sbvec-vec-reg-bits=1024 -sbvec-allow-non-pow2 -sbvec-passes="bottom-up-vec" %s -S | FileCheck %s +; RUN: opt -passes=sandbox-vectorizer -sbvec-vec-reg-bits=1024 -sbvec-allow-non-pow2 -sbvec-passes="seed-collection" %s -S | FileCheck %s define void @cross_bbs(ptr %ptr) { ; CHECK-LABEL: define void @cross_bbs( diff --git a/llvm/test/Transforms/SandboxVectorizer/default_pass_pipeline.ll b/llvm/test/Transforms/SandboxVectorizer/default_pass_pipeline.ll index 10de4338caf23..86000da42c799 100644 --- a/llvm/test/Transforms/SandboxVectorizer/default_pass_pipeline.ll +++ b/llvm/test/Transforms/SandboxVectorizer/default_pass_pipeline.ll @@ -5,8 +5,9 @@ ; This checks the default pass pipeline for the sandbox vectorizer. 
define void @pipeline() { ; CHECK: fpm -; CHECK: bottom-up-vec +; CHECK: seed-collection ; CHECK: rpm +; CHECK: bottom-up-vec ; CHECK: tr-accept-or-revert ; CHECK-EMPTY: ret void diff --git a/llvm/test/Transforms/SandboxVectorizer/pack.ll b/llvm/test/Transforms/SandboxVectorizer/pack.ll index da41036e3a58b..c5abddb7fba01 100644 --- a/llvm/test/Transforms/SandboxVectorizer/pack.ll +++ b/llvm/test/Transforms/SandboxVectorizer/pack.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 -; RUN: opt -passes=sandbox-vectorizer -sbvec-vec-reg-bits=1024 -sbvec-allow-non-pow2 -sbvec-passes="bottom-up-vec" %s -S | FileCheck %s +; RUN: opt -passes=sandbox-vectorizer -sbvec-vec-reg-bits=1024 -sbvec-allow-non-pow2 -sbvec-passes="seed-collection" %s -S | FileCheck %s define void @pack_constants(ptr %ptr) { ; CHECK-LABEL: define void @pack_constants( diff --git a/llvm/test/Transforms/SandboxVectorizer/repeated_instrs.ll b/llvm/test/Transforms/SandboxVectorizer/repeated_instrs.ll index add762ac2d894..081267da77e5f 100644 --- a/llvm/test/Transforms/SandboxVectorizer/repeated_instrs.ll +++ b/llvm/test/Transforms/SandboxVectorizer/repeated_instrs.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 -; RUN: opt -passes=sandbox-vectorizer -sbvec-vec-reg-bits=1024 -sbvec-allow-non-pow2 -sbvec-passes="bottom-up-vec" %s -S | FileCheck %s +; RUN: opt -passes=sandbox-vectorizer -sbvec-vec-reg-bits=1024 -sbvec-allow-non-pow2 -sbvec-passes="seed-collection" %s -S | FileCheck %s define i32 @repeated_splat(ptr %ptr, i32 %v) #0 { ; CHECK-LABEL: define i32 @repeated_splat( diff --git a/llvm/test/Transforms/SandboxVectorizer/scheduler.ll b/llvm/test/Transforms/SandboxVectorizer/scheduler.ll index acbec80db6b06..7741d8c64c8fc 100644 --- a/llvm/test/Transforms/SandboxVectorizer/scheduler.ll +++ b/llvm/test/Transforms/SandboxVectorizer/scheduler.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 -; RUN: opt -passes=sandbox-vectorizer -sbvec-vec-reg-bits=1024 -sbvec-allow-non-pow2 -sbvec-passes="bottom-up-vec" %s -S | FileCheck %s +; RUN: opt -passes=sandbox-vectorizer -sbvec-vec-reg-bits=1024 -sbvec-allow-non-pow2 -sbvec-passes="seed-collection" %s -S | FileCheck %s ; This used to crash because the newly added pack instructions would not update ; the DAG and scheduler, leading to def-after-use. diff --git a/llvm/test/Transforms/SandboxVectorizer/special_opcodes.ll b/llvm/test/Transforms/SandboxVectorizer/special_opcodes.ll index e8fe8b4fa88e3..edb8d615e0055 100644 --- a/llvm/test/Transforms/SandboxVectorizer/special_opcodes.ll +++ b/llvm/test/Transforms/SandboxVectorizer/special_opcodes.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 -; RUN: opt -passes=sandbox-vectorizer -sbvec-vec-reg-bits=1024 -sbvec-allow-non-pow2 -sbvec-passes="bottom-up-vec" %s -S | FileCheck %s +; RUN: opt -passes=sandbox-vectorizer -sbvec-vec-reg-bits=1024 -sbvec-allow-non-pow2 -sbvec-passes="seed-collection" %s -S | FileCheck %s ; This file includes tests for opcodes that need special checks. 
diff --git a/llvm/test/Transforms/SandboxVectorizer/user_pass_pipeline.ll b/llvm/test/Transforms/SandboxVectorizer/user_pass_pipeline.ll
index b11b55ed96019..2c57a8e7347d2 100644
--- a/llvm/test/Transforms/SandboxVectorizer/user_pass_pipeline.ll
+++ b/llvm/test/Transforms/SandboxVectorizer/user_pass_pipeline.ll
@@ -1,9 +1,9 @@
 ; RUN: opt -passes=sandbox-vectorizer -sbvec-print-pass-pipeline \
-; RUN:     -disable-output -sbvec-passes="bottom-up-vec" %s \
+; RUN:     -disable-output -sbvec-passes="seed-collection" %s \
 ; RUN: | FileCheck %s
 ;
 ; RUN: opt -passes=sandbox-vectorizer -sbvec-print-pass-pipeline \
-; RUN:     -disable-output -sbvec-passes="bottom-up-vec<>,regions-from-metadata<>" %s \
+; RUN:     -disable-output -sbvec-passes="seed-collection<>,regions-from-metadata<>" %s \
 ; RUN: | FileCheck --check-prefix CHECK-MULTIPLE-FUNCTION-PASSES %s
 ; !!!WARNING!!! This won't get updated by update_test_checks.py !
@@ -14,14 +14,14 @@ define void @pipeline() {
 }
 ; CHECK: fpm
-; CHECK: bottom-up-vec
+; CHECK: seed-collection
 ; CHECK: rpm
 ; CHECK: null
 ; CHECK: null
 ; CHECK-EMPTY:
 ; CHECK-MULTIPLE-FUNCTION-PASSES: fpm
-; CHECK-MULTIPLE-FUNCTION-PASSES: bottom-up-vec
+; CHECK-MULTIPLE-FUNCTION-PASSES: seed-collection
 ; CHECK-MULTIPLE-FUNCTION-PASSES: rpm
 ; CHECK-MULTIPLE-FUNCTION-PASSES: regions-from-metadata
 ; CHECK-MULTIPLE-FUNCTION-PASSES: rpm
diff --git a/llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp b/llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp
index 83c8f7e932b2b..a1ea7849d7c0c 100644
--- a/llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp
+++ b/llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp
@@ -5928,6 +5928,7 @@ TEST_F(OpenMPIRBuilderTest, TargetEnterData) {
     return CombinedInfo;
   };
+  auto CustomMapperCB = [&](unsigned int I) { return nullptr; };
   llvm::OpenMPIRBuilder::TargetDataInfo Info(
       /*RequiresDevicePointerInfo=*/false,
       /*SeparateBeginEndCalls=*/true);
@@ -5939,7 +5940,7 @@ TEST_F(OpenMPIRBuilderTest, TargetEnterData) {
       OpenMPIRBuilder::InsertPointTy, AfterIP,
       OMPBuilder.createTargetData(
           Loc, AllocaIP, Builder.saveIP(), Builder.getInt64(DeviceID),
-          /* IfCond= */ nullptr, Info, GenMapInfoCB, &RTLFunc));
+          /* IfCond= */ nullptr, Info, GenMapInfoCB, CustomMapperCB, &RTLFunc));
   Builder.restoreIP(AfterIP);
 
   CallInst *TargetDataCall = dyn_cast<CallInst>(&BB->back());
@@ -5990,6 +5991,7 @@ TEST_F(OpenMPIRBuilderTest, TargetExitData) {
     return CombinedInfo;
   };
+  auto CustomMapperCB = [&](unsigned int I) { return nullptr; };
   llvm::OpenMPIRBuilder::TargetDataInfo Info(
       /*RequiresDevicePointerInfo=*/false,
       /*SeparateBeginEndCalls=*/true);
@@ -6001,7 +6003,7 @@ TEST_F(OpenMPIRBuilderTest, TargetExitData) {
       OpenMPIRBuilder::InsertPointTy, AfterIP,
       OMPBuilder.createTargetData(
           Loc, AllocaIP, Builder.saveIP(), Builder.getInt64(DeviceID),
-          /* IfCond= */ nullptr, Info, GenMapInfoCB, &RTLFunc));
+          /* IfCond= */ nullptr, Info, GenMapInfoCB, CustomMapperCB, &RTLFunc));
   Builder.restoreIP(AfterIP);
 
   CallInst *TargetDataCall = dyn_cast<CallInst>(&BB->back());
@@ -6074,6 +6076,7 @@ TEST_F(OpenMPIRBuilderTest, TargetDataRegion) {
     return CombinedInfo;
   };
+  auto CustomMapperCB = [&](unsigned int I) { return nullptr; };
   llvm::OpenMPIRBuilder::TargetDataInfo Info(
       /*RequiresDevicePointerInfo=*/true,
       /*SeparateBeginEndCalls=*/true);
@@ -6110,9 +6113,10 @@ TEST_F(OpenMPIRBuilderTest, TargetDataRegion) {
   ASSERT_EXPECTED_INIT(
       OpenMPIRBuilder::InsertPointTy, TargetDataIP1,
-      OMPBuilder.createTargetData(
-          Loc, AllocaIP, Builder.saveIP(), Builder.getInt64(DeviceID),
-          /* IfCond= */ nullptr, Info, GenMapInfoCB, nullptr, BodyCB));
+      OMPBuilder.createTargetData(Loc, AllocaIP, Builder.saveIP(),
+                                  Builder.getInt64(DeviceID),
+                                  /* IfCond= */ nullptr, Info, GenMapInfoCB,
+                                  CustomMapperCB, nullptr, BodyCB));
   Builder.restoreIP(TargetDataIP1);
 
   CallInst *TargetDataCall = dyn_cast<CallInst>(&BB->back());
@@ -6138,9 +6142,10 @@ TEST_F(OpenMPIRBuilderTest, TargetDataRegion) {
   };
   ASSERT_EXPECTED_INIT(
       OpenMPIRBuilder::InsertPointTy, TargetDataIP2,
-      OMPBuilder.createTargetData(
-          Loc, AllocaIP, Builder.saveIP(), Builder.getInt64(DeviceID),
-          /* IfCond= */ nullptr, Info, GenMapInfoCB, nullptr, BodyTargetCB));
+      OMPBuilder.createTargetData(Loc, AllocaIP, Builder.saveIP(),
+                                  Builder.getInt64(DeviceID),
+                                  /* IfCond= */ nullptr, Info, GenMapInfoCB,
+                                  CustomMapperCB, nullptr, BodyTargetCB));
   Builder.restoreIP(TargetDataIP2);
 
   EXPECT_TRUE(CheckDevicePassBodyGen);
@@ -6241,6 +6246,11 @@ TEST_F(OpenMPIRBuilderTest, TargetRegion) {
     return CombinedInfos;
   };
 
+  auto CustomMapperCB = [&](unsigned int I) { return nullptr; };
+  llvm::OpenMPIRBuilder::TargetDataInfo Info(
+      /*RequiresDevicePointerInfo=*/false,
+      /*SeparateBeginEndCalls=*/true);
+
   TargetRegionEntryInfo EntryInfo("func", 42, 4711, 17);
   OpenMPIRBuilder::LocationDescription OmpLoc({Builder.saveIP(), DL});
   OpenMPIRBuilder::TargetKernelRuntimeAttrs RuntimeAttrs;
@@ -6254,9 +6264,10 @@ TEST_F(OpenMPIRBuilderTest, TargetRegion) {
   ASSERT_EXPECTED_INIT(
       OpenMPIRBuilder::InsertPointTy, AfterIP,
       OMPBuilder.createTarget(OmpLoc, /*IsOffloadEntry=*/true, Builder.saveIP(),
-                              Builder.saveIP(), EntryInfo, DefaultAttrs,
+                              Builder.saveIP(), Info, EntryInfo, DefaultAttrs,
                               RuntimeAttrs, /*IfCond=*/nullptr, Inputs,
-                              GenMapInfoCB, BodyGenCB, SimpleArgAccessorCB));
+                              GenMapInfoCB, BodyGenCB, SimpleArgAccessorCB,
+                              CustomMapperCB, {}, false));
   EXPECT_EQ(DL, Builder.getCurrentDebugLocation());
   Builder.restoreIP(AfterIP);
@@ -6400,6 +6411,7 @@ TEST_F(OpenMPIRBuilderTest, TargetRegionDevice) {
     return CombinedInfos;
   };
 
+  auto CustomMapperCB = [&](unsigned int I) { return nullptr; };
   auto BodyGenCB = [&](OpenMPIRBuilder::InsertPointTy AllocaIP,
                        OpenMPIRBuilder::InsertPointTy CodeGenIP)
       -> OpenMPIRBuilder::InsertPointTy {
@@ -6419,13 +6431,17 @@ TEST_F(OpenMPIRBuilderTest, TargetRegionDevice) {
   OpenMPIRBuilder::TargetKernelDefaultAttrs DefaultAttrs = {
       /*ExecFlags=*/omp::OMPTgtExecModeFlags::OMP_TGT_EXEC_MODE_GENERIC,
       /*MaxTeams=*/{-1}, /*MinTeams=*/0, /*MaxThreads=*/{0}, /*MinThreads=*/0};
+  llvm::OpenMPIRBuilder::TargetDataInfo Info(
+      /*RequiresDevicePointerInfo=*/false,
+      /*SeparateBeginEndCalls=*/true);
 
   ASSERT_EXPECTED_INIT(
       OpenMPIRBuilder::InsertPointTy, AfterIP,
       OMPBuilder.createTarget(Loc, /*IsOffloadEntry=*/true, EntryIP, EntryIP,
-                              EntryInfo, DefaultAttrs, RuntimeAttrs,
+                              Info, EntryInfo, DefaultAttrs, RuntimeAttrs,
                               /*IfCond=*/nullptr, CapturedArgs, GenMapInfoCB,
-                              BodyGenCB, SimpleArgAccessorCB));
+                              BodyGenCB, SimpleArgAccessorCB, CustomMapperCB,
+                              {}, false));
   EXPECT_EQ(DL, Builder.getCurrentDebugLocation());
   Builder.restoreIP(AfterIP);
@@ -6549,6 +6565,7 @@ TEST_F(OpenMPIRBuilderTest, TargetRegionSPMD) {
   F->setName("func");
   IRBuilder<> Builder(BB);
 
+  auto CustomMapperCB = [&](unsigned int I) { return nullptr; };
   auto BodyGenCB = [&](InsertPointTy,
                        InsertPointTy CodeGenIP) -> InsertPointTy {
     Builder.restoreIP(CodeGenIP);
@@ -6576,13 +6593,17 @@ TEST_F(OpenMPIRBuilderTest, TargetRegionSPMD) {
       /*ExecFlags=*/omp::OMPTgtExecModeFlags::OMP_TGT_EXEC_MODE_SPMD,
       /*MaxTeams=*/{-1}, /*MinTeams=*/0, /*MaxThreads=*/{0}, /*MinThreads=*/0};
   RuntimeAttrs.LoopTripCount = Builder.getInt64(1000);
+
llvm::OpenMPIRBuilder::TargetDataInfo Info( + /*RequiresDevicePointerInfo=*/false, + /*SeparateBeginEndCalls=*/true); ASSERT_EXPECTED_INIT( OpenMPIRBuilder::InsertPointTy, AfterIP, OMPBuilder.createTarget(OmpLoc, /*IsOffloadEntry=*/true, Builder.saveIP(), - Builder.saveIP(), EntryInfo, DefaultAttrs, + Builder.saveIP(), Info, EntryInfo, DefaultAttrs, RuntimeAttrs, /*IfCond=*/nullptr, Inputs, - GenMapInfoCB, BodyGenCB, SimpleArgAccessorCB)); + GenMapInfoCB, BodyGenCB, SimpleArgAccessorCB, + CustomMapperCB)); Builder.restoreIP(AfterIP); OMPBuilder.finalize(); @@ -6663,6 +6684,7 @@ TEST_F(OpenMPIRBuilderTest, TargetRegionDeviceSPMD) { return CombinedInfos; }; + auto CustomMapperCB = [&](unsigned int I) { return nullptr; }; auto BodyGenCB = [&](OpenMPIRBuilder::InsertPointTy, OpenMPIRBuilder::InsertPointTy CodeGenIP) -> OpenMPIRBuilder::InsertPointTy { @@ -6679,13 +6701,16 @@ TEST_F(OpenMPIRBuilderTest, TargetRegionDeviceSPMD) { OpenMPIRBuilder::TargetKernelDefaultAttrs DefaultAttrs = { /*ExecFlags=*/omp::OMPTgtExecModeFlags::OMP_TGT_EXEC_MODE_SPMD, /*MaxTeams=*/{-1}, /*MinTeams=*/0, /*MaxThreads=*/{0}, /*MinThreads=*/0}; + llvm::OpenMPIRBuilder::TargetDataInfo Info( + /*RequiresDevicePointerInfo=*/false, + /*SeparateBeginEndCalls=*/true); ASSERT_EXPECTED_INIT( OpenMPIRBuilder::InsertPointTy, AfterIP, OMPBuilder.createTarget(Loc, /*IsOffloadEntry=*/true, EntryIP, EntryIP, - EntryInfo, DefaultAttrs, RuntimeAttrs, + Info, EntryInfo, DefaultAttrs, RuntimeAttrs, /*IfCond=*/nullptr, CapturedArgs, GenMapInfoCB, - BodyGenCB, SimpleArgAccessorCB)); + BodyGenCB, SimpleArgAccessorCB, CustomMapperCB)); Builder.restoreIP(AfterIP); Builder.CreateRetVoid(); @@ -6779,6 +6804,7 @@ TEST_F(OpenMPIRBuilderTest, ConstantAllocaRaise) { llvm::Value *RaiseAlloca = nullptr; + auto CustomMapperCB = [&](unsigned int I) { return nullptr; }; auto BodyGenCB = [&](OpenMPIRBuilder::InsertPointTy AllocaIP, OpenMPIRBuilder::InsertPointTy CodeGenIP) -> OpenMPIRBuilder::InsertPointTy { @@ -6799,13 +6825,17 @@ TEST_F(OpenMPIRBuilderTest, ConstantAllocaRaise) { OpenMPIRBuilder::TargetKernelDefaultAttrs DefaultAttrs = { /*ExecFlags=*/omp::OMPTgtExecModeFlags::OMP_TGT_EXEC_MODE_GENERIC, /*MaxTeams=*/{-1}, /*MinTeams=*/0, /*MaxThreads=*/{0}, /*MinThreads=*/0}; + llvm::OpenMPIRBuilder::TargetDataInfo Info( + /*RequiresDevicePointerInfo=*/false, + /*SeparateBeginEndCalls=*/true); ASSERT_EXPECTED_INIT( OpenMPIRBuilder::InsertPointTy, AfterIP, OMPBuilder.createTarget(Loc, /*IsOffloadEntry=*/true, EntryIP, EntryIP, - EntryInfo, DefaultAttrs, RuntimeAttrs, + Info, EntryInfo, DefaultAttrs, RuntimeAttrs, /*IfCond=*/nullptr, CapturedArgs, GenMapInfoCB, - BodyGenCB, SimpleArgAccessorCB)); + BodyGenCB, SimpleArgAccessorCB, CustomMapperCB, + {}, false)); EXPECT_EQ(DL, Builder.getCurrentDebugLocation()); Builder.restoreIP(AfterIP); diff --git a/llvm/unittests/SandboxIR/RegionTest.cpp b/llvm/unittests/SandboxIR/RegionTest.cpp index 09c578bcfefaa..992c3f2cbd2ea 100644 --- a/llvm/unittests/SandboxIR/RegionTest.cpp +++ b/llvm/unittests/SandboxIR/RegionTest.cpp @@ -386,6 +386,49 @@ define void @foo(i8 %v) { ".*already.*"); } +// Check that Aux automatically drops instructions that get deleted. 
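+// (In these tests the !sandboxaux operand of each instruction encodes its
+// position in the region's Aux vector, as the expectations below show.)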
+TEST_F(RegionTest, AuxDeleteInstr) {
+  parseIR(C, R"IR(
+define void @foo(i8 %v) {
+  %Add0 = add i8 %v, 0, !sandboxvec !0, !sandboxaux !1
+  %Add1 = add i8 %v, 1, !sandboxvec !0, !sandboxaux !2
+  %Add2 = add i8 %v, 2, !sandboxvec !0, !sandboxaux !3
+  %Add3 = add i8 %v, 2, !sandboxvec !0, !sandboxaux !4
+  ret void
+}
+
+!0 = distinct !{!"sandboxregion"}
+!1 = !{i32 0}
+!2 = !{i32 1}
+!3 = !{i32 2}
+!4 = !{i32 3}
+)IR");
+  llvm::Function *LLVMF = &*M->getFunction("foo");
+  sandboxir::Context Ctx(C);
+  auto *F = Ctx.createFunction(LLVMF);
+  auto *BB = &*F->begin();
+  auto It = BB->begin();
+  auto *Add0 = &*It++;
+  auto *Add1 = &*It++;
+  auto *Add2 = &*It++;
+  auto *Add3 = &*It++;
+  SmallVector<std::unique_ptr<sandboxir::Region>> Regions =
+      sandboxir::Region::createRegionsFromMD(*F, *TTI);
+  auto &R = *Regions[0];
+  EXPECT_THAT(R.getAux(), testing::ElementsAre(Add0, Add1, Add2, Add3));
+  // Now erase Add2 and check that it gets dropped from Aux.
+  Add2->eraseFromParent();
+  EXPECT_THAT(R.getAux(), testing::ElementsAre(Add0, Add1, Add3));
+  {
+    // Check that the metadata have also been updated.
+    // But first drop Add3 to create a legal Aux vector with no gaps.
+    Add3->eraseFromParent();
+    SmallVector<std::unique_ptr<sandboxir::Region>> Regions =
+        sandboxir::Region::createRegionsFromMD(*F, *TTI);
+    EXPECT_THAT(Regions[0]->getAux(), testing::ElementsAre(Add0, Add1));
+  }
+}
+
 TEST_F(RegionTest, AuxRoundTrip) {
   parseIR(C, R"IR(
 define i8 @foo(i8 %v0, i8 %v1) {
diff --git a/llvm/unittests/TargetParser/RISCVTargetParserTest.cpp b/llvm/unittests/TargetParser/RISCVTargetParserTest.cpp
index 68338b569a208..63ac8f993ecdc 100644
--- a/llvm/unittests/TargetParser/RISCVTargetParserTest.cpp
+++ b/llvm/unittests/TargetParser/RISCVTargetParserTest.cpp
@@ -14,20 +14,20 @@ using namespace llvm;
 namespace {
 TEST(RISCVVType, CheckSameRatioLMUL) {
   // Smaller LMUL.
-  EXPECT_EQ(RISCVII::LMUL_1,
-            RISCVVType::getSameRatioLMUL(16, RISCVII::LMUL_2, 8));
-  EXPECT_EQ(RISCVII::LMUL_F2,
-            RISCVVType::getSameRatioLMUL(16, RISCVII::LMUL_1, 8));
+  EXPECT_EQ(RISCVVType::LMUL_1,
+            RISCVVType::getSameRatioLMUL(16, RISCVVType::LMUL_2, 8));
+  EXPECT_EQ(RISCVVType::LMUL_F2,
+            RISCVVType::getSameRatioLMUL(16, RISCVVType::LMUL_1, 8));
   // Smaller fractional LMUL.
-  EXPECT_EQ(RISCVII::LMUL_F8,
-            RISCVVType::getSameRatioLMUL(16, RISCVII::LMUL_F4, 8));
+  EXPECT_EQ(RISCVVType::LMUL_F8,
+            RISCVVType::getSameRatioLMUL(16, RISCVVType::LMUL_F4, 8));
   // Bigger LMUL.
-  EXPECT_EQ(RISCVII::LMUL_2,
-            RISCVVType::getSameRatioLMUL(8, RISCVII::LMUL_1, 16));
-  EXPECT_EQ(RISCVII::LMUL_1,
-            RISCVVType::getSameRatioLMUL(8, RISCVII::LMUL_F2, 16));
+  EXPECT_EQ(RISCVVType::LMUL_2,
+            RISCVVType::getSameRatioLMUL(8, RISCVVType::LMUL_1, 16));
+  EXPECT_EQ(RISCVVType::LMUL_1,
+            RISCVVType::getSameRatioLMUL(8, RISCVVType::LMUL_F2, 16));
   // Bigger fractional LMUL.
-  EXPECT_EQ(RISCVII::LMUL_F2,
-            RISCVVType::getSameRatioLMUL(8, RISCVII::LMUL_F4, 16));
+  EXPECT_EQ(RISCVVType::LMUL_F2,
+            RISCVVType::getSameRatioLMUL(8, RISCVVType::LMUL_F4, 16));
 }
 } // namespace
diff --git a/llvm/unittests/Transforms/Vectorize/SandboxVectorizer/DependencyGraphTest.cpp b/llvm/unittests/Transforms/Vectorize/SandboxVectorizer/DependencyGraphTest.cpp
index bb809bf33420e..9a7ee8214d10a 100644
--- a/llvm/unittests/Transforms/Vectorize/SandboxVectorizer/DependencyGraphTest.cpp
+++ b/llvm/unittests/Transforms/Vectorize/SandboxVectorizer/DependencyGraphTest.cpp
@@ -940,10 +940,11 @@ define void @foo(ptr %ptr, i8 %v1, i8 %v2, i8 %v3, i8 %new1, i8 %new2) {
 TEST_F(DependencyGraphTest, EraseInstrCallback) {
   parseIR(C, R"IR(
-define void @foo(ptr %ptr, i8 %v1, i8 %v2, i8 %v3, i8 %arg) {
+define void @foo(ptr %ptr, i8 %v1, i8 %v2, i8 %v3, i8 %v4, i8 %arg) {
   store i8 %v1, ptr %ptr
   store i8 %v2, ptr %ptr
   store i8 %v3, ptr %ptr
+  store i8 %v4, ptr %ptr
   ret void
 }
 )IR");
@@ -955,17 +956,27 @@ define void @foo(ptr %ptr, i8 %v1, i8 %v2, i8 %v3, i8 %arg) {
   auto *S1 = cast<sandboxir::StoreInst>(&*It++);
   auto *S2 = cast<sandboxir::StoreInst>(&*It++);
   auto *S3 = cast<sandboxir::StoreInst>(&*It++);
+  auto *S4NotInDAG = cast<sandboxir::StoreInst>(&*It++);
   // Check erase instruction callback.
   sandboxir::DependencyGraph DAG(getAA(*LLVMF), Ctx);
   DAG.extend({S1, S3});
+  auto *S1MemN = cast<sandboxir::MemDGNode>(DAG.getNode(S1));
+  auto *S2MemN = cast<sandboxir::MemDGNode>(DAG.getNode(S2));
+  auto *S3MemN = cast<sandboxir::MemDGNode>(DAG.getNode(S3));
+  EXPECT_EQ(S1MemN->getNumUnscheduledSuccs(), 2u);
+  EXPECT_EQ(S2MemN->getNumUnscheduledSuccs(), 1u);
+  EXPECT_EQ(S3MemN->getNumUnscheduledSuccs(), 0u);
   S2->eraseFromParent();
-  auto *DeletedN = DAG.getNodeOrNull(S2);
+  // Check that the DAG Node for S2 no longer exists.
+  auto *DeletedN = DAG.getNode(S2);
   EXPECT_TRUE(DeletedN == nullptr);
+  // Check that dependencies are maintained.
+  EXPECT_THAT(S3MemN->preds(DAG), testing::UnorderedElementsAre(S1MemN));
+  // Also check that UnscheduledSuccs was updated for S1.
+  EXPECT_EQ(S1MemN->getNumUnscheduledSuccs(), 1u);
   // Check the MemDGNode chain.
-  auto *S1MemN = cast<sandboxir::MemDGNode>(DAG.getNode(S1));
-  auto *S3MemN = cast<sandboxir::MemDGNode>(DAG.getNode(S3));
   EXPECT_EQ(S1MemN->getNextNode(), S3MemN);
   EXPECT_EQ(S3MemN->getPrevNode(), S1MemN);
@@ -973,7 +984,39 @@ define void @foo(ptr %ptr, i8 %v1, i8 %v2, i8 %v3, i8 %arg) {
   S1->eraseFromParent();
   EXPECT_EQ(S3MemN->getPrevNode(), nullptr);
 
-  // TODO: Check the dependencies to/from NewSN after they land.
+  // Check that we don't crash if we erase a node not in the DAG.
+  S4NotInDAG->eraseFromParent();
+}
+
+// Same but check that we don't update UnscheduledSuccs when Node is scheduled.
+TEST_F(DependencyGraphTest, EraseInstrCallbackScheduled) {
+  parseIR(C, R"IR(
+define void @foo(ptr %ptr, i8 %v1, i8 %v2, i8 %v3, i8 %v4, i8 %arg) {
+  store i8 %v1, ptr %ptr
+  store i8 %v2, ptr %ptr
+  ret void
+}
+)IR");
+  llvm::Function *LLVMF = &*M->getFunction("foo");
+  sandboxir::Context Ctx(C);
+  auto *F = Ctx.createFunction(LLVMF);
+  auto *BB = &*F->begin();
+  auto It = BB->begin();
+  auto *S1 = cast<sandboxir::StoreInst>(&*It++);
+  auto *S2 = cast<sandboxir::StoreInst>(&*It++);
+
+  sandboxir::DependencyGraph DAG(getAA(*LLVMF), Ctx);
+  DAG.extend({S1, S2});
+  auto *S1MemN = cast<sandboxir::MemDGNode>(DAG.getNode(S1));
+  auto *S2MemN = cast<sandboxir::MemDGNode>(DAG.getNode(S2));
+  EXPECT_EQ(S1MemN->getNumUnscheduledSuccs(), 1u);
+  EXPECT_EQ(S2MemN->getNumUnscheduledSuccs(), 0u);
+  // Mark S2 as scheduled and erase it.
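+  // (A node marked as scheduled has presumably already been accounted for by
+  // the scheduler, so the erase callback should not decrement its
+  // predecessors' UnscheduledSuccs counters a second time.)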
+  S2MemN->setScheduled(true);
+  S2->eraseFromParent();
+  EXPECT_EQ(DAG.getNode(S2), nullptr);
+  // Check that we did not update S1's UnscheduledSuccs.
+  EXPECT_EQ(S1MemN->getNumUnscheduledSuccs(), 1u);
 }
 
 TEST_F(DependencyGraphTest, MoveInstrCallback) {
   parseIR(C, R"IR(
diff --git a/llvm/utils/TableGen/Common/CodeGenSchedule.h b/llvm/utils/TableGen/Common/CodeGenSchedule.h
index 981782c17c48c..8343257b458dd 100644
--- a/llvm/utils/TableGen/Common/CodeGenSchedule.h
+++ b/llvm/utils/TableGen/Common/CodeGenSchedule.h
@@ -467,23 +467,6 @@ class CodeGenSchedModels {
 public:
   CodeGenSchedModels(const RecordKeeper &RK, const CodeGenTarget &TGT);
 
-  // iterator access to the scheduling classes.
-  using class_iterator = std::vector<CodeGenSchedClass>::iterator;
-  using const_class_iterator = std::vector<CodeGenSchedClass>::const_iterator;
-  class_iterator classes_begin() { return SchedClasses.begin(); }
-  const_class_iterator classes_begin() const { return SchedClasses.begin(); }
-  class_iterator classes_end() { return SchedClasses.end(); }
-  const_class_iterator classes_end() const { return SchedClasses.end(); }
-  iterator_range<class_iterator> classes() {
-    return make_range(classes_begin(), classes_end());
-  }
-  iterator_range<const_class_iterator> classes() const {
-    return make_range(classes_begin(), classes_end());
-  }
-  ArrayRef<CodeGenSchedClass> explicit_classes() const {
-    return ArrayRef(SchedClasses).take_front(NumInstrSchedClasses);
-  }
-
   const Record *getModelOrItinDef(const Record *ProcDef) const {
     const Record *ModelDef = ProcDef->getValueAsDef("SchedModel");
     const Record *ItinsDef = ProcDef->getValueAsDef("ProcItin");
@@ -497,13 +480,13 @@ class CodeGenSchedModels {
   const CodeGenProcModel &getModelForProc(const Record *ProcDef) const {
     const Record *ModelDef = getModelOrItinDef(ProcDef);
-    ProcModelMapTy::const_iterator I = ProcModelMap.find(ModelDef);
+    auto I = ProcModelMap.find(ModelDef);
     assert(I != ProcModelMap.end() && "missing machine model");
     return ProcModels[I->second];
   }
 
   const CodeGenProcModel &getProcModel(const Record *ModelDef) const {
-    ProcModelMapTy::const_iterator I = ProcModelMap.find(ModelDef);
+    auto I = ProcModelMap.find(ModelDef);
     assert(I != ProcModelMap.end() && "missing machine model");
     return ProcModels[I->second];
   }
@@ -512,10 +495,6 @@ class CodeGenSchedModels {
         static_cast<const CodeGenSchedModels &>(*this).getProcModel(ModelDef));
   }
 
-  // Iterate over the unique processor models.
-  using ProcIter = std::vector<CodeGenProcModel>::const_iterator;
-  ProcIter procModelBegin() const { return ProcModels.begin(); }
-  ProcIter procModelEnd() const { return ProcModels.end(); }
   ArrayRef<CodeGenProcModel> procModels() const { return ProcModels; }
 
   // Return true if any processors have itineraries.
@@ -564,10 +543,10 @@
   // for NoItinerary.
   unsigned getSchedClassIdx(const CodeGenInstruction &Inst) const;
 
-  using SchedClassIter = std::vector<CodeGenSchedClass>::const_iterator;
-  SchedClassIter schedClassBegin() const { return SchedClasses.begin(); }
-  SchedClassIter schedClassEnd() const { return SchedClasses.end(); }
   ArrayRef<CodeGenSchedClass> schedClasses() const { return SchedClasses; }
+  ArrayRef<CodeGenSchedClass> explicitSchedClasses() const {
+    return schedClasses().take_front(NumInstrSchedClasses);
+  }
 
   unsigned numInstrSchedClasses() const { return NumInstrSchedClasses; }
diff --git a/llvm/utils/TableGen/InstrInfoEmitter.cpp b/llvm/utils/TableGen/InstrInfoEmitter.cpp
index 3ea76ed414d91..377bfb593be5f 100644
--- a/llvm/utils/TableGen/InstrInfoEmitter.cpp
+++ b/llvm/utils/TableGen/InstrInfoEmitter.cpp
@@ -1244,7 +1244,7 @@ void InstrInfoEmitter::emitEnums(
   OS << "#undef GET_INSTRINFO_SCHED_ENUM\n";
   OS << "namespace llvm::" << Namespace << "::Sched {\n\n";
   OS << "  enum {\n";
-  auto ExplictClasses = SchedModels.explicit_classes();
+  auto ExplictClasses = SchedModels.explicitSchedClasses();
   for (const auto &[Idx, Class] : enumerate(ExplictClasses))
     OS << "    " << Class.Name << "\t= " << Idx << ",\n";
   OS << "    SCHED_LIST_END = " << ExplictClasses.size() << '\n';
diff --git a/llvm/utils/TableGen/SubtargetEmitter.cpp b/llvm/utils/TableGen/SubtargetEmitter.cpp
index aec05f1ae7742..d4510f2757349 100644
--- a/llvm/utils/TableGen/SubtargetEmitter.cpp
+++ b/llvm/utils/TableGen/SubtargetEmitter.cpp
@@ -106,7 +106,7 @@ class SubtargetEmitter {
   void emitStageAndOperandCycleData(
       raw_ostream &OS, std::vector<std::vector<InstrItinerary>> &ProcItinLists);
   void emitItineraries(raw_ostream &OS,
-                       std::vector<std::vector<InstrItinerary>> &ProcItinLists);
+                       ArrayRef<std::vector<InstrItinerary>> ProcItinLists);
   unsigned emitRegisterFileTables(const CodeGenProcModel &ProcModel,
                                   raw_ostream &OS);
   void emitLoadStoreQueueInfo(const CodeGenProcModel &ProcModel,
@@ -477,7 +477,6 @@ void SubtargetEmitter::emitStageAndOperandCycleData(
 
   // Emit functional units for all the itineraries.
   for (const CodeGenProcModel &ProcModel : SchedModels.procModels()) {
-
     if (!ItinsDefSet.insert(ProcModel.ItinsDef).second)
       continue;
 
@@ -489,25 +488,23 @@ void SubtargetEmitter::emitStageAndOperandCycleData(
     OS << "\n// Functional units for \"" << Name << "\"\n"
        << "namespace " << Name << "FU {\n";
 
-    for (unsigned J = 0, FUN = FUs.size(); J < FUN; ++J)
-      OS << "  const InstrStage::FuncUnits " << FUs[J]->getName()
-         << " = 1ULL << " << J << ";\n";
+    for (const auto &[Idx, FU] : enumerate(FUs))
+      OS << "  const InstrStage::FuncUnits " << FU->getName() << " = 1ULL << "
+         << Idx << ";\n";
 
     OS << "} // end namespace " << Name << "FU\n";
 
     ConstRecVec BPs = ProcModel.ItinsDef->getValueAsListOfDefs("BP");
-    if (!BPs.empty()) {
-      OS << "\n// Pipeline forwarding paths for itineraries \"" << Name
-         << "\"\n"
-         << "namespace " << Name << "Bypass {\n";
+    if (BPs.empty())
+      continue;
 
+    OS << "\n// Pipeline forwarding paths for itineraries \"" << Name << "\"\n"
+       << "namespace " << Name << "Bypass {\n";
 
-      OS << "  const unsigned NoBypass = 0;\n";
-      for (unsigned J = 0, BPN = BPs.size(); J < BPN; ++J)
-        OS << "  const unsigned " << BPs[J]->getName() << " = 1 << " << J
-           << ";\n";
+    OS << "  const unsigned NoBypass = 0;\n";
+    for (const auto &[Idx, BP] : enumerate(BPs))
+      OS << "  const unsigned " << BP->getName() << " = 1 << " << Idx << ";\n";
 
-      OS << "} // end namespace " << Name << "Bypass\n";
-    }
+    OS << "} // end namespace " << Name << "Bypass\n";
   }
 
   // Begin stages table
@@ -647,46 +644,39 @@
   // CodeGenSchedClass::Index.
 //
 void SubtargetEmitter::emitItineraries(
-    raw_ostream &OS, std::vector<std::vector<InstrItinerary>> &ProcItinLists) {
+    raw_ostream &OS, ArrayRef<std::vector<InstrItinerary>> ProcItinLists) {
   // Multiple processor models may share an itinerary record. Emit it once.
   SmallPtrSet<const Record *, 8> ItinsDefSet;
 
-  // For each processor's machine model
-  std::vector<std::vector<InstrItinerary>>::iterator ProcItinListsIter =
-      ProcItinLists.begin();
-  for (CodeGenSchedModels::ProcIter PI = SchedModels.procModelBegin(),
-                                    PE = SchedModels.procModelEnd();
-       PI != PE; ++PI, ++ProcItinListsIter) {
-
-    const Record *ItinsDef = PI->ItinsDef;
+  for (const auto &[Proc, ItinList] :
+       zip_equal(SchedModels.procModels(), ProcItinLists)) {
+    const Record *ItinsDef = Proc.ItinsDef;
     if (!ItinsDefSet.insert(ItinsDef).second)
       continue;
 
-    // Get the itinerary list for the processor.
-    assert(ProcItinListsIter != ProcItinLists.end() && "bad iterator");
-    std::vector<InstrItinerary> &ItinList = *ProcItinListsIter;
-
     // Empty itineraries aren't referenced anywhere in the tablegen output
     // so don't emit them.
     if (ItinList.empty())
       continue;
 
+    // Begin processor itinerary table
     OS << "\n";
-    OS << "static const llvm::InstrItinerary ";
+    OS << "static constexpr llvm::InstrItinerary " << ItinsDef->getName()
+       << "[] = {\n";
 
-    // Begin processor itinerary table
-    OS << ItinsDef->getName() << "[] = {\n";
+    ArrayRef<CodeGenSchedClass> ItinSchedClasses =
+        SchedModels.schedClasses().take_front(ItinList.size());
 
     // For each itinerary class in CodeGenSchedClass::Index order.
-    for (unsigned J = 0, M = ItinList.size(); J < M; ++J) {
-      InstrItinerary &Intinerary = ItinList[J];
-
+    for (const auto &[Idx, Intinerary, SchedClass] :
+         enumerate(ItinList, ItinSchedClasses)) {
       // Emit Itinerary in the form of
-      // { firstStage, lastStage, firstCycle, lastCycle } // index
+      // { NumMicroOps, FirstStage, LastStage, FirstOperandCycle,
+      //   LastOperandCycle } // index class name
       OS << "  { " << Intinerary.NumMicroOps << ", " << Intinerary.FirstStage
          << ", " << Intinerary.LastStage << ", " << Intinerary.FirstOperandCycle
-         << ", " << Intinerary.LastOperandCycle << " }"
-         << ", // " << J << " " << SchedModels.getSchedClass(J).Name << "\n";
+         << ", " << Intinerary.LastOperandCycle << " }" << ", // " << Idx << " "
+         << SchedClass.Name << "\n";
     }
     // End processor itinerary table
     OS << "  { 0, uint16_t(~0U), uint16_t(~0U), uint16_t(~0U), uint16_t(~0U) }"
@@ -1442,18 +1432,16 @@ void SubtargetEmitter::emitSchedClassTables(SchedClassTables &SchedTables,
   OS << "}; // " << Target << "ReadAdvanceTable\n";
 
   // Emit a SchedClass table for each processor.
-  for (CodeGenSchedModels::ProcIter PI = SchedModels.procModelBegin(),
-                                    PE = SchedModels.procModelEnd();
-       PI != PE; ++PI) {
-    if (!PI->hasInstrSchedModel())
+  for (const auto &[Idx, Proc] : enumerate(SchedModels.procModels())) {
+    if (!Proc.hasInstrSchedModel())
       continue;
 
     std::vector<MCSchedClassDesc> &SCTab =
-        SchedTables.ProcSchedClasses[1 + (PI - SchedModels.procModelBegin())];
+        SchedTables.ProcSchedClasses[1 + Idx];
 
     OS << "\n// {Name, NumMicroOps, BeginGroup, EndGroup, RetireOOO,"
        << " WriteProcResIdx,#, WriteLatencyIdx,#, ReadAdvanceIdx,#}\n";
-    OS << "static const llvm::MCSchedClassDesc " << PI->ModelName
+    OS << "static const llvm::MCSchedClassDesc " << Proc.ModelName
        << "SchedClasses[] = {\n";
 
     // The first class is always invalid. We have no way to distinguish it except by
@@ -1480,7 +1468,7 @@ void SubtargetEmitter::emitSchedClassTables(SchedClassTables &SchedTables,
          << format("%2d", MCDesc.ReadAdvanceIdx) << ", "
          << MCDesc.NumReadAdvanceEntries << "}, // #" << SCIdx << '\n';
     }
-    OS << "}; // " << PI->ModelName << "SchedClasses\n";
+    OS << "}; // " << Proc.ModelName << "SchedClasses\n";
   }
 }
@@ -1528,14 +1516,10 @@ void SubtargetEmitter::emitProcessorModels(raw_ostream &OS) {
     OS << "  " << PM.Index << ", // Processor ID\n";
     if (PM.hasInstrSchedModel())
-      OS << "  " << PM.ModelName << "ProcResources"
-         << ",\n"
-         << "  " << PM.ModelName << "SchedClasses"
-         << ",\n"
+      OS << "  " << PM.ModelName << "ProcResources" << ",\n"
+         << "  " << PM.ModelName << "SchedClasses" << ",\n"
          << "  " << PM.ProcResourceDefs.size() + 1 << ",\n"
-         << "  "
-         << (SchedModels.schedClassEnd() - SchedModels.schedClassBegin())
-         << ",\n";
+         << "  " << SchedModels.schedClasses().size() << ",\n";
     else
       OS << "  nullptr, nullptr, 0, 0,"
          << " // No instruction-level machine model.\n";
@@ -1747,7 +1731,7 @@ void SubtargetEmitter::emitSchedModelHelpersImpl(
                  ? "if (CPUID == "
                  : "if (SchedModel->getProcessorID() == ");
       OS << PI << ") ";
-      OS << "{ // " << (SchedModels.procModelBegin() + PI)->ModelName << '\n';
+      OS << "{ // " << SchedModels.procModels()[PI].ModelName << '\n';
     }
 
     // Now emit transitions associated with processor PI.
diff --git a/llvm/utils/gn/secondary/bolt/include/bolt/Core/BUILD.gn b/llvm/utils/gn/secondary/bolt/include/bolt/Core/BUILD.gn
new file mode 100644
index 0000000000000..bf5cbf1ff0cd9
--- /dev/null
+++ b/llvm/utils/gn/secondary/bolt/include/bolt/Core/BUILD.gn
@@ -0,0 +1,14 @@
+import("//llvm/lib/Target/write_target_def_file.gni")
+
+bolt_targets_to_build = []
+foreach(target, llvm_targets_to_build) {
+  if (target == "AArch64" || target == "RISCV" || target == "X86") {
+    bolt_targets_to_build += [ target ]
+  }
+}
+
+write_target_def_file("TargetConfig.def") {
+  key = "BOLT_ENUM_TARGETS"
+  value = "BOLT_TARGET"
+  all_targets = bolt_targets_to_build
+}
diff --git a/llvm/utils/gn/secondary/bolt/tools/driver/BUILD.gn b/llvm/utils/gn/secondary/bolt/tools/driver/BUILD.gn
index 004a7359698de..c174bf3c613f4 100644
--- a/llvm/utils/gn/secondary/bolt/tools/driver/BUILD.gn
+++ b/llvm/utils/gn/secondary/bolt/tools/driver/BUILD.gn
@@ -23,6 +23,7 @@ group("symlinks") {
 executable("llvm-bolt") {
   configs += [ "//llvm/utils/gn/build:bolt_code" ]
   deps = [
+    "//bolt/include/bolt/Core:TargetConfig.def",
    "//bolt/lib/Profile",
    "//bolt/lib/Rewrite",
    "//bolt/lib/Utils",
diff --git a/llvm/utils/gn/secondary/bolt/tools/heatmap/BUILD.gn b/llvm/utils/gn/secondary/bolt/tools/heatmap/BUILD.gn
index b6270106dbaf8..78b65a12e945a 100644
--- a/llvm/utils/gn/secondary/bolt/tools/heatmap/BUILD.gn
+++ b/llvm/utils/gn/secondary/bolt/tools/heatmap/BUILD.gn
@@ -1,6 +1,7 @@
 executable("llvm-bolt-heatmap") {
   configs += [ "//llvm/utils/gn/build:bolt_code" ]
   deps = [
+    "//bolt/include/bolt/Core:TargetConfig.def",
    "//bolt/lib/Profile",
    "//bolt/lib/Rewrite",
    "//bolt/lib/Utils",
diff --git a/llvm/utils/gn/secondary/bolt/unittests/Core/BUILD.gn b/llvm/utils/gn/secondary/bolt/unittests/Core/BUILD.gn
index c7c9459fdff16..79f19a416c0e1 100644
--- a/llvm/utils/gn/secondary/bolt/unittests/Core/BUILD.gn
+++ b/llvm/utils/gn/secondary/bolt/unittests/Core/BUILD.gn
@@ -4,6 +4,7 @@ import("//third-party/unittest/unittest.gni")
 unittest("CoreTests") {
   configs += [ "//llvm/utils/gn/build:bolt_code" ]
   deps = [
+    "//bolt/include/bolt/Core:TargetConfig.def",
    "//bolt/lib/Core",
    "//bolt/lib/Rewrite",
"//bolt/lib/Profile", diff --git a/llvm/utils/gn/secondary/llvm/include/llvm/Config/BUILD.gn b/llvm/utils/gn/secondary/llvm/include/llvm/Config/BUILD.gn index c9f3af65a4565..5a13545a15b13 100644 --- a/llvm/utils/gn/secondary/llvm/include/llvm/Config/BUILD.gn +++ b/llvm/utils/gn/secondary/llvm/include/llvm/Config/BUILD.gn @@ -1,10 +1,10 @@ import("//llvm/include/llvm/Config/config.gni") import("//llvm/lib/DebugInfo/PDB/enable_dia.gni") -import("//llvm/lib/Target/targets.gni") import("//llvm/lib/Target/targets_with_asm_parsers.gni") import("//llvm/lib/Target/targets_with_disassemblers.gni") import("//llvm/lib/Target/targets_with_exegesis.gni") import("//llvm/lib/Target/targets_with_mcas.gni") +import("//llvm/lib/Target/write_target_def_file.gni") import("//llvm/triples.gni") import("//llvm/utils/gn/build/buildflags.gni") import("//llvm/utils/gn/build/libs/curl/enable.gni") @@ -477,65 +477,41 @@ write_cmake_config("llvm-config") { ############################################################################## # .def files used by llvm/lib/Target -template("write_target_def_file") { - assert(defined(invoker.key), "must set 'key' in $target_name") - assert(defined(invoker.value), "must set 'value' in $target_name") - - write_cmake_config(target_name) { - visibility = [ ":write_target_def_files" ] - input = "$target_name.in" - output = "$target_gen_dir/$target_name" - - if (defined(invoker.all_targets)) { - all_targets = invoker.all_targets - } else { - all_targets = llvm_targets_to_build - } - - # Build something like - # `LLVM_ENUM_ASM_PARSERS=LLVM_ASM_PARSER(ARM)\nLLVM_ASM_PARSER(X86)\n`. Note - # that \n is a literal '\' followed by a literal 'n', not a newline - # character. (write_cmake_config.py replaces that with a real newline). - value = "" - foreach(target, all_targets) { - value = "$value${invoker.value}($target)\n" - } - if (all_targets == []) { - not_needed(invoker, [ "value" ]) - } - values = [ "${invoker.key}=$value" ] - } -} - write_target_def_file("AsmParsers.def") { + visibility = [ ":write_target_def_files" ] key = "LLVM_ENUM_ASM_PARSERS" value = "LLVM_ASM_PARSER" all_targets = targets_with_asm_parsers } write_target_def_file("AsmPrinters.def") { + visibility = [ ":write_target_def_files" ] key = "LLVM_ENUM_ASM_PRINTERS" value = "LLVM_ASM_PRINTER" } write_target_def_file("Disassemblers.def") { + visibility = [ ":write_target_def_files" ] key = "LLVM_ENUM_DISASSEMBLERS" value = "LLVM_DISASSEMBLER" all_targets = targets_with_disassemblers } write_target_def_file("Targets.def") { + visibility = [ ":write_target_def_files" ] key = "LLVM_ENUM_TARGETS" value = "LLVM_TARGET" } write_target_def_file("TargetMCAs.def") { + visibility = [ ":write_target_def_files" ] key = "LLVM_ENUM_TARGETMCAS" value = "LLVM_TARGETMCA" all_targets = targets_with_mcas } write_target_def_file("TargetExegesis.def") { + visibility = [ ":write_target_def_files" ] key = "LLVM_ENUM_EXEGESIS" value = "LLVM_EXEGESIS" all_targets = targets_with_exegesis diff --git a/llvm/utils/gn/secondary/llvm/lib/Target/write_target_def_file.gni b/llvm/utils/gn/secondary/llvm/lib/Target/write_target_def_file.gni new file mode 100644 index 0000000000000..8ff5edeb41f3d --- /dev/null +++ b/llvm/utils/gn/secondary/llvm/lib/Target/write_target_def_file.gni @@ -0,0 +1,36 @@ +import("//llvm/lib/Target/targets.gni") +import("//llvm/utils/gn/build/write_cmake_config.gni") + +template("write_target_def_file") { + assert(defined(invoker.key), "must set 'key' in $target_name") + assert(defined(invoker.value), "must set 'value' in 
$target_name") + + write_cmake_config(target_name) { + input = "$target_name.in" + output = "$target_gen_dir/$target_name" + + if (defined(invoker.all_targets)) { + all_targets = invoker.all_targets + } else { + all_targets = llvm_targets_to_build + } + + if (defined(invoker.visibility)) { + visibility = invoker.visibility + } + + # Build something like + # `LLVM_ENUM_ASM_PARSERS=LLVM_ASM_PARSER(ARM)\nLLVM_ASM_PARSER(X86)\n`. Note + # that \n is a literal '\' followed by a literal 'n', not a newline + # character. (write_cmake_config.py replaces that with a real newline). + value = "" + foreach(target, all_targets) { + value = "$value${invoker.value}($target)\n" + } + if (all_targets == []) { + not_needed(invoker, [ "value" ]) + } + values = [ "${invoker.key}=$value" ] + } +} + diff --git a/llvm/utils/gn/secondary/llvm/lib/Transforms/Vectorize/BUILD.gn b/llvm/utils/gn/secondary/llvm/lib/Transforms/Vectorize/BUILD.gn index 433a7f43bb780..71493498ef03f 100644 --- a/llvm/utils/gn/secondary/llvm/lib/Transforms/Vectorize/BUILD.gn +++ b/llvm/utils/gn/secondary/llvm/lib/Transforms/Vectorize/BUILD.gn @@ -19,7 +19,9 @@ static_library("Vectorize") { "SandboxVectorizer/Legality.cpp", "SandboxVectorizer/Passes/BottomUpVec.cpp", "SandboxVectorizer/Passes/RegionsFromMetadata.cpp", + "SandboxVectorizer/Passes/SeedCollection.cpp", "SandboxVectorizer/Passes/TransactionAcceptOrRevert.cpp", + "SandboxVectorizer/Passes/TransactionSave.cpp", "SandboxVectorizer/SandboxVectorizer.cpp", "SandboxVectorizer/SandboxVectorizerPassBuilder.cpp", "SandboxVectorizer/Scheduler.cpp", diff --git a/mlir/cmake/modules/MLIRDetectPythonEnv.cmake b/mlir/cmake/modules/MLIRDetectPythonEnv.cmake index 3a87d39c28a06..f7a6fa6248440 100644 --- a/mlir/cmake/modules/MLIRDetectPythonEnv.cmake +++ b/mlir/cmake/modules/MLIRDetectPythonEnv.cmake @@ -22,6 +22,21 @@ macro(mlir_configure_python_dev_packages) find_package(Python3 ${LLVM_MINIMUM_PYTHON_VERSION} COMPONENTS Interpreter ${_python_development_component} REQUIRED) + # We look for both Python3 and Python, the search algorithm should be + # consistent, otherwise disastrous result is almost guaranteed. + # Warn if the policies for treating virtual environment are not defined + # consistently. + # For more details check issue #126162. + if(((DEFINED Python_FIND_VIRTUALENV) AND (NOT DEFINED Python3_FIND_VIRTUALENV)) OR + ((NOT DEFINED Python_FIND_VIRTUALENV) AND (DEFINED Python3_FIND_VIRTUALENV))) + message(WARNING "Only one of Python3_FIND_VIRTUALENV and Python_FIND_VIRTUALENV variables is defined. " + "Make sure that both variables are defined and have the same value.") + elseif((DEFINED Python_FIND_VIRTUALENV) AND (DEFINED Python3_FIND_VIRTUALENV) AND + (NOT Python_FIND_VIRTUALENV STREQUAL Python3_FIND_VIRTUALENV)) + message(WARNING "Python3_FIND_VIRTUALENV and Python_FIND_VIRTUALENV are defined differently. " + "Make sure that the variables have the same values.") + endif() + # It's a little silly to detect Python a second time, but nanobind's cmake # code looks for Python_ not Python3_. 
  find_package(Python ${LLVM_MINIMUM_PYTHON_VERSION}
diff --git a/mlir/include/mlir/Analysis/DataFlowFramework.h b/mlir/include/mlir/Analysis/DataFlowFramework.h
index a3714c4332fbb..6aa0900d1412a 100644
--- a/mlir/include/mlir/Analysis/DataFlowFramework.h
+++ b/mlir/include/mlir/Analysis/DataFlowFramework.h
@@ -146,7 +146,7 @@ struct ProgramPoint : public StorageUniquer::BaseStorage {
   Operation *op = nullptr;
 };
 
-inline raw_ostream &operator<<(raw_ostream &os, ProgramPoint point) {
+inline raw_ostream &operator<<(raw_ostream &os, const ProgramPoint &point) {
   point.print(os);
   return os;
 }
@@ -662,7 +662,7 @@ inline raw_ostream &operator<<(raw_ostream &os, const AnalysisState &state) {
   return os;
 }
 
-inline raw_ostream &operator<<(raw_ostream &os, LatticeAnchor anchor) {
+inline raw_ostream &operator<<(raw_ostream &os, const LatticeAnchor &anchor) {
   anchor.print(os);
   return os;
 }
diff --git a/mlir/include/mlir/Dialect/EmitC/IR/EmitC.h b/mlir/include/mlir/Dialect/EmitC/IR/EmitC.h
index 87a4078f280f6..57029c64ffd00 100644
--- a/mlir/include/mlir/Dialect/EmitC/IR/EmitC.h
+++ b/mlir/include/mlir/Dialect/EmitC/IR/EmitC.h
@@ -27,6 +27,8 @@
 #include "mlir/Dialect/EmitC/IR/EmitCDialect.h.inc"
 #include "mlir/Dialect/EmitC/IR/EmitCEnums.h.inc"
 
+#include <variant>
+
 namespace mlir {
 namespace emitc {
 void buildTerminatedBody(OpBuilder &builder, Location loc);
@@ -47,6 +49,10 @@ bool isSupportedFloatType(mlir::Type type);
 /// Determines whether \p type is a emitc.size_t/ssize_t type.
 bool isPointerWideType(mlir::Type type);
 
+// Either a literal string, or a placeholder for the fmtArgs.
+struct Placeholder {};
+using ReplacementItem = std::variant<StringRef, Placeholder>;
+
 } // namespace emitc
 } // namespace mlir
diff --git a/mlir/include/mlir/Dialect/EmitC/IR/EmitC.td b/mlir/include/mlir/Dialect/EmitC/IR/EmitC.td
index 4fbce995ce5b8..15f3a5a4742c0 100644
--- a/mlir/include/mlir/Dialect/EmitC/IR/EmitC.td
+++ b/mlir/include/mlir/Dialect/EmitC/IR/EmitC.td
@@ -23,6 +23,7 @@ include "mlir/Interfaces/FunctionInterfaces.td"
 include "mlir/Interfaces/SideEffectInterfaces.td"
 include "mlir/IR/OpAsmInterface.td"
 include "mlir/IR/RegionKindInterface.td"
+include "mlir/IR/BuiltinAttributes.td"
 
 //===----------------------------------------------------------------------===//
 // EmitC op definitions
 //===----------------------------------------------------------------------===//
@@ -56,6 +57,52 @@ def IntegerIndexOrOpaqueType : Type;
 def FloatIntegerIndexOrOpaqueType : AnyTypeOf<[EmitCFloatType,
   IntegerIndexOrOpaqueType]>;
 
+def EmitC_FileOp
+    : EmitC_Op<"file", [IsolatedFromAbove, NoRegionArguments, SymbolTable,
+                        OpAsmOpInterface]#GraphRegionNoTerminator.traits> {
+  let summary = "A file container operation";
+  let description = [{
+    A `file` represents a single C/C++ file.
+
+    `mlir-translate` ignores the body of all `emitc.file` ops
+    unless the `-file-id=id` flag is used. With that flag, all `emitc.file` ops
+    with a matching id are emitted.
+
+    Example:
+
+    ```mlir
+    emitc.file "main" {
+      emitc.func @func_one() {
+        emitc.return
+      }
+    }
+    ```
+  }];
+
+  let arguments = (ins Builtin_StringAttr:$id);
+  let regions = (region SizedRegion<1>:$bodyRegion);
+
+  let assemblyFormat = "$id attr-dict-with-keyword $bodyRegion";
+  let builders = [OpBuilder<(ins CArg<"StringRef">:$id)>];
+  let extraClassDeclaration = [{
+    /// Construct a file op from the given location with a name.
+    static FileOp create(Location loc, StringRef name);
+
+    //===------------------------------------------------------------------===//
+    // OpAsmOpInterface Methods
+    //===------------------------------------------------------------------===//
+
+    /// EmitC ops in the body can omit their 'emitc.' prefix in the assembly.
+    static ::llvm::StringRef getDefaultDialect() {
+      return "emitc";
+    }
+  }];
+
+  // We need to ensure that the body region has a block;
+  // the auto-generated builders do not guarantee that.
+  let skipDefaultBuilders = 1;
+}
+
 def EmitC_AddOp : EmitC_BinaryOp<"add", [CExpression]> {
   let summary = "Addition operation";
   let description = [{
@@ -1222,10 +1269,29 @@ def EmitC_VerbatimOp : EmitC_Op<"verbatim"> {
     }
     #endif
     ```
+
+    If the `emitc.verbatim` op has operands, then the `value` is interpreted as
+    a format string, where each `{}` is a placeholder for the next operand, in
+    order. For example,
+    `emitc.verbatim "#pragma my src={} dst={}" %src, %dest : i32, i32`
+    would be emitted as `#pragma my src=a dst=b` if `%src` became `a` and
+    `%dest` became `b` in the C code.
+    `{{` in the format string is interpreted as a single `{` and doesn't
+    introduce a placeholder.
   }];
 
-  let arguments = (ins StrAttr:$value);
-  let assemblyFormat = "$value attr-dict";
+  let extraClassDeclaration = [{
+    FailureOr<SmallVector<ReplacementItem>> parseFormatString();
+  }];
+
+  let arguments = (ins StrAttr:$value, Variadic:$fmtArgs);
+
+  let builders = [OpBuilder<(ins "::mlir::StringAttr":$value),
+                            [{ build($_builder, $_state, value, {}); }]>,
+                  OpBuilder<(ins "::llvm::StringRef":$value),
+                            [{ build($_builder, $_state, value, {}); }]>];
+  let hasVerifier = 1;
+  let assemblyFormat =
+      "$value (`args` $fmtArgs^ `:` type($fmtArgs))? attr-dict";
 }
 
 def EmitC_AssignOp : EmitC_Op<"assign", []> {
diff --git a/mlir/include/mlir/Dialect/GPU/IR/CompilationInterfaces.h b/mlir/include/mlir/Dialect/GPU/IR/CompilationInterfaces.h
index 9a890ae24d8fc..139360f8bd3fc 100644
--- a/mlir/include/mlir/Dialect/GPU/IR/CompilationInterfaces.h
+++ b/mlir/include/mlir/Dialect/GPU/IR/CompilationInterfaces.h
@@ -79,6 +79,12 @@ class TargetOptions {
   std::pair<llvm::BumpPtrAllocator, SmallVector<const char *>>
   tokenizeCmdOptions() const;
 
+  /// Returns a tokenization of the substring of the command line options that
+  /// starts with `startsWith` and extends to the end of the command line
+  /// options, and removes that substring from the stored options.
+  std::pair<llvm::BumpPtrAllocator, SmallVector<const char *>>
+  tokenizeAndRemoveSuffixCmdOptions(llvm::StringRef startsWith);
+
   /// Returns the compilation target.
   CompilationTarget getCompilationTarget() const;
diff --git a/mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td b/mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td
index 7efa4ffa2aa6f..01059e42974d0 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td
@@ -451,12 +451,12 @@ def ROCDL_GlobalLoadLDSOp :
 def ROCDLBufferRsrc : LLVM_PointerInAddressSpace<8>;
 
 def ROCDL_MakeBufferRsrcOp :
-  ROCDL_IntrOp<"make.buffer.rsrc", [], [0], [Pure], 1>,
+  ROCDL_IntrOp<"make.buffer.rsrc", [0], [0], [Pure], 1>,
   Arguments<(ins LLVM_AnyPointer:$base,
                  I16:$stride,
                  I32:$numRecords,
                  I32:$flags)> {
-  let results = (outs ROCDLBufferRsrc:$res);
+  let results = (outs LLVM_AnyPointer:$res);
   let assemblyFormat = "operands attr-dict `:` type($base) `to` type($res)";
 }
 
diff --git a/mlir/include/mlir/Dialect/Math/IR/MathOps.td b/mlir/include/mlir/Dialect/Math/IR/MathOps.td
index 8a277320e2f91..16ce4e2366c76 100644
--- a/mlir/include/mlir/Dialect/Math/IR/MathOps.td
+++ b/mlir/include/mlir/Dialect/Math/IR/MathOps.td
@@ -560,6 +560,31 @@ def Math_ErfOp : Math_FloatUnaryOp<"erf"> {
   let hasFolder = 1;
 }
 
+//===----------------------------------------------------------------------===//
+// ErfcOp
+//===----------------------------------------------------------------------===//
+
+def Math_ErfcOp : Math_FloatUnaryOp<"erfc"> {
+  let summary = "complementary error function of the specified value";
+  let description = [{
+    The `erfc` operation computes the complementary error function, defined as
+    1-erf(x). This function is part of libm and is needed for accuracy, since
+    simply calculating 1-erf(x) when erf(x) is close to 1 gives inaccurate
+    results due to cancellation. It takes one operand of floating point type
+    (i.e., scalar, tensor or vector) and returns one result of the same type.
+    It has no standard attributes.
+
+    Example:
+
+    ```mlir
+    // Scalar complementary error function value.
+    %a = math.erfc %b : f64
+    ```
+  }];
+  let hasFolder = 1;
+}
+
 //===----------------------------------------------------------------------===//
 // ExpOp
diff --git a/mlir/include/mlir/Dialect/Math/Transforms/Approximation.h b/mlir/include/mlir/Dialect/Math/Transforms/Approximation.h
index b4ebc2f0f8fcd..ecfdb71817dff 100644
--- a/mlir/include/mlir/Dialect/Math/Transforms/Approximation.h
+++ b/mlir/include/mlir/Dialect/Math/Transforms/Approximation.h
@@ -23,6 +23,14 @@ struct ErfPolynomialApproximation : public OpRewritePattern<math::ErfOp> {
                                 PatternRewriter &rewriter) const final;
 };
 
+struct ErfcPolynomialApproximation : public OpRewritePattern<math::ErfcOp> {
+public:
+  using OpRewritePattern<math::ErfcOp>::OpRewritePattern;
+
+  LogicalResult matchAndRewrite(math::ErfcOp op,
+                                PatternRewriter &rewriter) const final;
+};
+
 } // namespace math
 } // namespace mlir
 
diff --git a/mlir/include/mlir/Dialect/Math/Transforms/Passes.h b/mlir/include/mlir/Dialect/Math/Transforms/Passes.h
index ea7a556297a76..9adc1c6940a15 100644
--- a/mlir/include/mlir/Dialect/Math/Transforms/Passes.h
+++ b/mlir/include/mlir/Dialect/Math/Transforms/Passes.h
@@ -47,6 +47,7 @@ struct MathPolynomialApproximationOptions {
 
 void populatePolynomialApproximateTanhPattern(RewritePatternSet &patterns);
 void populatePolynomialApproximateErfPattern(RewritePatternSet &patterns);
+void populatePolynomialApproximateErfcPattern(RewritePatternSet &patterns);
 
 // Adds patterns to convert to f32 around math functions for which `predicate`
 // returns true.
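Aside (not part of the diff): the cancellation problem that motivates a dedicated `math.erfc` op is easy to reproduce with plain libm calls. A minimal standalone C++ sketch, using only standard `<cmath>` functions:

```cpp
#include <cmath>
#include <cstdio>

int main() {
  // Near x = 4, erf(x) rounds to exactly 1.0f in single precision, so the
  // naive expression returns 0 while erfc(x) is still about 1.5e-8.
  const float xs[] = {1.0f, 2.0f, 3.0f, 4.0f};
  for (float x : xs) {
    float naive = 1.0f - std::erf(x); // cancels once erf(x) ~= 1
    float direct = std::erfc(x);      // dedicated libm implementation
    std::printf("x=%g  1-erf(x)=%.9g  erfc(x)=%.9g\n", x, naive, direct);
  }
  return 0;
}
```

This is the same reason the MathToLibm change further down maps `math.erfc` to `erfcf`, and why the polynomial approximation added below never forms 1-erf(x) directly.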
diff --git a/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td b/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td
index 983627027ac9c..2d8e022190f62 100644
--- a/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td
+++ b/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td
@@ -653,7 +653,7 @@ def DistributeOp : OpenMP_Op<"distribute", traits = [
     will be executed in parallel by threads in the current context. These
     iterations are spread across threads that already exist in the enclosing
     region.
-    
+
     The body region can only contain a single block which must contain a single
     operation. This operation must be another compatible loop wrapper or an
     `omp.loop_nest`.
@@ -1023,6 +1023,7 @@ def MapInfoOp : OpenMP_Op<"map.info", [AttrSizedOperandSegments]> {
                        OptionalAttr:$members_index,
                        Variadic:$bounds, /* rank-0 to rank-{n-1} */
                        OptionalAttr:$map_type,
+                       OptionalAttr<FlatSymbolRefAttr>:$mapper_id,
                        OptionalAttr:$map_capture_type,
                        OptionalAttr:$name,
                        DefaultValuedAttr:$partial_map);
@@ -1076,6 +1077,8 @@ def MapInfoOp : OpenMP_Op<"map.info", [AttrSizedOperandSegments]> {
     - 'map_type': OpenMP map type for this map capture, for example: from, to and
       always. It's a bitfield composed of the OpenMP runtime flags stored in
       OpenMPOffloadMappingFlags.
+    - 'mapper_id': OpenMP mapper map type modifier for this map capture. It's
+      used to specify a user-defined mapper to be used for mapping.
     - 'map_capture_type': Capture type for the variable e.g. this, byref, byvalue, byvla
       this can affect how the variable is lowered.
     - `name`: Holds the name of variable as specified in user clause (including bounds).
@@ -1087,6 +1090,7 @@ def MapInfoOp : OpenMP_Op<"map.info", [AttrSizedOperandSegments]> {
     `var_ptr` `(` $var_ptr `:` type($var_ptr) `,` $var_type `)`
     oilist(
         `var_ptr_ptr` `(` $var_ptr_ptr `:` type($var_ptr_ptr) `)`
+      | `mapper` `(` $mapper_id `)`
      | `map_clauses` `(` custom($map_type) `)`
      | `capture` `(` custom($map_capture_type) `)`
      | `members` `(` $members `:` custom($members_index) `:` type($members) `)`
@@ -1749,6 +1753,62 @@ def ScanOp : OpenMP_Op<"scan", [
   let hasVerifier = 1;
 }
 
+//===----------------------------------------------------------------------===//
+// 2.19.7.3 Declare Mapper Directive
+//===----------------------------------------------------------------------===//
+def DeclareMapperOp : OpenMP_Op<"declare_mapper", [
+    IsolatedFromAbove,
+    RecipeInterface,
+    SingleBlock,
+    Symbol
+  ]> {
+  let summary = "declare mapper directive";
+  let description = [{
+    The declare mapper directive declares a user-defined mapper for a given
+    type, and defines a mapper-identifier that can be used in a map clause.
+  }] # clausesDescription;
+
+  let arguments = (ins SymbolNameAttr:$sym_name,
+                       TypeAttr:$type);
+
+  let regions = (region AnyRegion:$body);
+
+  let assemblyFormat = "$sym_name `:` $type $body attr-dict";
+
+  let extraClassDeclaration = [{
+    /// Get the DeclareMapperInfoOp terminating this op's region.
+    DeclareMapperInfoOp getDeclareMapperInfo() {
+      return cast<DeclareMapperInfoOp>(
+          getRegion().getBlocks().front().getTerminator());
+    }
+
+    /// Get the SymVal block argument.
+    BlockArgument getSymVal() {
+      return getRegion().getArgument(0);
+    }
+  }];
+
+  let hasRegionVerifier = 1;
+}
+
+def DeclareMapperInfoOp : OpenMP_Op<"declare_mapper.info", [
+    HasParent<"DeclareMapperOp">,
+    Terminator
+  ], clauses = [
+    OpenMP_MapClause
+  ]> {
+  let summary = "declare mapper info";
+  let description = [{
+    This op is used to capture the map information related to its
+    parent DeclareMapperOp.
+ }] # clausesDescription; + + let builders = [ + OpBuilder<(ins CArg<"const DeclareMapperInfoOperands &">:$clauses)> + ]; + + let hasVerifier = 1; +} + //===----------------------------------------------------------------------===// // 2.19.5.7 declare reduction Directive //===----------------------------------------------------------------------===// @@ -1861,7 +1921,7 @@ def MaskedOp : OpenMP_Op<"masked", clauses = [ ], singleRegion = 1> { let summary = "masked construct"; let description = [{ - Masked construct allows to specify a structured block to be executed by a subset of + Masked construct allows to specify a structured block to be executed by a subset of threads of the current team. }] # clausesDescription; diff --git a/mlir/include/mlir/Dialect/SCF/Utils/Utils.h b/mlir/include/mlir/Dialect/SCF/Utils/Utils.h index 02ffa0da7a8b8..c0c11c9e38994 100644 --- a/mlir/include/mlir/Dialect/SCF/Utils/Utils.h +++ b/mlir/include/mlir/Dialect/SCF/Utils/Utils.h @@ -126,6 +126,9 @@ FailureOr loopUnrollByFactor( scf::ForOp forOp, uint64_t unrollFactor, function_ref annotateFn = nullptr); +/// Unrolls this loop completely. +LogicalResult loopUnrollFull(scf::ForOp forOp); + /// Unrolls and jams this `scf.for` operation by the specified unroll factor. /// Returns failure if the loop cannot be unrolled either due to restrictions or /// due to invalid unroll factors. In case of unroll factor of 1, the function diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td index 1eacc564655a8..cafe140469570 100644 --- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td +++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td @@ -4445,6 +4445,7 @@ def SPIRV_OC_OpSelectionMerge : I32EnumAttrCase<"OpSelectionMerge def SPIRV_OC_OpLabel : I32EnumAttrCase<"OpLabel", 248>; def SPIRV_OC_OpBranch : I32EnumAttrCase<"OpBranch", 249>; def SPIRV_OC_OpBranchConditional : I32EnumAttrCase<"OpBranchConditional", 250>; +def SPIRV_OC_OpKill : I32EnumAttrCase<"OpKill", 252>; def SPIRV_OC_OpReturn : I32EnumAttrCase<"OpReturn", 253>; def SPIRV_OC_OpReturnValue : I32EnumAttrCase<"OpReturnValue", 254>; def SPIRV_OC_OpUnreachable : I32EnumAttrCase<"OpUnreachable", 255>; @@ -4574,7 +4575,7 @@ def SPIRV_OpcodeAttr : SPIRV_OC_OpAtomicAnd, SPIRV_OC_OpAtomicOr, SPIRV_OC_OpAtomicXor, SPIRV_OC_OpPhi, SPIRV_OC_OpLoopMerge, SPIRV_OC_OpSelectionMerge, SPIRV_OC_OpLabel, SPIRV_OC_OpBranch, SPIRV_OC_OpBranchConditional, - SPIRV_OC_OpReturn, SPIRV_OC_OpReturnValue, SPIRV_OC_OpUnreachable, + SPIRV_OC_OpKill, SPIRV_OC_OpReturn, SPIRV_OC_OpReturnValue, SPIRV_OC_OpUnreachable, SPIRV_OC_OpGroupBroadcast, SPIRV_OC_OpGroupIAdd, SPIRV_OC_OpGroupFAdd, SPIRV_OC_OpGroupFMin, SPIRV_OC_OpGroupUMin, SPIRV_OC_OpGroupSMin, SPIRV_OC_OpGroupFMax, SPIRV_OC_OpGroupUMax, SPIRV_OC_OpGroupSMax, diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVControlFlowOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVControlFlowOps.td index cc2f0e4962d8a..ade20f915c0c3 100644 --- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVControlFlowOps.td +++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVControlFlowOps.td @@ -242,6 +242,48 @@ def SPIRV_FunctionCallOp : SPIRV_Op<"FunctionCall", [ // ----- +def SPIRV_KillOp : SPIRV_Op<"Kill", [Terminator]> { + let summary = [{ + Deprecated (use OpTerminateInvocation or OpDemoteToHelperInvocation). + }]; + + let description = [{ + Fragment-shader discard. 
+
+    Ceases all further processing in any invocation that executes it: Only
+    instructions these invocations executed before OpKill have observable
+    side effects. If this instruction is executed in non-uniform control
+    flow, all subsequent control flow is non-uniform (for invocations that
+    continue to execute).
+
+    This instruction must be the last instruction in a block.
+
+    This instruction is only valid in the Fragment Execution Model.
+
+
+    #### Example:
+
+    ```mlir
+    spirv.Kill
+    ```
+  }];
+
+  let availability = [
+    MinVersion<SPIRV_V_1_0>,
+    MaxVersion<SPIRV_V_1_6>,
+    Extension<[]>,
+    Capability<[SPIRV_C_Shader]>
+  ];
+
+  let arguments = (ins);
+  let results = (outs);
+  let assemblyFormat = "attr-dict";
+  let hasVerifier = 0;
+}
+
+// -----
+
 def SPIRV_LoopOp : SPIRV_Op<"mlir.loop", [InFunctionScope]> {
   let summary = "Define a structured loop.";
 
diff --git a/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td b/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td
index d11ba65a13736..4d5837ca26c91 100644
--- a/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td
+++ b/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td
@@ -1796,9 +1796,9 @@ def Tosa_ResizeOp : Tosa_InferShapedTypeOp<"resize"> {
 
   let arguments = (ins
     Tosa_Tensor4D:$input,
-    Tosa_IntArrayAttr4:$scale,
-    Tosa_IntArrayAttr2:$offset,
-    Tosa_IntArrayAttr2:$border,
+    Rank4TosaShape:$scale,
+    Rank2TosaShape:$offset,
+    Rank2TosaShape:$border,
     Tosa_ResizeTypeAttr:$mode
   );
 
@@ -1807,6 +1807,7 @@ def Tosa_ResizeOp : Tosa_InferShapedTypeOp<"resize"> {
   );
 
   let hasFolder = 1;
+  let hasVerifier = 1;
 }
 
 //===----------------------------------------------------------------------===//
diff --git a/mlir/include/mlir/Dialect/Tosa/Utils/ConversionUtils.h b/mlir/include/mlir/Dialect/Tosa/Utils/ConversionUtils.h
index 4e2f1b9cb19a9..3e80a7321ad8c 100644
--- a/mlir/include/mlir/Dialect/Tosa/Utils/ConversionUtils.h
+++ b/mlir/include/mlir/Dialect/Tosa/Utils/ConversionUtils.h
@@ -240,6 +240,9 @@ SmallVector<int64_t> convertFromMlirShape(ArrayRef<int64_t> shape);
 bool getConstShapeValue(Operation *op,
                         llvm::SmallVector<int64_t> &result_shape);
 
+// Returns a small vector of the int64_t values that `attr` contains.
+SmallVector<int64_t> convertFromIntAttr(const DenseElementsAttr &attr,
+                                        const int rank);
 } // namespace tosa
 } // namespace mlir
 
diff --git a/mlir/include/mlir/Dialect/X86Vector/X86Vector.td b/mlir/include/mlir/Dialect/X86Vector/X86Vector.td
index 16181d7e760db..566013e73f4b8 100644
--- a/mlir/include/mlir/Dialect/X86Vector/X86Vector.td
+++ b/mlir/include/mlir/Dialect/X86Vector/X86Vector.td
@@ -341,6 +341,46 @@ def DotBF16Ps512IntrOp : AVX512_IntrOp<"dpbf16ps.512", 1, [Pure,
   let results = (outs VectorOfLengthAndType<[16], [F32]>:$res);
 }
 
+//----------------------------------------------------------------------------//
+// Convert packed F32 to packed BF16
+//----------------------------------------------------------------------------//
+
+def CvtPackedF32ToBF16Op : AVX512_Op<"cvt.packed.f32_to_bf16", [Pure,
+  AllElementCountsMatch<["a", "dst"]>]> {
+  let summary = "Convert packed F32 to packed BF16 Data.";
+  let description = [{
+    The `cvt.packed.f32_to_bf16` op is an AVX512-BF16 specific op that can
+    lower to the proper LLVMAVX512BF16 operation `llvm.cvtneps2bf16` depending
+    on the width of MLIR vectors it is applied to.
+
+    #### From the Intel Intrinsics Guide:
+
+    Convert packed single-precision (32-bit) floating-point elements in `a` to
+    packed BF16 (16-bit) floating-point elements, and store the results in
+    `dst`.
+ + Example: + ```mlir + %dst = x86vector.avx512.cvt.packed.f32_to_bf16 %a : vector<8xf32> -> vector<8xbf16> + ``` + }]; + let arguments = (ins VectorOfLengthAndType<[8, 16], [F32]>:$a); + let results = (outs VectorOfLengthAndType<[8, 16], [BF16]>:$dst); + let assemblyFormat = + "$a attr-dict `:` type($a) `->` type($dst)"; +} + +def CvtNeF32ToBF16Ps256IntrOp : AVX512_IntrOp<"cvtneps2bf16.256", 1, [Pure], + /*extension=*/"bf16"> { + let arguments = (ins VectorOfLengthAndType<[8], [F32]>:$a); + let results = (outs VectorOfLengthAndType<[8], [BF16]>:$res); +} + +def CvtNeF32ToBF16Ps512IntrOp : AVX512_IntrOp<"cvtneps2bf16.512", 1, [Pure], + /*extension=*/"bf16"> { + let arguments = (ins VectorOfLengthAndType<[16], [F32]>:$a); + let results = (outs VectorOfLengthAndType<[16], [BF16]>:$res); +} + //===----------------------------------------------------------------------===// // AVX op definitions //===----------------------------------------------------------------------===// diff --git a/mlir/include/mlir/IR/CMakeLists.txt b/mlir/include/mlir/IR/CMakeLists.txt index 0c7937dfd69e5..846547ff131e3 100644 --- a/mlir/include/mlir/IR/CMakeLists.txt +++ b/mlir/include/mlir/IR/CMakeLists.txt @@ -2,6 +2,8 @@ add_mlir_interface(SymbolInterfaces) add_mlir_interface(RegionKindInterface) set(LLVM_TARGET_DEFINITIONS OpAsmInterface.td) +mlir_tablegen(OpAsmAttrInterface.h.inc -gen-attr-interface-decls) +mlir_tablegen(OpAsmAttrInterface.cpp.inc -gen-attr-interface-defs) mlir_tablegen(OpAsmOpInterface.h.inc -gen-op-interface-decls) mlir_tablegen(OpAsmOpInterface.cpp.inc -gen-op-interface-defs) mlir_tablegen(OpAsmTypeInterface.h.inc -gen-type-interface-decls) diff --git a/mlir/include/mlir/IR/OpAsmInterface.td b/mlir/include/mlir/IR/OpAsmInterface.td index 34c830a12856f..1bd8eb04714c5 100644 --- a/mlir/include/mlir/IR/OpAsmInterface.td +++ b/mlir/include/mlir/IR/OpAsmInterface.td @@ -127,6 +127,35 @@ def OpAsmTypeInterface : TypeInterface<"OpAsmTypeInterface"> { "void", "getAsmName", (ins "::mlir::OpAsmSetNameFn":$setNameFn), "", ";" >, + InterfaceMethod<[{ + Get a name to use when generating an alias for this type. + }], + "::mlir::OpAsmDialectInterface::AliasResult", "getAlias", + (ins "::llvm::raw_ostream&":$os), "", + "return ::mlir::OpAsmDialectInterface::AliasResult::NoAlias;" + >, + ]; +} + +//===----------------------------------------------------------------------===// +// OpAsmAttrInterface +//===----------------------------------------------------------------------===// + +def OpAsmAttrInterface : AttrInterface<"OpAsmAttrInterface"> { + let description = [{ + This interface provides hooks to interact with the AsmPrinter and AsmParser + classes. + }]; + let cppNamespace = "::mlir"; + + let methods = [ + InterfaceMethod<[{ + Get a name to use when generating an alias for this attribute. + }], + "::mlir::OpAsmDialectInterface::AliasResult", "getAlias", + (ins "::llvm::raw_ostream&":$os), "", + "return ::mlir::OpAsmDialectInterface::AliasResult::NoAlias;" + >, ]; } diff --git a/mlir/include/mlir/IR/OpImplementation.h b/mlir/include/mlir/IR/OpImplementation.h index 5eb8b4a5cff5b..a863e881ee7c8 100644 --- a/mlir/include/mlir/IR/OpImplementation.h +++ b/mlir/include/mlir/IR/OpImplementation.h @@ -1825,6 +1825,7 @@ ParseResult parseDimensionList(OpAsmParser &parser, //===--------------------------------------------------------------------===// /// The OpAsmOpInterface, see OpAsmInterface.td for more details. 
+#include "mlir/IR/OpAsmAttrInterface.h.inc" #include "mlir/IR/OpAsmOpInterface.h.inc" #include "mlir/IR/OpAsmTypeInterface.h.inc" diff --git a/mlir/include/mlir/Target/Cpp/CppEmitter.h b/mlir/include/mlir/Target/Cpp/CppEmitter.h index 99d8696cc8e07..7c5747a888261 100644 --- a/mlir/include/mlir/Target/Cpp/CppEmitter.h +++ b/mlir/include/mlir/Target/Cpp/CppEmitter.h @@ -14,6 +14,7 @@ #define MLIR_TARGET_CPP_CPPEMITTER_H #include "mlir/Support/LLVM.h" +#include "llvm/ADT/StringRef.h" namespace mlir { class Operation; @@ -23,8 +24,11 @@ namespace emitc { /// the region of 'op' need almost all be in EmitC dialect. The parameter /// 'declareVariablesAtTop' enforces that all variables for op results and block /// arguments are declared at the beginning of the function. +/// If parameter 'fileId' is non-empty, then body of `emitc.file` ops +/// with matching id are emitted. LogicalResult translateToCpp(Operation *op, raw_ostream &os, - bool declareVariablesAtTop = false); + bool declareVariablesAtTop = false, + StringRef fileId = {}); } // namespace emitc } // namespace mlir diff --git a/mlir/include/mlir/Target/LLVMIR/ModuleImport.h b/mlir/include/mlir/Target/LLVMIR/ModuleImport.h index 4642d58760ca8..6c673295d8dcc 100644 --- a/mlir/include/mlir/Target/LLVMIR/ModuleImport.h +++ b/mlir/include/mlir/Target/LLVMIR/ModuleImport.h @@ -407,6 +407,10 @@ class ModuleImport { /// always requires a symbol name. FlatSymbolRefAttr getOrCreateNamelessSymbolName(llvm::GlobalVariable *globalVar); + /// Returns the global insertion point for the next global operation. If the + /// `globalInsertionOp` is set, the insertion point is placed after the + /// specified operation. Otherwise, it defaults to the start of the module. + OpBuilder::InsertionGuard setGlobalInsertionPoint(); /// Builder pointing at where the next instruction should be generated. OpBuilder builder; @@ -416,8 +420,6 @@ class ModuleImport { Operation *constantInsertionOp = nullptr; /// Operation to insert the next global after. Operation *globalInsertionOp = nullptr; - /// Operation to insert the next alias after. - Operation *aliasInsertionOp = nullptr; /// Operation to insert comdat selector operations into. ComdatOp globalComdatOp = nullptr; /// The current context. 
diff --git a/mlir/lib/Conversion/MathToLibm/MathToLibm.cpp b/mlir/lib/Conversion/MathToLibm/MathToLibm.cpp index c21ee9652b499..c4792884eb34a 100644 --- a/mlir/lib/Conversion/MathToLibm/MathToLibm.cpp +++ b/mlir/lib/Conversion/MathToLibm/MathToLibm.cpp @@ -181,6 +181,7 @@ void mlir::populateMathToLibmConversionPatterns(RewritePatternSet &patterns, populatePatternsForOp(patterns, benefit, ctx, "cosf", "cos"); populatePatternsForOp(patterns, benefit, ctx, "coshf", "cosh"); populatePatternsForOp(patterns, benefit, ctx, "erff", "erf"); + populatePatternsForOp(patterns, benefit, ctx, "erfcf", "erfc"); populatePatternsForOp(patterns, benefit, ctx, "expf", "exp"); populatePatternsForOp(patterns, benefit, ctx, "exp2f", "exp2"); populatePatternsForOp(patterns, benefit, ctx, "expm1f", diff --git a/mlir/lib/Conversion/OpenMPToLLVM/OpenMPToLLVM.cpp b/mlir/lib/Conversion/OpenMPToLLVM/OpenMPToLLVM.cpp index 12e3c07669839..7888745dc6920 100644 --- a/mlir/lib/Conversion/OpenMPToLLVM/OpenMPToLLVM.cpp +++ b/mlir/lib/Conversion/OpenMPToLLVM/OpenMPToLLVM.cpp @@ -186,6 +186,32 @@ struct MapInfoOpConversion : public ConvertOpToLLVMPattern { } }; +struct DeclMapperOpConversion + : public ConvertOpToLLVMPattern { + using ConvertOpToLLVMPattern::ConvertOpToLLVMPattern; + LogicalResult + matchAndRewrite(omp::DeclareMapperOp curOp, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + const TypeConverter *converter = ConvertToLLVMPattern::getTypeConverter(); + SmallVector newAttrs; + newAttrs.emplace_back(curOp.getSymNameAttrName(), curOp.getSymNameAttr()); + newAttrs.emplace_back( + curOp.getTypeAttrName(), + TypeAttr::get(converter->convertType(curOp.getType()))); + + auto newOp = rewriter.create( + curOp.getLoc(), TypeRange(), adaptor.getOperands(), newAttrs); + rewriter.inlineRegionBefore(curOp.getRegion(), newOp.getRegion(), + newOp.getRegion().end()); + if (failed(rewriter.convertRegionTypes(&newOp.getRegion(), + *this->getTypeConverter()))) + return failure(); + + rewriter.eraseOp(curOp); + return success(); + } +}; + template struct MultiRegionOpConversion : public ConvertOpToLLVMPattern { using ConvertOpToLLVMPattern::ConvertOpToLLVMPattern; @@ -225,19 +251,21 @@ void mlir::configureOpenMPToLLVMConversionLegality( ConversionTarget &target, const LLVMTypeConverter &typeConverter) { target.addDynamicallyLegalOp< omp::AtomicReadOp, omp::AtomicWriteOp, omp::CancellationPointOp, - omp::CancelOp, omp::CriticalDeclareOp, omp::FlushOp, omp::MapBoundsOp, - omp::MapInfoOp, omp::OrderedOp, omp::ScanOp, omp::TargetEnterDataOp, - omp::TargetExitDataOp, omp::TargetUpdateOp, omp::ThreadprivateOp, - omp::YieldOp>([&](Operation *op) { - return typeConverter.isLegal(op->getOperandTypes()) && - typeConverter.isLegal(op->getResultTypes()); - }); + omp::CancelOp, omp::CriticalDeclareOp, omp::DeclareMapperInfoOp, + omp::FlushOp, omp::MapBoundsOp, omp::MapInfoOp, omp::OrderedOp, + omp::ScanOp, omp::TargetEnterDataOp, omp::TargetExitDataOp, + omp::TargetUpdateOp, omp::ThreadprivateOp, omp::YieldOp>( + [&](Operation *op) { + return typeConverter.isLegal(op->getOperandTypes()) && + typeConverter.isLegal(op->getResultTypes()); + }); target.addDynamicallyLegalOp< - omp::AtomicUpdateOp, omp::CriticalOp, omp::DeclareReductionOp, - omp::DistributeOp, omp::LoopNestOp, omp::LoopOp, omp::MasterOp, - omp::OrderedRegionOp, omp::ParallelOp, omp::SectionOp, omp::SectionsOp, - omp::SimdOp, omp::SingleOp, omp::TargetDataOp, omp::TargetOp, - omp::TaskgroupOp, omp::TaskloopOp, omp::TaskOp, omp::TeamsOp, + omp::AtomicUpdateOp, 
omp::CriticalOp, omp::DeclareMapperOp, + omp::DeclareReductionOp, omp::DistributeOp, omp::LoopNestOp, omp::LoopOp, + omp::MasterOp, omp::OrderedRegionOp, omp::ParallelOp, + omp::PrivateClauseOp, omp::SectionOp, omp::SectionsOp, omp::SimdOp, + omp::SingleOp, omp::TargetDataOp, omp::TargetOp, omp::TaskgroupOp, + omp::TaskloopOp, omp::TaskOp, omp::TeamsOp, omp::WsloopOp>([&](Operation *op) { return std::all_of(op->getRegions().begin(), op->getRegions().end(), [&](Region ®ion) { @@ -267,12 +295,13 @@ void mlir::populateOpenMPToLLVMConversionPatterns(LLVMTypeConverter &converter, [&](omp::MapBoundsType type) -> Type { return type; }); patterns.add< - AtomicReadOpConversion, MapInfoOpConversion, + AtomicReadOpConversion, DeclMapperOpConversion, MapInfoOpConversion, MultiRegionOpConversion, MultiRegionOpConversion, RegionLessOpConversion, RegionLessOpConversion, RegionLessOpConversion, + RegionLessOpConversion, RegionLessOpConversion, RegionLessOpConversion, RegionLessOpConversion, diff --git a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp index d849c782bf08b..7b70b3ab8afc9 100644 --- a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp +++ b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp @@ -1387,7 +1387,10 @@ class ResizeUnaryConverter : public OpRewritePattern { return success(); } - ArrayRef scale = op.getScale(); + SmallVector scale; + if (!tosa::getConstShapeValue(op.getScale().getDefiningOp(), scale)) { + return failure(); + } // Collapse the unit width and height away. SmallVector reassociationMap(2); @@ -1488,8 +1491,9 @@ class MaterializeResizeBroadcast : public OpRewritePattern { resizeShape.push_back(channels); auto resizeTy = resultTy.clone(resizeShape); - auto resize = - builder.create(resizeTy, input, op->getAttrs()); + auto resize = builder.create(resizeTy, input, op.getScale(), + op.getOffset(), op.getBorder(), + op.getMode()); // Collapse an unit result dims. SmallVector reassociationMap(2); @@ -1604,9 +1608,14 @@ class GenericResizeConverter : public OpRewritePattern { Value inY = b.create(b.getI32Type(), y); Value inX = b.create(b.getI32Type(), x); - ArrayRef offset = op.getOffset(); - ArrayRef border = op.getBorder(); - ArrayRef scale = op.getScale(); + SmallVector scale, offset, border; + if (!tosa::getConstShapeValue(op.getScale().getDefiningOp(), scale) || + !tosa::getConstShapeValue(op.getOffset().getDefiningOp(), offset) || + !tosa::getConstShapeValue(op.getBorder().getDefiningOp(), border)) { + return rewriter.notifyMatchFailure( + op, "tosa.resize scale/offset/border should have compile time " + "constant values."); + } Value yScaleN, yScaleD, xScaleN, xScaleD; yScaleN = b.create(b.getI32IntegerAttr(scale[0])); diff --git a/mlir/lib/Dialect/EmitC/IR/EmitC.cpp b/mlir/lib/Dialect/EmitC/IR/EmitC.cpp index 728a2d33f46e7..1e8952a7edf4e 100644 --- a/mlir/lib/Dialect/EmitC/IR/EmitC.cpp +++ b/mlir/lib/Dialect/EmitC/IR/EmitC.cpp @@ -19,6 +19,7 @@ #include "llvm/ADT/StringExtras.h" #include "llvm/ADT/TypeSwitch.h" #include "llvm/Support/Casting.h" +#include "llvm/Support/FormatVariadic.h" using namespace mlir; using namespace mlir::emitc; @@ -167,6 +168,63 @@ static LogicalResult verifyInitializationAttribute(Operation *op, return success(); } +/// Parse a format string and return a list of its parts. +/// A part is either a StringRef that has to be printed as-is, or +/// a Placeholder which requires printing the next operand of the VerbatimOp. 
+/// In the format string, all `{}` are replaced by Placeholders, except if the
+/// `{` is escaped by `{{`; in that case it doesn't start a placeholder.
+template <class ArgType>
+FailureOr<SmallVector<ReplacementItem>>
+parseFormatString(StringRef toParse, ArgType fmtArgs,
+                  std::optional<llvm::function_ref<InFlightDiagnostic()>>
+                      emitError = {}) {
+  SmallVector<ReplacementItem> items;
+
+  // If there are no operands, the format string is not interpreted.
+  if (fmtArgs.empty()) {
+    items.push_back(toParse);
+    return items;
+  }
+
+  while (!toParse.empty()) {
+    size_t idx = toParse.find('{');
+    if (idx == StringRef::npos) {
+      // No '{' left.
+      items.push_back(toParse);
+      break;
+    }
+    if (idx > 0) {
+      // Take all chars excluding the '{'.
+      items.push_back(toParse.take_front(idx));
+      toParse = toParse.drop_front(idx);
+      continue;
+    }
+    if (toParse.size() < 2) {
+      // An unescaped '{' at the end of the string cannot be completed.
+      if (emitError.has_value()) {
+        return (*emitError)()
+               << "expected '}' after unescaped '{' at end of string";
+      }
+      return failure();
+    }
+    // toParse contains at least two characters and starts with `{`.
+    char nextChar = toParse[1];
+    if (nextChar == '{') {
+      // Double '{{' -> '{' (escaping).
+      items.push_back(toParse.take_front(1));
+      toParse = toParse.drop_front(2);
+      continue;
+    }
+    if (nextChar == '}') {
+      items.push_back(Placeholder{});
+      toParse = toParse.drop_front(2);
+      continue;
+    }
+
+    if (emitError.has_value()) {
+      return (*emitError)() << "expected '}' after unescaped '{'";
+    }
+    return failure();
+  }
+  return items;
+}
+
 //===----------------------------------------------------------------------===//
 // AddOp
 //===----------------------------------------------------------------------===//
@@ -909,6 +967,56 @@ LogicalResult emitc::SubscriptOp::verify() {
   return success();
 }
 
+//===----------------------------------------------------------------------===//
+// VerbatimOp
+//===----------------------------------------------------------------------===//
+
+LogicalResult emitc::VerbatimOp::verify() {
+  auto errorCallback = [&]() -> InFlightDiagnostic {
+    return this->emitOpError();
+  };
+  FailureOr<SmallVector<ReplacementItem>> fmt =
+      ::parseFormatString(getValue(), getFmtArgs(), errorCallback);
+  if (failed(fmt))
+    return failure();
+
+  size_t numPlaceholders = llvm::count_if(*fmt, [](ReplacementItem &item) {
+    return std::holds_alternative<Placeholder>(item);
+  });
+
+  if (numPlaceholders != getFmtArgs().size()) {
+    return emitOpError()
+           << "requires operands for each placeholder in the format string";
+  }
+  return success();
+}
+
+[[maybe_unused]] static ParseResult
+parseVariadicTypeFmtArgs(AsmParser &p, SmallVector<Type> &params) {
+  Type type;
+  if (p.parseType(type))
+    return failure();
+
+  params.push_back(type);
+  while (succeeded(p.parseOptionalComma())) {
+    if (p.parseType(type))
+      return failure();
+    params.push_back(type);
+  }
+
+  return success();
+}
+
+[[maybe_unused]] static void printVariadicTypeFmtArgs(AsmPrinter &p,
+                                                      ArrayRef<Type> params) {
+  llvm::interleaveComma(params, p, [&](Type type) { p.printType(type); });
+}
+
+FailureOr<SmallVector<ReplacementItem>> emitc::VerbatimOp::parseFormatString() {
+  // Error checking is done in verify.
+ return ::parseFormatString(getValue(), getFmtArgs()); +} + //===----------------------------------------------------------------------===// // EmitC Enums //===----------------------------------------------------------------------===// @@ -1289,6 +1397,15 @@ void SwitchOp::getRegionInvocationBounds( bounds.emplace_back(/*lb=*/0, /*ub=*/regIndex == liveIndex); } +//===----------------------------------------------------------------------===// +// FileOp +//===----------------------------------------------------------------------===// +void FileOp::build(OpBuilder &builder, OperationState &state, StringRef id) { + state.addRegion()->emplaceBlock(); + state.attributes.push_back( + builder.getNamedAttr("id", builder.getStringAttr(id))); +} + //===----------------------------------------------------------------------===// // TableGen'd op method definitions //===----------------------------------------------------------------------===// diff --git a/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp b/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp index 1bdeb3e356f4b..976432ea37120 100644 --- a/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp +++ b/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp @@ -2591,6 +2591,18 @@ TargetOptions::tokenizeCmdOptions() const { return tokenizeCmdOptions(cmdOptions); } +std::pair> +TargetOptions::tokenizeAndRemoveSuffixCmdOptions(llvm::StringRef startsWith) { + size_t startPos = cmdOptions.find(startsWith); + if (startPos == std::string::npos) + return {llvm::BumpPtrAllocator(), SmallVector()}; + + auto tokenized = + tokenizeCmdOptions(cmdOptions.substr(startPos + startsWith.size())); + cmdOptions.resize(startPos); + return tokenized; +} + MLIR_DEFINE_EXPLICIT_TYPE_ID(::mlir::gpu::TargetOptions) #include "mlir/Dialect/GPU/IR/GPUOpInterfaces.cpp.inc" diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp index 466a9799295f9..c16c38ea22a5d 100644 --- a/mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp +++ b/mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp @@ -13,8 +13,6 @@ #include "mlir/Dialect/Arith/Utils/Utils.h" #include "mlir/Dialect/Complex/IR/Complex.h" #include "mlir/Dialect/Linalg/IR/Linalg.h" -#include "mlir/Dialect/MemRef/IR/MemRef.h" -#include "mlir/Dialect/Tensor/IR/Tensor.h" #include "mlir/IR/AffineExpr.h" #include "mlir/IR/AffineExprVisitor.h" #include "mlir/IR/AffineMap.h" diff --git a/mlir/lib/Dialect/Math/IR/MathOps.cpp b/mlir/lib/Dialect/Math/IR/MathOps.cpp index 42e357c012739..9c4d88e2191ce 100644 --- a/mlir/lib/Dialect/Math/IR/MathOps.cpp +++ b/mlir/lib/Dialect/Math/IR/MathOps.cpp @@ -332,6 +332,24 @@ OpFoldResult math::ErfOp::fold(FoldAdaptor adaptor) { }); } +//===----------------------------------------------------------------------===// +// ErfcOp folder +//===----------------------------------------------------------------------===// + +OpFoldResult math::ErfcOp::fold(FoldAdaptor adaptor) { + return constFoldUnaryOpConditional( + adaptor.getOperands(), [](const APFloat &a) -> std::optional { + switch (APFloat::SemanticsToEnum(a.getSemantics())) { + case APFloat::Semantics::S_IEEEdouble: + return APFloat(erfc(a.convertToDouble())); + case APFloat::Semantics::S_IEEEsingle: + return APFloat(erfcf(a.convertToFloat())); + default: + return {}; + } + }); +} + //===----------------------------------------------------------------------===// // IPowIOp folder //===----------------------------------------------------------------------===// diff --git a/mlir/lib/Dialect/Math/Transforms/PolynomialApproximation.cpp 
b/mlir/lib/Dialect/Math/Transforms/PolynomialApproximation.cpp
index 777427de9465c..167eebd786dba 100644
--- a/mlir/lib/Dialect/Math/Transforms/PolynomialApproximation.cpp
+++ b/mlir/lib/Dialect/Math/Transforms/PolynomialApproximation.cpp
@@ -173,6 +173,10 @@ handleMultidimensionalVectors(ImplicitLocOpBuilder &builder,
 // Helper functions to create constants.
 //----------------------------------------------------------------------------//
 
+static Value boolCst(ImplicitLocOpBuilder &builder, bool value) {
+  return builder.create<arith::ConstantOp>(builder.getBoolAttr(value));
+}
+
 static Value floatCst(ImplicitLocOpBuilder &builder, float value,
                       Type elementType) {
   assert((elementType.isF16() || elementType.isF32()) &&
@@ -1118,6 +1122,103 @@ ErfPolynomialApproximation::matchAndRewrite(math::ErfOp op,
   return success();
 }
 
+// Approximates erfc(x) with p((x - 2) / (x + 2)), where p is a degree-9
+// polynomial. This approximation is based on the following stackoverflow post:
+// https://stackoverflow.com/questions/35966695/vectorizable-implementation-of-complementary-error-function-erfcf
+// The stackoverflow post is in turn based on:
+// M. M. Shepherd and J. G. Laframboise, "Chebyshev Approximation of
+// (1+2x)exp(x^2)erfc x in 0 <= x < INF", Mathematics of Computation, Vol. 36,
+// No. 153, January 1981, pp. 249-253.
+//
+// Maximum error: 2.65 ulps
+LogicalResult
+ErfcPolynomialApproximation::matchAndRewrite(math::ErfcOp op,
+                                             PatternRewriter &rewriter) const {
+  Value x = op.getOperand();
+  Type et = getElementTypeOrSelf(x);
+
+  if (!et.isF32())
+    return rewriter.notifyMatchFailure(op, "only f32 type is supported.");
+  std::optional shape = vectorShape(x);
+
+  ImplicitLocOpBuilder builder(op->getLoc(), rewriter);
+  auto bcast = [&](Value value) -> Value {
+    return broadcast(builder, value, shape);
+  };
+
+  Value trueValue = bcast(boolCst(builder, true));
+  Value zero = bcast(floatCst(builder, 0.0f, et));
+  Value one = bcast(floatCst(builder, 1.0f, et));
+  Value onehalf = bcast(floatCst(builder, 0.5f, et));
+  Value neg4 = bcast(floatCst(builder, -4.0f, et));
+  Value neg2 = bcast(floatCst(builder, -2.0f, et));
+  Value pos2 = bcast(floatCst(builder, 2.0f, et));
+  Value posInf = bcast(floatCst(builder, INFINITY, et));
+  Value clampVal = bcast(floatCst(builder, 10.0546875f, et));
+
+  Value a = builder.create<math::AbsFOp>(x);
+  Value p = builder.create<arith::AddFOp>(a, pos2);
+  Value r = builder.create<arith::DivFOp>(one, p);
+  Value q = builder.create<math::FmaOp>(neg4, r, one);
+  Value t = builder.create<math::FmaOp>(builder.create<arith::AddFOp>(q, one),
+                                        neg2, a);
+  Value e = builder.create<math::FmaOp>(builder.create<arith::NegFOp>(a), q, t);
+  q = builder.create<math::FmaOp>(r, e, q);
+
+  p = bcast(floatCst(builder, -0x1.a4a000p-12f, et));        // -4.01139259e-4
+  Value c1 = bcast(floatCst(builder, -0x1.42a260p-10f, et)); // -1.23075210e-3
+  p = builder.create<math::FmaOp>(p, q, c1);
+  Value c2 = bcast(floatCst(builder, 0x1.585714p-10f, et));  // 1.31355342e-3
+  p = builder.create<math::FmaOp>(p, q, c2);
+  Value c3 = bcast(floatCst(builder, 0x1.1adcc4p-07f, et));  // 8.63227434e-3
+  p = builder.create<math::FmaOp>(p, q, c3);
+  Value c4 = bcast(floatCst(builder, -0x1.081b82p-07f, et)); // -8.05991981e-3
+  p = builder.create<math::FmaOp>(p, q, c4);
+  Value c5 = bcast(floatCst(builder, -0x1.bc0b6ap-05f, et)); // -5.42046614e-2
+  p = builder.create<math::FmaOp>(p, q, c5);
+  Value c6 = bcast(floatCst(builder, 0x1.4ffc46p-03f, et));  // 1.64055392e-1
+  p = builder.create<math::FmaOp>(p, q, c6);
+  Value c7 = bcast(floatCst(builder, -0x1.540840p-03f, et)); // -1.66031361e-1
+  p = builder.create<math::FmaOp>(p, q, c7);
+  Value c8 = bcast(floatCst(builder, -0x1.7bf616p-04f, et)); // -9.27639827e-2
+  p = builder.create<math::FmaOp>(p, q, c8);
+  Value c9 = bcast(floatCst(builder, 0x1.1ba03ap-02f, et));  // 2.76978403e-1
+  p = builder.create<math::FmaOp>(p, q, c9);
+
+  Value d = builder.create<math::FmaOp>(pos2, a, one);
+  r = builder.create<arith::DivFOp>(one, d);
+  q = builder.create<math::FmaOp>(p, r, r);
+  Value negfa = builder.create<arith::NegFOp>(a);
+  Value fmaqah = builder.create<math::FmaOp>(q, negfa, onehalf);
+  Value psubq = builder.create<arith::SubFOp>(p, q);
+  e = builder.create<math::FmaOp>(fmaqah, pos2, psubq);
+  r = builder.create<math::FmaOp>(e, r, q);
+
+  Value s = builder.create<arith::MulFOp>(a, a);
+  e = builder.create<math::ExpOp>(builder.create<arith::NegFOp>(s));
+
+  t = builder.create<math::FmaOp>(builder.create<arith::NegFOp>(a), a, s);
+  r = builder.create<math::FmaOp>(
+      r, e,
+      builder.create<arith::MulFOp>(builder.create<arith::MulFOp>(r, e), t));
+
+  Value isNotLessThanInf = builder.create<arith::XOrIOp>(
+      builder.create<arith::CmpFOp>(arith::CmpFPredicate::OLT, a, posInf),
+      trueValue);
+  r = builder.create<arith::SelectOp>(isNotLessThanInf,
+                                      builder.create<arith::AddFOp>(x, x), r);
+  Value isGreaterThanClamp =
+      builder.create<arith::CmpFOp>(arith::CmpFPredicate::OGT, a, clampVal);
+  r = builder.create<arith::SelectOp>(isGreaterThanClamp, zero, r);
+
+  Value isNegative =
+      builder.create<arith::CmpFOp>(arith::CmpFPredicate::OLT, x, zero);
+  r = builder.create<arith::SelectOp>(
+      isNegative, builder.create<arith::SubFOp>(pos2, r), r);
+
+  rewriter.replaceOp(op, r);
+  return success();
+}
+
 //----------------------------------------------------------------------------//
 // Exp approximation.
 //----------------------------------------------------------------------------//
@@ -1667,6 +1768,11 @@ void mlir::populatePolynomialApproximateErfPattern(
   patterns.add<ErfPolynomialApproximation>(patterns.getContext());
 }
 
+void mlir::populatePolynomialApproximateErfcPattern(
+    RewritePatternSet &patterns) {
+  patterns.add<ErfcPolynomialApproximation>(patterns.getContext());
+}
+
 template
 static void populateMathF32ExpansionPattern(RewritePatternSet &patterns,
@@ -1690,6 +1796,7 @@ void mlir::populateMathF32ExpansionPatterns(
   populateMathF32ExpansionPattern(patterns, predicate);
   populateMathF32ExpansionPattern(patterns, predicate);
   populateMathF32ExpansionPattern(patterns, predicate);
+  populateMathF32ExpansionPattern<ErfcOp>(patterns, predicate);
   populateMathF32ExpansionPattern(patterns, predicate);
   populateMathF32ExpansionPattern(patterns, predicate);
   populateMathF32ExpansionPattern(patterns, predicate);
@@ -1734,6 +1841,9 @@ void mlir::populateMathPolynomialApproximationPatterns(
       CosOp, SinAndCosApproximation>(patterns, predicate);
   populateMathPolynomialApproximationPattern(
       patterns, predicate);
+  populateMathPolynomialApproximationPattern<ErfcOp,
+      ErfcPolynomialApproximation>(patterns, predicate);
   populateMathPolynomialApproximationPattern(
       patterns, predicate);
   populateMathPolynomialApproximationPattern(
@@ -1760,9 +1870,10 @@ void mlir::populateMathPolynomialApproximationPatterns(
         {math::AtanOp::getOperationName(), math::Atan2Op::getOperationName(),
          math::TanhOp::getOperationName(), math::LogOp::getOperationName(),
          math::Log2Op::getOperationName(), math::Log1pOp::getOperationName(),
-         math::ErfOp::getOperationName(), math::ExpOp::getOperationName(),
-         math::ExpM1Op::getOperationName(), math::CbrtOp::getOperationName(),
-         math::SinOp::getOperationName(), math::CosOp::getOperationName()},
+         math::ErfOp::getOperationName(), math::ErfcOp::getOperationName(),
+         math::ExpOp::getOperationName(), math::ExpM1Op::getOperationName(),
+         math::CbrtOp::getOperationName(), math::SinOp::getOperationName(),
+         math::CosOp::getOperationName()},
         name);
   });
 
@@ -1774,8 +1885,9 @@ void mlir::populateMathPolynomialApproximationPatterns(
          math::TanhOp::getOperationName(), math::LogOp::getOperationName(),
          math::Log2Op::getOperationName(), math::Log1pOp::getOperationName(),
          math::ErfOp::getOperationName(),
-         math::AsinOp::getOperationName(), math::AcosOp::getOperationName(),
-
math::ExpOp::getOperationName(), math::ExpM1Op::getOperationName(), + math::ErfcOp::getOperationName(), math::AsinOp::getOperationName(), + math::AcosOp::getOperationName(), math::ExpOp::getOperationName(), + math::ExpM1Op::getOperationName(), math::CbrtOp::getOperationName(), math::SinOp::getOperationName(), math::CosOp::getOperationName()}, name); diff --git a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp index 5ec840e7fef81..62e1c4c3ed3b1 100644 --- a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp +++ b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp @@ -32,6 +32,7 @@ #include "llvm/ADT/TypeSwitch.h" #include "llvm/Frontend/OpenMP/OMPConstants.h" #include "llvm/Frontend/OpenMP/OMPDeviceConstants.h" +#include "llvm/Support/Casting.h" #include #include #include @@ -1631,7 +1632,13 @@ static LogicalResult verifyMapClause(Operation *op, OperandRange mapVars) { to ? updateToVars.insert(updateVar) : updateFromVars.insert(updateVar); } - } else { + + if (mapInfoOp.getMapperId() && + !SymbolTable::lookupNearestSymbolFrom( + mapInfoOp, mapInfoOp.getMapperIdAttr())) { + return emitError(op->getLoc(), "invalid mapper id"); + } + } else if (!isa(op)) { emitError(op->getLoc(), "map argument is not a map entry operation"); } } @@ -2432,6 +2439,22 @@ LogicalResult DistributeOp::verifyRegions() { return success(); } +//===----------------------------------------------------------------------===// +// DeclareMapperOp / DeclareMapperInfoOp +//===----------------------------------------------------------------------===// + +LogicalResult DeclareMapperInfoOp::verify() { + return verifyMapClause(*this, getMapVars()); +} + +LogicalResult DeclareMapperOp::verifyRegions() { + if (!llvm::isa_and_present( + getRegion().getBlocks().front().getTerminator())) + return emitOpError() << "expected terminator to be a DeclareMapperInfoOp"; + + return success(); +} + //===----------------------------------------------------------------------===// // DeclareReductionOp //===----------------------------------------------------------------------===// diff --git a/mlir/lib/Dialect/SCF/Utils/Utils.cpp b/mlir/lib/Dialect/SCF/Utils/Utils.cpp index fa82bcb816a2a..bc1cb24303ad2 100644 --- a/mlir/lib/Dialect/SCF/Utils/Utils.cpp +++ b/mlir/lib/Dialect/SCF/Utils/Utils.cpp @@ -498,6 +498,20 @@ FailureOr mlir::loopUnrollByFactor( return resultLoops; } +/// Unrolls this loop completely. +LogicalResult mlir::loopUnrollFull(scf::ForOp forOp) { + IRRewriter rewriter(forOp.getContext()); + std::optional mayBeConstantTripCount = getConstantTripCount(forOp); + if (!mayBeConstantTripCount.has_value()) + return failure(); + uint64_t tripCount = *mayBeConstantTripCount; + if (tripCount == 0) + return success(); + if (tripCount == 1) + return forOp.promoteIfSingleIteration(rewriter); + return loopUnrollByFactor(forOp, tripCount); +} + /// Check if bounds of all inner loops are defined outside of `forOp` /// and return false if not. 
 /// Check if bounds of all inner loops are defined outside of `forOp`
 /// and return false if not.
 static bool areInnerBoundsInvariant(scf::ForOp forOp) {
diff --git a/mlir/lib/Dialect/SPIRV/IR/SPIRVDialect.cpp b/mlir/lib/Dialect/SPIRV/IR/SPIRVDialect.cpp
index 48be287ef833b..0cf5f0823be63 100644
--- a/mlir/lib/Dialect/SPIRV/IR/SPIRVDialect.cpp
+++ b/mlir/lib/Dialect/SPIRV/IR/SPIRVDialect.cpp
@@ -84,7 +84,11 @@ struct SPIRVInlinerInterface : public DialectInlinerInterface {
     // TODO: we need to filter OpKill here to avoid inlining it to
    // a loop continue construct:
    // https://github.com/KhronosGroup/SPIRV-Headers/issues/86
-    // However OpKill is fragment shader specific and we don't support it yet.
+    // For now, we simply disallow inlining OpKill anywhere in the code,
+    // but this restriction should be relaxed, as pointed out above.
+    if (isa<spirv::KillOp>(op))
+      return false;
+
     return true;
   }
diff --git a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
index e741144647043..fad7db48b9872 100644
--- a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
@@ -10,9 +10,8 @@
 #include "mlir/Dialect/Arith/IR/Arith.h"
 #include "mlir/Dialect/Arith/Utils/Utils.h"
 #include "mlir/Dialect/Complex/IR/Complex.h"
-#include "mlir/Dialect/Linalg/IR/Linalg.h"
+#include "mlir/Dialect/Linalg/IR/LinalgInterfaces.h"
 #include "mlir/Dialect/Tensor/IR/Tensor.h"
-#include "mlir/Dialect/Tensor/Utils/Utils.h"
 #include "mlir/Dialect/Utils/IndexingUtils.h"
 #include "mlir/Dialect/Utils/ReshapeOpsUtils.h"
 #include "mlir/Dialect/Utils/StaticValueUtils.h"
diff --git a/mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp b/mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp
index 69b3f6d674167..b9bcedb7fe71d 100644
--- a/mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp
+++ b/mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp
@@ -1034,9 +1034,22 @@ OpFoldResult PadOp::fold(FoldAdaptor adaptor) {
 // Fold away cases where a tosa.resize operation returns a copy
 // of the input image.
 OpFoldResult ResizeOp::fold(FoldAdaptor adaptor) {
-  ArrayRef<int64_t> offset = getOffset();
-  ArrayRef<int64_t> border = getBorder();
-  ArrayRef<int64_t> scale = getScale();
+  auto scaleAttr =
+      llvm::dyn_cast_if_present<DenseElementsAttr>(adaptor.getScale());
+  auto offsetAttr =
+      llvm::dyn_cast_if_present<DenseElementsAttr>(adaptor.getOffset());
+  auto borderAttr =
+      llvm::dyn_cast_if_present<DenseElementsAttr>(adaptor.getBorder());
+  if (!scaleAttr || !offsetAttr || !borderAttr) {
+    return {};
+  }
+
+  auto scale = tosa::convertFromIntAttr(scaleAttr, /* rank = */ 4);
+  auto offset = tosa::convertFromIntAttr(offsetAttr, /* rank = */ 2);
+  auto border = tosa::convertFromIntAttr(borderAttr, /* rank = */ 2);
+  if (scale.size() != 4 || offset.size() != 2 || border.size() != 2) {
+    return {};
+  }
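// Note: scale/offset/border are now !tosa.shape operands rather than dense
// array attributes, so the fold must first prove they are constants. A shape
// operand produced by tosa.const_shape (syntax as in the updated tests below)
// is the pattern this recovers:
//
//   %scale = tosa.const_shape { value = dense<[2, 2, 1, 1]> : tensor<4xindex> }
//            : () -> !tosa.shape<4>
//
// Anything else (e.g. a block argument) leaves the adaptor value null or a
// non-DenseElementsAttr, and the fold bails out by returning {}.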
   // Check unit scaling.
   if (scale[0] != scale[1] || scale[2] != scale[3]) {
diff --git a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
index 67021d6c07401..d21e218308df7 100644
--- a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
+++ b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
@@ -1598,9 +1598,14 @@ LogicalResult tosa::ResizeOp::inferReturnTypeComponents(
       (inputWidth == ShapedType::kDynamic))
     return failure();
 
-  llvm::ArrayRef<int64_t> scaleInt = adaptor.getScale();
-  llvm::ArrayRef<int64_t> offsetInt = adaptor.getOffset();
-  llvm::ArrayRef<int64_t> borderInt = adaptor.getBorder();
+  SmallVector<int64_t> scaleInt, offsetInt, borderInt;
+  if (!tosa::getConstShapeValue(adaptor.getScale().getDefiningOp(), scaleInt) ||
+      !tosa::getConstShapeValue(adaptor.getOffset().getDefiningOp(),
+                                offsetInt) ||
+      !tosa::getConstShapeValue(adaptor.getBorder().getDefiningOp(),
+                                borderInt)) {
+    return failure();
+  }
 
   // Compute the output shape based on the scale, offset, and border values.
   outputShape[1] =
@@ -1617,6 +1622,98 @@ LogicalResult tosa::ResizeOp::inferReturnTypeComponents(
   return success();
 }
 
+LogicalResult tosa::ResizeOp::verify() {
+  const Value input = getInput();
+  const Value output = getOutput();
+  const RankedTensorType inputType =
+      llvm::dyn_cast<RankedTensorType>(input.getType());
+  const RankedTensorType outputType =
+      llvm::dyn_cast<RankedTensorType>(output.getType());
+
+  if (!inputType)
+    return emitOpError("expect a ranked input tensor");
+  if (!outputType)
+    return emitOpError("expect a ranked output tensor");
+
+  const int64_t oh = outputType.getDimSize(1);
+  const int64_t ow = outputType.getDimSize(2);
+  const int64_t ih = inputType.getDimSize(1);
+  const int64_t iw = inputType.getDimSize(2);
+
+  SmallVector<int64_t> scaleValues;
+  SmallVector<int64_t> offsetValues;
+  SmallVector<int64_t> borderValues;
+  if (!tosa::getConstShapeValue(getScale().getDefiningOp(), scaleValues) ||
+      !tosa::getConstShapeValue(getOffset().getDefiningOp(), offsetValues) ||
+      !tosa::getConstShapeValue(getBorder().getDefiningOp(), borderValues)) {
+    // Skip the following checks if the shapes are not constant.
+    return success();
+  }
+
+  if (llvm::any_of(scaleValues, [](int64_t s) { return s <= 0; }))
+    return emitOpError("expect all scale values to be > 0, got ")
+           << scaleValues;
+
+  const int64_t scaleYN = scaleValues[0];
+  const int64_t scaleYD = scaleValues[1];
+  const int64_t scaleXN = scaleValues[2];
+  const int64_t scaleXD = scaleValues[3];
+
+  const int64_t offsetY = offsetValues[0];
+  const int64_t offsetX = offsetValues[1];
+
+  const int64_t borderY = borderValues[0];
+  const int64_t borderX = borderValues[1];
+
+  auto idivCheck = [](const int64_t lhs,
+                      const int64_t rhs) -> std::optional<int64_t> {
+    if (lhs % rhs != 0)
+      return std::nullopt;
+    return lhs / rhs;
+  };
+
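// The divisibility check below encodes the TOSA resize size relation
//   output_dim = ((input_dim - 1) * scale_n - offset + border) / scale_d + 1
// e.g. for the bilinear test later in this patch: ih = 19, scale_y = 16/1,
// offset_y = 0, border_y = 0 gives ((19 - 1) * 16 - 0 + 0) / 1 + 1 = 289,
// which is why the expected output type there is tensor<1x289x305x1xi48>.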
+  // Only check the input height when it cannot be broadcast (ih != 1),
+  // since Linalg, a consumer of TOSA, expects broadcasting support
+  // in resize to be available. Taking the cautious approach for now,
+  // we can consider removing support for broadcasting later.
+  if (ih != ShapedType::kDynamic && ih != 1) {
+    const std::optional<int64_t> calculatedOutHeightMinusOne =
+        idivCheck((ih - 1) * scaleYN - offsetY + borderY, scaleYD);
+    if (!calculatedOutHeightMinusOne.has_value())
+      return emitOpError("expected (input_height - 1) * scale_y_n - offset_y + "
+                         "border_y ")
+             << "to be wholly divisible by scale_y_d, got ((" << ih
+             << " - 1) * " << scaleYN << " - " << offsetY << " + " << borderY
+             << ") / " << scaleYD;
+    const int64_t calculatedOutHeight = calculatedOutHeightMinusOne.value() + 1;
+    if (oh != ShapedType::kDynamic && calculatedOutHeight != oh)
+      return emitOpError("calculated output height did not match expected: ")
+             << "calculated=" << calculatedOutHeight << ", expected=" << oh;
+  }
+
+  // Only check the input width when it cannot be broadcast (iw != 1),
+  // since Linalg, a consumer of TOSA, expects broadcasting support
+  // in resize to be available. Taking the cautious approach for now,
+  // we can consider removing support for broadcasting later.
+  if (iw != ShapedType::kDynamic && iw != 1) {
+    const int64_t scaledInWidth = (iw - 1) * scaleXN - offsetX + borderX;
+    const std::optional<int64_t> calculatedOutWidthMinusOne =
+        idivCheck(scaledInWidth, scaleXD);
+    if (!calculatedOutWidthMinusOne.has_value())
+      return emitOpError("expected (input_width - 1) * scale_x_n - offset_x + "
+                         "border_x ")
+             << "to be wholly divisible by scale_x_d, got ((" << iw
+             << " - 1) * " << scaleXN << " - " << offsetX << " + " << borderX
+             << ") / " << scaleXD;
+    const int64_t calculatedOutWidth = calculatedOutWidthMinusOne.value() + 1;
+    if (ow != ShapedType::kDynamic && calculatedOutWidth != ow)
+      return emitOpError("calculated output width did not match expected: ")
+             << "calculated=" << calculatedOutWidth << ", expected=" << ow;
+  }
+
+  return success();
+}
+
 LogicalResult tosa::ScatterOp::inferReturnTypeComponents(
     MLIRContext *context, ::std::optional<Location> location,
     ScatterOp::Adaptor adaptor,
diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp
index 7f59ff70d3374..f4abe628d37d1 100644
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp
@@ -18,6 +18,7 @@
 #include "mlir/Dialect/Func/IR/FuncOps.h"
 #include "mlir/Dialect/Tosa/IR/TosaOps.h"
+#include "mlir/Dialect/Tosa/Utils/ConversionUtils.h"
 #include "mlir/IR/Builders.h"
 #include "mlir/IR/BuiltinOps.h"
 #include "mlir/IR/Matchers.h"
@@ -106,6 +107,9 @@ struct TosaValidation : public tosa::impl::TosaValidationBase<TosaValidation> {
   // check variable read/write data types against variable declarations
   LogicalResult applyVariableCheck(Operation *op);
 
+  // check ERROR_IF conditions
+  LogicalResult applyErrorIfCheck(Operation *op);
+
 private:
   void populateConstantOperandChecks() {
     constCheckers.emplace_back(checkConstantOperandPad);
@@ -369,11 +373,14 @@ struct TosaValidation : public tosa::impl::TosaValidationBase<TosaValidation> {
   // Resize op: level check max scales
   bool levelCheckResize(Operation *op) {
     if (auto resize = dyn_cast<tosa::ResizeOp>(op)) {
-      auto scale = resize.getScale();
-      int16_t scaleYN = scale[0];
-      int16_t scaleYD = scale[1];
-      int16_t scaleXN = scale[2];
-      int16_t scaleXD = scale[3];
+      SmallVector<int64_t> scale;
+      if (!tosa::getConstShapeValue(resize.getScale().getDefiningOp(), scale)) {
+        return false;
+      }
+      const int64_t scaleYN = scale[0];
+      const int64_t scaleYD = scale[1];
+      const int64_t scaleXN = scale[2];
+      const int64_t scaleXD = scale[3];
      if (!levelCheckScale(op, scaleYN / scaleYD,
                           "scale_y_n/scale_y_d <= MAX_SCALE") ||
          !levelCheckScale(op, scaleXN / scaleXD,
@@ -505,6 +512,169 @@ LogicalResult TosaValidation::applyVariableCheck(Operation *op) {
   return success();
 }
 
+bool checkErrorIfResize(Operation *op) {
+  auto resize = dyn_cast<tosa::ResizeOp>(op);
+  if (!resize)
+    return true;
+
+  const Value input = resize.getInput();
+  const Value output = resize.getOutput();
+  const RankedTensorType inputType =
+      llvm::dyn_cast<RankedTensorType>(input.getType());
+  const RankedTensorType outputType =
+      llvm::dyn_cast<RankedTensorType>(output.getType());
+
+  if (!inputType || !outputType) {
+    op->emitOpError("expect ranked input/output tensor");
+    return false;
+  }
+
+  // Ensure the image size is supported by GPU APIs and that, for integer
+  // implementations, position * stride does not overflow int32_t.
+  if (inputType.hasStaticShape() && outputType.hasStaticShape()) {
+    const SmallVector<int64_t> sizes = {
+        outputType.getDimSize(1), outputType.getDimSize(2),
+        inputType.getDimSize(1), inputType.getDimSize(2)};
+    const int64_t *maxDim = llvm::max_element(sizes);
+    if (maxDim != sizes.end() && *maxDim >= 16384) {
+      op->emitOpError("expect input/output height/width dims to be < 16384, ")
+          << "got [OH, OW, IH, IW] = " << sizes;
+      return false;
+    }
+  }
+
+  SmallVector<int64_t> scale;
+  if (!tosa::getConstShapeValue(resize.getScale().getDefiningOp(), scale)) {
+    return false;
+  }
+
+  const int64_t scaleYN = scale[0];
+  const int64_t scaleYD = scale[1];
+  const int64_t scaleXN = scale[2];
+  const int64_t scaleXD = scale[3];
+
+  // Ensure scale values don't overflow int32 accumulator
+  if (scaleYN > (1 << 11) || scaleXN > (1 << 11)) {
+    op->emitOpError("expect all scale numerator values to be <= (1 << 11), "
+                    "got scale_y_n=")
+        << scaleYN << ", scale_x_n=" << scaleXN;
+    return false;
+  }
+
+  if (scaleYD >= 16 * scaleYN || scaleXD >= 16 * scaleXN) {
+    op->emitOpError("expect a downscale ratio larger than 1/16, got y=")
+        << scaleYN << "/" << scaleYD << ", x=" << scaleXN << "/" << scaleXD;
+    return false;
+  }
+
+  SmallVector<int64_t> offset;
+  SmallVector<int64_t> border;
+  if (!tosa::getConstShapeValue(resize.getOffset().getDefiningOp(), offset) ||
+      !tosa::getConstShapeValue(resize.getBorder().getDefiningOp(), border)) {
+    return false;
+  }
+
+  const int64_t offsetY = offset[0];
+  const int64_t offsetX = offset[1];
+  // Set a consistent lower limit of 1/16 downscale to simplify
+  // implementations
+  if (offsetY < -scaleYN || offsetY >= 16 * scaleYN) {
+    op->emitOpError(
+        "expect offsetY / scaleYNumerator to be in range [-1, 16), got ")
+        << offsetY << "/" << scaleYN;
+    return false;
+  }
+  if (offsetX < -scaleXN || offsetX >= 16 * scaleXN) {
+    op->emitOpError(
+        "expect offsetX / scaleXNumerator to be in range [-1, 16), got ")
+        << offsetX << "/" << scaleXN;
+    return false;
+  }
+
+  const int64_t borderY = border[0];
+  const int64_t borderX = border[1];
+  if (borderY < -16 * scaleYN || borderY >= scaleYN) {
+    op->emitOpError(
+        "expect borderY / scaleYNumerator to be in range [-16, 1), got ")
+        << borderY << "/" << scaleYN;
+    return false;
+  }
+  if (borderX < -16 * scaleXN || borderX >= scaleXN) {
+    op->emitOpError(
+        "expect borderX / scaleXNumerator to be in range [-16, 1), got ")
+        << borderX << "/" << scaleXN;
+    return false;
+  }
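// As a concrete instance of the range checks above: the NEAREST_NEIGHBOR
// test later in this patch uses scale = [64, 2, 64, 2], offset = [-31, -31],
// border = [31, 31]. The numerators satisfy 64 <= 2^11, the ratio 64/2 is an
// upscale (so the 1/16 downscale limit is respected), -31 lies in
// [-64, 1024), and 31 lies in [-1024, 64), so none of these ERROR_IF
// conditions fire.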
+  // The following section of code is mostly duplicated from
+  // ResizeOp::verify().
+  //
+  // The TOSA specification does not support broadcast behavior. However,
+  // there is a rewrite pattern that materializes broadcast ResizeOps,
+  // turning otherwise invalid TOSA ResizeOps into valid ones. To avoid
+  // breaking existing code, we keep that rewrite pattern untouched and
+  // loosen the checks in ResizeOp::verify() to accept broadcast ResizeOps.
+  //
+  // The checks here strictly conform to the TOSA specification.
+  // FIXME: Remove the duplicated checks when broadcast ResizeOp is removed.
+  auto idivCheck = [](const int64_t lhs,
+                      const int64_t rhs) -> std::optional<int64_t> {
+    if (lhs % rhs != 0)
+      return std::nullopt;
+    return lhs / rhs;
+  };
+
+  const int64_t oh = outputType.getDimSize(1);
+  const int64_t ow = outputType.getDimSize(2);
+  const int64_t ih = inputType.getDimSize(1);
+  const int64_t iw = inputType.getDimSize(2);
+
+  if (ih != ShapedType::kDynamic) {
+    const std::optional<int64_t> calculatedOutHeightMinusOne =
+        idivCheck((ih - 1) * scaleYN - offsetY + borderY, scaleYD);
+    if (!calculatedOutHeightMinusOne.has_value()) {
+      op->emitOpError("expected (input_height - 1) * scale_y_n - offset_y + "
+                      "border_y ")
+          << "to be wholly divisible by scale_y_d, got ((" << ih << " - 1) * "
+          << scaleYN << " - " << offsetY << " + " << borderY << ") / "
+          << scaleYD;
+      return false;
+    }
+    const int64_t calculatedOutHeight = calculatedOutHeightMinusOne.value() + 1;
+    if (oh != ShapedType::kDynamic && calculatedOutHeight != oh) {
+      op->emitOpError("calculated output height did not match expected: ")
+          << "calculated=" << calculatedOutHeight << ", expected=" << oh;
+      return false;
+    }
+  }
+
+  if (iw != ShapedType::kDynamic) {
+    const std::optional<int64_t> calculatedOutWidthMinusOne =
+        idivCheck((iw - 1) * scaleXN - offsetX + borderX, scaleXD);
+    if (!calculatedOutWidthMinusOne.has_value()) {
+      op->emitOpError("expected (input_width - 1) * scale_x_n - offset_x + "
+                      "border_x ")
+          << "to be wholly divisible by scale_x_d, got ((" << iw << " - 1) * "
+          << scaleXN << " - " << offsetX << " + " << borderX << ") / "
+          << scaleXD;
+      return false;
+    }
+    const int64_t calculatedOutWidth = calculatedOutWidthMinusOne.value() + 1;
+    if (ow != ShapedType::kDynamic && calculatedOutWidth != ow) {
+      op->emitOpError("calculated output width did not match expected: ")
+          << "calculated=" << calculatedOutWidth << ", expected=" << ow;
+      return false;
+    }
+  }
+
+  return true;
+}
+
+LogicalResult TosaValidation::applyErrorIfCheck(Operation *op) {
+  if (!checkErrorIfResize(op))
+    return failure();
+  return success();
+}
+
 bool TosaValidation::isValidElementType(Type type) {
   if (isa<FloatType>(type)) {
     if (!isEnabledProfile(TosaProfileEnum::MainInference))
@@ -568,6 +738,10 @@ void TosaValidation::runOnOperation() {
       // do variable type checks
       if (failed(applyVariableCheck(op)))
        signalPassFailure();
+
+      // do ERROR_IF checks
+      if (StrictOperationSpecAlignment && failed(applyErrorIfCheck(op)))
+        signalPassFailure();
     });
   }
 } // namespace
diff --git a/mlir/lib/Dialect/Tosa/Utils/ConversionUtils.cpp b/mlir/lib/Dialect/Tosa/Utils/ConversionUtils.cpp
index 8ab12d038849f..d1a8732dac212 100644
--- a/mlir/lib/Dialect/Tosa/Utils/ConversionUtils.cpp
+++ b/mlir/lib/Dialect/Tosa/Utils/ConversionUtils.cpp
@@ -198,3 +198,21 @@ bool mlir::tosa::getConstShapeValue(Operation *op,
   // for undefined op, return false.
   return false;
 }
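// A small usage sketch for the helper added below (values illustrative):
//
//   // splat attribute dense<0> with rank 2  ->  {0, 0}
//   // dense<[2, 2, 1, 1]> with rank 4       ->  {2, 2, 1, 1}
//   SmallVector<int64_t> offset = tosa::convertFromIntAttr(attr, /*rank=*/2);
//
// Splat attributes are expanded to `rank` copies of the single value, dense
// integer attributes are copied element-wise, and anything else yields an
// empty vector that callers treat as "not foldable".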
+
+// Returns the int64_t values contained in `attr` as a SmallVector.
+SmallVector<int64_t>
+mlir::tosa::convertFromIntAttr(const DenseElementsAttr &attr, const int rank) {
+  if (attr.isSplat()) {
+    int64_t v = attr.getSplatValue<APInt>().getSExtValue();
+    return SmallVector<int64_t>(rank, v);
+  }
+
+  if (auto int_array_attr = llvm::dyn_cast<DenseIntElementsAttr>(attr)) {
+    SmallVector<int64_t> vec;
+    for (APInt val : int_array_attr.getValues<APInt>()) {
+      vec.push_back(val.getSExtValue());
+    }
+    return vec;
+  }
+  return {};
+}
diff --git a/mlir/lib/Dialect/X86Vector/Transforms/LegalizeForLLVMExport.cpp b/mlir/lib/Dialect/X86Vector/Transforms/LegalizeForLLVMExport.cpp
index 260ac9ce589a3..f1fbb39b97fc4 100644
--- a/mlir/lib/Dialect/X86Vector/Transforms/LegalizeForLLVMExport.cpp
+++ b/mlir/lib/Dialect/X86Vector/Transforms/LegalizeForLLVMExport.cpp
@@ -131,6 +131,39 @@ struct DotBF16OpConversion : public ConvertOpToLLVMPattern<DotBF16Op> {
   }
 };
 
+struct CvtPackedF32ToBF16Conversion
+    : public ConvertOpToLLVMPattern<CvtPackedF32ToBF16Op> {
+  using ConvertOpToLLVMPattern::ConvertOpToLLVMPattern;
+
+  LogicalResult
+  matchAndRewrite(CvtPackedF32ToBF16Op op, OpAdaptor adaptor,
+                  ConversionPatternRewriter &rewriter) const override {
+    auto typeA = dyn_cast<VectorType>(op.getA().getType());
+    unsigned elemBitWidth = typeA.getElementTypeBitWidth();
+    unsigned opBitWidth = typeA.getShape()[0] * elemBitWidth;
+
+    auto opType = op.getDst().getType();
+    auto opA = op.getA();
+
+    switch (opBitWidth) {
+    case 256: {
+      rewriter.replaceOpWithNewOp(op, opType, opA);
+      break;
+    }
+    case 512: {
+      rewriter.replaceOpWithNewOp(op, opType, opA);
+      break;
+    }
+    default: {
+      return rewriter.notifyMatchFailure(
+          op, "unsupported AVX512-BF16 packed f32 to bf16 variant");
+    }
+    }
+
+    return success();
+  }
+};
+
 struct RsqrtOpConversion : public ConvertOpToLLVMPattern<RsqrtOp> {
   using ConvertOpToLLVMPattern::ConvertOpToLLVMPattern;
 
@@ -202,8 +235,10 @@ using Registry = RegistryImpl<
 void mlir::populateX86VectorLegalizeForLLVMExportPatterns(
     const LLVMTypeConverter &converter, RewritePatternSet &patterns) {
   Registry::registerPatterns(converter, patterns);
-  patterns.add(converter);
+  patterns
+      .add(
+          converter);
 }
 
 void mlir::configureX86VectorLegalizeForExportTarget(
@@ -215,6 +250,9 @@ void mlir::configureX86VectorLegalizeForExportTarget(
   target.addLegalOp();
   target.addLegalOp();
   target.addIllegalOp();
+  target.addLegalOp();
+  target.addLegalOp();
+  target.addIllegalOp();
   target.addLegalOp();
   target.addIllegalOp();
   target.addLegalOp();
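// Note on the conversion above: the packed f32-to-bf16 op is dispatched on
// the total vector width, since AVX512-BF16 provides vcvtneps2bf16 variants
// for 256-bit and 512-bit sources only; a vector<8xf32> source selects the
// 256-bit intrinsic and vector<16xf32> the 512-bit one, and every other
// width is rejected with notifyMatchFailure rather than miscompiled.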
+#include "mlir/IR/OpAsmAttrInterface.cpp.inc" #include "mlir/IR/OpAsmOpInterface.cpp.inc" #include "mlir/IR/OpAsmTypeInterface.cpp.inc" @@ -1159,15 +1160,30 @@ template void AliasInitializer::generateAlias(T symbol, InProgressAliasInfo &alias, bool canBeDeferred) { SmallString<32> nameBuffer; - for (const auto &interface : interfaces) { - OpAsmDialectInterface::AliasResult result = - interface.getAlias(symbol, aliasOS); - if (result == OpAsmDialectInterface::AliasResult::NoAlias) - continue; - nameBuffer = std::move(aliasBuffer); - assert(!nameBuffer.empty() && "expected valid alias name"); - if (result == OpAsmDialectInterface::AliasResult::FinalAlias) - break; + + OpAsmDialectInterface::AliasResult symbolInterfaceResult = + OpAsmDialectInterface::AliasResult::NoAlias; + using InterfaceT = std::conditional_t, + OpAsmAttrInterface, OpAsmTypeInterface>; + if (auto symbolInterface = dyn_cast(symbol)) { + symbolInterfaceResult = symbolInterface.getAlias(aliasOS); + if (symbolInterfaceResult != OpAsmDialectInterface::AliasResult::NoAlias) { + nameBuffer = std::move(aliasBuffer); + assert(!nameBuffer.empty() && "expected valid alias name"); + } + } + + if (symbolInterfaceResult != OpAsmDialectInterface::AliasResult::FinalAlias) { + for (const auto &interface : interfaces) { + OpAsmDialectInterface::AliasResult result = + interface.getAlias(symbol, aliasOS); + if (result == OpAsmDialectInterface::AliasResult::NoAlias) + continue; + nameBuffer = std::move(aliasBuffer); + assert(!nameBuffer.empty() && "expected valid alias name"); + if (result == OpAsmDialectInterface::AliasResult::FinalAlias) + break; + } } if (nameBuffer.empty()) @@ -1536,10 +1552,13 @@ StringRef maybeGetValueNameFromLoc(Value value, StringRef name) { } // namespace void SSANameState::numberValuesInRegion(Region ®ion) { + // Indicates whether OpAsmOpInterface set a name. + bool opAsmOpInterfaceUsed = false; auto setBlockArgNameFn = [&](Value arg, StringRef name) { assert(!valueIDs.count(arg) && "arg numbered multiple times"); assert(llvm::cast(arg).getOwner()->getParent() == ®ion && "arg not defined in current region"); + opAsmOpInterfaceUsed = true; if (LLVM_UNLIKELY(printerFlags.shouldUseNameLocAsPrefix())) name = maybeGetValueNameFromLoc(arg, name); setValueName(arg, name); @@ -1549,6 +1568,15 @@ void SSANameState::numberValuesInRegion(Region ®ion) { if (Operation *op = region.getParentOp()) { if (auto asmInterface = dyn_cast(op)) asmInterface.getAsmBlockArgumentNames(region, setBlockArgNameFn); + // If the OpAsmOpInterface didn't set a name, get name from the type. + if (!opAsmOpInterfaceUsed) { + for (BlockArgument arg : region.getArguments()) { + if (auto interface = dyn_cast(arg.getType())) { + interface.getAsmName( + [&](StringRef name) { setBlockArgNameFn(arg, name); }); + } + } + } } } @@ -1598,9 +1626,12 @@ void SSANameState::numberValuesInBlock(Block &block) { void SSANameState::numberValuesInOp(Operation &op) { // Function used to set the special result names for the operation. SmallVector resultGroups(/*Size=*/1, /*Value=*/0); + // Indicates whether OpAsmOpInterface set a name. 
@@ -1598,9 +1626,12 @@ void SSANameState::numberValuesInBlock(Block &block) {
 void SSANameState::numberValuesInOp(Operation &op) {
   // Function used to set the special result names for the operation.
   SmallVector resultGroups(/*Size=*/1, /*Value=*/0);
+  // Indicates whether OpAsmOpInterface set a name.
+  bool opAsmOpInterfaceUsed = false;
   auto setResultNameFn = [&](Value result, StringRef name) {
     assert(!valueIDs.count(result) && "result numbered multiple times");
     assert(result.getDefiningOp() == &op && "result not defined by 'op'");
+    opAsmOpInterfaceUsed = true;
     if (LLVM_UNLIKELY(printerFlags.shouldUseNameLocAsPrefix()))
       name = maybeGetValueNameFromLoc(result, name);
     setValueName(result, name);
@@ -1630,6 +1661,21 @@ void SSANameState::numberValuesInOp(Operation &op) {
     asmInterface.getAsmBlockNames(setBlockNameFn);
     asmInterface.getAsmResultNames(setResultNameFn);
   }
+  if (!opAsmOpInterfaceUsed) {
+    // If the OpAsmOpInterface didn't set a name, and all results have
+    // OpAsmTypeInterface, get names from types.
+    bool allHaveOpAsmTypeInterface =
+        llvm::all_of(op.getResultTypes(), [&](Type type) {
+          return isa<OpAsmTypeInterface>(type);
+        });
+    if (allHaveOpAsmTypeInterface) {
+      for (OpResult result : op.getResults()) {
+        auto interface = cast<OpAsmTypeInterface>(result.getType());
+        interface.getAsmName(
+            [&](StringRef name) { setResultNameFn(result, name); });
+      }
+    }
+  }
 }
 
 unsigned numResults = op.getNumResults();
diff --git a/mlir/lib/Target/Cpp/TranslateRegistration.cpp b/mlir/lib/Target/Cpp/TranslateRegistration.cpp
index 1aa98834a73f4..2108ffd414c56 100644
--- a/mlir/lib/Target/Cpp/TranslateRegistration.cpp
+++ b/mlir/lib/Target/Cpp/TranslateRegistration.cpp
@@ -29,12 +29,17 @@ void registerToCppTranslation() {
       llvm::cl::desc("Declare variables at top when emitting C/C++"),
       llvm::cl::init(false));
 
+  static llvm::cl::opt<std::string> fileId(
+      "file-id", llvm::cl::desc("Emit emitc.file ops with matching id"),
+      llvm::cl::init(""));
+
   TranslateFromMLIRRegistration reg(
       "mlir-to-cpp", "translate from mlir to cpp",
       [](Operation *op, raw_ostream &output) {
         return emitc::translateToCpp(
             op, output,
-            /*declareVariablesAtTop=*/declareVariablesAtTop);
+            /*declareVariablesAtTop=*/declareVariablesAtTop,
+            /*fileId=*/fileId);
       },
       [](DialectRegistry &registry) {
         // clang-format off
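// With the new registration option, emission can be restricted to a single
// emitc.file op; a hypothetical invocation (file names illustrative):
//
//   mlir-translate --mlir-to-cpp -file-id=main input.mlir -o main.cpp
//
// Only emitc.file ops whose id matches "main" are emitted; with the default
// empty -file-id, shouldEmitFile() below returns false for every file op.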
diff --git a/mlir/lib/Target/Cpp/TranslateToCpp.cpp b/mlir/lib/Target/Cpp/TranslateToCpp.cpp
index 3ba1244e637ff..abff252575eb0 100644
--- a/mlir/lib/Target/Cpp/TranslateToCpp.cpp
+++ b/mlir/lib/Target/Cpp/TranslateToCpp.cpp
@@ -114,7 +114,8 @@ static FailureOr getOperatorPrecedence(Operation *operation) {
 namespace {
 /// Emitter that uses dialect specific emitters to emit C++ code.
 struct CppEmitter {
-  explicit CppEmitter(raw_ostream &os, bool declareVariablesAtTop);
+  explicit CppEmitter(raw_ostream &os, bool declareVariablesAtTop,
+                      StringRef fileId);
 
   /// Emits attribute or returns failure.
   LogicalResult emitAttribute(Location loc, Attribute attr);
@@ -231,6 +232,11 @@ struct CppEmitter {
   /// be declared at the beginning of a function.
   bool shouldDeclareVariablesAtTop() { return declareVariablesAtTop; };
 
+  /// Returns whether this file op should be emitted.
+  bool shouldEmitFile(FileOp file) {
+    return !fileId.empty() && file.getId() == fileId;
+  }
+
   /// Get expression currently being emitted.
   ExpressionOp getEmittedExpression() { return emittedExpression; }
 
@@ -258,6 +264,9 @@ struct CppEmitter {
   /// includes results from ops located in nested regions.
   bool declareVariablesAtTop;
 
+  /// Only emit file ops whose id matches this value.
+  std::string fileId;
+
   /// Map from value to name of C++ variable that contains the name.
   ValueMapper valueMapper;
 
@@ -559,7 +568,21 @@ static LogicalResult printOperation(CppEmitter &emitter,
                                     emitc::VerbatimOp verbatimOp) {
   raw_ostream &os = emitter.ostream();
 
-  os << verbatimOp.getValue();
+  FailureOr<SmallVector<ReplacementItem>> items =
+      verbatimOp.parseFormatString();
+  if (failed(items))
+    return failure();
+
+  auto fmtArg = verbatimOp.getFmtArgs().begin();
+
+  for (ReplacementItem &item : *items) {
+    if (auto *str = std::get_if<StringRef>(&item)) {
+      os << *str;
+    } else {
+      if (failed(emitter.emitOperand(*fmtArg++)))
+        return failure();
+    }
+  }
 
   return success();
 }
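// Sketch of the emission contract implemented above: parseFormatString()
// splits the verbatim string into literal StringRef pieces and placeholder
// markers, and each placeholder consumes the next fmt_args operand in order,
// so a verbatim op along the lines of (syntax illustrative)
//
//   emitc.verbatim "int32_t* {} = (int32_t*){};" args %ptr, %buf : ...
//
// interleaves the two operands' C++ names into the output text.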
";\n" : "\n"); @@ -1743,7 +1783,8 @@ LogicalResult CppEmitter::emitTupleType(Location loc, ArrayRef types) { } LogicalResult emitc::translateToCpp(Operation *op, raw_ostream &os, - bool declareVariablesAtTop) { - CppEmitter emitter(os, declareVariablesAtTop); + bool declareVariablesAtTop, + StringRef fileId) { + CppEmitter emitter(os, declareVariablesAtTop, fileId); return emitter.emitOperation(*op, /*trailingSemicolon=*/false); } diff --git a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp index 51a3cbdbb5e7f..b9d88a68410ee 100644 --- a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp +++ b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp @@ -2809,13 +2809,23 @@ getRefPtrIfDeclareTarget(mlir::Value value, } namespace { +// Append customMappers information to existing MapInfosTy +struct MapInfosTy : llvm::OpenMPIRBuilder::MapInfosTy { + SmallVector Mappers; + + /// Append arrays in \a CurInfo. + void append(MapInfosTy &curInfo) { + Mappers.append(curInfo.Mappers.begin(), curInfo.Mappers.end()); + llvm::OpenMPIRBuilder::MapInfosTy::append(curInfo); + } +}; // A small helper structure to contain data gathered // for map lowering and coalese it into one area and // avoiding extra computations such as searches in the // llvm module for lowered mapped variables or checking // if something is declare target (and retrieving the // value) more than neccessary. -struct MapInfoData : llvm::OpenMPIRBuilder::MapInfosTy { +struct MapInfoData : MapInfosTy { llvm::SmallVector IsDeclareTarget; llvm::SmallVector IsAMember; // Identify if mapping was added by mapClause or use_device clauses. @@ -2834,7 +2844,7 @@ struct MapInfoData : llvm::OpenMPIRBuilder::MapInfosTy { OriginalValue.append(CurInfo.OriginalValue.begin(), CurInfo.OriginalValue.end()); BaseType.append(CurInfo.BaseType.begin(), CurInfo.BaseType.end()); - llvm::OpenMPIRBuilder::MapInfosTy::append(CurInfo); + MapInfosTy::append(CurInfo); } }; } // namespace @@ -2955,6 +2965,12 @@ static void collectMapDataFromMapOperands( mapData.Names.push_back(LLVM::createMappingInformation( mapOp.getLoc(), *moduleTranslation.getOpenMPBuilder())); mapData.DevicePointers.push_back(llvm::OpenMPIRBuilder::DeviceInfoTy::None); + if (mapOp.getMapperId()) + mapData.Mappers.push_back( + SymbolTable::lookupNearestSymbolFrom( + mapOp, mapOp.getMapperIdAttr())); + else + mapData.Mappers.push_back(nullptr); mapData.IsAMapping.push_back(true); mapData.IsAMember.push_back(checkIsAMember(mapVars, mapOp)); } @@ -2999,6 +3015,7 @@ static void collectMapDataFromMapOperands( mapData.Names.push_back(LLVM::createMappingInformation( mapOp.getLoc(), *moduleTranslation.getOpenMPBuilder())); mapData.DevicePointers.push_back(devInfoTy); + mapData.Mappers.push_back(nullptr); mapData.IsAMapping.push_back(false); mapData.IsAMember.push_back(checkIsAMember(useDevOperands, mapOp)); } @@ -3164,9 +3181,8 @@ calculateBoundsOffset(LLVM::ModuleTranslation &moduleTranslation, // inside of CGOpenMPRuntime.cpp static llvm::omp::OpenMPOffloadMappingFlags mapParentWithMembers( LLVM::ModuleTranslation &moduleTranslation, llvm::IRBuilderBase &builder, - llvm::OpenMPIRBuilder &ompBuilder, DataLayout &dl, - llvm::OpenMPIRBuilder::MapInfosTy &combinedInfo, MapInfoData &mapData, - uint64_t mapDataIndex, bool isTargetParams) { + llvm::OpenMPIRBuilder &ompBuilder, DataLayout &dl, MapInfosTy &combinedInfo, + MapInfoData &mapData, uint64_t mapDataIndex, bool isTargetParams) { // Map 
the first segment of our structure combinedInfo.Types.emplace_back( isTargetParams @@ -3174,6 +3190,7 @@ static llvm::omp::OpenMPOffloadMappingFlags mapParentWithMembers( : llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_NONE); combinedInfo.DevicePointers.emplace_back( mapData.DevicePointers[mapDataIndex]); + combinedInfo.Mappers.emplace_back(mapData.Mappers[mapDataIndex]); combinedInfo.Names.emplace_back(LLVM::createMappingInformation( mapData.MapClause[mapDataIndex]->getLoc(), ompBuilder)); combinedInfo.BasePointers.emplace_back(mapData.BasePointers[mapDataIndex]); @@ -3237,6 +3254,7 @@ static llvm::omp::OpenMPOffloadMappingFlags mapParentWithMembers( combinedInfo.Types.emplace_back(mapFlag); combinedInfo.DevicePointers.emplace_back( llvm::OpenMPIRBuilder::DeviceInfoTy::None); + combinedInfo.Mappers.emplace_back(nullptr); combinedInfo.Names.emplace_back(LLVM::createMappingInformation( mapData.MapClause[mapDataIndex]->getLoc(), ompBuilder)); combinedInfo.BasePointers.emplace_back(mapData.BasePointers[mapDataIndex]); @@ -3270,9 +3288,9 @@ static bool checkIfPointerMap(omp::MapInfoOp mapOp) { // This function is intended to add explicit mappings of members static void processMapMembersWithParent( LLVM::ModuleTranslation &moduleTranslation, llvm::IRBuilderBase &builder, - llvm::OpenMPIRBuilder &ompBuilder, DataLayout &dl, - llvm::OpenMPIRBuilder::MapInfosTy &combinedInfo, MapInfoData &mapData, - uint64_t mapDataIndex, llvm::omp::OpenMPOffloadMappingFlags memberOfFlag) { + llvm::OpenMPIRBuilder &ompBuilder, DataLayout &dl, MapInfosTy &combinedInfo, + MapInfoData &mapData, uint64_t mapDataIndex, + llvm::omp::OpenMPOffloadMappingFlags memberOfFlag) { auto parentClause = llvm::cast(mapData.MapClause[mapDataIndex]); @@ -3300,6 +3318,7 @@ static void processMapMembersWithParent( combinedInfo.Types.emplace_back(mapFlag); combinedInfo.DevicePointers.emplace_back( llvm::OpenMPIRBuilder::DeviceInfoTy::None); + combinedInfo.Mappers.emplace_back(nullptr); combinedInfo.Names.emplace_back( LLVM::createMappingInformation(memberClause.getLoc(), ompBuilder)); combinedInfo.BasePointers.emplace_back( @@ -3322,6 +3341,7 @@ static void processMapMembersWithParent( combinedInfo.Types.emplace_back(mapFlag); combinedInfo.DevicePointers.emplace_back( mapData.DevicePointers[memberDataIdx]); + combinedInfo.Mappers.emplace_back(mapData.Mappers[memberDataIdx]); combinedInfo.Names.emplace_back( LLVM::createMappingInformation(memberClause.getLoc(), ompBuilder)); uint64_t basePointerIndex = @@ -3341,10 +3361,9 @@ static void processMapMembersWithParent( } } -static void -processIndividualMap(MapInfoData &mapData, size_t mapDataIdx, - llvm::OpenMPIRBuilder::MapInfosTy &combinedInfo, - bool isTargetParams, int mapDataParentIdx = -1) { +static void processIndividualMap(MapInfoData &mapData, size_t mapDataIdx, + MapInfosTy &combinedInfo, bool isTargetParams, + int mapDataParentIdx = -1) { // Declare Target Mappings are excluded from being marked as // OMP_MAP_TARGET_PARAM as they are not passed as parameters, they're // marked with OMP_MAP_PTR_AND_OBJ instead. 
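// Two things worth noting for the mapper plumbing above and below:
//
// 1. MapInfosTy holds parallel arrays, so every site that pushes a
//    BasePointers/Pointers/Sizes/Types/Names entry must now also push a
//    Mappers entry (nullptr when no user-defined mapper applies) to keep
//    the arrays index-aligned.
//
// 2. The mapper operations being translated correspond to OpenMP
//    declare-mapper constructs, such as this C sketch:
//
//      struct T { int *data; int len; };
//      #pragma omp declare mapper(my_mapper : struct T v) \
//          map(tofrom : v, v.data[0:v.len])
//
//    which frontends lower to an omp.declare_mapper region like the one in
//    the MLIR test further down.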
@@ -3374,16 +3393,18 @@ processIndividualMap(MapInfoData &mapData, size_t mapDataIdx, combinedInfo.Pointers.emplace_back(mapData.Pointers[mapDataIdx]); combinedInfo.DevicePointers.emplace_back(mapData.DevicePointers[mapDataIdx]); + combinedInfo.Mappers.emplace_back(mapData.Mappers[mapDataIdx]); combinedInfo.Names.emplace_back(mapData.Names[mapDataIdx]); combinedInfo.Types.emplace_back(mapFlag); combinedInfo.Sizes.emplace_back(mapData.Sizes[mapDataIdx]); } -static void processMapWithMembersOf( - LLVM::ModuleTranslation &moduleTranslation, llvm::IRBuilderBase &builder, - llvm::OpenMPIRBuilder &ompBuilder, DataLayout &dl, - llvm::OpenMPIRBuilder::MapInfosTy &combinedInfo, MapInfoData &mapData, - uint64_t mapDataIndex, bool isTargetParams) { +static void processMapWithMembersOf(LLVM::ModuleTranslation &moduleTranslation, + llvm::IRBuilderBase &builder, + llvm::OpenMPIRBuilder &ompBuilder, + DataLayout &dl, MapInfosTy &combinedInfo, + MapInfoData &mapData, uint64_t mapDataIndex, + bool isTargetParams) { auto parentClause = llvm::cast(mapData.MapClause[mapDataIndex]); @@ -3488,8 +3509,7 @@ createAlteredByCaptureMap(MapInfoData &mapData, // Generate all map related information and fill the combinedInfo. static void genMapInfos(llvm::IRBuilderBase &builder, LLVM::ModuleTranslation &moduleTranslation, - DataLayout &dl, - llvm::OpenMPIRBuilder::MapInfosTy &combinedInfo, + DataLayout &dl, MapInfosTy &combinedInfo, MapInfoData &mapData, bool isTargetParams = false) { // We wish to modify some of the methods in which arguments are // passed based on their capture type by the target region, this can @@ -3529,6 +3549,78 @@ static void genMapInfos(llvm::IRBuilderBase &builder, } } +static llvm::Expected +emitUserDefinedMapper(Operation *declMapperOp, llvm::IRBuilderBase &builder, + LLVM::ModuleTranslation &moduleTranslation, + llvm::StringRef mapperFuncName); + +static llvm::Expected +getOrCreateUserDefinedMapperFunc(Operation *op, llvm::IRBuilderBase &builder, + LLVM::ModuleTranslation &moduleTranslation) { + auto declMapperOp = cast(op); + std::string mapperFuncName = + moduleTranslation.getOpenMPBuilder()->createPlatformSpecificName( + {"omp_mapper", declMapperOp.getSymName()}); + + if (auto *lookupFunc = moduleTranslation.lookupFunction(mapperFuncName)) + return lookupFunc; + + return emitUserDefinedMapper(declMapperOp, builder, moduleTranslation, + mapperFuncName); +} + +static llvm::Expected +emitUserDefinedMapper(Operation *op, llvm::IRBuilderBase &builder, + LLVM::ModuleTranslation &moduleTranslation, + llvm::StringRef mapperFuncName) { + auto declMapperOp = cast(op); + auto declMapperInfoOp = declMapperOp.getDeclareMapperInfo(); + DataLayout dl = DataLayout(declMapperOp->getParentOfType()); + llvm::OpenMPIRBuilder *ompBuilder = moduleTranslation.getOpenMPBuilder(); + llvm::Type *varType = moduleTranslation.convertType(declMapperOp.getType()); + SmallVector mapVars = declMapperInfoOp.getMapVars(); + + using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy; + + // Fill up the arrays with all the mapped variables. 
+ MapInfosTy combinedInfo; + auto genMapInfoCB = + [&](InsertPointTy codeGenIP, llvm::Value *ptrPHI, + llvm::Value *unused2) -> llvm::OpenMPIRBuilder::MapInfosOrErrorTy { + builder.restoreIP(codeGenIP); + moduleTranslation.mapValue(declMapperOp.getSymVal(), ptrPHI); + moduleTranslation.mapBlock(&declMapperOp.getRegion().front(), + builder.GetInsertBlock()); + if (failed(moduleTranslation.convertBlock(declMapperOp.getRegion().front(), + /*ignoreArguments=*/true, + builder))) + return llvm::make_error(); + MapInfoData mapData; + collectMapDataFromMapOperands(mapData, mapVars, moduleTranslation, dl, + builder); + genMapInfos(builder, moduleTranslation, dl, combinedInfo, mapData); + + // Drop the mapping that is no longer necessary so that the same region can + // be processed multiple times. + moduleTranslation.forgetMapping(declMapperOp.getRegion()); + return combinedInfo; + }; + + auto customMapperCB = [&](unsigned i) -> llvm::Expected { + if (!combinedInfo.Mappers[i]) + return nullptr; + return getOrCreateUserDefinedMapperFunc(combinedInfo.Mappers[i], builder, + moduleTranslation); + }; + + llvm::Expected newFn = ompBuilder->emitUserDefinedMapper( + genMapInfoCB, varType, mapperFuncName, customMapperCB); + if (!newFn) + return newFn.takeError(); + moduleTranslation.mapFunction(mapperFuncName, *newFn); + return *newFn; +} + static LogicalResult convertOmpTargetData(Operation *op, llvm::IRBuilderBase &builder, LLVM::ModuleTranslation &moduleTranslation) { @@ -3640,9 +3732,8 @@ convertOmpTargetData(Operation *op, llvm::IRBuilderBase &builder, builder, useDevicePtrVars, useDeviceAddrVars); // Fill up the arrays with all the mapped variables. - llvm::OpenMPIRBuilder::MapInfosTy combinedInfo; - auto genMapInfoCB = - [&](InsertPointTy codeGenIP) -> llvm::OpenMPIRBuilder::MapInfosTy & { + MapInfosTy combinedInfo; + auto genMapInfoCB = [&](InsertPointTy codeGenIP) -> MapInfosTy & { builder.restoreIP(codeGenIP); genMapInfos(builder, moduleTranslation, DL, combinedInfo, mapData); return combinedInfo; @@ -3685,6 +3776,7 @@ convertOmpTargetData(Operation *op, llvm::IRBuilderBase &builder, using BodyGenTy = llvm::OpenMPIRBuilder::BodyGenTy; auto bodyGenCB = [&](InsertPointTy codeGenIP, BodyGenTy bodyGenType) -> llvm::OpenMPIRBuilder::InsertPointOrErrorTy { + builder.restoreIP(codeGenIP); assert(isa(op) && "BodyGen requested for non TargetDataOp"); auto blockArgIface = cast(op); @@ -3693,8 +3785,6 @@ convertOmpTargetData(Operation *op, llvm::IRBuilderBase &builder, case BodyGenTy::Priv: // Check if any device ptr/addr info is available if (!info.DevicePtrInfoMap.empty()) { - builder.restoreIP(codeGenIP); - mapUseDevice(llvm::OpenMPIRBuilder::DeviceInfoTy::Address, blockArgIface.getUseDeviceAddrBlockArgs(), useDeviceAddrVars, mapData, @@ -3724,7 +3814,6 @@ convertOmpTargetData(Operation *op, llvm::IRBuilderBase &builder, case BodyGenTy::NoPriv: // If device info is available then region has already been generated if (info.DevicePtrInfoMap.empty()) { - builder.restoreIP(codeGenIP); // For device pass, if use_device_ptr(addr) mappings were present, // we need to link them here before codegen. 
if (ompBuilder->Config.IsTargetDevice.value_or(false)) { @@ -3745,17 +3834,28 @@ convertOmpTargetData(Operation *op, llvm::IRBuilderBase &builder, return builder.saveIP(); }; + auto customMapperCB = + [&](unsigned int i) -> llvm::Expected { + if (!combinedInfo.Mappers[i]) + return nullptr; + info.HasMapper = true; + return getOrCreateUserDefinedMapperFunc(combinedInfo.Mappers[i], builder, + moduleTranslation); + }; + llvm::OpenMPIRBuilder::LocationDescription ompLoc(builder); llvm::OpenMPIRBuilder::InsertPointTy allocaIP = findAllocaInsertPoint(builder, moduleTranslation); llvm::OpenMPIRBuilder::InsertPointOrErrorTy afterIP = [&]() { if (isa(op)) - return ompBuilder->createTargetData( - ompLoc, allocaIP, builder.saveIP(), builder.getInt64(deviceID), - ifCond, info, genMapInfoCB, nullptr, bodyGenCB); - return ompBuilder->createTargetData(ompLoc, allocaIP, builder.saveIP(), - builder.getInt64(deviceID), ifCond, - info, genMapInfoCB, &RTLFn); + return ompBuilder->createTargetData(ompLoc, allocaIP, builder.saveIP(), + builder.getInt64(deviceID), ifCond, + info, genMapInfoCB, customMapperCB, + /*MapperFunc=*/nullptr, bodyGenCB, + /*DeviceAddrCB=*/nullptr); + return ompBuilder->createTargetData( + ompLoc, allocaIP, builder.saveIP(), builder.getInt64(deviceID), ifCond, + info, genMapInfoCB, customMapperCB, &RTLFn); }(); if (failed(handleError(afterIP, *op))) @@ -4367,9 +4467,9 @@ convertOmpTarget(Operation &opInst, llvm::IRBuilderBase &builder, collectMapDataFromMapOperands(mapData, mapVars, moduleTranslation, dl, builder); - llvm::OpenMPIRBuilder::MapInfosTy combinedInfos; - auto genMapInfoCB = [&](llvm::OpenMPIRBuilder::InsertPointTy codeGenIP) - -> llvm::OpenMPIRBuilder::MapInfosTy & { + MapInfosTy combinedInfos; + auto genMapInfoCB = + [&](llvm::OpenMPIRBuilder::InsertPointTy codeGenIP) -> MapInfosTy & { builder.restoreIP(codeGenIP); genMapInfos(builder, moduleTranslation, dl, combinedInfos, mapData, true); return combinedInfos; @@ -4438,15 +4538,28 @@ convertOmpTarget(Operation &opInst, llvm::IRBuilderBase &builder, findAllocaInsertPoint(builder, moduleTranslation); llvm::OpenMPIRBuilder::LocationDescription ompLoc(builder); + llvm::OpenMPIRBuilder::TargetDataInfo info( + /*RequiresDevicePointerInfo=*/false, + /*SeparateBeginEndCalls=*/true); + + auto customMapperCB = + [&](unsigned int i) -> llvm::Expected { + if (!combinedInfos.Mappers[i]) + return nullptr; + info.HasMapper = true; + return getOrCreateUserDefinedMapperFunc(combinedInfos.Mappers[i], builder, + moduleTranslation); + }; + llvm::Value *ifCond = nullptr; if (Value targetIfCond = targetOp.getIfExpr()) ifCond = moduleTranslation.lookupValue(targetIfCond); llvm::OpenMPIRBuilder::InsertPointOrErrorTy afterIP = moduleTranslation.getOpenMPBuilder()->createTarget( - ompLoc, isOffloadEntry, allocaIP, builder.saveIP(), entryInfo, + ompLoc, isOffloadEntry, allocaIP, builder.saveIP(), info, entryInfo, defaultAttrs, runtimeAttrs, ifCond, kernelInput, genMapInfoCB, bodyCB, - argAccessorCB, dds, targetOp.getNowait()); + argAccessorCB, customMapperCB, dds, targetOp.getNowait()); if (failed(handleError(afterIP, opInst))) return failure(); @@ -4673,12 +4786,15 @@ convertHostOrTargetOperation(Operation *op, llvm::IRBuilderBase &builder, .Case([&](omp::TaskwaitOp op) { return convertOmpTaskwaitOp(op, builder, moduleTranslation); }) - .Case([](auto op) { // `yield` and `terminator` can be just omitted. The block structure // was created in the region that handles their parent operation. 
// `declare_reduction` will be used by reductions and is not // converted directly, skip it. + // `declare_mapper` and `declare_mapper.info` are handled whenever they + // are referred to through a `map` clause. // `critical.declare` is only used to declare names of critical // sections which will be used by `critical` ops and hence can be // ignored for lowering. The OpenMP IRBuilder will create unique diff --git a/mlir/lib/Target/LLVMIR/ModuleImport.cpp b/mlir/lib/Target/LLVMIR/ModuleImport.cpp index fd0283b856b6b..8445e609c2244 100644 --- a/mlir/lib/Target/LLVMIR/ModuleImport.cpp +++ b/mlir/lib/Target/LLVMIR/ModuleImport.cpp @@ -962,13 +962,18 @@ ModuleImport::getOrCreateNamelessSymbolName(llvm::GlobalVariable *globalVar) { return symbolRef; } -LogicalResult ModuleImport::convertAlias(llvm::GlobalAlias *alias) { - // Insert the global after the last one or at the start of the module. +OpBuilder::InsertionGuard ModuleImport::setGlobalInsertionPoint() { OpBuilder::InsertionGuard guard(builder); - if (!aliasInsertionOp) - builder.setInsertionPointToStart(mlirModule.getBody()); + if (globalInsertionOp) + builder.setInsertionPointAfter(globalInsertionOp); else - builder.setInsertionPointAfter(aliasInsertionOp); + builder.setInsertionPointToStart(mlirModule.getBody()); + return guard; +} + +LogicalResult ModuleImport::convertAlias(llvm::GlobalAlias *alias) { + // Insert the alias after the last one or at the start of the module. + OpBuilder::InsertionGuard guard = setGlobalInsertionPoint(); Type type = convertType(alias->getValueType()); AliasOp aliasOp = builder.create( @@ -977,7 +982,7 @@ LogicalResult ModuleImport::convertAlias(llvm::GlobalAlias *alias) { /*dso_local=*/alias->isDSOLocal(), /*thread_local=*/alias->isThreadLocal(), /*attrs=*/ArrayRef()); - aliasInsertionOp = aliasOp; + globalInsertionOp = aliasOp; clearRegionState(); Block *block = builder.createBlock(&aliasOp.getInitializerRegion()); @@ -996,11 +1001,7 @@ LogicalResult ModuleImport::convertAlias(llvm::GlobalAlias *alias) { LogicalResult ModuleImport::convertGlobal(llvm::GlobalVariable *globalVar) { // Insert the global after the last one or at the start of the module. - OpBuilder::InsertionGuard guard(builder); - if (!globalInsertionOp) - builder.setInsertionPointToStart(mlirModule.getBody()); - else - builder.setInsertionPointAfter(globalInsertionOp); + OpBuilder::InsertionGuard guard = setGlobalInsertionPoint(); Attribute valueAttr; if (globalVar->hasInitializer()) @@ -1096,11 +1097,8 @@ ModuleImport::convertGlobalCtorsAndDtors(llvm::GlobalVariable *globalVar) { priorities.push_back(priority->getValue().getZExtValue()); } - OpBuilder::InsertionGuard guard(builder); - if (!globalInsertionOp) - builder.setInsertionPointToStart(mlirModule.getBody()); - else - builder.setInsertionPointAfter(globalInsertionOp); + // Insert the global after the last one or at the start of the module. 
+ OpBuilder::InsertionGuard guard = setGlobalInsertionPoint(); if (globalVar->getName() == getGlobalCtorsVarName()) { globalInsertionOp = builder.create( diff --git a/mlir/test/Conversion/OpenMPToLLVM/convert-to-llvmir.mlir b/mlir/test/Conversion/OpenMPToLLVM/convert-to-llvmir.mlir index 6f1ed73e778b4..d69de998346b5 100644 --- a/mlir/test/Conversion/OpenMPToLLVM/convert-to-llvmir.mlir +++ b/mlir/test/Conversion/OpenMPToLLVM/convert-to-llvmir.mlir @@ -601,3 +601,16 @@ func.func @omp_taskloop(%arg0: index, %arg1 : memref) { } return } + +// ----- + +// CHECK-LABEL: omp.declare_mapper @my_mapper : !llvm.struct<"_QFdeclare_mapperTmy_type", (i32)> { +omp.declare_mapper @my_mapper : !llvm.struct<"_QFdeclare_mapperTmy_type", (i32)> { +^bb0(%arg0: !llvm.ptr): + %0 = llvm.mlir.constant(0 : i32) : i32 + %1 = llvm.getelementptr %arg0[0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"_QFdeclare_mapperTmy_type", (i32)> + %2 = omp.map.info var_ptr(%1 : !llvm.ptr, i32) map_clauses(tofrom) capture(ByRef) -> !llvm.ptr {name = "var%data"} + %3 = omp.map.info var_ptr(%arg0 : !llvm.ptr, !llvm.struct<"_QFdeclare_mapperTmy_type", (i32)>) map_clauses(tofrom) capture(ByRef) members(%2 : [0] : !llvm.ptr) -> !llvm.ptr {name = "var", partial_map = true} + // CHECK: omp.declare_mapper.info map_entries(%{{.*}}, %{{.*}} : !llvm.ptr, !llvm.ptr) + omp.declare_mapper.info map_entries(%3, %2 : !llvm.ptr, !llvm.ptr) +} diff --git a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-resize.mlir b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-resize.mlir index d42d0a46692d4..5a2ee7d9e8720 100644 --- a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-resize.mlir +++ b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-resize.mlir @@ -2,7 +2,10 @@ // CHECK-LABEL: @unary_resize_nearest_fp32 func.func @unary_resize_nearest_fp32(%arg0 : tensor<3x1x1x7xf32>) -> tensor<3x1x1x7xf32> { - %resize = "tosa.resize"(%arg0) {mode = "NEAREST_NEIGHBOR", scale = array, offset = array, border = array} : (tensor<3x1x1x7xf32>) -> tensor<3x1x1x7xf32> + %scale = tosa.const_shape { value = dense<[2, 2, 1, 1]> : tensor<4xindex> } : () -> !tosa.shape<4> + %offset = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> + %border = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> + %resize = tosa.resize %arg0, %scale, %offset, %border {mode = "NEAREST_NEIGHBOR"} : (tensor<3x1x1x7xf32>, !tosa.shape<4>, !tosa.shape<2>, !tosa.shape<2>) -> tensor<3x1x1x7xf32> // CHECK: return %arg0 return %resize : tensor<3x1x1x7xf32> } @@ -11,7 +14,10 @@ func.func @unary_resize_nearest_fp32(%arg0 : tensor<3x1x1x7xf32>) -> tensor<3x1x // CHECK-LABEL: @unary_resize_nearest_fp16 func.func @unary_resize_nearest_fp16(%arg0 : tensor<3x1x1x7xf16>) -> tensor<3x1x1x7xf16> { - %resize = "tosa.resize"(%arg0) {mode = "NEAREST_NEIGHBOR", scale = array, offset = array, border = array} : (tensor<3x1x1x7xf16>) -> tensor<3x1x1x7xf16> + %scale = tosa.const_shape { value = dense<[2, 2, 1, 1]> : tensor<4xindex> } : () -> !tosa.shape<4> + %offset = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> + %border = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> + %resize = tosa.resize %arg0, %scale, %offset, %border {mode = "NEAREST_NEIGHBOR"} : (tensor<3x1x1x7xf16>, !tosa.shape<4>, !tosa.shape<2>, !tosa.shape<2>) -> tensor<3x1x1x7xf16> // CHECK: return %arg0 return %resize : tensor<3x1x1x7xf16> } @@ -20,7 +26,10 @@ func.func @unary_resize_nearest_fp16(%arg0 : tensor<3x1x1x7xf16>) -> 
tensor<3x1x // CHECK-LABEL: @unary_resize_bilinear_fp32 func.func @unary_resize_bilinear_fp32(%arg0 : tensor<3x1x1x7xf32>) -> tensor<3x1x1x7xf32> { - %resize = "tosa.resize"(%arg0) {mode = "BILINEAR", scale = array, offset = array, border = array} : (tensor<3x1x1x7xf32>) -> tensor<3x1x1x7xf32> + %scale = tosa.const_shape { value = dense<[2, 2, 1, 1]> : tensor<4xindex> } : () -> !tosa.shape<4> + %offset = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> + %border = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> + %resize = tosa.resize %arg0, %scale, %offset, %border {mode = "BILINEAR"} : (tensor<3x1x1x7xf32>, !tosa.shape<4>, !tosa.shape<2>, !tosa.shape<2>) -> tensor<3x1x1x7xf32> // CHECK: return %arg0 return %resize : tensor<3x1x1x7xf32> } @@ -29,7 +38,10 @@ func.func @unary_resize_bilinear_fp32(%arg0 : tensor<3x1x1x7xf32>) -> tensor<3x1 // CHECK-LABEL: @unary_resize_bilinear_fp16 func.func @unary_resize_bilinear_fp16(%arg0 : tensor<3x1x1x7xf16>) -> tensor<3x1x1x7xf16> { - %resize = "tosa.resize"(%arg0) {mode = "BILINEAR", scale = array, offset = array, border = array} : (tensor<3x1x1x7xf16>) -> tensor<3x1x1x7xf16> + %scale = tosa.const_shape { value = dense<[2, 2, 1, 1]> : tensor<4xindex> } : () -> !tosa.shape<4> + %offset = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> + %border = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> + %resize = tosa.resize %arg0, %scale, %offset, %border {mode = "BILINEAR"} : (tensor<3x1x1x7xf16>, !tosa.shape<4>, !tosa.shape<2>, !tosa.shape<2>) -> tensor<3x1x1x7xf16> // CHECK: return %arg0 return %resize : tensor<3x1x1x7xf16> } @@ -38,7 +50,10 @@ func.func @unary_resize_bilinear_fp16(%arg0 : tensor<3x1x1x7xf16>) -> tensor<3x1 // CHECK-LABEL: @unary_resize_nearest_i8 func.func @unary_resize_nearest_i8(%arg0 : tensor<3x1x1x7xi8>) -> tensor<3x1x1x7xi8> { - %resize = "tosa.resize"(%arg0) {mode = "NEAREST_NEIGHBOR", scale = array, offset = array, border = array} : (tensor<3x1x1x7xi8>) -> tensor<3x1x1x7xi8> + %scale = tosa.const_shape { value = dense<[2, 1, 3, 1]> : tensor<4xindex> } : () -> !tosa.shape<4> + %offset = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> + %border = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> + %resize = tosa.resize %arg0, %scale, %offset, %border {mode = "NEAREST_NEIGHBOR"} : (tensor<3x1x1x7xi8>, !tosa.shape<4>, !tosa.shape<2>, !tosa.shape<2>) -> tensor<3x1x1x7xi8> // CHECK: return %arg0 return %resize : tensor<3x1x1x7xi8> } @@ -50,14 +65,16 @@ func.func @broadcast_resize_nearest_f32(%arg0 : tensor<3x1x1x7xf32>) -> tensor<3 // CHECK: %[[COLLAPSE:.+]] = tensor.collapse_shape %arg0 // CHECK-NEXT{literal}: [[0], [1, 2, 3]] : tensor<3x1x1x7xf32> into tensor<3x7xf32> // CHECK: %[[EMPTY:.+]] = tensor.empty() : tensor<3x1x5x7xf32> - // CHECK: %[[GENERIC:.+]] = linalg.generic + // CHECK: %[[GENERIC:.+]] = linalg.generic // CHECK-SAME: indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} // CHECK-SAME: ins(%[[COLLAPSE]] : tensor<3x7xf32>) outs(%[[EMPTY]] : tensor<3x1x5x7xf32>) // CHECK: ^bb0(%[[IN:.+]]: f32, %[[OUT:.+]]: f32): // CHECK: linalg.yield %[[IN]] : f32 - %resize = "tosa.resize"(%arg0) {mode = "NEAREST_NEIGHBOR", scale = array, offset = array, border = array} : (tensor<3x1x1x7xf32>) -> tensor<3x1x5x7xf32> + %scale = tosa.const_shape { value = dense<[2, 1, 3, 1]> : tensor<4xindex> } : () -> !tosa.shape<4> 
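// Note on the migration pattern in these tests: the attribute form
//   "tosa.resize"(%in) {scale = array<i64: 2, 1, 3, 1>, ...}
// is replaced everywhere by shape operands built with tosa.const_shape, as
// above. For this broadcast case the input height/width are both 1, so the
// strict size relation is deliberately not enforced and the op may still
// expand to the 3x1x5x7 output via the Linalg broadcast lowering.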
+ %offset = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> + %border = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> + %resize = tosa.resize %arg0, %scale, %offset, %border {mode = "NEAREST_NEIGHBOR"} : (tensor<3x1x1x7xf32>, !tosa.shape<4>, !tosa.shape<2>, !tosa.shape<2>) -> tensor<3x1x5x7xf32> - // CHECK: return %[[GENERIC]] return %resize : tensor<3x1x5x7xf32> } @@ -79,19 +96,22 @@ func.func @broadcast_resize_bilinear_i8(%arg0 : tensor<3x1x1x7xi8>) -> tensor<3x // CHECK: %[[OUT:.+]] = arith.muli %[[MUL]], %[[C3]] : i32 // CHECK: linalg.yield %[[OUT]] : i32 // CHECK: } -> tensor<3x7xi32> - // CHECK: %[[EXPAND:.+]] = tensor.expand_shape %1 - // CHECK-SAME{literal}: [[0], [1, 2, 3]] : tensor<3x7xi32> into tensor<3x1x1x7xi32> - // CHECK: %[[COLLAPSE:.+]] = tensor.collapse_shape %expanded + // CHECK: %[[EXPAND:.+]] = tensor.expand_shape %[[RESIZE]] + // CHECK-SAME{literal}: [[0], [1, 2, 3]] output_shape [3, 1, 1, 7] : + // CHECK-SAME: tensor<3x7xi32> into tensor<3x1x1x7xi32> + // CHECK: %[[COLLAPSE_0:.+]] = tensor.collapse_shape %[[EXPAND]] // CHECK-SAME{literal}:[[0], [1, 2, 3]] : tensor<3x1x1x7xi32> into tensor<3x7xi32> - // CHECK: %[[EMPTY:.+]] = tensor.empty() : tensor<3x4x5x7xi32> + // CHECK: %[[EMPTY_0:.+]] = tensor.empty() : tensor<3x4x5x7xi32> // CHECK: %[[BROADCAST:.+]] = linalg.generic // CHECK-SAME: indexing_maps = [#map1, #map2], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} - // CHECK-SAME: ins(%[[COLLAPSE]] : tensor<3x7xi32>) outs(%[[EMPTY]] : tensor<3x4x5x7xi32>) { + // CHECK-SAME: ins(%[[COLLAPSE_0]] : tensor<3x7xi32>) outs(%[[EMPTY_0]] : tensor<3x4x5x7xi32>) { // CHECK: ^bb0(%[[IN:.+]]: i32, %[[OUT:.+]]: i32): // CHECK: linalg.yield %[[IN]] : i32 - %resize = "tosa.resize"(%arg0) {mode = "BILINEAR", scale = array, offset = array, border = array} : (tensor<3x1x1x7xi8>) -> tensor<3x4x5x7xi32> + %scale = tosa.const_shape { value = dense<[2, 1, 3, 1]> : tensor<4xindex> } : () -> !tosa.shape<4> + %offset = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> + %border = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> + %resize = tosa.resize %arg0, %scale, %offset, %border {mode = "BILINEAR"} : (tensor<3x1x1x7xi8>, !tosa.shape<4>, !tosa.shape<2>, !tosa.shape<2>) -> tensor<3x4x5x7xi32> - // CHECK: return %[[BROADCAST]] return %resize : tensor<3x4x5x7xi32> } @@ -102,7 +122,7 @@ func.func @unary_resize_bilinear_i32(%arg0 : tensor<3x1x1x7xi8>) -> tensor<3x1x1 // CHECK: %[[COLLAPSE:.+]] = tensor.collapse_shape %arg0 // CHECK-SAME{literal}: [[0], [1, 2, 3]] : tensor<3x1x1x7xi8> into tensor<3x7xi8> // CHECK: %[[EMPTY:.+]] = tensor.empty() : tensor<3x7xi32> - // CHECK: %[[GENERIC:.+]] = linalg.generic + // CHECK: %[[GENERIC:.+]] = linalg.generic // CHECK-SAME: indexing_maps = [#map, #map] // CHECK-SAME: iterator_types = ["parallel", "parallel"]} // CHECK-SAME: ins(%[[COLLAPSE]] : tensor<3x7xi8>) outs(%[[EMPTY]] : tensor<3x7xi32>) { @@ -111,12 +131,15 @@ func.func @unary_resize_bilinear_i32(%arg0 : tensor<3x1x1x7xi8>) -> tensor<3x1x1 // CHECK-DAG: %[[C2:.+]] = arith.constant 2 : i32 // CHECK: %[[MUL0:.+]] = arith.muli %[[EXT]], %[[C2]] : i32 // CHECK-DAG: %[[C1:.+]] = arith.constant 2 : i32 - // CHECK: %4 = arith.muli %3, %[[C1]] : i32 - // CHECK: linalg.yield %4 : i32 + // CHECK: %7 = arith.muli %6, %[[C1]] : i32 + // CHECK: linalg.yield %7 : i32 // CHECK: } -> tensor<3x7xi32> // CHECK: %[[EXPAND:.+]] = tensor.expand_shape %[[GENERIC:.+]] // 
CHECK-SAME{literal} [[0], [1, 2, 3]] : tensor<3x7xi32> into tensor<3x1x1x7xi32> - %resize = "tosa.resize"(%arg0) {mode = "BILINEAR", scale = array<i64: 2, 1, 2, 1>, offset = array<i64: 0, 0>, border = array<i64: 0, 0>} : (tensor<3x1x1x7xi8>) -> tensor<3x1x1x7xi32> + %scale = tosa.const_shape { value = dense<[2, 1, 2, 1]> : tensor<4xindex> } : () -> !tosa.shape<4> + %offset = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> + %border = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> + %resize = tosa.resize %arg0, %scale, %offset, %border {mode = "BILINEAR"} : (tensor<3x1x1x7xi8>, !tosa.shape<4>, !tosa.shape<2>, !tosa.shape<2>) -> tensor<3x1x1x7xi32> // CHECK: return %[[EXPAND]] return %resize : tensor<3x1x1x7xi32> @@ -184,7 +207,10 @@ func.func @resize_nearest_int(%arg0: tensor<1x15x13x1xi8>) -> () { // CHECK: linalg.yield %[[EXTRACT]] // Round to the nearest index. - %0 = "tosa.resize"(%arg0) {mode = "NEAREST_NEIGHBOR", scale = array<i64: 11, 7, 89, 6>, offset = array<i64: 0, 0>, border = array<i64: 0, 0>} : (tensor<1x15x13x1xi8>) -> tensor<1x23x179x1xi8> + %scale = tosa.const_shape { value = dense<[11, 7, 89, 6]> : tensor<4xindex> } : () -> !tosa.shape<4> + %offset = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> + %border = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> + %0 = tosa.resize %arg0, %scale, %offset, %border {mode = "NEAREST_NEIGHBOR"} : (tensor<1x15x13x1xi8>, !tosa.shape<4>, !tosa.shape<2>, !tosa.shape<2>) -> tensor<1x23x179x1xi8> return } @@ -193,7 +219,7 @@ func.func @resize_nearest_int(%arg0: tensor<1x15x13x1xi8>) -> () { // CHECK-LABEL: @resize_bilinear_int // CHECK-SAME: (%[[ARG0:[0-9a-zA-Z_]*]]: func.func @resize_bilinear_int(%arg0: tensor<1x19x20x1xi8>) { - // CHECK: %[[INIT:.+]] = tensor.empty() : tensor<1x304x320x1xi48> + // CHECK: %[[INIT:.+]] = tensor.empty() : tensor<1x289x305x1xi48>
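// The corrected sizes follow the TOSA size relation output = ((input - 1) * scale_n - offset + border) / scale_d + 1:
// height = ((19 - 1) * 16 - 0 + 0) / 1 + 1 = 289 and width = ((20 - 1) * 16 - 0 + 0) / 1 + 1 = 305. The old
// 1x304x320x1 expectation was simply input * scale, which no longer matches the verified output shape.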
// CHECK: %[[GENERIC:.+]] = linalg.generic // CHECK: %[[IDX_0:.+]] = linalg.index 0 // CHECK: %[[IDX_1:.+]] = linalg.index 1 @@ -285,7 +311,10 @@ func.func @resize_bilinear_int(%arg0: tensor<1x19x20x1xi8>) { // CHECK: linalg.yield %[[RESULT]] // Round to the nearest index. - %0 = "tosa.resize"(%arg0) {mode = "BILINEAR", scale = array<i64: 16, 1, 16, 1>, offset = array<i64: 0, 0>, border = array<i64: 0, 0>} : (tensor<1x19x20x1xi8>) -> tensor<1x304x320x1xi48> + %scale = tosa.const_shape { value = dense<[16, 1, 16, 1]> : tensor<4xindex> } : () -> !tosa.shape<4> + %offset = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> + %border = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> + %0 = tosa.resize %arg0, %scale, %offset, %border {mode = "BILINEAR"} : (tensor<1x19x20x1xi8>, !tosa.shape<4>, !tosa.shape<2>, !tosa.shape<2>) -> tensor<1x289x305x1xi48> return } @@ -349,7 +378,10 @@ func.func @resize_nearest_fp32(%input: tensor<1x50x48x1xf32>) -> () { // CHECK: %[[EXTRACT:.+]] = tensor.extract %arg0[%[[IDX0]], %[[IDY]], %[[IDX]], %[[IDX3]]] // CHECK: linalg.yield %[[EXTRACT]] - %output = "tosa.resize"(%input) {mode = "NEAREST_NEIGHBOR", scale = array<i64: 64, 2, 64, 2>, offset = array<i64: -31, -31>, border = array<i64: 31, 31>} : (tensor<1x50x48x1xf32>) -> tensor<1x1600x1536x1xf32> + %scale = tosa.const_shape { value = dense<[64, 2, 64, 2]> : tensor<4xindex> } : () -> !tosa.shape<4> + %offset = tosa.const_shape { value = dense<[-31, -31]> : tensor<2xindex> } : () -> !tosa.shape<2> + %border = tosa.const_shape { value = dense<[31, 31]> : tensor<2xindex> } : () -> !tosa.shape<2> + %output = tosa.resize %input, %scale, %offset, %border {mode = "NEAREST_NEIGHBOR"} : (tensor<1x50x48x1xf32>, !tosa.shape<4>, !tosa.shape<2>, !tosa.shape<2>) -> tensor<1x1600x1536x1xf32> return } @@ -357,7 +389,7 @@ func.func @resize_nearest_fp32(%input: tensor<1x50x48x1xf32>) -> () { // CHECK-LABEL: @resize_bilinear_fp func.func @resize_bilinear_fp(%input: tensor<1x23x24x1xf32>) -> () { - // CHECK: %[[INIT:.+]] = tensor.empty() : tensor<1x92x96x1xf32> + // CHECK: %[[INIT:.+]] = tensor.empty() : tensor<1x89x93x1xf32> // CHECK: %[[GENERIC:.+]] = linalg.generic // CHECK: %[[IDX_0:.+]] = linalg.index 0 // CHECK: %[[IDX_1:.+]] = linalg.index 1 @@ -441,7 +473,10 @@ func.func @resize_bilinear_fp(%input: tensor<1x23x24x1xf32>) -> () { // CHECK: linalg.yield %[[RESULT]] // Round by bilinear interpolation - %output = "tosa.resize"(%input) {mode = "BILINEAR", scale = array<i64: 4, 1, 4, 1>, offset = array<i64: 0, 0>, border = array<i64: 0, 0>} : (tensor<1x23x24x1xf32>) -> tensor<1x92x96x1xf32> + %scale = tosa.const_shape { value = dense<[4, 1, 4, 1]> : tensor<4xindex> } : () -> !tosa.shape<4> + %offset = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> + %border = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> + %output = tosa.resize %input, %scale, %offset, %border {mode = "BILINEAR"} : (tensor<1x23x24x1xf32>, !tosa.shape<4>, !tosa.shape<2>, !tosa.shape<2>) -> tensor<1x89x93x1xf32> return } @@ -455,7 +490,10 @@ func.func @resize_dyn(%input: tensor<?x2x2x1xi8>) -> () { // CHECK: %[[C0:.+]] = arith.constant 0 // CHECK: %[[BATCH:.+]] = tensor.dim %arg0, %[[C0]] // CHECK: %[[INIT:.+]] = tensor.empty(%[[BATCH]]) : tensor<?x4x4x1xi32> // CHECK: %[[GENERIC:.+]] = linalg.generic - %output = "tosa.resize"(%input) { scale = array<i64: 4, 2, 4, 2>, offset = array<i64: -1, -1>, border = array<i64: 1, 1>, mode = "BILINEAR" } : (tensor<?x2x2x1xi8>) -> (tensor<?x4x4x1xi32>) + %scale = tosa.const_shape { value = dense<[4, 2, 4, 2]> : tensor<4xindex> } : () -> !tosa.shape<4> + %offset = tosa.const_shape { value = dense<[-1, -1]> : tensor<2xindex> } : () -> !tosa.shape<2> + %border = tosa.const_shape { value = dense<[1, 1]> : tensor<2xindex> } : () -> !tosa.shape<2> + %output = tosa.resize %input, %scale, %offset, %border { mode = "BILINEAR" } : (tensor<?x2x2x1xi8>, !tosa.shape<4>, !tosa.shape<2>, !tosa.shape<2>) -> (tensor<?x4x4x1xi32>) return } @@ -463,14 +501,17 @@
func.func @resize_dyn(%input: tensor<?x2x2x1xi8>) -> () { // CHECK-LABEL: @resize_bilinear_int48 func.func @resize_bilinear_int48(%arg0: tensor<1x19x19x1xi16>) { - %0 = "tosa.resize"(%arg0) {mode = "BILINEAR", scale = array<i64: 16, 1, 16, 1>, offset = array<i64: 0, 0>, border = array<i64: 0, 0>} : (tensor<1x19x19x1xi16>) -> tensor<1x289x289x1xi48> + %scale = tosa.const_shape { value = dense<[16, 1, 16, 1]> : tensor<4xindex> } : () -> !tosa.shape<4> + %offset = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> + %border = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> + %0 = tosa.resize %arg0, %scale, %offset, %border {mode = "BILINEAR"} : (tensor<1x19x19x1xi16>, !tosa.shape<4>, !tosa.shape<2>, !tosa.shape<2>) -> tensor<1x289x289x1xi48> return } // ----- // CHECK-LABEL: skip_interpolate_bilinear_i8 -func.func @skip_interpolate_bilinear_i8(%arg0 : tensor<3x1x2x7xi8>) -> tensor<3x1x5x7xi32> { +func.func @skip_interpolate_bilinear_i8(%arg0 : tensor<3x1x2x7xi8>) -> tensor<3x1x4x7xi32> { // CHECK: %[[GENERIC:.+]] = linalg.generic // CHECK: %[[BATCH:.+]] = linalg.index 0 // CHECK: %[[CHANNEL:.+]] = linalg.index 3 @@ -486,14 +527,17 @@ func.func @skip_interpolate_bilinear_i8(%arg0 : tensor<3x1x2x7xi8>) -> tensor<3x // CHECK: %[[ADD:.+]] = arith.addi %[[MUL0]], %[[MUL1]] // CHECK: %[[RES:.+]] = arith.muli %[[ADD]], %[[C2]] // CHECK: linalg.yield %[[RES]] - %resize = "tosa.resize"(%arg0) {mode = "BILINEAR", scale = array<i64: 2, 1, 3, 1>, offset = array<i64: 0, 0>, border = array<i64: 0, 0>} : (tensor<3x1x2x7xi8>) -> tensor<3x1x5x7xi32> + %scale = tosa.const_shape { value = dense<[2, 1, 3, 1]> : tensor<4xindex> } : () -> !tosa.shape<4> + %offset = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> + %border = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> + %resize = tosa.resize %arg0, %scale, %offset, %border {mode = "BILINEAR"} : (tensor<3x1x2x7xi8>, !tosa.shape<4>, !tosa.shape<2>, !tosa.shape<2>) -> tensor<3x1x4x7xi32> // CHECK: return %[[GENERIC]] - return %resize : tensor<3x1x5x7xi32> + return %resize : tensor<3x1x4x7xi32> } // CHECK-LABEL: skip_interpolate_bilinear_f32 -func.func @skip_interpolate_bilinear_f32(%arg0 : tensor<3x1x2x7xf32>) -> tensor<3x1x5x7xf32> { +func.func @skip_interpolate_bilinear_f32(%arg0 : tensor<3x1x2x7xf32>) -> tensor<3x1x4x7xf32> { // CHECK: %[[GENERIC:.+]] = linalg.generic // CHECK: %[[BATCH:.+]] = linalg.index 0 : index // CHECK: %[[CHANNEL:.+]] = linalg.index 3 : index @@ -505,8 +549,11 @@ func.func @skip_interpolate_bilinear_f32(%arg0 : tensor<3x1x2x7xf32>) -> tensor< // CHECK: %[[MUL1:.+]] = arith.mulf %[[EXTRACT1]], %[[DX]] // CHECK: %[[ADD:.+]] = arith.addf %[[MUL0]], %[[MUL1]] // CHECK: linalg.yield %[[ADD]] - %resize = "tosa.resize"(%arg0) {mode = "BILINEAR", scale = array<i64: 2, 1, 3, 1>, offset = array<i64: 0, 0>, border = array<i64: 0, 0>} : (tensor<3x1x2x7xf32>) -> tensor<3x1x5x7xf32> + %scale = tosa.const_shape { value = dense<[2, 1, 3, 1]> : tensor<4xindex> } : () -> !tosa.shape<4> + %offset = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> + %border = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> + %resize = tosa.resize %arg0, %scale, %offset, %border {mode = "BILINEAR"} : (tensor<3x1x2x7xf32>, !tosa.shape<4>, !tosa.shape<2>, !tosa.shape<2>) -> tensor<3x1x4x7xf32> // CHECK: return %[[GENERIC]] - return %resize : tensor<3x1x5x7xf32> + return %resize : tensor<3x1x4x7xf32> }
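// The same mechanical rewrite is applied to every tosa.resize test in this patch; an
// illustrative before/after sketch (names are placeholders, not from the patch):
//
//   before: %r = "tosa.resize"(%in) {mode = "BILINEAR", scale = array<i64: 4, 2, 4, 2>,
//                offset = array<i64: -1, -1>, border = array<i64: 1, 1>}
//                : (tensor<1x32x32x8xf32>) -> tensor<1x64x64x8xf32>
//   after:  %scale  = tosa.const_shape { value = dense<[4, 2, 4, 2]> : tensor<4xindex> } : () -> !tosa.shape<4>
//           %offset = tosa.const_shape { value = dense<[-1, -1]> : tensor<2xindex> } : () -> !tosa.shape<2>
//           %border = tosa.const_shape { value = dense<[1, 1]> : tensor<2xindex> } : () -> !tosa.shape<2>
//           %r = tosa.resize %in, %scale, %offset, %border {mode = "BILINEAR"}
//                : (tensor<1x32x32x8xf32>, !tosa.shape<4>, !tosa.shape<2>, !tosa.shape<2>) -> tensor<1x64x64x8xf32>
//
// The attribute values move verbatim into tosa.const_shape operands; only mode remains an attribute.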
diff --git a/mlir/test/Dialect/EmitC/invalid_ops.mlir b/mlir/test/Dialect/EmitC/invalid_ops.mlir index a0d8d7f59de11..13bd96f6d9fb4 100644 --- a/mlir/test/Dialect/EmitC/invalid_ops.mlir +++ b/mlir/test/Dialect/EmitC/invalid_ops.mlir @@ -566,3 +566,59 @@ func.func @emitc_switch() { } return } + +// ----- + +func.func @test_verbatim(%arg0 : !emitc.ptr<i32>, %arg1 : i32) { + // expected-error @+1 {{'emitc.verbatim' op requires operands for each placeholder in the format string}} + emitc.verbatim "" args %arg0, %arg1 : !emitc.ptr<i32>, i32 + return +} + +// ----- + +func.func @test_verbatim(%arg0 : !emitc.ptr<i32>, %arg1 : i32) { + // expected-error @+1 {{'emitc.verbatim' op expected '}' after unescaped '{' at end of string}} + emitc.verbatim "{} + {} {" args %arg0, %arg1 : !emitc.ptr<i32>, i32 + return +} + +// ----- + +func.func @test_verbatim(%arg0 : !emitc.ptr<i32>, %arg1 : i32) { + // expected-error @+1 {{'emitc.verbatim' op requires operands for each placeholder in the format string}} + emitc.verbatim "abc" args %arg0, %arg1 : !emitc.ptr<i32>, i32 + return +} + +// ----- + +func.func @test_verbatim(%arg0 : !emitc.ptr<i32>, %arg1 : i32) { + // expected-error @+1 {{'emitc.verbatim' op requires operands for each placeholder in the format string}} + emitc.verbatim "{}" args %arg0, %arg1 : !emitc.ptr<i32>, i32 + return +} + +// ----- + +func.func @test_verbatim(%arg0 : !emitc.ptr<i32>, %arg1 : i32) { + // expected-error @+1 {{'emitc.verbatim' op requires operands for each placeholder in the format string}} + emitc.verbatim "{} {} {}" args %arg0, %arg1 : !emitc.ptr<i32>, i32 + return +} + +// ----- + +func.func @test_verbatim(%arg0 : !emitc.ptr<i32>, %arg1 : i32) { + // expected-error @+1 {{'emitc.verbatim' op expected '}' after unescaped '{'}} + emitc.verbatim "{ " args %arg0, %arg1 : !emitc.ptr<i32>, i32 + return +} + +// ----- + +func.func @test_verbatim(%arg0 : !emitc.ptr<i32>, %arg1 : i32) { + // expected-error @+1 {{'emitc.verbatim' op expected '}' after unescaped '{'}} + emitc.verbatim "{a} " args %arg0, %arg1 : !emitc.ptr<i32>, i32 + return +} diff --git a/mlir/test/Dialect/EmitC/ops.mlir b/mlir/test/Dialect/EmitC/ops.mlir index 7fd0a2d020397..645009bcc3c36 100644 --- a/mlir/test/Dialect/EmitC/ops.mlir +++ b/mlir/test/Dialect/EmitC/ops.mlir @@ -238,6 +238,18 @@ emitc.verbatim "#endif // __cplusplus" emitc.verbatim "typedef int32_t i32;" emitc.verbatim "typedef float f32;" +// The value is not interpreted as format string if there are no operands. +emitc.verbatim "{} { }" + +func.func @test_verbatim(%arg0 : !emitc.ptr<i32>, %arg1 : i32) { + emitc.verbatim "{} + {};" args %arg0, %arg1 : !emitc.ptr<i32>, i32 + + // Check there is no ambiguity whether %a is the argument to the emitc.verbatim op.
+ emitc.verbatim "a" + %a = "emitc.constant"(){value = 42 : i32} : () -> i32 + + return +} emitc.global @uninit : i32 emitc.global @myglobal_int : i32 = 4 diff --git a/mlir/test/Dialect/Linalg/specialize-generic-ops-fail.mlir b/mlir/test/Dialect/Linalg/specialize-generic-ops-fail.mlir index 542a7ed4a198b..357f2c11a7936 100644 --- a/mlir/test/Dialect/Linalg/specialize-generic-ops-fail.mlir +++ b/mlir/test/Dialect/Linalg/specialize-generic-ops-fail.mlir @@ -6,11 +6,26 @@ // CHECK-LABEL: @transpose_and_broadcast // CHECK: linalg.generic func.func @transpose_and_broadcast(%arg0: tensor<7x8xf32>, %arg1: tensor<8x7x9xf32>) -> tensor<8x7x9xf32> { - %0 = linalg.generic - {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} - ins(%arg0 : tensor<7x8xf32>) outs(%arg1 : tensor<8x7x9xf32>) { - ^bb0(%in: f32, %out: f32): - linalg.yield %in : f32 + %res = linalg.generic { + indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"] + } ins(%arg0 : tensor<7x8xf32>) outs(%arg1 : tensor<8x7x9xf32>) { + ^bb0(%in: f32, %out: f32): + linalg.yield %in : f32 } -> tensor<8x7x9xf32> - return %0 : tensor<8x7x9xf32> + return %res : tensor<8x7x9xf32> +} + +// ----- + +#map = affine_map<(d0) -> (d0)> +// CHECK-LABEL: @neither_permutation_nor_broadcast +// CHECK: linalg.generic +func.func @neither_permutation_nor_broadcast(%init : tensor<8xi32>) -> tensor<8xi32> { + %res = linalg.generic { + indexing_maps = [#map], iterator_types = ["parallel"] + } outs(%init: tensor<8xi32>) { + ^bb0(%out: i32): + linalg.yield %out: i32 + } -> tensor<8xi32> + return %res : tensor<8xi32> } diff --git a/mlir/test/Dialect/Math/polynomial-approximation.mlir b/mlir/test/Dialect/Math/polynomial-approximation.mlir index 81d071e6bbba3..bf7c4134af12e 100644 --- a/mlir/test/Dialect/Math/polynomial-approximation.mlir +++ b/mlir/test/Dialect/Math/polynomial-approximation.mlir @@ -81,6 +81,116 @@ func.func @erf_scalar(%arg0: f32) -> f32 { return %0 : f32 } +// CHECK-LABEL: func @erfc_scalar( +// CHECK-SAME: %[[val_arg0:.*]]: f32) -> f32 { +// CHECK-DAG: %[[c127_i32:.*]] = arith.constant 127 : i32 +// CHECK-DAG: %[[c23_i32:.*]] = arith.constant 23 : i32 +// CHECK-DAG: %[[cst:.*]] = arith.constant 1.270000e+02 : f32 +// CHECK-DAG: %[[cst_0:.*]] = arith.constant -1.270000e+02 : f32 +// CHECK-DAG: %[[cst_1:.*]] = arith.constant 8.880000e+01 : f32 +// CHECK-DAG: %[[cst_2:.*]] = arith.constant -8.780000e+01 : f32 +// CHECK-DAG: %[[cst_3:.*]] = arith.constant 0.166666657 : f32 +// CHECK-DAG: %[[cst_4:.*]] = arith.constant 0.0416657962 : f32 +// CHECK-DAG: %[[cst_5:.*]] = arith.constant 0.00833345205 : f32 +// CHECK-DAG: %[[cst_6:.*]] = arith.constant 0.00139819994 : f32 +// CHECK-DAG: %[[cst_7:.*]] = arith.constant 1.98756912E-4 : f32 +// CHECK-DAG: %[[cst_8:.*]] = arith.constant 2.12194442E-4 : f32 +// CHECK-DAG: %[[cst_9:.*]] = arith.constant -0.693359375 : f32 +// CHECK-DAG: %[[cst_10:.*]] = arith.constant 1.44269502 : f32 +// CHECK-DAG: %[[cst_11:.*]] = arith.constant 0.276978403 : f32 +// CHECK-DAG: %[[cst_12:.*]] = arith.constant -0.0927639827 : f32 +// CHECK-DAG: %[[cst_13:.*]] = arith.constant -0.166031361 : f32 +// CHECK-DAG: %[[cst_14:.*]] = arith.constant 0.164055392 : f32 +// CHECK-DAG: %[[cst_15:.*]] = arith.constant -0.0542046614 : f32 +// CHECK-DAG: %[[cst_16:.*]] = arith.constant -8.059920e-03 : f32 +// CHECK-DAG: %[[cst_17:.*]] = arith.constant 0.00863227434 : f32 +// CHECK-DAG: %[[cst_18:.*]] = arith.constant 0.00131355342 : f32 +// CHECK-DAG: %[[cst_19:.*]] = arith.constant 
-0.0012307521 : f32 +// CHECK-DAG: %[[cst_20:.*]] = arith.constant -4.01139259E-4 : f32 +// CHECK-DAG: %[[cst_true:.*]] = arith.constant true +// CHECK-DAG: %[[cst_21:.*]] = arith.constant 0.000000e+00 : f32 +// CHECK-DAG: %[[cst_22:.*]] = arith.constant 1.000000e+00 : f32 +// CHECK-DAG: %[[cst_23:.*]] = arith.constant 5.000000e-01 : f32 +// CHECK-DAG: %[[cst_24:.*]] = arith.constant -4.000000e+00 : f32 +// CHECK-DAG: %[[cst_25:.*]] = arith.constant -2.000000e+00 : f32 +// CHECK-DAG: %[[cst_26:.*]] = arith.constant 2.000000e+00 : f32 +// CHECK-DAG: %[[cst_27:.*]] = arith.constant 0x7F800000 : f32 +// CHECK-DAG: %[[cst_28:.*]] = arith.constant 10.0546875 : f32 +// CHECK: %[[val_2:.*]] = math.absf %[[val_arg0]] : f32 +// CHECK-NEXT: %[[val_3:.*]] = arith.addf %[[val_2]], %[[cst_26]] : f32 +// CHECK-NEXT: %[[val_4:.*]] = arith.divf %[[cst_22]], %[[val_3]] : f32 +// CHECK-NEXT: %[[val_5:.*]] = math.fma %[[cst_24]], %[[val_4]], %[[cst_22]] : f32 +// CHECK-NEXT: %[[val_6:.*]] = arith.addf %[[val_5]], %[[cst_22]] : f32 +// CHECK-NEXT: %[[val_7:.*]] = math.fma %[[val_6]], %[[cst_25]], %[[val_2]] : f32 +// CHECK-NEXT: %[[val_8:.*]] = arith.negf %[[val_2]] : f32 +// CHECK-NEXT: %[[val_9:.*]] = math.fma %[[val_8]], %[[val_5]], %[[val_7]] : f32 +// CHECK-NEXT: %[[val_10:.*]] = math.fma %[[val_4]], %[[val_9]], %[[val_5]] : f32 +// CHECK-NEXT: %[[val_11:.*]] = math.fma %[[cst_20]], %[[val_10]], %[[cst_19]] : f32 +// CHECK-NEXT: %[[val_12:.*]] = math.fma %[[val_11]], %[[val_10]], %[[cst_18]] : f32 +// CHECK-NEXT: %[[val_13:.*]] = math.fma %[[val_12]], %[[val_10]], %[[cst_17]] : f32 +// CHECK-NEXT: %[[val_14:.*]] = math.fma %[[val_13]], %[[val_10]], %[[cst_16]] : f32 +// CHECK-NEXT: %[[val_15:.*]] = math.fma %[[val_14]], %[[val_10]], %[[cst_15]] : f32 +// CHECK-NEXT: %[[val_16:.*]] = math.fma %[[val_15]], %[[val_10]], %[[cst_14]] : f32 +// CHECK-NEXT: %[[val_17:.*]] = math.fma %[[val_16]], %[[val_10]], %[[cst_13]] : f32 +// CHECK-NEXT: %[[val_18:.*]] = math.fma %[[val_17]], %[[val_10]], %[[cst_12]] : f32 +// CHECK-NEXT: %[[val_19:.*]] = math.fma %[[val_18]], %[[val_10]], %[[cst_11]] : f32 +// CHECK-NEXT: %[[val_20:.*]] = math.fma %[[cst_26]], %[[val_2]], %[[cst_22]] : f32 +// CHECK-NEXT: %[[val_21:.*]] = arith.divf %[[cst_22]], %[[val_20]] : f32 +// CHECK-NEXT: %[[val_22:.*]] = math.fma %[[val_19]], %[[val_21]], %[[val_21]] : f32 +// CHECK-NEXT: %[[val_23:.*]] = arith.negf %[[val_2]] : f32 +// CHECK-NEXT: %[[val_24:.*]] = math.fma %[[val_22]], %[[val_23]], %[[cst_23]] : f32 +// CHECK-NEXT: %[[val_25:.*]] = arith.subf %[[val_19]], %[[val_22]] : f32 +// CHECK-NEXT: %[[val_26:.*]] = math.fma %[[val_24]], %[[cst_26]], %[[val_25]] : f32 +// CHECK-NEXT: %[[val_27:.*]] = math.fma %[[val_26]], %[[val_21]], %[[val_22]] : f32 +// CHECK-NEXT: %[[val_28:.*]] = arith.mulf %[[val_2]], %[[val_2]] : f32 +// CHECK-NEXT: %[[val_29:.*]] = arith.negf %[[val_28]] : f32 +// CHECK-NEXT: %[[val_30:.*]] = arith.cmpf uge, %[[val_29]], %[[cst_2]] : f32 +// CHECK-NEXT: %[[val_31:.*]] = arith.select %[[val_30]], %[[val_29]], %[[cst_2]] : f32 +// CHECK-NEXT: %[[val_32:.*]] = arith.cmpf ule, %[[val_31]], %[[cst_1]] : f32 +// CHECK-NEXT: %[[val_33:.*]] = arith.select %[[val_32]], %[[val_31]], %[[cst_1]] : f32 +// CHECK-NEXT: %[[val_34:.*]] = math.fma %[[val_33]], %[[cst_10]], %[[cst_23]] : f32 +// CHECK-NEXT: %[[val_35:.*]] = math.floor %[[val_34]] : f32 +// CHECK-NEXT: %[[val_36:.*]] = arith.cmpf uge, %[[val_35]], %[[cst_0]] : f32 +// CHECK-NEXT: %[[val_37:.*]] = arith.select %[[val_36]], %[[val_35]], %[[cst_0]] : f32 +// CHECK-NEXT: 
%[[val_38:.*]] = arith.cmpf ule, %[[val_37]], %[[cst]] : f32 +// CHECK-NEXT: %[[val_39:.*]] = arith.select %[[val_38]], %[[val_37]], %[[cst]] : f32 +// CHECK-NEXT: %[[val_40:.*]] = math.fma %[[cst_9]], %[[val_39]], %[[val_33]] : f32 +// CHECK-NEXT: %[[val_41:.*]] = math.fma %[[cst_8]], %[[val_39]], %[[val_40]] : f32 +// CHECK-NEXT: %[[val_42:.*]] = math.fma %[[val_41]], %[[cst_7]], %[[cst_6]] : f32 +// CHECK-NEXT: %[[val_43:.*]] = math.fma %[[val_42]], %[[val_41]], %[[cst_5]] : f32 +// CHECK-NEXT: %[[val_44:.*]] = math.fma %[[val_43]], %[[val_41]], %[[cst_4]] : f32 +// CHECK-NEXT: %[[val_45:.*]] = math.fma %[[val_44]], %[[val_41]], %[[cst_3]] : f32 +// CHECK-NEXT: %[[val_46:.*]] = math.fma %[[val_45]], %[[val_41]], %[[cst_23]] : f32 +// CHECK-NEXT: %[[val_47:.*]] = arith.mulf %[[val_41]], %[[val_41]] : f32 +// CHECK-NEXT: %[[val_48:.*]] = math.fma %[[val_46]], %[[val_47]], %[[val_41]] : f32 +// CHECK-NEXT: %[[val_49:.*]] = arith.addf %[[val_48]], %[[cst_22]] : f32 +// CHECK-NEXT: %[[val_50:.*]] = arith.fptosi %[[val_39]] : f32 to i32 +// CHECK-NEXT: %[[val_51:.*]] = arith.addi %[[val_50]], %[[c127_i32]] : i32 +// CHECK-NEXT: %[[val_52:.*]] = arith.shli %[[val_51]], %[[c23_i32]] : i32 +// CHECK-NEXT: %[[val_53:.*]] = arith.bitcast %[[val_52]] : i32 to f32 +// CHECK-NEXT: %[[val_54:.*]] = arith.mulf %[[val_49]], %[[val_53]] : f32 +// CHECK-NEXT: %[[val_55:.*]] = arith.negf %[[val_2]] : f32 +// CHECK-NEXT: %[[val_56:.*]] = math.fma %[[val_55]], %[[val_2]], %[[val_28]] : f32 +// CHECK-NEXT: %[[val_57:.*]] = arith.mulf %[[val_27]], %[[val_54]] : f32 +// CHECK-NEXT: %[[val_58:.*]] = arith.mulf %[[val_57]], %[[val_56]] : f32 +// CHECK-NEXT: %[[val_59:.*]] = math.fma %[[val_27]], %[[val_54]], %[[val_58]] : f32 +// CHECK-NEXT: %[[val_60:.*]] = arith.cmpf olt, %[[val_2]], %[[cst_27]] : f32 +// CHECK-NEXT: %[[val_61:.*]] = arith.xori %[[val_60]], %[[cst_true]] : i1 +// CHECK-NEXT: %[[val_62:.*]] = arith.addf %[[val_arg0]], %[[val_arg0]] : f32 +// CHECK-NEXT: %[[val_63:.*]] = arith.select %[[val_61]], %[[val_62]], %[[val_59]] : f32 +// CHECK-NEXT: %[[val_64:.*]] = arith.cmpf ogt, %[[val_2]], %[[cst_28]] : f32 +// CHECK-NEXT: %[[val_65:.*]] = arith.select %[[val_64]], %[[cst_21]], %[[val_63]] : f32 +// CHECK-NEXT: %[[val_66:.*]] = arith.cmpf olt, %[[val_arg0]], %[[cst_21]] : f32 +// CHECK-NEXT: %[[val_67:.*]] = arith.subf %[[cst_26]], %[[val_65]] : f32 +// CHECK-NEXT: %[[val_68:.*]] = arith.select %[[val_66]], %[[val_67]], %[[val_65]] : f32 +// CHECK-NEXT: return %[[val_68]] : f32 +// CHECK-NEXT: } + +func.func @erfc_scalar(%arg0: f32) -> f32 { + %0 = math.erfc %arg0 : f32 + return %0 : f32 +} + // CHECK-LABEL: func @erf_vector( // CHECK-SAME: %[[arg0:.*]]: vector<8xf32>) -> vector<8xf32> { // CHECK: %[[zero:.*]] = arith.constant dense<0.000000e+00> : vector<8xf32> diff --git a/mlir/test/Dialect/OpenMP/invalid.mlir b/mlir/test/Dialect/OpenMP/invalid.mlir index 02b0af17564d4..d7f468bed3d3d 100644 --- a/mlir/test/Dialect/OpenMP/invalid.mlir +++ b/mlir/test/Dialect/OpenMP/invalid.mlir @@ -2842,3 +2842,20 @@ func.func @missing_workshare(%idx : index) { } return } + +// ----- + // expected-error @below {{op expected terminator to be a DeclareMapperInfoOp}} + omp.declare_mapper @missing_declareMapperInfo : !llvm.struct<"mytype", (array<1024 x i32>)> { + ^bb0(%arg0: !llvm.ptr): + omp.terminator + } + +// ----- +llvm.func @invalid_mapper(%0 : !llvm.ptr) { + %1 = omp.map.info var_ptr(%0 : !llvm.ptr, !llvm.struct<"my_type", (i32)>) mapper(@my_mapper) map_clauses(to) capture(ByRef) -> !llvm.ptr {name = ""} + // 
expected-error @below {{invalid mapper id}} + omp.target_data map_entries(%1 : !llvm.ptr) { + omp.terminator + } + llvm.return +} diff --git a/mlir/test/Dialect/OpenMP/ops.mlir b/mlir/test/Dialect/OpenMP/ops.mlir index aca63600876aa..e318afbebbf0c 100644 --- a/mlir/test/Dialect/OpenMP/ops.mlir +++ b/mlir/test/Dialect/OpenMP/ops.mlir @@ -879,6 +879,15 @@ cleanup { omp.yield } +// CHECK: omp.declare_mapper @my_mapper : !llvm.struct<"my_type", (i32)> +omp.declare_mapper @my_mapper : !llvm.struct<"my_type", (i32)> { +^bb0(%arg: !llvm.ptr): + // CHECK: %[[DECL_MAP_INFO:.*]] = omp.map.info var_ptr(%{{.*}} : !llvm.ptr, !llvm.struct<"my_type", (i32)>) map_clauses(tofrom) capture(ByRef) -> !llvm.ptr {name = ""} + %decl_map_info = omp.map.info var_ptr(%arg : !llvm.ptr, !llvm.struct<"my_type", (i32)>) map_clauses(tofrom) capture(ByRef) -> !llvm.ptr {name = ""} + // CHECK: omp.declare_mapper.info map_entries(%[[DECL_MAP_INFO]] : !llvm.ptr) + omp.declare_mapper.info map_entries(%decl_map_info : !llvm.ptr) +} + // CHECK-LABEL: func @wsloop_reduction func.func @wsloop_reduction(%lb : index, %ub : index, %step : index) { %c1 = arith.constant 1 : i32 @@ -2537,13 +2546,13 @@ func.func @omp_targets_with_map_bounds(%arg0: !llvm.ptr, %arg1: !llvm.ptr) -> () // CHECK: %[[C_12:.*]] = llvm.mlir.constant(2 : index) : i64 // CHECK: %[[C_13:.*]] = llvm.mlir.constant(2 : index) : i64 // CHECK: %[[BOUNDS1:.*]] = omp.map.bounds lower_bound(%[[C_11]] : i64) upper_bound(%[[C_10]] : i64) stride(%[[C_12]] : i64) start_idx(%[[C_13]] : i64) - // CHECK: %[[MAP1:.*]] = omp.map.info var_ptr(%[[ARG1]] : !llvm.ptr, !llvm.array<10 x i32>) map_clauses(exit_release_or_enter_alloc) capture(ByCopy) bounds(%[[BOUNDS1]]) -> !llvm.ptr {name = ""} + // CHECK: %[[MAP1:.*]] = omp.map.info var_ptr(%[[ARG1]] : !llvm.ptr, !llvm.array<10 x i32>) mapper(@my_mapper) map_clauses(exit_release_or_enter_alloc) capture(ByCopy) bounds(%[[BOUNDS1]]) -> !llvm.ptr {name = ""} %6 = llvm.mlir.constant(9 : index) : i64 %7 = llvm.mlir.constant(1 : index) : i64 %8 = llvm.mlir.constant(2 : index) : i64 %9 = llvm.mlir.constant(2 : index) : i64 %10 = omp.map.bounds lower_bound(%7 : i64) upper_bound(%6 : i64) stride(%8 : i64) start_idx(%9 : i64) - %mapv2 = omp.map.info var_ptr(%arg1 : !llvm.ptr, !llvm.array<10 x i32>) map_clauses(exit_release_or_enter_alloc) capture(ByCopy) bounds(%10) -> !llvm.ptr {name = ""} + %mapv2 = omp.map.info var_ptr(%arg1 : !llvm.ptr, !llvm.array<10 x i32>) mapper(@my_mapper) map_clauses(exit_release_or_enter_alloc) capture(ByCopy) bounds(%10) -> !llvm.ptr {name = ""} // CHECK: omp.target map_entries(%[[MAP0]] -> {{.*}}, %[[MAP1]] -> {{.*}} : !llvm.ptr, !llvm.ptr) omp.target map_entries(%mapv1 -> %arg2, %mapv2 -> %arg3 : !llvm.ptr, !llvm.ptr) { diff --git a/mlir/test/Dialect/SPIRV/IR/control-flow-ops.mlir b/mlir/test/Dialect/SPIRV/IR/control-flow-ops.mlir index 8496448759f0c..1d1e2840a579a 100644 --- a/mlir/test/Dialect/SPIRV/IR/control-flow-ops.mlir +++ b/mlir/test/Dialect/SPIRV/IR/control-flow-ops.mlir @@ -789,3 +789,15 @@ func.func @unreachable() { // expected-error @+1 {{cannot be used in reachable block}} spirv.Unreachable } + +// ----- + +//===----------------------------------------------------------------------===// +// spirv.Kill +//===----------------------------------------------------------------------===// + +// CHECK-LABEL: func @kill +func.func @kill() { + // CHECK: spirv.Kill + spirv.Kill +} diff --git a/mlir/test/Dialect/SPIRV/Transforms/inlining.mlir b/mlir/test/Dialect/SPIRV/Transforms/inlining.mlir index 
bd3c665013136..8eb48a34e61e8 100644 --- a/mlir/test/Dialect/SPIRV/Transforms/inlining.mlir +++ b/mlir/test/Dialect/SPIRV/Transforms/inlining.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -split-input-file -pass-pipeline='builtin.module(spirv.module(inline{default-pipeline=''}))' | FileCheck %s +// RUN: mlir-opt %s --split-input-file --pass-pipeline='builtin.module(spirv.module(inline{default-pipeline=''}))' | FileCheck %s spirv.module Logical GLSL450 { spirv.func @callee() "None" { @@ -246,5 +246,24 @@ spirv.module Logical GLSL450 { } } +// ----- + +spirv.module Logical GLSL450 { + // CHECK-LABEL: @callee + spirv.func @callee() -> () "None" { + // CHECK-NEXT: spirv.Kill + spirv.Kill + } + + // CHECK-LABEL: @do_not_inline_kill + spirv.func @do_not_inline_kill() -> () "None" { + // CHECK-NOT: spirv.Kill + // CHECK-NEXT: spirv.FunctionCall @callee() : () -> () + spirv.FunctionCall @callee() : () -> () + // CHECK-NEXT: spirv.Return + spirv.Return + } +} + // TODO: Add tests for inlining structured control flow into // structured control flow. diff --git a/mlir/test/Dialect/Tosa/canonicalize.mlir b/mlir/test/Dialect/Tosa/canonicalize.mlir index 24d572244a9b0..0e177a076ee7a 100644 --- a/mlir/test/Dialect/Tosa/canonicalize.mlir +++ b/mlir/test/Dialect/Tosa/canonicalize.mlir @@ -721,7 +721,10 @@ func.func @single_bit_reshape() -> tensor<1xi1> { // CHECK-LABEL: @fold_resize_nearest func.func @fold_resize_nearest(%arg0 : tensor<1x15x13x1xi8>) -> tensor<1x15x13x1xi8> { // CHECK: return %arg0 - %resize = tosa.resize %arg0 {mode = "NEAREST_NEIGHBOR" , scale = array<i64: 2, 2, 1, 1>, offset = array<i64: 0, 0>, border = array<i64: 0, 0>} : (tensor<1x15x13x1xi8>) -> tensor<1x15x13x1xi8> + %scale = tosa.const_shape { value = dense<[2, 2, 1, 1]> : tensor<4xindex> } : () -> !tosa.shape<4> + %offset = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> + %border = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> + %resize = tosa.resize %arg0, %scale, %offset, %border {mode = "NEAREST_NEIGHBOR"} : (tensor<1x15x13x1xi8>, !tosa.shape<4>, !tosa.shape<2>, !tosa.shape<2>) -> tensor<1x15x13x1xi8> return %resize : tensor<1x15x13x1xi8> } @@ -730,7 +733,10 @@ func.func @fold_resize_nearest(%arg0 : tensor<1x15x13x1xi8>) -> tensor<1x15x13x1 // CHECK-LABEL: @fold_resize_bilinear func.func @fold_resize_bilinear(%arg0 : tensor<1x15x13x1xi8>) -> tensor<1x15x13x1xi8> { // CHECK: return %arg0 - %resize = tosa.resize %arg0 {mode = "BILINEAR" , scale = array<i64: 2, 2, 1, 1>, offset = array<i64: 0, 0>, border = array<i64: 0, 0>} : (tensor<1x15x13x1xi8>) -> tensor<1x15x13x1xi8> + %scale = tosa.const_shape { value = dense<[2, 2, 1, 1]> : tensor<4xindex> } : () -> !tosa.shape<4> + %offset = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> + %border = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> + %resize = tosa.resize %arg0, %scale, %offset, %border {mode = "BILINEAR"} : (tensor<1x15x13x1xi8>, !tosa.shape<4>, !tosa.shape<2>, !tosa.shape<2>) -> tensor<1x15x13x1xi8> return %resize : tensor<1x15x13x1xi8> } diff --git a/mlir/test/Dialect/Tosa/invalid.mlir b/mlir/test/Dialect/Tosa/invalid.mlir index f35c37a1ef70f..1307da88d1e64 100644 --- a/mlir/test/Dialect/Tosa/invalid.mlir +++ b/mlir/test/Dialect/Tosa/invalid.mlir @@ -1113,3 +1113,88 @@ func.func @test_mul_non_broadcast(%arg0: tensor<13x21x2xf32>, %arg1: tensor<3x1x %0 = tosa.mul %arg0, %arg1, %shift : (tensor<13x21x2xf32>, tensor<3x1x3xf32>, tensor<1xi8>) -> tensor<13x21x3xf32> return %0 : tensor<13x21x3xf32> } + +// ----- +// CHECK-LABEL: test_resize_invalid_scale_values
+func.func @test_resize_invalid_scale_values(%arg0: tensor<1x8x8x8xf32>) -> tensor<?x?x?x?xf32> { + %scale = tosa.const_shape { value = dense<[2, 0, -1, 2]> : tensor<4xindex> } : () -> !tosa.shape<4> + %offset = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> + %border = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> + // expected-error@+1 {{'tosa.resize' op expect all scale values to be > 0, got 2, 0, -1, 2}} + %1 = tosa.resize %arg0, %scale, %offset, %border { mode = "BILINEAR" } : (tensor<1x8x8x8xf32>, !tosa.shape<4>, !tosa.shape<2>, !tosa.shape<2>) -> tensor<?x?x?x?xf32> + return %1 : tensor<?x?x?x?xf32> +} + +// ----- + +// CHECK-LABEL: test_resize_invalid_wholly_divisible_height +func.func @test_resize_invalid_wholly_divisible_height(%arg0: tensor<1x8x8x8xf32>) -> tensor<1x8x8x8xf32> { + %scale = tosa.const_shape { value = dense<[1, 3, 1, 1]> : tensor<4xindex> } : () -> !tosa.shape<4> + %offset = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> + %border = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> + // expected-error@+1 {{'tosa.resize' op expected (input_height - 1) * scale_y_n - offset_y + border_y to be wholly divisible by scale_y_d, got ((8 - 1) * 1 - 0 + 0) / 3}} + %1 = tosa.resize %arg0, %scale, %offset, %border { mode = "BILINEAR" } : (tensor<1x8x8x8xf32>, !tosa.shape<4>, !tosa.shape<2>, !tosa.shape<2>) -> tensor<1x8x8x8xf32> + return %1 : tensor<1x8x8x8xf32> +} + +// ----- + +// CHECK-LABEL: test_resize_invalid_output_height +func.func @test_resize_invalid_output_height(%arg0: tensor<1x8x8x8xf32>) -> tensor<1x9x8x8xf32> { + %scale = tosa.const_shape { value = dense<[2, 1, 1, 1]> : tensor<4xindex> } : () -> !tosa.shape<4> + %offset = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> + %border = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> + // expected-error@+1 {{'tosa.resize' op calculated output height did not match expected: calculated=15, expected=9}} + %1 = tosa.resize %arg0, %scale, %offset, %border { mode = "BILINEAR" } : (tensor<1x8x8x8xf32>, !tosa.shape<4>, !tosa.shape<2>, !tosa.shape<2>) -> tensor<1x9x8x8xf32> + return %1 : tensor<1x9x8x8xf32> +} + +// -----
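// Worked examples for the two checks above, both on an 8x8 input: the divisibility rule requires
// (8 - 1) * 1 - 0 + 0 = 7 to be wholly divisible by scale_y_d = 3, and 7 mod 3 != 0, hence the first
// error; the size rule computes ((8 - 1) * 2 - 0 + 0) / 1 + 1 = 15, which disagrees with the declared
// output height of 9, hence "calculated=15, expected=9".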
+ +// CHECK-LABEL: test_resize_invalid_wholly_divisible_width +func.func @test_resize_invalid_wholly_divisible_width(%arg0: tensor<1x8x8x8xf32>) -> tensor<1x8x8x8xf32> { + %scale = tosa.const_shape { value = dense<[1, 1, 1, 3]> : tensor<4xindex> } : () -> !tosa.shape<4> + %offset = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> + %border = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> + // expected-error@+1 {{'tosa.resize' op expected (input_width - 1) * scale_x_n - offset_x + border_x to be wholly divisible by scale_x_d, got ((8 - 1) * 1 - 0 + 0) / 3}} + %1 = tosa.resize %arg0, %scale, %offset, %border { mode = "BILINEAR" } : (tensor<1x8x8x8xf32>, !tosa.shape<4>, !tosa.shape<2>, !tosa.shape<2>) -> tensor<1x8x8x8xf32> + return %1 : tensor<1x8x8x8xf32> +} + +// ----- + +// CHECK-LABEL: test_resize_invalid_output_width +func.func @test_resize_invalid_output_width(%arg0: tensor<1x8x8x8xf32>) -> tensor<1x8x9x8xf32> { + %scale = tosa.const_shape { value = dense<[1, 1, 2, 1]> : tensor<4xindex> } : () -> !tosa.shape<4> + %offset = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> + %border = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> + // expected-error@+1 {{'tosa.resize' op calculated output width did not match expected: calculated=15, expected=9}} + %1 = tosa.resize %arg0, %scale, %offset, %border { mode = "BILINEAR" } : (tensor<1x8x8x8xf32>, !tosa.shape<4>, !tosa.shape<2>, !tosa.shape<2>) -> tensor<1x8x9x8xf32> + return %1 : tensor<1x8x9x8xf32> +} + +// ----- + +// CHECK-LABEL: broadcast_resize_nearest_f32 +func.func @broadcast_resize_nearest_f32(%arg0 : tensor<3x1x1x7xf32>) -> tensor<3x1x5x7xf32> { + %scale = tosa.const_shape { value = dense<[2, 1, 3, 1]> : tensor<4xindex> } : () -> !tosa.shape<4> + %offset = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> + %border = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> + // expected-error@+1 {{'tosa.resize' op calculated output width did not match expected: calculated=1, expected=5}} + %resize = tosa.resize %arg0, %scale, %offset, %border {mode = "NEAREST_NEIGHBOR"} : (tensor<3x1x1x7xf32>, !tosa.shape<4>, !tosa.shape<2>, !tosa.shape<2>) -> tensor<3x1x5x7xf32> + + return %resize : tensor<3x1x5x7xf32> +} + +// ----- + +// CHECK-LABEL: broadcast_resize_bilinear_i8 +func.func @broadcast_resize_bilinear_i8(%arg0 : tensor<3x1x1x7xi8>) -> tensor<3x4x5x7xi32> { + %scale = tosa.const_shape { value = dense<[2, 1, 3, 1]> : tensor<4xindex> } : () -> !tosa.shape<4> + %offset = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> + %border = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> + // expected-error@+1 {{'tosa.resize' op calculated output height did not match expected: calculated=1, expected=4}} + %resize = tosa.resize %arg0, %scale, %offset, %border {mode = "BILINEAR"} : (tensor<3x1x1x7xi8>, !tosa.shape<4>, !tosa.shape<2>, !tosa.shape<2>) -> tensor<3x4x5x7xi32> + + return %resize : tensor<3x4x5x7xi32> +} diff --git a/mlir/test/Dialect/Tosa/level_check.mlir b/mlir/test/Dialect/Tosa/level_check.mlir index a7f76f2d0fa64..6f49195d30e97 100644 --- a/mlir/test/Dialect/Tosa/level_check.mlir +++ b/mlir/test/Dialect/Tosa/level_check.mlir @@ -676,20 +676,26 @@ func.func @test_transpose_conv2d_stride_x(%arg0: tensor<1x32x32x8xf32>, %arg1: t // ----- -func.func @test_resize_scale_y(%arg0: tensor<1x32x32x8xf32>) -> tensor<1x64x64x8xf32> { +func.func @test_resize_scale_y(%arg0: tensor<1x32x32x8xf32>) -> tensor<1x7970x64x8xf32> { + %scale = tosa.const_shape { value = dense<[257, 1, 4, 2]> : tensor<4xindex> } : () -> !tosa.shape<4> + %offset = tosa.const_shape { value = dense<[-1, -1]> : tensor<2xindex> } : () -> !tosa.shape<2> + %border = tosa.const_shape { value = dense<[1, 1]> : tensor<2xindex> } : () -> !tosa.shape<2> // expected-error@+1 {{'tosa.resize' op failed level check: scale_y_n/scale_y_d <= MAX_SCALE}} - %1 = "tosa.resize"(%arg0) { scale = array<i64: 257, 1, 4, 2>, offset = array<i64: -1, -1>, border = array<i64: 1, 1>, mode = "BILINEAR"} : - (tensor<1x32x32x8xf32>) -> tensor<1x64x64x8xf32> - return %1 : tensor<1x64x64x8xf32> + %1 = tosa.resize %arg0, %scale, %offset, %border {mode = "BILINEAR"} : + (tensor<1x32x32x8xf32>, !tosa.shape<4>, !tosa.shape<2>, !tosa.shape<2>) -> tensor<1x7970x64x8xf32> + return %1 : tensor<1x7970x64x8xf32> } // -----
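// Level-check arithmetic (assuming the 8K level's MAX_SCALE of 256): scale_y_n / scale_y_d = 257 / 1 > 256,
// so the level check fires; the declared 7970 is nevertheless the shape-consistent output height,
// ((32 - 1) * 257 - (-1) + 1) / 1 + 1 = 7970.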
-func.func @test_resize_scale_x(%arg0: tensor<1x32x32x8xf32>) -> tensor<1x64x64x8xf32> { +func.func @test_resize_scale_x(%arg0: tensor<1x32x32x8xf32>) -> tensor<1x64x7970x8xf32> { + %scale = tosa.const_shape { value = dense<[4, 2, 257, 1]> : tensor<4xindex> } : () -> !tosa.shape<4> + %offset = tosa.const_shape { value = dense<[-1, -1]> : tensor<2xindex> } : () -> !tosa.shape<2> + %border = tosa.const_shape { value = dense<[1, 1]> : tensor<2xindex> } : () -> !tosa.shape<2> // expected-error@+1 {{'tosa.resize' op failed level check: scale_x_n/scale_x_d <= MAX_SCALE}} - %1 = "tosa.resize"(%arg0) { scale = array<i64: 4, 2, 257, 1>, offset = array<i64: -1, -1>, border = array<i64: 1, 1>, mode = "BILINEAR"} : - (tensor<1x32x32x8xf32>) -> tensor<1x64x64x8xf32> - return %1 : tensor<1x64x64x8xf32> + %1 = tosa.resize %arg0, %scale, %offset, %border {mode = "BILINEAR"} : + (tensor<1x32x32x8xf32>, !tosa.shape<4>, !tosa.shape<2>, !tosa.shape<2>) -> tensor<1x64x7970x8xf32> + return %1 : tensor<1x64x7970x8xf32> } // ----- diff --git a/mlir/test/Dialect/Tosa/ops.mlir b/mlir/test/Dialect/Tosa/ops.mlir index faac8b7c1ff93..4a7e48ea702bf 100644 --- a/mlir/test/Dialect/Tosa/ops.mlir +++ b/mlir/test/Dialect/Tosa/ops.mlir @@ -670,7 +670,10 @@ func.func @test_scatter(%arg0: tensor<13x21x3xf32>, %arg1: tensor<13x26xi32>, %a // ----- // CHECK-LABEL: resize func.func @test_resize(%arg0: tensor<1x32x32x8xf32>) -> tensor<1x64x64x8xf32> { - %1 = tosa.resize %arg0 { scale = array<i64: 4, 2, 4, 2>, offset = array<i64: -1, -1>, border = array<i64: 1, 1>, mode = "BILINEAR" } : (tensor<1x32x32x8xf32>) -> tensor<1x64x64x8xf32> + %scale = tosa.const_shape { value = dense<[4, 2, 4, 2]> : tensor<4xindex> } : () -> !tosa.shape<4> + %offset = tosa.const_shape { value = dense<[-1, -1]> : tensor<2xindex> } : () -> !tosa.shape<2> + %border = tosa.const_shape { value = dense<[1, 1]> : tensor<2xindex> } : () -> !tosa.shape<2> + %1 = tosa.resize %arg0, %scale, %offset, %border { mode = "BILINEAR" } : (tensor<1x32x32x8xf32>, !tosa.shape<4>, !tosa.shape<2>, !tosa.shape<2>) -> tensor<1x64x64x8xf32> return %1 : tensor<1x64x64x8xf32> } diff --git a/mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir b/mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir index da0637ae0fc9c..fa590ab495ada 100644 --- a/mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir +++ b/mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir @@ -995,8 +995,11 @@ func.func @transpose_conv2d_strided(%arg0: tensor<1x5x7x1xf32>, %arg1: tensor<1x // CHECK-LABEL: @resize_int_horizontal func.func @resize_int_horizontal(%arg0: tensor<1x15x13x1xi8>) { + %scale = tosa.const_shape { value = dense<[11, 7, 89, 6]> : tensor<4xindex> } : () -> !tosa.shape<4> + %offset = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> + %border = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> // CHECK: -> tensor<1x23x179x1xi8> - %0 = tosa.resize %arg0 {mode = "NEAREST_NEIGHBOR", scale = array<i64: 11, 7, 89, 6>, offset = array<i64: 0, 0>, border = array<i64: 0, 0>} : (tensor<1x15x13x1xi8>) -> tensor<?x?x?x?xi8> + %0 = tosa.resize %arg0, %scale, %offset, %border {mode = "NEAREST_NEIGHBOR"} : (tensor<1x15x13x1xi8>, !tosa.shape<4>, !tosa.shape<2>, !tosa.shape<2>) -> tensor<?x?x?x?xi8> return }
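// Shape-inference arithmetic for @resize_int_horizontal: height = ((15 - 1) * 11 - 0 + 0) / 7 + 1 = 23 and
// width = ((13 - 1) * 89 - 0 + 0) / 6 + 1 = 179, so the dynamic tensor<?x?x?x?xi8> result is refined to
// tensor<1x23x179x1xi8>.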
@@ -1004,8 +1007,11 @@ func.func @resize_int_horizontal(%arg0: tensor<1x15x13x1xi8>) { // CHECK-LABEL: @resize_int_vertical func.func @resize_int_vertical(%arg0: tensor<1x49x42x1xi16>) { + %scale = tosa.const_shape { value = dense<[37, 16, 219, 41]> : tensor<4xindex> } : () -> !tosa.shape<4> + %offset = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> + %border = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> // CHECK: -> tensor<1x112x220x1xi16> - %0 = tosa.resize %arg0 {mode = "NEAREST_NEIGHBOR", scale = array<i64: 37, 16, 219, 41>, offset = array<i64: 0, 0>, border = array<i64: 0, 0>} : (tensor<1x49x42x1xi16>) -> tensor<?x?x?x?xi16> + %0 = tosa.resize %arg0, %scale, %offset, %border {mode = "NEAREST_NEIGHBOR"} : (tensor<1x49x42x1xi16>, !tosa.shape<4>, !tosa.shape<2>, !tosa.shape<2>) -> tensor<?x?x?x?xi16> return } @@ -1013,8 +1019,11 @@ func.func @resize_int_vertical(%arg0: tensor<1x49x42x1xi16>) { // CHECK-LABEL: @resize_int_power_of_two_upscale func.func @resize_int_power_of_two_upscale(%arg0: tensor<1x23x19x1xi8>) { + %scale = tosa.const_shape { value = dense<[16, 1, 16, 1]> : tensor<4xindex> } : () -> !tosa.shape<4> + %offset = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> + %border = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> // CHECK: -> tensor<1x353x289x1xi32> - %0 = tosa.resize %arg0 {mode = "BILINEAR", scale = array<i64: 16, 1, 16, 1>, offset = array<i64: 0, 0>, border = array<i64: 0, 0>} : (tensor<1x23x19x1xi8>) -> tensor<?x?x?x?xi32> + %0 = tosa.resize %arg0, %scale, %offset, %border {mode = "BILINEAR"} : (tensor<1x23x19x1xi8>, !tosa.shape<4>, !tosa.shape<2>, !tosa.shape<2>) -> tensor<?x?x?x?xi32> return } @@ -1022,24 +1031,33 @@ func.func @resize_int_power_of_two_upscale(%arg0: tensor<1x23x19x1xi8>) { // CHECK-LABEL: @resize_int_power_of_two_upscale_offsetted func.func @resize_int_power_of_two_upscale_offsetted(%arg0: tensor<1x41x26x1xi16>) { + %scale = tosa.const_shape { value = dense<[16, 2, 16, 2]> : tensor<4xindex> } : () -> !tosa.shape<4> + %offset = tosa.const_shape { value = dense<[-7, -7]> : tensor<2xindex> } : () -> !tosa.shape<2> + %border = tosa.const_shape { value = dense<[7, 7]> : tensor<2xindex> } : () -> !tosa.shape<2> // CHECK: -> tensor<1x328x208x1xi48> - %0 = tosa.resize %arg0 {mode = "BILINEAR", scale = array<i64: 16, 2, 16, 2>, offset = array<i64: -7, -7>, border = array<i64: 7, 7>} : (tensor<1x41x26x1xi16>) -> tensor<?x?x?x?xi48> + %0 = tosa.resize %arg0, %scale, %offset, %border {mode = "BILINEAR"} : (tensor<1x41x26x1xi16>, !tosa.shape<4>, !tosa.shape<2>, !tosa.shape<2>) -> tensor<?x?x?x?xi48> return } // ----- // CHECK-LABEL: @resize_fp_horizontal func.func @resize_fp_horizontal(%arg0: tensor<1x50x48x1xf32>) { + %scale = tosa.const_shape { value = dense<[15, 7, 84, 47]> : tensor<4xindex> } : () -> !tosa.shape<4> + %offset = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> + %border = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> // CHECK: -> tensor<1x106x85x1xf32> - %0 = tosa.resize %arg0 {mode = "BILINEAR", scale = array<i64: 15, 7, 84, 47>, offset = array<i64: 0, 0>, border = array<i64: 0, 0>} : (tensor<1x50x48x1xf32>) -> tensor<?x?x?x?xf32> + %0 = tosa.resize %arg0, %scale, %offset, %border {mode = "BILINEAR"} : (tensor<1x50x48x1xf32>, !tosa.shape<4>, !tosa.shape<2>, !tosa.shape<2>) -> tensor<?x?x?x?xf32> return } // ----- // CHECK-LABEL: @resize_fp_vertical func.func @resize_fp_vertical(%arg0: tensor<1x50x48x1xf32>) { + %scale = tosa.const_shape { value = dense<[127, 49, 12, 47]> : tensor<4xindex> } : () -> !tosa.shape<4> + %offset = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> + %border = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> // CHECK: -> tensor<1x128x13x1xf32> - %0 = tosa.resize %arg0 {mode = "NEAREST_NEIGHBOR", scale = array<i64: 127, 49, 12, 47>, offset = array<i64: 0, 0>, border = array<i64: 0, 0>} : (tensor<1x50x48x1xf32>) -> tensor<?x?x?x?xf32> + %0 = tosa.resize %arg0, %scale, %offset, %border {mode = "NEAREST_NEIGHBOR"} : (tensor<1x50x48x1xf32>, !tosa.shape<4>, !tosa.shape<2>, !tosa.shape<2>) -> tensor<?x?x?x?xf32> return } @@ -1047,8 +1065,11 @@ func.func @resize_fp_vertical(%arg0: tensor<1x50x48x1xf32>) { // CHECK-LABEL: @resize_fp_power_of_two_upscale func.func @resize_fp_power_of_two_upscale(%arg0: tensor<1x23x23x1xf32>) { + %scale = tosa.const_shape { value = dense<[4, 1, 4, 1]> : tensor<4xindex> } : () -> !tosa.shape<4> + %offset = tosa.const_shape { value =
dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> + %border = tosa.const_shape { value = dense<0> : tensor<2xindex> } : () -> !tosa.shape<2> // CHECK: -> tensor<1x89x89x1xf32> - %0 = tosa.resize %arg0 {mode = "BILINEAR", scale = array<i64: 4, 1, 4, 1>, offset = array<i64: 0, 0>, border = array<i64: 0, 0>} : (tensor<1x23x23x1xf32>) -> tensor<?x?x?x?xf32> + %0 = tosa.resize %arg0, %scale, %offset, %border {mode = "BILINEAR"} : (tensor<1x23x23x1xf32>, !tosa.shape<4>, !tosa.shape<2>, !tosa.shape<2>) -> tensor<?x?x?x?xf32> return } @@ -1056,8 +1077,11 @@ func.func @resize_fp_power_of_two_upscale(%arg0: tensor<1x23x23x1xf32>) { // CHECK-LABEL: @resize_fp_power_of_two_upscale_offsetted func.func @resize_fp_power_of_two_upscale_offsetted(%arg0: tensor<1x50x48x1xf32>) { + %scale = tosa.const_shape { value = dense<[64, 2, 64, 2]> : tensor<4xindex> } : () -> !tosa.shape<4> + %offset = tosa.const_shape { value = dense<[-31, -31]> : tensor<2xindex> } : () -> !tosa.shape<2> + %border = tosa.const_shape { value = dense<[31, 31]> : tensor<2xindex> } : () -> !tosa.shape<2> // CHECK: -> tensor<1x1600x1536x1xf32> - %0 = tosa.resize %arg0 {mode = "NEAREST_NEIGHBOR", scale = array<i64: 64, 2, 64, 2>, offset = array<i64: -31, -31>, border = array<i64: 31, 31>} : (tensor<1x50x48x1xf32>) -> tensor<?x?x?x?xf32> + %0 = tosa.resize %arg0, %scale, %offset, %border {mode = "NEAREST_NEIGHBOR"} : (tensor<1x50x48x1xf32>, !tosa.shape<4>, !tosa.shape<2>, !tosa.shape<2>) -> tensor<?x?x?x?xf32> return }
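// In the Vector tests below, masks that each test previously materialized with vector.create_mask are
// now taken as function arguments, e.g. (sketch, not part of the patch):
//
//   before: %mask = vector.create_mask %dim, %dim : vector<2x4xi1>
//   after:  func.func @...(%mask : vector<2x4xi1>, %idx : index) ...
//
// which keeps the CHECK lines focused on the permutation-map lowering itself rather than on mask creation.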
diff --git a/mlir/test/Dialect/Vector/vector-transfer-permutation-lowering.mlir b/mlir/test/Dialect/Vector/vector-transfer-permutation-lowering.mlir index 6705905633e0f..dfc79a19e6cc6 100644 --- a/mlir/test/Dialect/Vector/vector-transfer-permutation-lowering.mlir +++ b/mlir/test/Dialect/Vector/vector-transfer-permutation-lowering.mlir @@ -1,5 +1,8 @@ // RUN: mlir-opt %s --transform-interpreter --split-input-file | FileCheck %s +// TODO: Review the usage of `in_bounds` and remove where not affecting the +// generated output. + /// CHECK: #[[$MAP:.*]] = affine_map<(d0, d1, d2, d3) -> (d1, 0, d3)> ///---------------------------------------------------------------------------------------- @@ -106,8 +109,8 @@ func.func @xfer_write_minor_identity_transposed_map_masked( /// (neither a minor identity nor transposed minor identity map) /// OUT 1: vector.broadcast + vector.transfer_write /// (transposed minor identity) -/// OUT 2: vector.transfer_write -> vector.broadcast + vector.transpose + vector.transfer_write -/// (minor identity) +/// OUT 2: vector.transfer_write -> vector.broadcast + vector.transpose +/// + vector.transfer_write (minor identity) ///---------------------------------------------------------------------------------------- // CHECK-LABEL: func.func @xfer_write_non_minor_identity( @@ -233,16 +236,16 @@ func.func @xfer_write_non_minor_identity_masked_scalable( // CHECK-LABEL: func @xfer_write_non_minor_identity_masked_2 // CHECK-SAME: %[[DEST:.*]]: tensor<?x?x?x?xf32> // CHECK-SAME: %[[VEC:.*]]: vector<14x8x16xf32> -// CHECK-SAME: %[[DIM:.*]]: index, %[[IDX:.*]]: index) -> tensor<?x?x?x?xf32> +// CHECK-SAME: %[[MASK:.*]]: vector<14x8x16xi1> +// CHECK-SAME: %[[DIM:.*]]: index // CHECK-NOT: vector.broadcast -// CHECK: vector.mask %0 { vector.transfer_write %[[VEC]], %[[DEST]]{{.*}} : vector<14x8x16xf32>, tensor<?x?x?x?xf32> } : vector<14x8x16xi1> -> tensor<?x?x?x?xf32> +// CHECK: vector.mask %[[MASK]] { vector.transfer_write %[[VEC]], %[[DEST]]{{.*}} : vector<14x8x16xf32>, tensor<?x?x?x?xf32> } : vector<14x8x16xi1> -> tensor<?x?x?x?xf32> func.func @xfer_write_non_minor_identity_masked_2( %dest : tensor<?x?x?x?xf32>, %vec : vector<14x8x16xf32>, - %dim : index, + %mask: vector<14x8x16xi1>, %idx: index) -> tensor<?x?x?x?xf32> { - %mask = vector.create_mask %dim, %dim, %dim : vector<14x8x16xi1> %res = vector.mask %mask { vector.transfer_write %vec, %dest[%idx, %idx, %idx, %idx] { in_bounds = [false, false, true], @@ -259,29 +262,27 @@ func.func @xfer_write_non_minor_identity_masked_2( /// /// IN: vector.transfer_read /// (_transposed_ minor identity permutation map, with 0 or more broadcast dims) -/// OUT: vector.transpose + vector.transfer_write +/// OUT: vector.transfer_read + vector.broadcast + vector.transpose /// (minor identity permutation map with 0 or more leading broadcast dims) ///---------------------------------------------------------------------------------------- /// TODO: Inner broadcast dim - see also the block at the bottom of this file -// CHECK-LABEL: func.func @xfer_read_minor_identity_tranposed_with_mask +// CHECK-LABEL: func.func @xfer_read_minor_identity_transposed_with_mask // CHECK-SAME: %[[MEM:.*]]: memref<?x?xf32>, -// CHECK-SAME: %[[DIM_1:.*]]: index, %[[DIM_2:.*]]: index, %[[IDX:.*]]: index) -> vector<8x4x2xf32> { +// CHECK-SAME: %[[MASK:.*]]: vector<2x4xi1> +// CHECK-SAME: %[[IDX:.*]]: index // CHECK: %[[PASS_THROUGH:.*]] = arith.constant 0.000000e+00 : f32 -// CHECK: %[[MASK:.*]] = vector.create_mask %[[DIM_2]], %[[DIM_1]] : vector<2x4xi1> // CHECK: %[[T_READ:.*]] = vector.transfer_read %[[MEM]]{{\[}}%[[IDX]], %[[IDX]]], %[[PASS_THROUGH]], %[[MASK]] {in_bounds = [true, true]} : memref<?x?xf32>, vector<2x4xf32> // CHECK: %[[BCAST:.*]] = vector.broadcast %[[T_READ]] : vector<2x4xf32> to vector<8x2x4xf32> // CHECK: %[[TRANSPOSE:.*]] = vector.transpose %[[BCAST]], [0, 2, 1] : vector<8x2x4xf32> to vector<8x4x2xf32> // CHECK: return %[[TRANSPOSE]] : vector<8x4x2xf32> -func.func @xfer_read_minor_identity_tranposed_with_mask( +func.func @xfer_read_minor_identity_transposed_with_mask( %mem: memref<?x?xf32>, - %dim_1: index, - %dim_2: index, + %mask: vector<2x4xi1>, %idx: index) -> (vector<8x4x2xf32>) { %pad =
arith.constant 0.000000e+00 : f32 - %mask = vector.create_mask %dim_2, %dim_1 : vector<2x4xi1> %res = vector.transfer_read %mem[%idx, %idx], %pad, %mask { in_bounds = [true, true, true], permutation_map = affine_map<(d0, d1) -> (0, d1, d0)> @@ -290,24 +291,22 @@ func.func @xfer_read_minor_identity_tranposed_with_mask( return %res : vector<8x4x2xf32> } -// CHECK-LABEL: func.func @xfer_read_minor_identity_tranposed_with_mask_scalable( +// CHECK-LABEL: func.func @xfer_read_minor_identity_transposed_with_mask_scalable( // CHECK-SAME: %[[MEM:.*]]: memref<?x?xf32>, -// CHECK-SAME: %[[DIM_1:.*]]: index, %[[DIM_2:.*]]: index, %[[IDX:.*]]: index) -> vector<8x[4]x2xf32> { +// CHECK-SAME: %[[MASK:.*]]: vector<2x[4]xi1> +// CHECK-SAME: %[[IDX:.*]]: index // CHECK: %[[PAD:.*]] = arith.constant 0.000000e+00 : f32 -// CHECK: %[[MASK:.*]] = vector.create_mask %[[DIM_2]], %[[DIM_1]] : vector<2x[4]xi1> // CHECK: %[[T_READ:.*]] = vector.transfer_read %[[MEM]]{{\[}}%[[IDX]], %[[IDX]]], %[[PAD]], %[[MASK]] {in_bounds = [true, true]} : memref<?x?xf32>, vector<2x[4]xf32> // CHECK: %[[BCAST:.*]] = vector.broadcast %[[T_READ]] : vector<2x[4]xf32> to vector<8x2x[4]xf32> // CHECK: %[[TRANSPOSE:.*]] = vector.transpose %[[BCAST]], [0, 2, 1] : vector<8x2x[4]xf32> to vector<8x[4]x2xf32> // CHECK: return %[[TRANSPOSE]] : vector<8x[4]x2xf32> -func.func @xfer_read_minor_identity_tranposed_with_mask_scalable( +func.func @xfer_read_minor_identity_transposed_with_mask_scalable( %mem: memref<?x?xf32>, - %dim_1: index, - %dim_2: index, + %mask: vector<2x[4]xi1>, %idx: index) -> (vector<8x[4]x2xf32>) { %pad = arith.constant 0.000000e+00 : f32 - %mask = vector.create_mask %dim_2, %dim_1 : vector<2x[4]xi1> %res = vector.transfer_read %mem[%idx, %idx], %pad, %mask { in_bounds = [true, true, true], permutation_map = affine_map<(d0, d1) -> (0, d1, d0)> @@ -319,24 +318,26 @@ func.func @xfer_read_minor_identity_tranposed_with_mask_scalable( // Masked version is not supported // CHECK-LABEL: func @xfer_read_minor_identity_transposed_masked( -// CHECK-SAME: %[[DEST:.*]]: tensor<?x?xf32>, -// CHECK-SAME: %[[MASK:.*]]: vector<4x1xi1> +// CHECK-SAME: %[[DEST:.*]]: tensor<?x?xf32>, +// CHECK-SAME: %[[MASK:.*]]: vector<2x4xi1> +// CHECK-SAME: %[[IDX:.*]]: index // CHECK-NOT: vector.transpose -// CHECK: vector.mask %[[MASK]] { vector.transfer_read %[[DEST]]{{.*}}: tensor<?x?xf32>, vector<1x4x4xf32> } : vector<4x1xi1> -> vector<1x4x4xf32> +// CHECK: vector.mask %[[MASK]] { vector.transfer_read %[[DEST]]{{.*}}: tensor<?x?xf32>, vector<8x4x2xf32> } : vector<2x4xi1> -> vector<8x4x2xf32> func.func @xfer_read_minor_identity_transposed_masked( - %dest: tensor<?x?xf32>, - %mask : vector<4x1xi1>, - %idx: index) { + %dest: tensor<?x?xf32>, + %mask: vector<2x4xi1>, + %idx: index) -> (vector<8x4x2xf32>) { %pad = arith.constant 0.000000e+00 : f32 - %3 = vector.mask %mask { + + %res = vector.mask %mask { vector.transfer_read %dest[%idx, %idx], %pad { - permutation_map = affine_map<(d0, d1) -> (d1, 0, d0)> - } : tensor<?x?xf32>, vector<1x4x4xf32> - } : vector<4x1xi1> -> vector<1x4x4xf32> + in_bounds = [true, true, true], + permutation_map = affine_map<(d0, d1) -> (0, d1, d0)> + } : tensor<?x?xf32>, vector<8x4x2xf32> + } : vector<2x4xi1> -> vector<8x4x2xf32> - "test.some_use"(%3) : (vector<1x4x4xf32>) -> () - return + return %res : vector<8x4x2xf32> } // CHECK-LABEL: func.func @xfer_read_minor_identity_transposed_masked_scalable( @@ -346,7 +347,7 @@ func.func @xfer_read_minor_identity_transposed_masked( // CHECK: %[[T_READ:.*]] = vector.mask %[[MASK]] { vector.transfer_read %[[DEST]]{{.*}} : tensor<?x?xf32>, vector<8x[4]x2xf32> } : vector<2x[4]xi1> ->
vector<8x[4]x2xf32> func.func @xfer_read_minor_identity_transposed_masked_scalable( %dest: tensor<?x?xf32>, - %mask : vector<2x[4]xi1>, + %mask: vector<2x[4]xi1>, %idx: index) -> vector<8x[4]x2xf32> { %pad = arith.constant 0.000000e+00 : f32 @@ -388,17 +389,16 @@ func.func @xfer_read_minor_identitiy_bcast_dims_scalable( // CHECK-LABEL: func.func @xfer_read_minor_identitiy_bcast_dims_masked // CHECK-SAME: %[[MEM:.*]]: memref<?x?x?x?xf32>, -// CHECK-SAME: %[[DIM:.*]]: index, +// CHECK-SAME: %[[MASK:.*]]: vector<[4]x3xi1> // CHECK-SAME: %[[IDX:.*]]: index) -> vector<8x[4]x2x3xf32> { // CHECK-NOT: vector.broadcast -// CHECK: %[[MASK:.*]] = vector.mask %0 { vector.transfer_read %[[MEM]]{{.*}} : memref<?x?x?x?xf32>, vector<8x[4]x2x3xf32> } : vector<[4]x3xi1> -> vector<8x[4]x2x3xf32> +// CHECK: vector.mask %[[MASK]] { vector.transfer_read %[[MEM]]{{.*}} : memref<?x?x?x?xf32>, vector<8x[4]x2x3xf32> } : vector<[4]x3xi1> -> vector<8x[4]x2x3xf32> func.func @xfer_read_minor_identitiy_bcast_dims_masked( %mem: memref<?x?x?x?xf32>, - %dim: index, + %mask: vector<[4]x3xi1>, %idx: index) -> vector<8x[4]x2x3xf32> { %pad = arith.constant 0.000000e+00 : f32 - %mask = vector.create_mask %dim, %dim: vector<[4]x3xi1> %res = vector.mask %mask { vector.transfer_read %mem[%idx, %idx, %idx, %idx], %pad { diff --git a/mlir/test/Dialect/X86Vector/cvt-packed-f32-to-bf16.mlir b/mlir/test/Dialect/X86Vector/cvt-packed-f32-to-bf16.mlir new file mode 100644 index 0000000000000..c97c52f01c3b0 --- /dev/null +++ b/mlir/test/Dialect/X86Vector/cvt-packed-f32-to-bf16.mlir @@ -0,0 +1,24 @@ +// REQUIRES: target=x86{{.*}} + +// RUN: mlir-opt %s \ +// RUN: -convert-vector-to-llvm="enable-x86vector" -convert-to-llvm \ +// RUN: -reconcile-unrealized-casts | \ +// RUN: mlir-translate --mlir-to-llvmir | \ +// RUN: llc -mcpu=sapphirerapids | \ +// RUN: FileCheck %s + +func.func @avx512bf16_cvt_packed_f32_to_bf16_256( + %a: vector<8xf32>) -> vector<8xbf16> { + %0 = x86vector.avx512.cvt.packed.f32_to_bf16 %a : vector<8xf32> -> vector<8xbf16> + return %0 : vector<8xbf16> +} +// CHECK-LABEL: avx512bf16_cvt_packed_f32_to_bf16_256: +// CHECK: vcvtneps2bf16{{.*}}%xmm + +func.func @avx512bf16_cvt_packed_f32_to_bf16_512( + %a: vector<16xf32>) -> vector<16xbf16> { + %0 = x86vector.avx512.cvt.packed.f32_to_bf16 %a : vector<16xf32> -> vector<16xbf16> + return %0 : vector<16xbf16> +} +// CHECK-LABEL: avx512bf16_cvt_packed_f32_to_bf16_512: +// CHECK: vcvtneps2bf16{{.*}}%ymm diff --git a/mlir/test/Dialect/X86Vector/legalize-for-llvm.mlir b/mlir/test/Dialect/X86Vector/legalize-for-llvm.mlir index ed9177eaec9ce..59be7dd75b3b0 100644 --- a/mlir/test/Dialect/X86Vector/legalize-for-llvm.mlir +++ b/mlir/test/Dialect/X86Vector/legalize-for-llvm.mlir @@ -70,6 +70,24 @@ func.func @avx512bf16_dot_512(%src: vector<16xf32>, %a: vector<32xbf16>, return %0 : vector<16xf32> } +// CHECK-LABEL: func @avx512bf16_cvt_packed_f32_to_bf16_256 +func.func @avx512bf16_cvt_packed_f32_to_bf16_256( + %a: vector<8xf32>) -> (vector<8xbf16>) +{ + // CHECK: x86vector.avx512.intr.cvtneps2bf16.256 + %0 = x86vector.avx512.cvt.packed.f32_to_bf16 %a : vector<8xf32> -> vector<8xbf16> + return %0 : vector<8xbf16> +} + +// CHECK-LABEL: func @avx512bf16_cvt_packed_f32_to_bf16_512 +func.func @avx512bf16_cvt_packed_f32_to_bf16_512( + %a: vector<16xf32>) -> (vector<16xbf16>) +{ + // CHECK: x86vector.avx512.intr.cvtneps2bf16.512 + %0 = x86vector.avx512.cvt.packed.f32_to_bf16 %a : vector<16xf32> -> vector<16xbf16> + return %0 : vector<16xbf16> +} + // CHECK-LABEL: func @avx_rsqrt func.func @avx_rsqrt(%a: vector<8xf32>) -> (vector<8xf32>) { diff --git
a/mlir/test/Dialect/X86Vector/roundtrip.mlir b/mlir/test/Dialect/X86Vector/roundtrip.mlir index cf74a7ee60255..0d00448c63da8 100644 --- a/mlir/test/Dialect/X86Vector/roundtrip.mlir +++ b/mlir/test/Dialect/X86Vector/roundtrip.mlir @@ -74,6 +74,26 @@ func.func @avx512bf16_dot_512(%src: vector<16xf32>, %a: vector<32xbf16>, return %0 : vector<16xf32> } +// CHECK-LABEL: func @avx512bf16_cvt_packed_f32_to_bf16_256 +func.func @avx512bf16_cvt_packed_f32_to_bf16_256( + %a: vector<8xf32>) -> (vector<8xbf16>) +{ + // CHECK: x86vector.avx512.cvt.packed.f32_to_bf16 {{.*}} : + // CHECK-SAME: vector<8xf32> -> vector<8xbf16> + %0 = x86vector.avx512.cvt.packed.f32_to_bf16 %a : vector<8xf32> -> vector<8xbf16> + return %0 : vector<8xbf16> +} + +// CHECK-LABEL: func @avx512bf16_cvt_packed_f32_to_bf16_512 +func.func @avx512bf16_cvt_packed_f32_to_bf16_512( + %a: vector<16xf32>) -> (vector<16xbf16>) +{ + // CHECK: x86vector.avx512.cvt.packed.f32_to_bf16 {{.*}} : + // CHECK-SAME: vector<16xf32> -> vector<16xbf16> + %0 = x86vector.avx512.cvt.packed.f32_to_bf16 %a : vector<16xf32> -> vector<16xbf16> + return %0 : vector<16xbf16> +} + // CHECK-LABEL: func @avx_rsqrt func.func @avx_rsqrt(%a: vector<8xf32>) -> (vector<8xf32>) { diff --git a/mlir/test/IR/op-asm-interface.mlir b/mlir/test/IR/op-asm-interface.mlir index a9c199e3dc973..086dc7da421c2 100644 --- a/mlir/test/IR/op-asm-interface.mlir +++ b/mlir/test/IR/op-asm-interface.mlir @@ -22,3 +22,63 @@ func.func @block_argument_name_from_op_asm_type_interface() { } return } + +// ----- + +//===----------------------------------------------------------------------===// +// Test OpAsmTypeInterface +//===----------------------------------------------------------------------===// + +func.func @result_name_from_op_asm_type_interface_asmprinter() { + // CHECK-LABEL: @result_name_from_op_asm_type_interface_asmprinter + // CHECK: %op_asm_type_interface + %0 = "test.result_name_from_type_interface"() : () -> !test.op_asm_type_interface + return +} + +// ----- + +// i1 does not have OpAsmTypeInterface, should not get named. 
+func.func @result_name_from_op_asm_type_interface_not_all() {
+  // CHECK-LABEL: @result_name_from_op_asm_type_interface_not_all
+  // CHECK-NOT: %op_asm_type_interface
+  // CHECK: %0:2
+  %0:2 = "test.result_name_from_type_interface"() : () -> (!test.op_asm_type_interface, i1)
+  return
+}
+
+// -----
+
+func.func @block_argument_name_from_op_asm_type_interface_asmprinter() {
+  // CHECK-LABEL: @block_argument_name_from_op_asm_type_interface_asmprinter
+  // CHECK: ^bb0(%op_asm_type_interface
+  test.block_argument_name_from_type_interface {
+  ^bb0(%arg0: !test.op_asm_type_interface):
+    "test.terminator"() : ()->()
+  }
+  return
+}
+
+// -----
+
+// CHECK: !op_asm_type_interface_type =
+!type = !test.op_asm_type_interface
+
+func.func @alias_from_op_asm_type_interface() {
+  %0 = "test.result_name_from_type"() : () -> !type
+  return
+}
+
+// -----
+
+//===----------------------------------------------------------------------===//
+// Test OpAsmAttrInterface
+//===----------------------------------------------------------------------===//
+
+// CHECK: #op_asm_attr_interface_test
+#attr = #test.op_asm_attr_interface<value = "test">
+
+func.func @test_op_asm_attr_interface() {
+  %1 = "test.result_name_from_type"() {attr = #attr} : () -> !test.op_asm_type_interface
+  return
+}
diff --git a/mlir/test/Target/Cpp/file.mlir b/mlir/test/Target/Cpp/file.mlir
new file mode 100644
index 0000000000000..262d3cdac27d4
--- /dev/null
+++ b/mlir/test/Target/Cpp/file.mlir
@@ -0,0 +1,29 @@
+// RUN: mlir-translate -mlir-to-cpp %s | FileCheck %s --check-prefix NO-FILTER
+// RUN: mlir-translate -mlir-to-cpp -file-id=non-existing %s | FileCheck %s --check-prefix NON-EXISTING
+// RUN: mlir-translate -mlir-to-cpp -file-id=file_one %s | FileCheck %s --check-prefix FILE-ONE
+// RUN: mlir-translate -mlir-to-cpp -file-id=file_two %s | FileCheck %s --check-prefix FILE-TWO
+
+
+// NO-FILTER-NOT: func_one
+// NO-FILTER-NOT: func_two
+
+// NON-EXISTING-NOT: func_one
+// NON-EXISTING-NOT: func_two
+
+// FILE-ONE: func_one
+// FILE-ONE-NOT: func_two
+
+// FILE-TWO-NOT: func_one
+// FILE-TWO: func_two
+
+emitc.file "file_one" {
+  emitc.func @func_one(%arg: f32) {
+    emitc.return
+  }
+}
+
+emitc.file "file_two" {
+  emitc.func @func_two(%arg: f32) {
+    emitc.return
+  }
+}
diff --git a/mlir/test/Target/Cpp/verbatim.mlir b/mlir/test/Target/Cpp/verbatim.mlir
index 10465dd781a81..e1f225c112a43 100644
--- a/mlir/test/Target/Cpp/verbatim.mlir
+++ b/mlir/test/Target/Cpp/verbatim.mlir
@@ -1,5 +1,5 @@
-// RUN: mlir-translate -mlir-to-cpp %s | FileCheck %s
-// RUN: mlir-translate -mlir-to-cpp -declare-variables-at-top %s | FileCheck %s
+// RUN: mlir-translate -mlir-to-cpp %s | FileCheck %s --match-full-lines
+// RUN: mlir-translate -mlir-to-cpp -declare-variables-at-top %s | FileCheck %s --match-full-lines
 
 emitc.verbatim "#ifdef __cplusplus"
@@ -19,3 +19,23 @@ emitc.verbatim "typedef int32_t i32;"
 // CHECK-NEXT: typedef int32_t i32;
 emitc.verbatim "typedef float f32;"
 // CHECK-NEXT: typedef float f32;
+
+emitc.func @func(%arg: f32) {
+  // CHECK: void func(float [[V0:[^ ]*]]) {
+  %a = "emitc.variable"(){value = #emitc.opaque<"">} : () -> !emitc.array<3x7xi32>
+  // CHECK: int32_t [[A:[^ ]*]][3][7];
+
+  emitc.verbatim "{}" args %arg : f32
+  // CHECK: [[V0]]
+
+  emitc.verbatim "{} {{a" args %arg : f32
+  // CHECK-NEXT: [[V0]] {a
+
+  emitc.verbatim "#pragma my var={} property" args %arg : f32
+  // CHECK-NEXT: #pragma my var=[[V0]] property
+
+  emitc.verbatim "#pragma my2 var={} property" args %a : !emitc.array<3x7xi32>
+  // CHECK-NEXT: #pragma my2 var=[[A]] property
+
+  emitc.return
+}
diff --git a/mlir/test/Target/LLVMIR/Import/alias.ll b/mlir/test/Target/LLVMIR/Import/alias.ll
index 3ab68a7d8fb81..23eaecb9c9fa7 100644
--- a/mlir/test/Target/LLVMIR/Import/alias.ll
+++ b/mlir/test/Target/LLVMIR/Import/alias.ll
@@ -68,14 +68,6 @@ entry:
 @a1 = private alias i32, ptr @g1
 @a2 = private alias ptr, ptr @a1
 
-; CHECK: llvm.mlir.alias private @a1 {dso_local} : i32 {
-; CHECK:   %[[ADDR:.*]] = llvm.mlir.addressof @g1 : !llvm.ptr
-; CHECK:   llvm.return %[[ADDR]] : !llvm.ptr
-; CHECK: }
-; CHECK: llvm.mlir.alias private @a2 {dso_local} : !llvm.ptr {
-; CHECK-NEXT:   %[[ADDR:.*]] = llvm.mlir.addressof @a1 : !llvm.ptr
-; CHECK-NEXT:   llvm.return %[[ADDR]] : !llvm.ptr
-; CHECK-NEXT: }
 
 ; CHECK: llvm.mlir.global internal constant @g2() {addr_space = 0 : i32, dso_local} : !llvm.ptr {
 ; CHECK-NEXT:   %[[ADDR:.*]] = llvm.mlir.addressof @a1 : !llvm.ptr
@@ -86,3 +78,13 @@ entry:
 ; CHECK-NEXT:   %[[ADDR:.*]] = llvm.mlir.addressof @a2 : !llvm.ptr
 ; CHECK-NEXT:   llvm.return %[[ADDR]] : !llvm.ptr
 ; CHECK-NEXT: }
+
+; CHECK: llvm.mlir.alias private @a1 {dso_local} : i32 {
+; CHECK-NEXT:   %[[ADDR:.*]] = llvm.mlir.addressof @g1 : !llvm.ptr
+; CHECK-NEXT:   llvm.return %[[ADDR]] : !llvm.ptr
+; CHECK-NEXT: }
+
+; CHECK: llvm.mlir.alias private @a2 {dso_local} : !llvm.ptr {
+; CHECK-NEXT:   %[[ADDR:.*]] = llvm.mlir.addressof @a1 : !llvm.ptr
+; CHECK-NEXT:   llvm.return %[[ADDR]] : !llvm.ptr
+; CHECK-NEXT: }
diff --git a/mlir/test/Target/LLVMIR/omptarget-llvm.mlir b/mlir/test/Target/LLVMIR/omptarget-llvm.mlir
index 7f21095763a39..02b84ff66a0d3 100644
--- a/mlir/test/Target/LLVMIR/omptarget-llvm.mlir
+++ b/mlir/test/Target/LLVMIR/omptarget-llvm.mlir
@@ -485,3 +485,120 @@ llvm.func @_QPopenmp_target_data_update() {
 // CHECK: call void @__tgt_target_data_update_mapper(ptr @2, i64 -1, i32 1, ptr %[[BASEPTRS_VAL_2]], ptr %[[PTRS_VAL_2]], ptr @{{.*}}, ptr @{{.*}}, ptr @{{.*}}, ptr null)
 // CHECK: ret void
+
+// -----
+
+omp.declare_mapper @_QQFmy_testmy_mapper : !llvm.struct<"_QFmy_testTmy_type", (i32)> {
+^bb0(%arg0: !llvm.ptr):
+  %0 = llvm.mlir.constant(0 : i32) : i32
+  %1 = llvm.getelementptr %arg0[0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"_QFmy_testTmy_type", (i32)>
+  %2 = omp.map.info var_ptr(%1 : !llvm.ptr, i32) map_clauses(tofrom) capture(ByRef) -> !llvm.ptr {name = "var%data"}
+  %3 = omp.map.info var_ptr(%arg0 : !llvm.ptr, !llvm.struct<"_QFmy_testTmy_type", (i32)>) map_clauses(tofrom) capture(ByRef) members(%2 : [0] : !llvm.ptr) -> !llvm.ptr {name = "var", partial_map = true}
+  omp.declare_mapper.info map_entries(%3, %2 : !llvm.ptr, !llvm.ptr)
+}
+
+llvm.func @_QPopenmp_target_data_mapper() {
+  %0 = llvm.mlir.constant(1 : i64) : i64
+  %1 = llvm.alloca %0 x !llvm.struct<"_QFmy_testTmy_type", (i32)> {bindc_name = "a"} : (i64) -> !llvm.ptr
+  %2 = omp.map.info var_ptr(%1 : !llvm.ptr, !llvm.struct<"_QFmy_testTmy_type", (i32)>) mapper(@_QQFmy_testmy_mapper) map_clauses(tofrom) capture(ByRef) -> !llvm.ptr {name = "a"}
+  omp.target_data map_entries(%2 : !llvm.ptr) {
+    %3 = llvm.mlir.constant(10 : i32) : i32
+    %4 = llvm.getelementptr %1[0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"_QFmy_testTmy_type", (i32)>
+    llvm.store %3, %4 : i32, !llvm.ptr
+    omp.terminator
+  }
+  llvm.return
+}
+
+// CHECK: @.offload_sizes = private unnamed_addr constant [1 x i64] [i64 4]
+// CHECK: @.offload_maptypes = private unnamed_addr constant [1 x i64] [i64 3]
+// CHECK-LABEL: define void @_QPopenmp_target_data_mapper
+// CHECK: %[[VAL_0:.*]] = alloca [1 x ptr], align 8
+// CHECK: %[[VAL_1:.*]] = alloca [1 x ptr], align 8
+// CHECK: %[[VAL_2:.*]] = alloca [1 x ptr], align 8
+// CHECK: %[[VAL_3:.*]] = alloca %[[VAL_4:.*]], i64 1, align 8
+// CHECK: br label %[[VAL_5:.*]]
+// CHECK: entry: ; preds = %[[VAL_6:.*]]
+// CHECK: %[[VAL_7:.*]] = getelementptr inbounds [1 x ptr], ptr %[[VAL_0]], i32 0, i32 0
+// CHECK: store ptr %[[VAL_3]], ptr %[[VAL_7]], align 8
+// CHECK: %[[VAL_8:.*]] = getelementptr inbounds [1 x ptr], ptr %[[VAL_1]], i32 0, i32 0
+// CHECK: store ptr %[[VAL_3]], ptr %[[VAL_8]], align 8
+// CHECK: %[[VAL_9:.*]] = getelementptr inbounds [1 x ptr], ptr %[[VAL_2]], i64 0, i64 0
+// CHECK: store ptr @.omp_mapper._QQFmy_testmy_mapper, ptr %[[VAL_9]], align 8
+// CHECK: %[[VAL_10:.*]] = getelementptr inbounds [1 x ptr], ptr %[[VAL_0]], i32 0, i32 0
+// CHECK: %[[VAL_11:.*]] = getelementptr inbounds [1 x ptr], ptr %[[VAL_1]], i32 0, i32 0
+// CHECK: call void @__tgt_target_data_begin_mapper(ptr @4, i64 -1, i32 1, ptr %[[VAL_10]], ptr %[[VAL_11]], ptr @.offload_sizes, ptr @.offload_maptypes, ptr @.offload_mapnames, ptr %[[VAL_2]])
+// CHECK: %[[VAL_12:.*]] = getelementptr %[[VAL_4]], ptr %[[VAL_3]], i32 0, i32 0
+// CHECK: store i32 10, ptr %[[VAL_12]], align 4
+// CHECK: %[[VAL_13:.*]] = getelementptr inbounds [1 x ptr], ptr %[[VAL_0]], i32 0, i32 0
+// CHECK: %[[VAL_14:.*]] = getelementptr inbounds [1 x ptr], ptr %[[VAL_1]], i32 0, i32 0
+// CHECK: call void @__tgt_target_data_end_mapper(ptr @4, i64 -1, i32 1, ptr %[[VAL_13]], ptr %[[VAL_14]], ptr @.offload_sizes, ptr @.offload_maptypes, ptr @.offload_mapnames, ptr %[[VAL_2]])
+// CHECK: ret void
+
+// CHECK-LABEL: define internal void @.omp_mapper._QQFmy_testmy_mapper
+// CHECK: entry:
+// CHECK: %[[VAL_15:.*]] = udiv exact i64 %[[VAL_16:.*]], 4
+// CHECK: %[[VAL_17:.*]] = getelementptr %[[VAL_18:.*]], ptr %[[VAL_19:.*]], i64 %[[VAL_15]]
+// CHECK: %[[VAL_20:.*]] = icmp sgt i64 %[[VAL_15]], 1
+// CHECK: %[[VAL_21:.*]] = and i64 %[[VAL_22:.*]], 8
+// CHECK: %[[VAL_23:.*]] = icmp ne ptr %[[VAL_24:.*]], %[[VAL_19]]
+// CHECK: %[[VAL_25:.*]] = and i64 %[[VAL_22]], 16
+// CHECK: %[[VAL_26:.*]] = icmp ne i64 %[[VAL_25]], 0
+// CHECK: %[[VAL_27:.*]] = and i1 %[[VAL_23]], %[[VAL_26]]
+// CHECK: %[[VAL_28:.*]] = or i1 %[[VAL_20]], %[[VAL_27]]
+// CHECK: %[[VAL_29:.*]] = icmp eq i64 %[[VAL_21]], 0
+// CHECK: %[[VAL_30:.*]] = and i1 %[[VAL_28]], %[[VAL_29]]
+// CHECK: br i1 %[[VAL_30]], label %[[VAL_31:.*]], label %[[VAL_32:.*]]
+// CHECK: .omp.array..init: ; preds = %[[VAL_33:.*]]
+// CHECK: %[[VAL_34:.*]] = mul nuw i64 %[[VAL_15]], 4
+// CHECK: %[[VAL_35:.*]] = and i64 %[[VAL_22]], -4
+// CHECK: %[[VAL_36:.*]] = or i64 %[[VAL_35]], 512
+// CHECK: call void @__tgt_push_mapper_component(ptr %[[VAL_37:.*]], ptr %[[VAL_24]], ptr %[[VAL_19]], i64 %[[VAL_34]], i64 %[[VAL_36]], ptr %[[VAL_38:.*]])
+// CHECK: br label %[[VAL_32]]
+// CHECK: omp.arraymap.head: ; preds = %[[VAL_31]], %[[VAL_33]]
+// CHECK: %[[VAL_39:.*]] = icmp eq ptr %[[VAL_19]], %[[VAL_17]]
+// CHECK: br i1 %[[VAL_39]], label %[[VAL_40:.*]], label %[[VAL_41:.*]]
+// CHECK: omp.arraymap.body: ; preds = %[[VAL_42:.*]], %[[VAL_32]]
+// CHECK: %[[VAL_43:.*]] = phi ptr [ %[[VAL_19]], %[[VAL_32]] ], [ %[[VAL_44:.*]], %[[VAL_42]] ]
+// CHECK: %[[VAL_45:.*]] = getelementptr %[[VAL_18]], ptr %[[VAL_43]], i32 0, i32 0
+// CHECK: %[[VAL_46:.*]] = call i64 @__tgt_mapper_num_components(ptr %[[VAL_37]])
+// CHECK: %[[VAL_47:.*]] = shl i64 %[[VAL_46]], 48
+// CHECK: %[[VAL_48:.*]] = add nuw i64 3, %[[VAL_47]]
+// CHECK: %[[VAL_49:.*]] = and i64 %[[VAL_22]], 3
+// CHECK: %[[VAL_50:.*]] = icmp eq i64 %[[VAL_49]], 0
+// CHECK: br i1 %[[VAL_50]], label %[[VAL_51:.*]], label %[[VAL_52:.*]]
+// CHECK: omp.type.alloc: ; preds = %[[VAL_41]]
+// CHECK: %[[VAL_53:.*]] = and i64 %[[VAL_48]], -4
+// CHECK: br label %[[VAL_42]]
+// CHECK: omp.type.alloc.else: ; preds = %[[VAL_41]]
+// CHECK: %[[VAL_54:.*]] = icmp eq i64 %[[VAL_49]], 1
+// CHECK: br i1 %[[VAL_54]], label %[[VAL_55:.*]], label %[[VAL_56:.*]]
+// CHECK: omp.type.to: ; preds = %[[VAL_52]]
+// CHECK: %[[VAL_57:.*]] = and i64 %[[VAL_48]], -3
+// CHECK: br label %[[VAL_42]]
+// CHECK: omp.type.to.else: ; preds = %[[VAL_52]]
+// CHECK: %[[VAL_58:.*]] = icmp eq i64 %[[VAL_49]], 2
+// CHECK: br i1 %[[VAL_58]], label %[[VAL_59:.*]], label %[[VAL_42]]
+// CHECK: omp.type.from: ; preds = %[[VAL_56]]
+// CHECK: %[[VAL_60:.*]] = and i64 %[[VAL_48]], -2
+// CHECK: br label %[[VAL_42]]
+// CHECK: omp.type.end: ; preds = %[[VAL_59]], %[[VAL_56]], %[[VAL_55]], %[[VAL_51]]
+// CHECK: %[[VAL_61:.*]] = phi i64 [ %[[VAL_53]], %[[VAL_51]] ], [ %[[VAL_57]], %[[VAL_55]] ], [ %[[VAL_60]], %[[VAL_59]] ], [ %[[VAL_48]], %[[VAL_56]] ]
+// CHECK: call void @__tgt_push_mapper_component(ptr %[[VAL_37]], ptr %[[VAL_43]], ptr %[[VAL_45]], i64 4, i64 %[[VAL_61]], ptr @2)
+// CHECK: %[[VAL_44]] = getelementptr %[[VAL_18]], ptr %[[VAL_43]], i32 1
+// CHECK: %[[VAL_62:.*]] = icmp eq ptr %[[VAL_44]], %[[VAL_17]]
+// CHECK: br i1 %[[VAL_62]], label %[[VAL_63:.*]], label %[[VAL_41]]
+// CHECK: omp.arraymap.exit: ; preds = %[[VAL_42]]
+// CHECK: %[[VAL_64:.*]] = icmp sgt i64 %[[VAL_15]], 1
+// CHECK: %[[VAL_65:.*]] = and i64 %[[VAL_22]], 8
+// CHECK: %[[VAL_66:.*]] = icmp ne i64 %[[VAL_65]], 0
+// CHECK: %[[VAL_67:.*]] = and i1 %[[VAL_64]], %[[VAL_66]]
+// CHECK: br i1 %[[VAL_67]], label %[[VAL_68:.*]], label %[[VAL_40]]
+// CHECK: .omp.array..del: ; preds = %[[VAL_63]]
+// CHECK: %[[VAL_69:.*]] = mul nuw i64 %[[VAL_15]], 4
+// CHECK: %[[VAL_70:.*]] = and i64 %[[VAL_22]], -4
+// CHECK: %[[VAL_71:.*]] = or i64 %[[VAL_70]], 512
+// CHECK: call void @__tgt_push_mapper_component(ptr %[[VAL_37]], ptr %[[VAL_24]], ptr %[[VAL_19]], i64 %[[VAL_69]], i64 %[[VAL_71]], ptr %[[VAL_38]])
+// CHECK: br label %[[VAL_40]]
+// CHECK: omp.done: ; preds = %[[VAL_68]], %[[VAL_63]], %[[VAL_32]]
+// CHECK: ret void
diff --git a/mlir/test/Target/LLVMIR/rocdl.mlir b/mlir/test/Target/LLVMIR/rocdl.mlir
index eac28c57e2ab4..84a30277e63da 100644
--- a/mlir/test/Target/LLVMIR/rocdl.mlir
+++ b/mlir/test/Target/LLVMIR/rocdl.mlir
@@ -851,12 +851,23 @@ llvm.func @rocdl.make.buffer.rsrc(%ptr : !llvm.ptr,
                                   %numRecords : i32,
                                   %flags : i32) -> !llvm.ptr<8> {
   // CHECK-LABEL: rocdl.make.buffer.rsrc
-  // CHECK: %[[rsrc:.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p0(ptr %{{.*}}, i16 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
+  // CHECK: %[[rsrc:.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %{{.*}}, i16 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
   // CHECK: ret ptr addrspace(8) %[[rsrc]]
   %rsrc = rocdl.make.buffer.rsrc %ptr, %stride, %numRecords, %flags : !llvm.ptr to !llvm.ptr<8>
   llvm.return %rsrc : !llvm.ptr<8>
 }
 
+llvm.func @rocdl.make.buffer.rsrc.p7.p1(%ptr : !llvm.ptr<1>,
+                                        %stride : i16,
+                                        %numRecords : i32,
+                                        %flags : i32) -> !llvm.ptr<7> {
+  // CHECK-LABEL: rocdl.make.buffer.rsrc.p7.p1
+  // CHECK: %[[rsrc:.*]] = call ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p1(ptr addrspace(1) %{{.*}}, i16 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
+  // CHECK: ret ptr addrspace(7) %[[rsrc]]
+  %rsrc = rocdl.make.buffer.rsrc %ptr, %stride, %numRecords, %flags : <1> to <7>
+  llvm.return %rsrc : !llvm.ptr<7>
+}
+
 llvm.func @rocdl.wmma.fp8(%arg0 : vector<2 x i32>, %arg1 : vector<8xf32>) -> vector<8xf32> {
   // CHECK: call <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.fp8.fp8.v8f32.v2i32(<2 x i32> %{{.*}}, <2 x i32> %{{.*}}, <8 x float> %{{.*}})
   %r0 = rocdl.wmma.f32.16x16x16.fp8_fp8 %arg0, %arg0, %arg1: (vector<2xi32>, vector<2xi32>, vector<8xf32>) -> vector<8xf32>
diff --git a/mlir/test/Target/LLVMIR/x86vector.mlir b/mlir/test/Target/LLVMIR/x86vector.mlir
index 1df03f10c9321..db1c10cd5cd37 100644
--- a/mlir/test/Target/LLVMIR/x86vector.mlir
+++ b/mlir/test/Target/LLVMIR/x86vector.mlir
@@ -62,37 +62,57 @@ llvm.func @LLVM_x86_vp2intersect_q_512(%a: vector<8xi64>, %b: vector<8xi64>)
 
 // CHECK-LABEL: define <4 x float> @LLVM_x86_avx512bf16_dpbf16ps_128
 llvm.func @LLVM_x86_avx512bf16_dpbf16ps_128(
-    %arg0: vector<4xf32>, %arg1: vector<8xbf16>, %arg2: vector<8xbf16>
+    %src: vector<4xf32>, %a: vector<8xbf16>, %b: vector<8xbf16>
   ) -> vector<4xf32>
 {
   // CHECK: call <4 x float> @llvm.x86.avx512bf16.dpbf16ps.128(
-  %0 = "x86vector.avx512.intr.dpbf16ps.128"(%arg0, %arg1, %arg2)
+  %0 = "x86vector.avx512.intr.dpbf16ps.128"(%src, %a, %b)
     : (vector<4xf32>, vector<8xbf16>, vector<8xbf16>) -> vector<4xf32>
   llvm.return %0 : vector<4xf32>
 }
 
 // CHECK-LABEL: define <8 x float> @LLVM_x86_avx512bf16_dpbf16ps_256
 llvm.func @LLVM_x86_avx512bf16_dpbf16ps_256(
-    %arg0: vector<8xf32>, %arg1: vector<16xbf16>, %arg2: vector<16xbf16>
+    %src: vector<8xf32>, %a: vector<16xbf16>, %b: vector<16xbf16>
   ) -> vector<8xf32>
 {
   // CHECK: call <8 x float> @llvm.x86.avx512bf16.dpbf16ps.256(
-  %0 = "x86vector.avx512.intr.dpbf16ps.256"(%arg0, %arg1, %arg2)
+  %0 = "x86vector.avx512.intr.dpbf16ps.256"(%src, %a, %b)
     : (vector<8xf32>, vector<16xbf16>, vector<16xbf16>) -> vector<8xf32>
   llvm.return %0 : vector<8xf32>
 }
 
 // CHECK-LABEL: define <16 x float> @LLVM_x86_avx512bf16_dpbf16ps_512
 llvm.func @LLVM_x86_avx512bf16_dpbf16ps_512(
-    %arg0: vector<16xf32>, %arg1: vector<32xbf16>, %arg2: vector<32xbf16>
+    %src: vector<16xf32>, %a: vector<32xbf16>, %b: vector<32xbf16>
   ) -> vector<16xf32>
 {
   // CHECK: call <16 x float> @llvm.x86.avx512bf16.dpbf16ps.512(
-  %0 = "x86vector.avx512.intr.dpbf16ps.512"(%arg0, %arg1, %arg2)
+  %0 = "x86vector.avx512.intr.dpbf16ps.512"(%src, %a, %b)
     : (vector<16xf32>, vector<32xbf16>, vector<32xbf16>) -> vector<16xf32>
   llvm.return %0 : vector<16xf32>
 }
 
+// CHECK-LABEL: define <8 x bfloat> @LLVM_x86_avx512bf16_cvtneps2bf16_256
+llvm.func @LLVM_x86_avx512bf16_cvtneps2bf16_256(
+  %a: vector<8xf32>) -> vector<8xbf16>
+{
+  // CHECK: call <8 x bfloat> @llvm.x86.avx512bf16.cvtneps2bf16.256(
+  %0 = "x86vector.avx512.intr.cvtneps2bf16.256"(%a)
+    : (vector<8xf32>) -> vector<8xbf16>
+  llvm.return %0 : vector<8xbf16>
+}
+
+// CHECK-LABEL: define <16 x bfloat> @LLVM_x86_avx512bf16_cvtneps2bf16_512
+llvm.func @LLVM_x86_avx512bf16_cvtneps2bf16_512(
+  %a: vector<16xf32>) -> vector<16xbf16>
+{
+  // CHECK: call <16 x bfloat> @llvm.x86.avx512bf16.cvtneps2bf16.512(
+  %0 = "x86vector.avx512.intr.cvtneps2bf16.512"(%a)
+    : (vector<16xf32>) -> vector<16xbf16>
+  llvm.return %0 : vector<16xbf16>
+}
+
 // CHECK-LABEL: define <8 x float> @LLVM_x86_avx_rsqrt_ps_256
 llvm.func @LLVM_x86_avx_rsqrt_ps_256(%a: vector <8xf32>) -> vector<8xf32>
 {
@@ -103,11 +123,11 @@ llvm.func @LLVM_x86_avx_rsqrt_ps_256(%a: vector <8xf32>) -> vector<8xf32>
 
 // CHECK-LABEL: define <8 x float> @LLVM_x86_avx_dp_ps_256
 llvm.func @LLVM_x86_avx_dp_ps_256(
-    %arg0: vector<8xf32>, %arg1: vector<8xf32>
+    %a: vector<8xf32>, %b: vector<8xf32>
   ) -> vector<8xf32>
 {
   // CHECK: call <8 x float> @llvm.x86.avx.dp.ps.256(
-  %0 = llvm.mlir.constant(-1 : i8) : i8
-  %1 = "x86vector.avx.intr.dp.ps.256"(%arg0, %arg1, %0) : (vector<8xf32>, vector<8xf32>, i8) -> vector<8xf32>
+  %c = llvm.mlir.constant(-1 : i8) : i8
+  %1 = "x86vector.avx.intr.dp.ps.256"(%a, %b, %c) : (vector<8xf32>, vector<8xf32>, i8) -> vector<8xf32>
   llvm.return %1 : vector<8xf32>
 }
diff --git a/mlir/test/Target/SPIRV/terminator.mlir b/mlir/test/Target/SPIRV/terminator.mlir
index 065b68b9bdfbb..8338a575681f1 100644
--- a/mlir/test/Target/SPIRV/terminator.mlir
+++ b/mlir/test/Target/SPIRV/terminator.mlir
@@ -24,4 +24,10 @@ spirv.module Logical GLSL450 requires #spirv.vce<v1.0, [Shader], []> {
     // CHECK-NOT: spirv.Unreachable
     spirv.Unreachable
   }
+
+  // CHECK-LABEL: @kill
+  spirv.func @kill() -> () "None" {
+    // CHECK: spirv.Kill
+    spirv.Kill
+  }
 }
diff --git a/mlir/test/Transforms/scf-loop-unroll.mlir b/mlir/test/Transforms/scf-loop-unroll.mlir
index baf6b2970ac0e..0ef6ad15d4eb0 100644
--- a/mlir/test/Transforms/scf-loop-unroll.mlir
+++ b/mlir/test/Transforms/scf-loop-unroll.mlir
@@ -1,5 +1,6 @@
 // RUN: mlir-opt %s --test-loop-unrolling="unroll-factor=3" -split-input-file -canonicalize | FileCheck %s
 // RUN: mlir-opt %s --test-loop-unrolling="unroll-factor=1" -split-input-file -canonicalize | FileCheck %s --check-prefix UNROLL-BY-1
+// RUN: mlir-opt %s --test-loop-unrolling="unroll-full=true" -split-input-file -canonicalize | FileCheck %s --check-prefix UNROLL-FULL
 
 // CHECK-LABEL: scf_loop_unroll_single
 func.func @scf_loop_unroll_single(%arg0 : f32, %arg1 : f32) -> f32 {
@@ -56,3 +57,59 @@ func.func @scf_loop_unroll_factor_1_promote() -> () {
 // UNROLL-BY-1-NEXT: %[[C0:.*]] = arith.constant 0 : index
 // UNROLL-BY-1-NEXT: %{{.*}} = "test.foo"(%[[C0]]) : (index) -> i32
 }
+
+// UNROLL-FULL-LABEL: func @scf_loop_unroll_full_single
+// UNROLL-FULL-SAME: %[[ARG:.*]]: index)
+func.func @scf_loop_unroll_full_single(%arg : index) -> index {
+  %0 = arith.constant 0 : index
+  %1 = arith.constant 1 : index
+  %2 = arith.constant 4 : index
+  %4 = scf.for %iv = %0 to %2 step %1 iter_args(%arg1 = %1) -> index {
+    %3 = arith.addi %arg1, %arg : index
+    scf.yield %3 : index
+  }
+  return %4 : index
+  // UNROLL-FULL: %[[C1:.*]] = arith.constant 1 : index
+  // UNROLL-FULL: %[[V0:.*]] = arith.addi %[[ARG]], %[[C1]] : index
+  // UNROLL-FULL: %[[V1:.*]] = arith.addi %[[V0]], %[[ARG]] : index
+  // UNROLL-FULL: %[[V2:.*]] = arith.addi %[[V1]], %[[ARG]] : index
+  // UNROLL-FULL: %[[V3:.*]] = arith.addi %[[V2]], %[[ARG]] : index
+  // UNROLL-FULL: return %[[V3]] : index
+}
+
+// UNROLL-FULL-LABEL: func @scf_loop_unroll_full_outer_loops
+// UNROLL-FULL-SAME: %[[ARG:.*]]: vector<4x4xindex>)
+func.func @scf_loop_unroll_full_outer_loops(%arg0: vector<4x4xindex>) -> index {
+  %0 = arith.constant 0 : index
+  %1 = arith.constant 1 : index
+  %2 = arith.constant 4 : index
+  %6 = scf.for %arg1 = %0 to %2 step %1 iter_args(%it0 = %0) -> index {
+    %5 = scf.for %arg2 = %0 to %2 step %1 iter_args(%it1 = %it0) -> index {
+      %3 = vector.extract %arg0[%arg1, %arg2] : index from vector<4x4xindex>
+      %4 = arith.addi %3, %it1 : index
+      scf.yield %3 : index
+    }
+    scf.yield %5 : index
+  }
+  return %6 : index
+  // UNROLL-FULL: %[[C0:.*]] = arith.constant 0 : index
+  // UNROLL-FULL: %[[C1:.*]] = arith.constant 1 : index
+  // UNROLL-FULL: %[[C4:.*]] = arith.constant 4 : index
+  // UNROLL-FULL: %[[SUM0:.*]] = scf.for %[[IV:.*]] = %[[C0]] to %[[C4]] step %[[C1]] iter_args(%{{.*}} = %[[C0]])
+  // UNROLL-FULL:   %[[VAL:.*]] = vector.extract %[[ARG]][0, %[[IV]]] : index from vector<4x4xindex>
+  // UNROLL-FULL:   scf.yield %[[VAL]] : index
+  // UNROLL-FULL: }
+  // UNROLL-FULL: %[[SUM1:.*]] = scf.for %[[IV:.*]] = %[[C0]] to %[[C4]] step %[[C1]] iter_args(%{{.*}} = %[[SUM0]])
+  // UNROLL-FULL:   %[[VAL:.*]] = vector.extract %[[ARG]][1, %[[IV]]] : index from vector<4x4xindex>
+  // UNROLL-FULL:   scf.yield %[[VAL]] : index
+  // UNROLL-FULL: }
+  // UNROLL-FULL: %[[SUM2:.*]] = scf.for %[[IV:.*]] = %[[C0]] to %[[C4]] step %[[C1]] iter_args(%{{.*}} = %[[SUM1]])
+  // UNROLL-FULL:   %[[VAL:.*]] = vector.extract %[[ARG]][2, %[[IV]]] : index from vector<4x4xindex>
+  // UNROLL-FULL:   scf.yield %[[VAL]] : index
+  // UNROLL-FULL: }
+  // UNROLL-FULL: %[[SUM3:.*]] = scf.for %[[IV:.*]] = %[[C0]] to %[[C4]] step %[[C1]] iter_args(%{{.*}} = %[[SUM2]])
+  // UNROLL-FULL:   %[[VAL:.*]] = vector.extract %[[ARG]][3, %[[IV]]] : index from vector<4x4xindex>
+  // UNROLL-FULL:   scf.yield %[[VAL]] : index
+  // UNROLL-FULL: }
+  // UNROLL-FULL: return %[[SUM3]] : index
+}
diff --git a/mlir/test/lib/Dialect/SCF/TestLoopUnrolling.cpp b/mlir/test/lib/Dialect/SCF/TestLoopUnrolling.cpp
index 8694a7f9bbd62..ced003305a7b8 100644
--- a/mlir/test/lib/Dialect/SCF/TestLoopUnrolling.cpp
+++ b/mlir/test/lib/Dialect/SCF/TestLoopUnrolling.cpp
@@ -42,10 +42,11 @@ struct TestLoopUnrollingPass
   TestLoopUnrollingPass(const TestLoopUnrollingPass &) {}
   explicit TestLoopUnrollingPass(uint64_t unrollFactorParam,
                                  unsigned loopDepthParam,
-                                 bool annotateLoopParam) {
+                                 bool annotateLoopParam, bool unrollFullParam) {
     unrollFactor = unrollFactorParam;
     loopDepth = loopDepthParam;
     annotateLoop = annotateLoopParam;
+    unrollFull = unrollFullParam;
   }
 
   void getDependentDialects(DialectRegistry &registry) const override {
@@ -63,8 +64,12 @@ struct TestLoopUnrollingPass
         op->setAttr("unrolled_iteration", b.getUI32IntegerAttr(i));
       }
     };
-    for (auto loop : loops)
-      (void)loopUnrollByFactor(loop, unrollFactor, annotateFn);
+    for (auto loop : loops) {
+      if (unrollFull)
+        (void)loopUnrollFull(loop);
+      else
+        (void)loopUnrollByFactor(loop, unrollFactor, annotateFn);
+    }
   }
   Option<uint64_t> unrollFactor{*this, "unroll-factor",
                                 llvm::cl::desc("Loop unroll factor."),
@@ -77,6 +82,9 @@ struct TestLoopUnrollingPass
                             llvm::cl::init(false)};
   Option<unsigned> loopDepth{*this, "loop-depth", llvm::cl::desc("Loop depth."),
                             llvm::cl::init(0)};
+  Option<bool> unrollFull{*this, "unroll-full",
+                          llvm::cl::desc("Fully unroll loops."),
+                          llvm::cl::init(false)};
 };
 } // namespace
diff --git a/mlir/test/lib/Dialect/Test/TestAttrDefs.td b/mlir/test/lib/Dialect/Test/TestAttrDefs.td
index 0fd272f85d39b..4b809c1c0a765 100644
--- a/mlir/test/lib/Dialect/Test/TestAttrDefs.td
+++ b/mlir/test/lib/Dialect/Test/TestAttrDefs.td
@@ -395,4 +395,14 @@ def TestCustomLocationAttr : Test_LocAttr<"TestCustomLocation"> {
   let assemblyFormat = "`<` $file `*` $line `>`";
 }
 
+// Test OpAsmAttrInterface.
+def TestOpAsmAttrInterfaceAttr : Test_Attr<"TestOpAsmAttrInterface",
+    [DeclareAttrInterfaceMethods<OpAsmAttrInterface, ["getAlias"]>]> {
+  let mnemonic = "op_asm_attr_interface";
+  let parameters = (ins "mlir::StringAttr":$value);
+  let assemblyFormat = [{
+    `<` struct(params) `>`
+  }];
+}
+
 #endif // TEST_ATTRDEFS
diff --git a/mlir/test/lib/Dialect/Test/TestAttributes.cpp b/mlir/test/lib/Dialect/Test/TestAttributes.cpp
index e09ea10906164..7c467308386f1 100644
--- a/mlir/test/lib/Dialect/Test/TestAttributes.cpp
+++ b/mlir/test/lib/Dialect/Test/TestAttributes.cpp
@@ -67,7 +67,7 @@ void CompoundAAttr::print(AsmPrinter &printer) const {
 //===----------------------------------------------------------------------===//
 
 Attribute TestDecimalShapeAttr::parse(AsmParser &parser, Type type) {
-  if (parser.parseLess()){
+  if (parser.parseLess()) {
     return Attribute();
   }
   SmallVector<int64_t> shape;
@@ -316,6 +316,17 @@ static ParseResult parseCustomFloatAttr(AsmParser &p, StringAttr &typeStrAttr,
   return success();
 }
 
+//===----------------------------------------------------------------------===//
+// TestOpAsmAttrInterfaceAttr
+//===----------------------------------------------------------------------===//
+
+::mlir::OpAsmDialectInterface::AliasResult
+TestOpAsmAttrInterfaceAttr::getAlias(::llvm::raw_ostream &os) const {
+  os << "op_asm_attr_interface_";
+  os << getValue().getValue();
+  return ::mlir::OpAsmDialectInterface::AliasResult::FinalAlias;
+}
+
 //===----------------------------------------------------------------------===//
 // Tablegen Generated Definitions
 //===----------------------------------------------------------------------===//
diff --git a/mlir/test/lib/Dialect/Test/TestOps.td b/mlir/test/lib/Dialect/Test/TestOps.td
index 2aa0658ab0e5d..cdc1237ec8c5a 100644
--- a/mlir/test/lib/Dialect/Test/TestOps.td
+++ b/mlir/test/lib/Dialect/Test/TestOps.td
@@ -955,6 +955,25 @@ def BlockArgumentNameFromTypeOp
   let assemblyFormat = "regions attr-dict-with-keyword";
 }
 
+// This is used to test OpAsmTypeInterface::getAsmName's integration with
+// AsmPrinter for op result names when OpAsmOpInterface::getAsmResultNames is
+// left as the default implementation, i.e. it does nothing.
+def ResultNameFromTypeInterfaceOp
+  : TEST_Op<"result_name_from_type_interface",
+      [OpAsmOpInterface]> {
+  let results = (outs Variadic<AnyType>:$r);
+}
+
+// This is used to test OpAsmTypeInterface::getAsmName's integration with
+// AsmPrinter for block argument names when
+// OpAsmOpInterface::getAsmBlockArgumentNames is left as the default
+// implementation, i.e. it does nothing.
+def BlockArgumentNameFromTypeInterfaceOp
+  : TEST_Op<"block_argument_name_from_type_interface",
+      [OpAsmOpInterface]> {
+  let regions = (region AnyRegion:$body);
+  let assemblyFormat = "regions attr-dict-with-keyword";
+}
+
 // This is used to test the OpAsmOpInterface::getDefaultDialect() feature:
 // operations nested in a region under this op will drop the "test." dialect
 // prefix.
diff --git a/mlir/test/lib/Dialect/Test/TestTypeDefs.td b/mlir/test/lib/Dialect/Test/TestTypeDefs.td
index 6335701786ecc..c048f8b654ec2 100644
--- a/mlir/test/lib/Dialect/Test/TestTypeDefs.td
+++ b/mlir/test/lib/Dialect/Test/TestTypeDefs.td
@@ -399,7 +399,7 @@ def TestTypeVerification : Test_Type<"TestTypeVerification"> {
 }
 
 def TestTypeOpAsmTypeInterface : Test_Type<"TestTypeOpAsmTypeInterface",
-    [DeclareTypeInterfaceMethods<OpAsmTypeInterface, ["getAsmName"]>]> {
+    [DeclareTypeInterfaceMethods<OpAsmTypeInterface, ["getAsmName", "getAlias"]>]> {
   let mnemonic = "op_asm_type_interface";
 }
 
diff --git a/mlir/test/lib/Dialect/Test/TestTypes.cpp b/mlir/test/lib/Dialect/Test/TestTypes.cpp
index 1ae7ac472d989..0c237440834ef 100644
--- a/mlir/test/lib/Dialect/Test/TestTypes.cpp
+++ b/mlir/test/lib/Dialect/Test/TestTypes.cpp
@@ -537,3 +537,9 @@ void TestTypeOpAsmTypeInterfaceType::getAsmName(
     OpAsmSetNameFn setNameFn) const {
   setNameFn("op_asm_type_interface");
 }
+
+::mlir::OpAsmDialectInterface::AliasResult
+TestTypeOpAsmTypeInterfaceType::getAlias(::llvm::raw_ostream &os) const {
+  os << "op_asm_type_interface_type";
+  return ::mlir::OpAsmDialectInterface::AliasResult::FinalAlias;
+}
diff --git a/mlir/test/mlir-runner/math-polynomial-approx.mlir b/mlir/test/mlir-runner/math-polynomial-approx.mlir
index 148ef25cead62..6ed03916f1e15 100644
--- a/mlir/test/mlir-runner/math-polynomial-approx.mlir
+++ b/mlir/test/mlir-runner/math-polynomial-approx.mlir
@@ -273,6 +273,77 @@ func.func @erf() {
   return
 }
 
+// -------------------------------------------------------------------------- //
+// Erfc.
+// -------------------------------------------------------------------------- //
+func.func @erfc_f32(%a : f32) {
+  %r = math.erfc %a : f32
+  vector.print %r : f32
+  return
+}
+
+func.func @erfc_4xf32(%a : vector<4xf32>) {
+  %r = math.erfc %a : vector<4xf32>
+  vector.print %r : vector<4xf32>
+  return
+}
+
+func.func @erfc() {
+  // CHECK: 1.00027
+  %val1 = arith.constant -2.431864e-4 : f32
+  call @erfc_f32(%val1) : (f32) -> ()
+
+  // CHECK: 0.257905
+  %val2 = arith.constant 0.79999 : f32
+  call @erfc_f32(%val2) : (f32) -> ()
+
+  // CHECK: 0.257899
+  %val3 = arith.constant 0.8 : f32
+  call @erfc_f32(%val3) : (f32) -> ()
+
+  // CHECK: 0.00467794
+  %val4 = arith.constant 1.99999 : f32
+  call @erfc_f32(%val4) : (f32) -> ()
+
+  // CHECK: 0.00467774
+  %val5 = arith.constant 2.0 : f32
+  call @erfc_f32(%val5) : (f32) -> ()
+
+  // CHECK: 1.13736e-07
+  %val6 = arith.constant 3.74999 : f32
+  call @erfc_f32(%val6) : (f32) -> ()
+
+  // CHECK: 1.13727e-07
+  %val7 = arith.constant 3.75 : f32
+  call @erfc_f32(%val7) : (f32) -> ()
+
+  // CHECK: 2
+  %negativeInf = arith.constant 0xff800000 : f32
+  call @erfc_f32(%negativeInf) : (f32) -> ()
+
+  // CHECK: 2, 2, 1.91376, 1.73145
+  %vecVals1 = arith.constant dense<[-3.4028235e+38, -4.54318, -1.2130899, -7.8234202e-01]> : vector<4xf32>
+  call @erfc_4xf32(%vecVals1) : (vector<4xf32>) -> ()
+
+  // CHECK: 1, 1, 1, 0.878681
+  %vecVals2 = arith.constant dense<[-1.1754944e-38, 0.0, 1.1754944e-38, 1.0793410e-01]> : vector<4xf32>
+  call @erfc_4xf32(%vecVals2) : (vector<4xf32>) -> ()
+
+  // CHECK: 0.0805235, 0.000931045, 6.40418e-08, 0
+  %vecVals3 = arith.constant dense<[1.23578, 2.34093, 3.82342, 3.4028235e+38]> : vector<4xf32>
+  call @erfc_4xf32(%vecVals3) : (vector<4xf32>) -> ()
+
+  // CHECK: 0
+  %inf = arith.constant 0x7f800000 : f32
+  call @erfc_f32(%inf) : (f32) -> ()
+
+  // CHECK: nan
+  %nan = arith.constant 0x7fc00000 : f32
+  call @erfc_f32(%nan) : (f32) -> ()
+
+  return
+}
+
 // -------------------------------------------------------------------------- //
 // Exp.
 // -------------------------------------------------------------------------- //
@@ -772,6 +843,7 @@ func.func @main() {
   call @log2(): () -> ()
   call @log1p(): () -> ()
   call @erf(): () -> ()
+  call @erfc(): () -> ()
   call @exp(): () -> ()
   call @expm1(): () -> ()
   call @sin(): () -> ()
diff --git a/mlir/utils/vim/syntax/mlir.vim b/mlir/utils/vim/syntax/mlir.vim
index 7989032eada88..070d81658ca3d 100644
--- a/mlir/utils/vim/syntax/mlir.vim
+++ b/mlir/utils/vim/syntax/mlir.vim
@@ -44,6 +44,7 @@ syn keyword mlirOps view
 
 " Math ops.
 syn match mlirOps /\<erf\>/
+syn match mlirOps /\<erfc\>/
 
 " Affine ops.
 syn match mlirOps /\<affine\.apply\>/
diff --git a/offload/libomptarget/OpenMP/Mapping.cpp b/offload/libomptarget/OpenMP/Mapping.cpp
index 4b78ed3360a26..14f5e7dc9d19f 100644
--- a/offload/libomptarget/OpenMP/Mapping.cpp
+++ b/offload/libomptarget/OpenMP/Mapping.cpp
@@ -141,48 +141,45 @@ LookupResult MappingInfoTy::lookupMapping(HDTTMapAccessorTy &HDTTMap,
   if (HDTTMap->empty())
     return LR;
 
+  // HDTTMap is std::set, ordered by HstPtrBegin.
+  // Upper is the first element whose HstPtrBegin > HP.
   auto Upper = HDTTMap->upper_bound(HP);
 
   if (Size == 0) {
-    // specification v5.1 Pointer Initialization for Device Data Environments
-    // upper_bound satisfies
-    // std::prev(upper)->HDTT.HstPtrBegin <= hp < upper->HDTT.HstPtrBegin
+    // HP satisfies
+    // std::prev(Upper)->HDTT.HstPtrBegin <= HP < Upper->HDTT.HstPtrBegin
     if (Upper != HDTTMap->begin()) {
       LR.TPR.setEntry(std::prev(Upper)->HDTT, OwnedTPR);
-      // the left side of extended address range is satisfied.
-      // hp >= LR.TPR.getEntry()->HstPtrBegin || hp >=
-      // LR.TPR.getEntry()->HstPtrBase
-      LR.Flags.IsContained = HP < LR.TPR.getEntry()->HstPtrEnd ||
-                             HP < LR.TPR.getEntry()->HstPtrBase;
+      // We know that HP >= LR.TPR.getEntry()->HstPtrBegin
+      LR.Flags.IsContained = HP < LR.TPR.getEntry()->HstPtrEnd;
     }
 
     if (!LR.Flags.IsContained && Upper != HDTTMap->end()) {
       LR.TPR.setEntry(Upper->HDTT, OwnedTPR);
-      // the right side of extended address range is satisfied.
-      // hp < LR.TPR.getEntry()->HstPtrEnd || hp < LR.TPR.getEntry()->HstPtrBase
+      // This is a special case: HP is not really contained in the mapped
+      // address range, but it's contained in the extended address range,
+      // which suffices to get the mapping of the base pointer.
+      // We know that HP < LR.TPR.getEntry()->HstPtrBegin
      LR.Flags.IsContained = HP >= LR.TPR.getEntry()->HstPtrBase;
    }
  } else {
-    // check the left bin
    if (Upper != HDTTMap->begin()) {
      LR.TPR.setEntry(std::prev(Upper)->HDTT, OwnedTPR);
-      // Is it contained?
-      LR.Flags.IsContained = HP >= LR.TPR.getEntry()->HstPtrBegin &&
-                             HP < LR.TPR.getEntry()->HstPtrEnd &&
+      // We know that HP >= LR.TPR.getEntry()->HstPtrBegin
+      LR.Flags.IsContained = HP < LR.TPR.getEntry()->HstPtrEnd &&
                             (HP + Size) <= LR.TPR.getEntry()->HstPtrEnd;
-      // Does it extend beyond the mapped region?
+      // Does it extend beyond the mapped address range?
      LR.Flags.ExtendsAfter = HP < LR.TPR.getEntry()->HstPtrEnd &&
                              (HP + Size) > LR.TPR.getEntry()->HstPtrEnd;
    }
 
-    // check the right bin
    if (!(LR.Flags.IsContained || LR.Flags.ExtendsAfter) &&
        Upper != HDTTMap->end()) {
      LR.TPR.setEntry(Upper->HDTT, OwnedTPR);
-      // Does it extend into an already mapped region?
-      LR.Flags.ExtendsBefore = HP < LR.TPR.getEntry()->HstPtrBegin &&
-                               (HP + Size) > LR.TPR.getEntry()->HstPtrBegin;
-      // Does it extend beyond the mapped region?
+      // Does it extend into an already mapped address range?
+      // We know that HP < LR.TPR.getEntry()->HstPtrBegin
+      LR.Flags.ExtendsBefore = (HP + Size) > LR.TPR.getEntry()->HstPtrBegin;
+      // Does it extend beyond the mapped address range?
       LR.Flags.ExtendsAfter = HP < LR.TPR.getEntry()->HstPtrEnd &&
                               (HP + Size) > LR.TPR.getEntry()->HstPtrEnd;
     }
diff --git a/offload/test/offloading/fortran/target-custom-mapper.f90 b/offload/test/offloading/fortran/target-custom-mapper.f90
new file mode 100644
index 0000000000000..9c527861c87b5
--- /dev/null
+++ b/offload/test/offloading/fortran/target-custom-mapper.f90
@@ -0,0 +1,53 @@
+! Offloading test checking that nested custom mappers for derived types are
+! honored when mapping data to the target device.
+! REQUIRES: flang, amdgpu
+
+! RUN: %libomptarget-compile-fortran-run-and-check-generic
+
+program test_openmp_mapper
+   implicit none
+   integer, parameter :: n = 1024
+   type :: mytype
+      integer :: data(n)
+   end type mytype
+
+   type :: mytype2
+      type(mytype) :: my_data
+   end type mytype2
+
+   ! Declare custom mappers for the derived type `mytype`
+   !$omp declare mapper(my_mapper1 : mytype :: t) map(to: t%data(1 : n))
+
+   ! Declare custom mappers for the derived type `mytype2`
+   !$omp declare mapper(my_mapper2 : mytype2 :: t) map(mapper(my_mapper1): t%my_data)
+
+   type(mytype2) :: obj
+   integer :: i, sum_host, sum_device
+
+   ! Initialize the host data
+   do i = 1, n
+      obj%my_data%data(i) = 1
+   end do
+
+   ! Compute the sum on the host for verification
+   sum_host = sum(obj%my_data%data)
+
+   ! Offload computation to the device using the named mapper `my_mapper2`
+   sum_device = 0
+   !$omp target map(tofrom: sum_device) map(mapper(my_mapper2) : obj)
+   do i = 1, n
+      sum_device = sum_device + obj%my_data%data(i)
+   end do
+   !$omp end target
+
+   ! Check results
+   print *, "Sum on host: ", sum_host
+   print *, "Sum on device: ", sum_device
+
+   if (sum_device == sum_host) then
+      print *, "Test passed!"
+   else
+      print *, "Test failed!"
+   end if
+end program test_openmp_mapper
+
+! CHECK: Test passed!
diff --git a/utils/bazel/configure.bzl b/utils/bazel/configure.bzl
index c5da28845eccf..fcc9fc7ecc483 100644
--- a/utils/bazel/configure.bzl
+++ b/utils/bazel/configure.bzl
@@ -172,10 +172,19 @@ def _llvm_configure_impl(repository_ctx):
     )
 
     # Create a starlark file with the requested LLVM targets.
-    targets = repository_ctx.attr.targets
+    llvm_targets = repository_ctx.attr.targets
     repository_ctx.file(
         "llvm/targets.bzl",
-        content = "llvm_targets = " + str(targets),
+        content = "llvm_targets = " + str(llvm_targets),
        executable = False,
    )
+
+    # Create a starlark file with the requested BOLT targets.
+    bolt_targets = ["AArch64", "X86", "RISCV"]  # Supported targets.
+    bolt_targets = [t for t in llvm_targets if t in bolt_targets]
+    repository_ctx.file(
+        "bolt/targets.bzl",
+        content = "bolt_targets = " + str(bolt_targets),
+        executable = False,
+    )
diff --git a/utils/bazel/llvm-project-overlay/bolt/BUILD.bazel b/utils/bazel/llvm-project-overlay/bolt/BUILD.bazel
index 187938783a550..a9a7cc59575a3 100644
--- a/utils/bazel/llvm-project-overlay/bolt/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/bolt/BUILD.bazel
@@ -3,6 +3,7 @@
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 
 load("@bazel_skylib//rules:expand_template.bzl", "expand_template")
+load(":targets.bzl", "bolt_targets")
 
 package(
     default_visibility = ["//visibility:public"],
@@ -16,6 +17,20 @@ genrule(
     cmd = "echo '#undef BOLT_REVISION' >> $@\n",
 )
 
+expand_template(
    name = "target_config_def_gen",
    out = "include/bolt/Core/TargetConfig.def",
    substitutions = {"@BOLT_ENUM_TARGETS@": "\n".join(
        ["BOLT_TARGET({})".format(target) for target in bolt_targets],
    )},
    template = "include/bolt/Core/TargetConfig.def.in",
)

cc_library(
    name = "TargetConfig",
    textual_hdrs = [":target_config_def_gen"],
)

 cc_binary(
     name = "llvm-bolt-heatmap",
     srcs = glob([
@@ -24,6 +39,7 @@ cc_binary(
     deps = [
         ":Profile",
         ":Rewrite",
+        ":TargetConfig",
         ":Utils",
         "//llvm:AllTargetsAsmParsers",
         "//llvm:AllTargetsDisassemblers",
@@ -54,6 +70,7 @@ cc_binary(
         ":Profile",
         ":Rewrite",
         ":RuntimeLibs",
+        ":TargetConfig",
         ":TargetAArch64",
         ":TargetX86",
         ":Utils",
diff --git a/utils/bazel/llvm-project-overlay/clang/BUILD.bazel b/utils/bazel/llvm-project-overlay/clang/BUILD.bazel
index 7fd7c8b438629..2aced96c112ef 100644
--- a/utils/bazel/llvm-project-overlay/clang/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/clang/BUILD.bazel
@@ -1048,6 +1048,7 @@ cc_library(
         ":frontend",
         ":lex",
         ":rewrite",
+        ":sema",
         ":serialization",
         "//llvm:Core",
         "//llvm:Support",
diff --git a/utils/bazel/llvm-project-overlay/llvm/config.bzl b/utils/bazel/llvm-project-overlay/llvm/config.bzl
index 6e703d22e7756..fa616bcb9a8c9 100644
--- a/utils/bazel/llvm-project-overlay/llvm/config.bzl
+++ b/utils/bazel/llvm-project-overlay/llvm/config.bzl
@@ -47,6 +47,7 @@ posix_defines = [
 linux_defines = posix_defines + [
     "_GNU_SOURCE",
+    "HAVE_GETAUXVAL=1",
     "HAVE_MALLINFO=1",
     "HAVE_SBRK=1",
     "HAVE_STRUCT_STAT_ST_MTIM_TV_NSEC=1",
diff --git a/utils/bazel/llvm-project-overlay/llvm/include/llvm/Config/config.h b/utils/bazel/llvm-project-overlay/llvm/include/llvm/Config/config.h
index 93695f8e26d27..3ef1d0c4b1651 100644
--- a/utils/bazel/llvm-project-overlay/llvm/include/llvm/Config/config.h
+++ b/utils/bazel/llvm-project-overlay/llvm/include/llvm/Config/config.h
@@ -296,7 +296,7 @@
 
 /* HAVE_PROC_PID_RUSAGE defined in Bazel */
 
-#define HAVE_GETAUXVAL 1
+/* HAVE_GETAUXVAL defined in Bazel */
 
 /* Directly provide definitions here behind platform preprocessor definitions.
 * The preprocessor conditions are sufficient to handle all of the configuration
diff --git a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
index 92aedac837197..05385ba491525 100644
--- a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
@@ -96,6 +96,14 @@ exports_files(glob(["include/**/*.td"]))
 gentbl_cc_library(
     name = "OpAsmInterfaceIncGen",
     tbl_outs = [
+        (
+            ["-gen-attr-interface-decls"],
+            "include/mlir/IR/OpAsmAttrInterface.h.inc",
+        ),
+        (
+            ["-gen-attr-interface-defs"],
+            "include/mlir/IR/OpAsmAttrInterface.cpp.inc",
+        ),
         (
             ["-gen-op-interface-decls"],
             "include/mlir/IR/OpAsmOpInterface.h.inc",
@@ -1729,11 +1737,17 @@ gentbl_cc_library(
     name = "EmitCOpsIncGen",
     tbl_outs = [
         (
-            ["-gen-dialect-decls"],
+            [
+                "-gen-dialect-decls",
+                "-dialect=emitc",
+            ],
             "include/mlir/Dialect/EmitC/IR/EmitCDialect.h.inc",
         ),
         (
-            ["-gen-dialect-defs"],
+            [
+                "-gen-dialect-defs",
+                "-dialect=emitc",
+            ],
             "include/mlir/Dialect/EmitC/IR/EmitCDialect.cpp.inc",
         ),
         (
@@ -7763,7 +7777,6 @@ td_library(
     name = "TensorOpsTdFiles",
     srcs = [
         "include/mlir/Dialect/Tensor/IR/TensorBase.td",
-        "include/mlir/Dialect/Tensor/IR/TensorInterfaces.td",
         "include/mlir/Dialect/Tensor/IR/TensorOps.td",
     ],
     includes = ["include"],
@@ -7813,23 +7826,6 @@ gentbl_cc_library(
     deps = [":TensorOpsTdFiles"],
 )
 
-gentbl_cc_library(
-    name = "TensorInterfacesIncGen",
-    tbl_outs = [
-        (
-            ["--gen-op-interface-decls"],
-            "include/mlir/Dialect/Tensor/IR/TensorInterfaces.h.inc",
-        ),
-        (
-            ["--gen-op-interface-defs"],
-            "include/mlir/Dialect/Tensor/IR/TensorInterfaces.cpp.inc",
-        ),
-    ],
-    tblgen = ":mlir-tblgen",
-    td_file = "include/mlir/Dialect/Tensor/IR/TensorInterfaces.td",
-    deps = [":TensorOpsTdFiles"],
-)
-
 cc_library(
     name = "TensorDialect",
     srcs = [
@@ -7859,13 +7855,13 @@ cc_library(
         ":InferIntRangeInterface",
         ":InferTypeOpInterface",
         ":InliningUtils",
+        ":LinalgInterfaces",
         ":LoopLikeInterface",
         ":ParallelCombiningOpInterface",
         ":ShapedOpInterfaces",
         ":SideEffectInterfaces",
         ":SubsetOpInterface",
         ":Support",
-        ":TensorInterfacesIncGen",
         ":TensorOpsIncGen",
         ":TilingInterface",
         ":TransformDialectInterfaces",
@@ -11206,6 +11202,23 @@ gentbl_cc_library(
     deps = [":LinalgOpsTdFiles"],
 )
 
+gentbl_cc_library(
+    name = "LinalgRelayoutOpsIncGen",
+    tbl_outs = [
+        (
+            ["-gen-op-decls"],
+            "include/mlir/Dialect/Linalg/IR/LinalgRelayoutOps.h.inc",
+        ),
+        (
+            ["-gen-op-defs"],
+            "include/mlir/Dialect/Linalg/IR/LinalgRelayoutOps.cpp.inc",
+        ),
+    ],
+    tblgen = ":mlir-tblgen",
+    td_file = "include/mlir/Dialect/Linalg/IR/LinalgRelayoutOps.td",
+    deps = [":LinalgOpsTdFiles"],
+)
+
 gentbl_cc_library(
     name = "LinalgEnumsIncGen",
     tbl_outs = [
@@ -11532,10 +11545,37 @@ cc_library(
     ],
 )
 
+cc_library(
+    name = "LinalgInterfaces",
+    # Note: LinalgInterfaces.cpp is part of LinalgDialect, not this target.
+    # This allows TensorDialect to use the header-only RelayoutOpInterface
+    # without an implicit dependency on the LinalgDialect.
+    hdrs = ["include/mlir/Dialect/Linalg/IR/LinalgInterfaces.h"],
+    includes = ["include"],
+    deps = [
+        ":DestinationStyleOpInterface",
+        ":DialectUtils",
+        ":IR",
+        ":InferTypeOpInterface",
+        ":LinalgInterfacesIncGen",
+        ":LinalgStructuredOpsIncGen",
+        ":Support",
+        ":ViewLikeInterface",
+    ],
+)
+
 cc_library(
     name = "LinalgDialect",
-    srcs = glob(["lib/Dialect/Linalg/IR/*.cpp"]),
-    hdrs = glob(["include/mlir/Dialect/Linalg/IR/*.h"]),
+    srcs = [
+        "lib/Dialect/Linalg/IR/LinalgDialect.cpp",
+        "lib/Dialect/Linalg/IR/LinalgInterfaces.cpp",
+        "lib/Dialect/Linalg/IR/LinalgOps.cpp",
+        "lib/Dialect/Linalg/IR/ValueBoundsOpInterfaceImpl.cpp",
+    ],
+    hdrs = [
+        "include/mlir/Dialect/Linalg/IR/Linalg.h",
+        "include/mlir/Dialect/Linalg/IR/ValueBoundsOpInterfaceImpl.h",
+    ],
     includes = ["include"],
     deps = [
         ":AffineDialect",
@@ -11554,9 +11594,11 @@ cc_library(
         ":InferTypeOpInterface",
         ":InliningUtils",
         ":LinalgEnumsIncGen",
+        ":LinalgInterfaces",
         ":LinalgInterfacesIncGen",
         ":LinalgNamedStructuredOpsYamlIncGen",
         ":LinalgOpsIncGen",
+        ":LinalgRelayoutOpsIncGen",
         ":LinalgStructuredOpsIncGen",
         ":MathDialect",
         ":MemRefDialect",
@@ -11568,6 +11610,7 @@ cc_library(
         ":SubsetOpInterface",
         ":Support",
         ":TensorDialect",
+        ":TensorUtils",
         ":TilingInterface",
         ":ValueBoundsOpInterface",
         ":ViewLikeInterface",
@@ -11599,6 +11642,7 @@ cc_library(
         ":IR",
         ":IndexDialect",
         ":LinalgDialect",
+        ":LinalgInterfaces",
         ":LinalgMatchOpsIncGen",
         ":LinalgTransformEnumsIncGen",
         ":LinalgTransformOpsIncGen",
@@ -11710,6 +11754,7 @@ cc_library(
         ":IR",
         ":IndexDialect",
         ":LinalgDialect",
+        ":LinalgInterfaces",
         ":LinalgPassIncGen",
         ":LinalgStructuredOpsIncGen",
         ":LinalgUtils",
diff --git a/utils/bazel/llvm-project-overlay/mlir/test/BUILD.bazel b/utils/bazel/llvm-project-overlay/mlir/test/BUILD.bazel
index 3e6114abfc078..9b005b206a101 100644
--- a/utils/bazel/llvm-project-overlay/mlir/test/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/mlir/test/BUILD.bazel
@@ -425,6 +425,7 @@ cc_library(
         "//mlir:LLVMDialect",
         "//mlir:LLVMIRToLLVMTranslation",
         "//mlir:LinalgDialect",
+        "//mlir:LinalgInterfaces",
        "//mlir:LoopLikeInterface",
        "//mlir:MemorySlotInterfaces",
        "//mlir:Pass",
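
A note on the rewritten lookupMapping in offload/libomptarget/OpenMP/Mapping.cpp: the new comments all lean on one std::set property, namely that upper_bound(HP) returns the first entry whose HstPtrBegin is strictly greater than HP, so the predecessor of Upper is the only entry that can contain HP. A minimal standalone sketch of that invariant, using a hypothetical Entry type and contains() helper rather than the real HDTT structures:

```cpp
// Sketch only: models the upper_bound-based containment check from
// lookupMapping on a simplified entry type. Entry and contains are
// hypothetical names, not part of the patch.
#include <cstddef>
#include <cstdint>
#include <set>

struct Entry {
  uintptr_t Begin, End; // host address range [Begin, End)
  bool operator<(const Entry &O) const { return Begin < O.Begin; }
};

bool contains(const std::set<Entry> &Map, uintptr_t HP, std::size_t Size) {
  // Upper is the first entry with Begin > HP, so by the set ordering
  // std::prev(Upper)->Begin <= HP < Upper->Begin.
  auto Upper = Map.upper_bound(Entry{HP, 0});
  if (Upper == Map.begin())
    return false; // no entry starts at or below HP
  const Entry &E = *std::prev(Upper);
  // E.Begin <= HP holds by construction, which is what the "We know that
  // HP >= ...->HstPtrBegin" comments record; only the end bound is checked.
  return HP < E.End && HP + Size <= E.End;
}
```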
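The erfc constants asserted in math-polynomial-approx.mlir can be cross-checked against libm, since math.erfc is expected to agree with the C library's erfc to within the printed precision. A quick standalone check, not part of the patch:

```cpp
// Prints reference values for a few of the FileCheck constants above,
// e.g. erfc(0.8) ~ 0.257899 and erfc(2) ~ 0.00467774.
#include <cmath>
#include <cstdio>

int main() {
  const float Inputs[] = {-2.431864e-4f, 0.8f, 2.0f, 3.75f};
  for (float X : Inputs)
    std::printf("erfc(%g) = %g\n", X, std::erfc(X));
  return 0;
}
```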
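To make the BOLT target plumbing in configure.bzl and the bolt overlay BUILD.bazel concrete: assuming a checkout configured with llvm_targets = ["AArch64", "NVPTX", "X86"], the list comprehension keeps only the BOLT-supported subset, bolt/targets.bzl is written with bolt_targets = ["AArch64", "X86"], and expand_template substitutes @BOLT_ENUM_TARGETS@ so the body of the generated TargetConfig.def would read (illustrative output for that assumed configuration, not a checked-in file):

```c
BOLT_TARGET(AArch64)
BOLT_TARGET(X86)
```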