diff --git a/llvm/include/llvm/InitializePasses.h b/llvm/include/llvm/InitializePasses.h index 42610d505c2bd..07656c0155d87 100644 --- a/llvm/include/llvm/InitializePasses.h +++ b/llvm/include/llvm/InitializePasses.h @@ -286,6 +286,7 @@ void initializeScalarizerLegacyPassPass(PassRegistry &); void initializeScavengerTestPass(PassRegistry &); void initializeScopedNoAliasAAWrapperPassPass(PassRegistry &); void initializeSeparateConstOffsetFromGEPLegacyPassPass(PassRegistry &); +void initializeSinkGEPConstOffsetLegacyPassPass(PassRegistry &); void initializeShadowStackGCLoweringPass(PassRegistry &); void initializeShrinkWrapLegacyPass(PassRegistry &); void initializeSingleLoopExtractorPass(PassRegistry &); diff --git a/llvm/include/llvm/LinkAllPasses.h b/llvm/include/llvm/LinkAllPasses.h index 5965be676ea69..8c12aef44f1b2 100644 --- a/llvm/include/llvm/LinkAllPasses.h +++ b/llvm/include/llvm/LinkAllPasses.h @@ -134,6 +134,7 @@ struct ForcePassLinking { (void)llvm::createPartiallyInlineLibCallsPass(); (void)llvm::createScalarizerPass(); (void)llvm::createSeparateConstOffsetFromGEPPass(); + (void)llvm::createSinkGEPConstOffsetPass(); (void)llvm::createSpeculativeExecutionPass(); (void)llvm::createSpeculativeExecutionIfHasBranchDivergencePass(); (void)llvm::createStraightLineStrengthReducePass(); diff --git a/llvm/include/llvm/Transforms/Scalar.h b/llvm/include/llvm/Transforms/Scalar.h index fc772a7639c47..389324c25cdaf 100644 --- a/llvm/include/llvm/Transforms/Scalar.h +++ b/llvm/include/llvm/Transforms/Scalar.h @@ -164,6 +164,13 @@ FunctionPass *createPartiallyInlineLibCallsPass(); // FunctionPass *createSeparateConstOffsetFromGEPPass(bool LowerGEP = false); +//===----------------------------------------------------------------------===// +// +// SinkGEPConstOffset - Sink constant offsets down the GEP chain to the tail for +// reduction of register usage. +// +FunctionPass *createSinkGEPConstOffsetPass(); + //===----------------------------------------------------------------------===// // // SpeculativeExecution - Aggressively hoist instructions to enable diff --git a/llvm/include/llvm/Transforms/Scalar/SinkGEPConstOffset.h b/llvm/include/llvm/Transforms/Scalar/SinkGEPConstOffset.h new file mode 100644 index 0000000000000..43f64d818dc22 --- /dev/null +++ b/llvm/include/llvm/Transforms/Scalar/SinkGEPConstOffset.h @@ -0,0 +1,27 @@ +//===- SinkGEPConstOffset.h -----------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_SINKGEPCONSTOFFSET_H
+#define LLVM_TRANSFORMS_SCALAR_SINKGEPCONSTOFFSET_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class SinkGEPConstOffsetPass
+    : public PassInfoMixin<SinkGEPConstOffsetPass> {
+public:
+  SinkGEPConstOffsetPass() {}
+  void printPipeline(raw_ostream &OS,
+                     function_ref<StringRef(StringRef)> MapClassName2PassName);
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_SINKGEPCONSTOFFSET_H
diff --git a/llvm/lib/Passes/PassBuilder.cpp b/llvm/lib/Passes/PassBuilder.cpp
index 56e91703cb019..08faa7f0cb14c 100644
--- a/llvm/lib/Passes/PassBuilder.cpp
+++ b/llvm/lib/Passes/PassBuilder.cpp
@@ -329,6 +329,7 @@
 #include "llvm/Transforms/Scalar/ScalarizeMaskedMemIntrin.h"
 #include "llvm/Transforms/Scalar/Scalarizer.h"
 #include "llvm/Transforms/Scalar/SeparateConstOffsetFromGEP.h"
+#include "llvm/Transforms/Scalar/SinkGEPConstOffset.h"
 #include "llvm/Transforms/Scalar/SimpleLoopUnswitch.h"
 #include "llvm/Transforms/Scalar/SimplifyCFG.h"
 #include "llvm/Transforms/Scalar/Sink.h"
diff --git a/llvm/lib/Passes/PassRegistry.def b/llvm/lib/Passes/PassRegistry.def
index 94dabe290213d..5cfde2380705b 100644
--- a/llvm/lib/Passes/PassRegistry.def
+++ b/llvm/lib/Passes/PassRegistry.def
@@ -474,6 +474,8 @@ FUNCTION_PASS("sccp", SCCPPass())
 FUNCTION_PASS("select-optimize", SelectOptimizePass(TM))
 FUNCTION_PASS("separate-const-offset-from-gep",
               SeparateConstOffsetFromGEPPass())
+FUNCTION_PASS("sink-gep-const-offset",
+              SinkGEPConstOffsetPass())
 FUNCTION_PASS("sink", SinkingPass())
 FUNCTION_PASS("sjlj-eh-prepare", SjLjEHPreparePass(TM))
 FUNCTION_PASS("slp-vectorizer", SLPVectorizerPass())
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
index ccb251b730f16..aa8895d730f23 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -104,6 +104,7 @@
 #include "llvm/Transforms/Scalar/LoopDataPrefetch.h"
 #include "llvm/Transforms/Scalar/NaryReassociate.h"
 #include "llvm/Transforms/Scalar/SeparateConstOffsetFromGEP.h"
+#include "llvm/Transforms/Scalar/SinkGEPConstOffset.h"
 #include "llvm/Transforms/Scalar/Sink.h"
 #include "llvm/Transforms/Scalar/StraightLineStrengthReduce.h"
 #include "llvm/Transforms/Scalar/StructurizeCFG.h"
@@ -1210,6 +1211,7 @@ void AMDGPUPassConfig::addStraightLineScalarOptimizationPasses() {
   if (isPassEnabled(EnableLoopPrefetch, CodeGenOptLevel::Aggressive))
     addPass(createLoopDataPrefetchPass());
   addPass(createSeparateConstOffsetFromGEPPass());
+  addPass(createSinkGEPConstOffsetPass());
   // ReassociateGEPs exposes more opportunities for SLSR. See
   // the example in reassociate-geps-and-slsr.ll.
   addPass(createStraightLineStrengthReducePass());
@@ -2288,6 +2290,8 @@ void AMDGPUCodeGenPassBuilder::addStraightLineScalarOptimizationPasses(
 
   addPass(SeparateConstOffsetFromGEPPass());
 
+  addPass(SinkGEPConstOffsetPass());
+
   // ReassociateGEPs exposes more opportunities for SLSR. See
   // the example in reassociate-geps-and-slsr.ll.
   addPass(StraightLineStrengthReducePass());
diff --git a/llvm/lib/Transforms/Scalar/CMakeLists.txt b/llvm/lib/Transforms/Scalar/CMakeLists.txt
index 84a5b02043d01..5431e91eacea8 100644
--- a/llvm/lib/Transforms/Scalar/CMakeLists.txt
+++ b/llvm/lib/Transforms/Scalar/CMakeLists.txt
@@ -71,6 +71,7 @@ add_llvm_component_library(LLVMScalarOpts
   Scalarizer.cpp
   ScalarizeMaskedMemIntrin.cpp
   SeparateConstOffsetFromGEP.cpp
+  SinkGEPConstOffset.cpp
   SimpleLoopUnswitch.cpp
   SimplifyCFGPass.cpp
   Sink.cpp
diff --git a/llvm/lib/Transforms/Scalar/Scalar.cpp b/llvm/lib/Transforms/Scalar/Scalar.cpp
index c7e4a3e824700..5e2d1132097ba 100644
--- a/llvm/lib/Transforms/Scalar/Scalar.cpp
+++ b/llvm/lib/Transforms/Scalar/Scalar.cpp
@@ -45,6 +45,7 @@ void llvm::initializeScalarOpts(PassRegistry &Registry) {
   initializeSinkingLegacyPassPass(Registry);
   initializeTailCallElimPass(Registry);
   initializeSeparateConstOffsetFromGEPLegacyPassPass(Registry);
+  initializeSinkGEPConstOffsetLegacyPassPass(Registry);
   initializeSpeculativeExecutionLegacyPassPass(Registry);
   initializeStraightLineStrengthReduceLegacyPassPass(Registry);
   initializePlaceBackedgeSafepointsLegacyPassPass(Registry);
diff --git a/llvm/lib/Transforms/Scalar/SinkGEPConstOffset.cpp b/llvm/lib/Transforms/Scalar/SinkGEPConstOffset.cpp
new file mode 100644
index 0000000000000..2790e2f56445f
--- /dev/null
+++ b/llvm/lib/Transforms/Scalar/SinkGEPConstOffset.cpp
@@ -0,0 +1,260 @@
+//===- SinkGEPConstOffset.cpp -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Transforms/Scalar/SinkGEPConstOffset.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DepthFirstIterator.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/MemoryBuiltins.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/IR/PatternMatch.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/User.h"
+#include "llvm/IR/Value.h"
+#include "llvm/InitializePasses.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/Transforms/Utils/Local.h"
+#include <cassert>
+#include <cstdint>
+#include <string>
+
+using namespace llvm;
+using namespace llvm::PatternMatch;
+
+static cl::opt<bool> DisableSinkGEPConstOffset(
+    "disable-sink-gep-const-offset", cl::init(false),
+    cl::desc("Do not sink the constant offset from a GEP instruction"),
+    cl::Hidden);
+
+namespace {
+
+/// A pass that tries to sink constant offsets in a GEP chain to the tail.
+/// It is a FunctionPass because searching for the constant offset may inspect
+/// other basic blocks.
+class SinkGEPConstOffsetLegacyPass : public FunctionPass {
+public:
+  static char ID;
+
+  SinkGEPConstOffsetLegacyPass() : FunctionPass(ID) {
+    initializeSinkGEPConstOffsetLegacyPassPass(
+        *PassRegistry::getPassRegistry());
+  }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.setPreservesCFG();
+  }
+
+  bool runOnFunction(Function &F) override;
+};
+
+/// A pass that tries to sink constant offsets in a GEP chain to the tail.
+/// It is a FunctionPass because searching for the constant offset may inspect
+/// other basic blocks.
+class SinkGEPConstOffset {
+public:
+  SinkGEPConstOffset() {}
+
+  bool run(Function &F);
+
+private:
+  /// Sink constant offsets in a GEP chain to the tail. For example,
+  ///   %gep0 = getelementptr half, ptr addrspace(3) %ptr, i32 512
+  ///   %gep1 = getelementptr half, ptr addrspace(3) %gep0, i32 %ofst0
+  ///   %gep2 = getelementptr half, ptr addrspace(3) %gep1, i32 %ofst1
+  ///   %data = load half, ptr addrspace(3) %gep2, align 2
+  /// ==>
+  ///   %gep0 = getelementptr half, ptr addrspace(3) %ptr, i32 %ofst0
+  ///   %gep1 = getelementptr half, ptr addrspace(3) %gep0, i32 %ofst1
+  ///   %gep2 = getelementptr half, ptr addrspace(3) %gep1, i32 512
+  ///   %data = load half, ptr addrspace(3) %gep2, align 2
+  ///
+  /// Return true if Ptr is a candidate for the upper GEP in the recursion.
+  bool sinkGEPConstantOffset(Value *Ptr, bool &Changed);
+
+  const DataLayout *DL = nullptr;
+};
+
+} // end anonymous namespace
+
+char SinkGEPConstOffsetLegacyPass::ID = 0;
+
+INITIALIZE_PASS_BEGIN(
+    SinkGEPConstOffsetLegacyPass, "sink-gep-const-offset",
+    "Sink const offsets down the GEP chain to the tail for reduction of "
+    "register usage", false, false)
+INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
+INITIALIZE_PASS_END(
+    SinkGEPConstOffsetLegacyPass, "sink-gep-const-offset",
+    "Sink const offsets down the GEP chain to the tail for reduction of "
+    "register usage", false, false)
+
+FunctionPass *llvm::createSinkGEPConstOffsetPass() {
+  return new SinkGEPConstOffsetLegacyPass();
+}
+
+bool SinkGEPConstOffsetLegacyPass::runOnFunction(Function &F) {
+  if (skipFunction(F))
+    return false;
+
+  SinkGEPConstOffset Impl;
+  return Impl.run(F);
+}
+
+bool SinkGEPConstOffset::run(Function &F) {
+  if (DisableSinkGEPConstOffset)
+    return false;
+
+  DL = &F.getDataLayout();
+
+  bool Changed = false;
+  for (BasicBlock &B : F)
+    for (Instruction &I : llvm::make_early_inc_range(B))
+      if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I))
+        sinkGEPConstantOffset(GEP, Changed);
+
+  return Changed;
+}
+
+bool SinkGEPConstOffset::sinkGEPConstantOffset(Value *Ptr, bool &Changed) {
+  // The purpose of this function is to sink the constant offsets in the GEP
+  // chain to the tail of the chain.
+  // The algorithm is recursive: it first walks from the tail of the chain up
+  // to the head (a bottom-up DFS along the pointer operands), and while the
+  // recursion unwinds it moves each constant offset one GEP at a time toward
+  // the tail, so the constants end up in the last GEP of the chain.
+  // A simple example is given:
+  /// %gep0 = getelementptr half, ptr addrspace(3) %ptr, i32 512
+  /// %gep1 = getelementptr half, ptr addrspace(3) %gep0, i32 %ofst0
+  /// %gep2 = getelementptr half, ptr addrspace(3) %gep1, i32 %ofst1
+  /// %data = load half, ptr addrspace(3) %gep2, align 2
+  /// ==>
+  /// %gep0 = getelementptr half, ptr addrspace(3) %ptr, i32 %ofst0
+  /// %gep1 = getelementptr half, ptr addrspace(3) %gep0, i32 %ofst1
+  /// %gep2 = getelementptr half, ptr addrspace(3) %gep1, i32 512
+  /// %data = load half, ptr addrspace(3) %gep2, align 2
+  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
+  if (!GEP)
+    return false;
+
+  if (!GEP->getParent())
+    return false;
+
+  bool BaseResult = sinkGEPConstantOffset(GEP->getPointerOperand(), Changed);
+
+  if (GEP->getNumIndices() != 1)
+    return false;
+
+  ConstantInt *C = nullptr;
+  Value *Idx = GEP->getOperand(1);
+  bool MatchConstant = match(Idx, m_ConstantInt(C));
+
+  if (!BaseResult)
+    return MatchConstant;
+
+  Type *ResTy = GEP->getResultElementType();
+  GetElementPtrInst *BaseGEP =
+      cast<GetElementPtrInst>(GEP->getPointerOperand());
+  Value *BaseIdx = BaseGEP->getOperand(1);
+  Type *BaseResTy = BaseGEP->getResultElementType();
+
+  if (MatchConstant) {
+    // %gep0 = getelementptr half, ptr addrspace(3) %ptr, i32 8
+    // %gep1 = getelementptr half, ptr addrspace(3) %gep0, i32 4
+    // as:
+    // %gep1 = getelementptr half, ptr addrspace(3) %ptr, i32 12
+    Type *NewResTy = nullptr;
+    int64_t NewIdxValue = 0;
+    if (ResTy == BaseResTy) {
+      NewResTy = ResTy;
+      NewIdxValue = cast<ConstantInt>(BaseIdx)->getSExtValue() +
+                    cast<ConstantInt>(Idx)->getSExtValue();
+    } else {
+      NewResTy = Type::getInt8Ty(GEP->getContext());
+      NewIdxValue = (cast<ConstantInt>(BaseIdx)->getSExtValue() *
+                     DL->getTypeAllocSize(BaseResTy)) +
+                    (cast<ConstantInt>(Idx)->getSExtValue() *
+                     DL->getTypeAllocSize(ResTy));
+    }
+    assert(NewResTy);
+    Type *NewIdxType = (Idx->getType()->getPrimitiveSizeInBits() >
+                        BaseIdx->getType()->getPrimitiveSizeInBits())
+                           ? Idx->getType()
+                           : BaseIdx->getType();
+    Constant *NewIdx = ConstantInt::get(NewIdxType, NewIdxValue);
+    auto *NewGEP = GetElementPtrInst::Create(
+        NewResTy, BaseGEP->getPointerOperand(), NewIdx);
+    NewGEP->setIsInBounds(GEP->isInBounds());
+    NewGEP->insertBefore(GEP->getIterator());
+    NewGEP->takeName(GEP);
+
+    GEP->replaceAllUsesWith(NewGEP);
+    RecursivelyDeleteTriviallyDeadInstructions(GEP);
+
+    Changed = true;
+    return true;
+  }
+
+  // %gep0 = getelementptr half, ptr addrspace(3) %ptr, i32 8
+  // %gep1 = getelementptr half, ptr addrspace(3) %gep0, i32 %idx
+  // as:
+  // %gepx0 = getelementptr half, ptr addrspace(3) %ptr, i32 %idx
+  // %gepx1 = getelementptr half, ptr addrspace(3) %gepx0, i32 8
+  auto *GEPX0 =
+      GetElementPtrInst::Create(ResTy, BaseGEP->getPointerOperand(), Idx);
+  GEPX0->setIsInBounds(BaseGEP->isInBounds());
+  GEPX0->insertBefore(GEP->getIterator());
+  auto *GEPX1 = GetElementPtrInst::Create(BaseResTy, GEPX0, BaseIdx);
+  GEPX1->setIsInBounds(GEP->isInBounds());
+  GEPX1->insertBefore(GEP->getIterator());
+  GEPX1->takeName(GEP);
+
+  GEP->replaceAllUsesWith(GEPX1);
+  RecursivelyDeleteTriviallyDeadInstructions(GEP);
+
+  Changed = true;
+  return true;
+}
+
+void SinkGEPConstOffsetPass::printPipeline(
+    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
+  static_cast<PassInfoMixin<SinkGEPConstOffsetPass> *>(this)
+      ->printPipeline(OS, MapClassName2PassName);
+}
+
+PreservedAnalyses
+SinkGEPConstOffsetPass::run(Function &F, FunctionAnalysisManager &AM) {
+  SinkGEPConstOffset Impl;
+  if (!Impl.run(F))
+    return PreservedAnalyses::all();
+
+  PreservedAnalyses PA;
+  PA.preserveSet<CFGAnalyses>();
+  return PA;
+}
diff --git a/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll b/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll
index 29736b62f2c00..9c18b76f7d972 100644
--- a/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll
+++ b/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll
@@ -487,6 +487,7 @@
 ; GCN-O1-OPTS-NEXT: Scalar Evolution Analysis
 ; GCN-O1-OPTS-NEXT: Loop Data Prefetch
 ; GCN-O1-OPTS-NEXT: Split GEPs to a variadic base and a constant offset for better CSE
+; GCN-O1-OPTS-NEXT: Sink const offsets down the GEP chain to the tail for reduction of register usage
 ; GCN-O1-OPTS-NEXT: Scalar Evolution Analysis
 ; GCN-O1-OPTS-NEXT: Straight line strength reduction
 ; GCN-O1-OPTS-NEXT: Early CSE
@@ -794,6 +795,7 @@
 ; GCN-O2-NEXT: Natural Loop Information
 ; GCN-O2-NEXT: AMDGPU Promote Alloca
 ; GCN-O2-NEXT: Split GEPs to a variadic base and a constant offset for better CSE
+; GCN-O2-NEXT: Sink const offsets down the GEP chain to the tail for reduction of register usage
 ; GCN-O2-NEXT: Scalar Evolution Analysis
 ; GCN-O2-NEXT: Straight line strength reduction
 ; GCN-O2-NEXT: Early CSE
@@ -1111,6 +1113,7 @@
 ; GCN-O3-NEXT: Natural Loop Information
 ; GCN-O3-NEXT: AMDGPU Promote Alloca
 ; GCN-O3-NEXT: Split GEPs to a variadic base and a constant offset for better CSE
+; GCN-O3-NEXT: Sink const offsets down the GEP chain to the tail for reduction of register usage
 ; GCN-O3-NEXT: Scalar Evolution Analysis
 ; GCN-O3-NEXT: Straight line strength reduction
 ; GCN-O3-NEXT: Basic Alias Analysis (stateless AA impl)
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.iglp.opt.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.iglp.opt.ll
index 565ad295ebbb3..4f5d93d767a7a 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.iglp.opt.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.iglp.opt.ll
@@ -21,15 +21,15 @@ define amdgpu_kernel void @test_iglp_opt_mfma_gemm(ptr addrspace(3) noalias %in,
 ; GCN-NEXT: ; iglp_opt mask(0x00000000)
 ; GCN-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN-NEXT: v_add_u32_e32 v1, s0, v0
-;
GCN-NEXT: v_add_u32_e32 v2, 0x6000, v1 -; GCN-NEXT: ds_read_b128 a[28:31], v2 offset:57456 -; GCN-NEXT: ds_read_b128 a[24:27], v2 offset:57440 -; GCN-NEXT: ds_read_b128 a[20:23], v2 offset:57424 -; GCN-NEXT: ds_read_b128 a[16:19], v2 offset:57408 -; GCN-NEXT: ds_read_b128 a[0:3], v2 offset:57344 -; GCN-NEXT: ds_read_b128 a[4:7], v2 offset:57360 -; GCN-NEXT: ds_read_b128 a[8:11], v2 offset:57376 -; GCN-NEXT: ds_read_b128 a[12:15], v2 offset:57392 +; GCN-NEXT: v_add_u32_e32 v2, 0x14000, v1 +; GCN-NEXT: ds_read_b128 a[28:31], v2 offset:112 +; GCN-NEXT: ds_read_b128 a[24:27], v2 offset:96 +; GCN-NEXT: ds_read_b128 a[20:23], v2 offset:80 +; GCN-NEXT: ds_read_b128 a[16:19], v2 offset:64 +; GCN-NEXT: ds_read_b128 a[0:3], v2 +; GCN-NEXT: ds_read_b128 a[4:7], v2 offset:16 +; GCN-NEXT: ds_read_b128 a[8:11], v2 offset:32 +; GCN-NEXT: ds_read_b128 a[12:15], v2 offset:48 ; GCN-NEXT: v_mov_b32_e32 v2, 1.0 ; GCN-NEXT: ds_read_b128 a[60:63], v1 offset:49264 ; GCN-NEXT: ds_read_b128 a[56:59], v1 offset:49248 @@ -199,17 +199,17 @@ define amdgpu_kernel void @test_iglp_opt_rev_mfma_gemm(ptr addrspace(3) noalias ; GCN-NEXT: ds_read_b128 a[72:75], v1 offset:49184 ; GCN-NEXT: ds_read_b128 a[68:71], v1 offset:49168 ; GCN-NEXT: ds_read_b128 a[64:67], v1 offset:49152 -; GCN-NEXT: v_add_u32_e32 v1, 0x6000, v1 +; GCN-NEXT: v_add_u32_e32 v1, 0x14000, v1 ; GCN-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NEXT: v_mfma_f32_32x32x1f32 a[64:95], v2, v3, a[64:95] -; GCN-NEXT: ds_read_b128 a[60:63], v1 offset:57456 -; GCN-NEXT: ds_read_b128 a[56:59], v1 offset:57440 -; GCN-NEXT: ds_read_b128 a[52:55], v1 offset:57424 -; GCN-NEXT: ds_read_b128 a[48:51], v1 offset:57408 -; GCN-NEXT: ds_read_b128 a[32:35], v1 offset:57344 -; GCN-NEXT: ds_read_b128 a[36:39], v1 offset:57360 -; GCN-NEXT: ds_read_b128 a[40:43], v1 offset:57376 -; GCN-NEXT: ds_read_b128 a[44:47], v1 offset:57392 +; GCN-NEXT: ds_read_b128 a[60:63], v1 offset:112 +; GCN-NEXT: ds_read_b128 a[56:59], v1 offset:96 +; GCN-NEXT: ds_read_b128 a[52:55], v1 offset:80 +; GCN-NEXT: ds_read_b128 a[48:51], v1 offset:64 +; GCN-NEXT: ds_read_b128 a[32:35], v1 +; GCN-NEXT: ds_read_b128 a[36:39], v1 offset:16 +; GCN-NEXT: ds_read_b128 a[40:43], v1 offset:32 +; GCN-NEXT: ds_read_b128 a[44:47], v1 offset:48 ; GCN-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NEXT: v_mfma_f32_32x32x1f32 a[32:63], v2, v3, a[32:63] ; GCN-NEXT: ds_write_b128 v0, a[28:31] offset:112 diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sched.group.barrier.iterative.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sched.group.barrier.iterative.ll index 371b4f070094d..c80c42ce73efb 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sched.group.barrier.iterative.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sched.group.barrier.iterative.ll @@ -25,7 +25,7 @@ define amdgpu_kernel void @test_sched_group_barrier_pipeline_MFMA_interleave(ptr ; GCN-MINREG-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v2, v1, a[0:31] ; GCN-MINREG-NEXT: v_add_u32_e32 v5, s1, v0 ; GCN-MINREG-NEXT: v_mov_b32_e32 v0, s1 -; GCN-MINREG-NEXT: v_add_u32_e32 v3, 0x6000, v4 +; GCN-MINREG-NEXT: v_add_u32_e32 v3, 0x14000, v4 ; GCN-MINREG-NEXT: ; sched_group_barrier mask(0x00000100) size(8) SyncID(0) ; GCN-MINREG-NEXT: ; sched_group_barrier mask(0x00000008) size(1) SyncID(0) ; GCN-MINREG-NEXT: s_nop 7 @@ -110,14 +110,14 @@ define amdgpu_kernel void @test_sched_group_barrier_pipeline_MFMA_interleave(ptr ; GCN-MINREG-NEXT: ds_write_b128 v0, a[12:15] offset:24624 ; GCN-MINREG-NEXT: ds_write_b128 v0, a[0:3] offset:24576 ; GCN-MINREG-NEXT: ds_write_b128 v0, a[4:7] offset:24592 -; 
GCN-MINREG-NEXT: ds_read_b128 a[28:31], v3 offset:57456 -; GCN-MINREG-NEXT: ds_read_b128 a[24:27], v3 offset:57440 -; GCN-MINREG-NEXT: ds_read_b128 a[20:23], v3 offset:57424 -; GCN-MINREG-NEXT: ds_read_b128 a[16:19], v3 offset:57408 -; GCN-MINREG-NEXT: ds_read_b128 a[0:3], v3 offset:57344 -; GCN-MINREG-NEXT: ds_read_b128 a[4:7], v3 offset:57360 -; GCN-MINREG-NEXT: ds_read_b128 a[8:11], v3 offset:57376 -; GCN-MINREG-NEXT: ds_read_b128 a[12:15], v3 offset:57392 +; GCN-MINREG-NEXT: ds_read_b128 a[28:31], v3 offset:112 +; GCN-MINREG-NEXT: ds_read_b128 a[24:27], v3 offset:96 +; GCN-MINREG-NEXT: ds_read_b128 a[20:23], v3 offset:80 +; GCN-MINREG-NEXT: ds_read_b128 a[16:19], v3 offset:64 +; GCN-MINREG-NEXT: ds_read_b128 a[0:3], v3 +; GCN-MINREG-NEXT: ds_read_b128 a[4:7], v3 offset:16 +; GCN-MINREG-NEXT: ds_read_b128 a[8:11], v3 offset:32 +; GCN-MINREG-NEXT: ds_read_b128 a[12:15], v3 offset:48 ; GCN-MINREG-NEXT: s_waitcnt lgkmcnt(0) ; GCN-MINREG-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v2, v1, a[0:31] ; GCN-MINREG-NEXT: ; sched_group_barrier mask(0x00000200) size(8) SyncID(0) @@ -229,7 +229,7 @@ define amdgpu_kernel void @test_sched_group_barrier_pipeline_MFMA_interleave(ptr ; GCN-MAXOCC-NEXT: ds_read_b128 a[0:3], v0 offset:49152 ; GCN-MAXOCC-NEXT: s_waitcnt lgkmcnt(0) ; GCN-MAXOCC-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v2, v3, a[0:31] -; GCN-MAXOCC-NEXT: v_add_u32_e32 v0, 0x6000, v0 +; GCN-MAXOCC-NEXT: v_add_u32_e32 v0, 0x14000, v0 ; GCN-MAXOCC-NEXT: ; sched_group_barrier mask(0x00000200) size(8) SyncID(0) ; GCN-MAXOCC-NEXT: ; sched_group_barrier mask(0x00000100) size(8) SyncID(0) ; GCN-MAXOCC-NEXT: ; sched_group_barrier mask(0x00000008) size(1) SyncID(0) @@ -244,14 +244,14 @@ define amdgpu_kernel void @test_sched_group_barrier_pipeline_MFMA_interleave(ptr ; GCN-MAXOCC-NEXT: ds_write_b128 v1, a[12:15] offset:24624 ; GCN-MAXOCC-NEXT: ds_write_b128 v1, a[0:3] offset:24576 ; GCN-MAXOCC-NEXT: ds_write_b128 v1, a[4:7] offset:24592 -; GCN-MAXOCC-NEXT: ds_read_b128 a[28:31], v0 offset:57456 -; GCN-MAXOCC-NEXT: ds_read_b128 a[24:27], v0 offset:57440 -; GCN-MAXOCC-NEXT: ds_read_b128 a[20:23], v0 offset:57424 -; GCN-MAXOCC-NEXT: ds_read_b128 a[16:19], v0 offset:57408 -; GCN-MAXOCC-NEXT: ds_read_b128 a[0:3], v0 offset:57344 -; GCN-MAXOCC-NEXT: ds_read_b128 a[4:7], v0 offset:57360 -; GCN-MAXOCC-NEXT: ds_read_b128 a[8:11], v0 offset:57376 -; GCN-MAXOCC-NEXT: ds_read_b128 a[12:15], v0 offset:57392 +; GCN-MAXOCC-NEXT: ds_read_b128 a[28:31], v0 offset:112 +; GCN-MAXOCC-NEXT: ds_read_b128 a[24:27], v0 offset:96 +; GCN-MAXOCC-NEXT: ds_read_b128 a[20:23], v0 offset:80 +; GCN-MAXOCC-NEXT: ds_read_b128 a[16:19], v0 offset:64 +; GCN-MAXOCC-NEXT: ds_read_b128 a[0:3], v0 +; GCN-MAXOCC-NEXT: ds_read_b128 a[4:7], v0 offset:16 +; GCN-MAXOCC-NEXT: ds_read_b128 a[8:11], v0 offset:32 +; GCN-MAXOCC-NEXT: ds_read_b128 a[12:15], v0 offset:48 ; GCN-MAXOCC-NEXT: s_waitcnt lgkmcnt(0) ; GCN-MAXOCC-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v2, v3, a[0:31] ; GCN-MAXOCC-NEXT: ; sched_group_barrier mask(0x00000200) size(8) SyncID(0) @@ -357,26 +357,26 @@ define amdgpu_kernel void @test_sched_group_barrier_pipeline_MFMA_interleave(ptr ; GCN-ILP-NEXT: ds_read_b128 a[24:27], v3 offset:49248 ; GCN-ILP-NEXT: s_waitcnt lgkmcnt(0) ; GCN-ILP-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v1, v2, a[0:31] -; GCN-ILP-NEXT: v_add_u32_e32 v3, 0x6000, v3 +; GCN-ILP-NEXT: v_add_u32_e32 v3, 0x14000, v3 ; GCN-ILP-NEXT: s_nop 7 ; GCN-ILP-NEXT: s_nop 7 ; GCN-ILP-NEXT: s_nop 1 ; GCN-ILP-NEXT: ds_write_b128 v0, a[4:7] offset:24592 -; GCN-ILP-NEXT: ds_read_b128 a[4:7], v3 
offset:57360 +; GCN-ILP-NEXT: ds_read_b128 a[4:7], v3 offset:16 ; GCN-ILP-NEXT: ds_write_b128 v0, a[0:3] offset:24576 -; GCN-ILP-NEXT: ds_read_b128 a[0:3], v3 offset:57344 +; GCN-ILP-NEXT: ds_read_b128 a[0:3], v3 ; GCN-ILP-NEXT: ds_write_b128 v0, a[12:15] offset:24624 -; GCN-ILP-NEXT: ds_read_b128 a[12:15], v3 offset:57392 +; GCN-ILP-NEXT: ds_read_b128 a[12:15], v3 offset:48 ; GCN-ILP-NEXT: ds_write_b128 v0, a[8:11] offset:24608 -; GCN-ILP-NEXT: ds_read_b128 a[8:11], v3 offset:57376 +; GCN-ILP-NEXT: ds_read_b128 a[8:11], v3 offset:32 ; GCN-ILP-NEXT: ds_write_b128 v0, a[20:23] offset:24656 -; GCN-ILP-NEXT: ds_read_b128 a[20:23], v3 offset:57424 +; GCN-ILP-NEXT: ds_read_b128 a[20:23], v3 offset:80 ; GCN-ILP-NEXT: ds_write_b128 v0, a[16:19] offset:24640 -; GCN-ILP-NEXT: ds_read_b128 a[16:19], v3 offset:57408 +; GCN-ILP-NEXT: ds_read_b128 a[16:19], v3 offset:64 ; GCN-ILP-NEXT: ds_write_b128 v0, a[28:31] offset:24688 -; GCN-ILP-NEXT: ds_read_b128 a[28:31], v3 offset:57456 +; GCN-ILP-NEXT: ds_read_b128 a[28:31], v3 offset:112 ; GCN-ILP-NEXT: ds_write_b128 v0, a[24:27] offset:24672 -; GCN-ILP-NEXT: ds_read_b128 a[24:27], v3 offset:57440 +; GCN-ILP-NEXT: ds_read_b128 a[24:27], v3 offset:96 ; GCN-ILP-NEXT: s_waitcnt lgkmcnt(0) ; GCN-ILP-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v1, v2, a[0:31] ; GCN-ILP-NEXT: ; sched_group_barrier mask(0x00000100) size(8) SyncID(0) @@ -536,7 +536,7 @@ define amdgpu_kernel void @test_sched_group_barrier_pipeline_MFMA_interleave_spl ; GCN-MINREG-NEXT: ds_read_b128 a[12:15], v3 offset:24624 ; GCN-MINREG-NEXT: s_waitcnt lgkmcnt(0) ; GCN-MINREG-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v1, v0, a[0:31] -; GCN-MINREG-NEXT: v_add_u32_e32 v4, 0x6000, v3 +; GCN-MINREG-NEXT: v_add_u32_e32 v4, 0x14000, v3 ; GCN-MINREG-NEXT: ; sched_group_barrier mask(0x00000100) size(8) SyncID(0) ; GCN-MINREG-NEXT: ; sched_group_barrier mask(0x00000008) size(1) SyncID(0) ; GCN-MINREG-NEXT: s_nop 7 @@ -574,14 +574,14 @@ define amdgpu_kernel void @test_sched_group_barrier_pipeline_MFMA_interleave_spl ; GCN-MINREG-NEXT: ds_write_b128 v2, a[8:11] offset:24608 ; GCN-MINREG-NEXT: ds_write_b128 v2, a[4:7] offset:24592 ; GCN-MINREG-NEXT: ds_write_b128 v2, a[0:3] offset:24576 -; GCN-MINREG-NEXT: ds_read_b128 a[28:31], v4 offset:57456 -; GCN-MINREG-NEXT: ds_read_b128 a[24:27], v4 offset:57440 -; GCN-MINREG-NEXT: ds_read_b128 a[20:23], v4 offset:57424 -; GCN-MINREG-NEXT: ds_read_b128 a[16:19], v4 offset:57408 -; GCN-MINREG-NEXT: ds_read_b128 a[0:3], v4 offset:57344 -; GCN-MINREG-NEXT: ds_read_b128 a[4:7], v4 offset:57360 -; GCN-MINREG-NEXT: ds_read_b128 a[8:11], v4 offset:57376 -; GCN-MINREG-NEXT: ds_read_b128 a[12:15], v4 offset:57392 +; GCN-MINREG-NEXT: ds_read_b128 a[28:31], v4 offset:112 +; GCN-MINREG-NEXT: ds_read_b128 a[24:27], v4 offset:96 +; GCN-MINREG-NEXT: ds_read_b128 a[20:23], v4 offset:80 +; GCN-MINREG-NEXT: ds_read_b128 a[16:19], v4 offset:64 +; GCN-MINREG-NEXT: ds_read_b128 a[0:3], v4 +; GCN-MINREG-NEXT: ds_read_b128 a[4:7], v4 offset:16 +; GCN-MINREG-NEXT: ds_read_b128 a[8:11], v4 offset:32 +; GCN-MINREG-NEXT: ds_read_b128 a[12:15], v4 offset:48 ; GCN-MINREG-NEXT: s_waitcnt lgkmcnt(0) ; GCN-MINREG-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v1, v0, a[0:31] ; GCN-MINREG-NEXT: ; sched_group_barrier mask(0x00000200) size(8) SyncID(0) @@ -694,7 +694,7 @@ define amdgpu_kernel void @test_sched_group_barrier_pipeline_MFMA_interleave_spl ; GCN-MAXOCC-NEXT: ds_read_b128 a[0:3], v0 offset:49152 ; GCN-MAXOCC-NEXT: s_waitcnt lgkmcnt(0) ; GCN-MAXOCC-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v1, v2, a[0:31] -; 
GCN-MAXOCC-NEXT: v_add_u32_e32 v0, 0x6000, v0 +; GCN-MAXOCC-NEXT: v_add_u32_e32 v0, 0x14000, v0 ; GCN-MAXOCC-NEXT: ; sched_group_barrier mask(0x00000200) size(8) SyncID(0) ; GCN-MAXOCC-NEXT: ; sched_group_barrier mask(0x00000100) size(8) SyncID(0) ; GCN-MAXOCC-NEXT: ; sched_group_barrier mask(0x00000008) size(1) SyncID(0) @@ -709,14 +709,14 @@ define amdgpu_kernel void @test_sched_group_barrier_pipeline_MFMA_interleave_spl ; GCN-MAXOCC-NEXT: ds_write_b128 v3, a[8:11] offset:24608 ; GCN-MAXOCC-NEXT: ds_write_b128 v3, a[4:7] offset:24592 ; GCN-MAXOCC-NEXT: ds_write_b128 v3, a[0:3] offset:24576 -; GCN-MAXOCC-NEXT: ds_read_b128 a[28:31], v0 offset:57456 -; GCN-MAXOCC-NEXT: ds_read_b128 a[24:27], v0 offset:57440 -; GCN-MAXOCC-NEXT: ds_read_b128 a[20:23], v0 offset:57424 -; GCN-MAXOCC-NEXT: ds_read_b128 a[16:19], v0 offset:57408 -; GCN-MAXOCC-NEXT: ds_read_b128 a[0:3], v0 offset:57344 -; GCN-MAXOCC-NEXT: ds_read_b128 a[4:7], v0 offset:57360 -; GCN-MAXOCC-NEXT: ds_read_b128 a[8:11], v0 offset:57376 -; GCN-MAXOCC-NEXT: ds_read_b128 a[12:15], v0 offset:57392 +; GCN-MAXOCC-NEXT: ds_read_b128 a[28:31], v0 offset:112 +; GCN-MAXOCC-NEXT: ds_read_b128 a[24:27], v0 offset:96 +; GCN-MAXOCC-NEXT: ds_read_b128 a[20:23], v0 offset:80 +; GCN-MAXOCC-NEXT: ds_read_b128 a[16:19], v0 offset:64 +; GCN-MAXOCC-NEXT: ds_read_b128 a[0:3], v0 +; GCN-MAXOCC-NEXT: ds_read_b128 a[4:7], v0 offset:16 +; GCN-MAXOCC-NEXT: ds_read_b128 a[8:11], v0 offset:32 +; GCN-MAXOCC-NEXT: ds_read_b128 a[12:15], v0 offset:48 ; GCN-MAXOCC-NEXT: s_waitcnt lgkmcnt(0) ; GCN-MAXOCC-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v1, v2, a[0:31] ; GCN-MAXOCC-NEXT: ; sched_group_barrier mask(0x00000200) size(8) SyncID(0) @@ -829,26 +829,26 @@ define amdgpu_kernel void @test_sched_group_barrier_pipeline_MFMA_interleave_spl ; GCN-ILP-NEXT: ds_read_b128 a[28:31], v3 offset:49264 ; GCN-ILP-NEXT: s_waitcnt lgkmcnt(0) ; GCN-ILP-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v0, v1, a[0:31] -; GCN-ILP-NEXT: v_add_u32_e32 v3, 0x6000, v3 +; GCN-ILP-NEXT: v_add_u32_e32 v3, 0x14000, v3 ; GCN-ILP-NEXT: s_nop 7 ; GCN-ILP-NEXT: s_nop 7 ; GCN-ILP-NEXT: s_nop 1 ; GCN-ILP-NEXT: ds_write_b128 v2, a[0:3] offset:24576 -; GCN-ILP-NEXT: ds_read_b128 a[0:3], v3 offset:57344 +; GCN-ILP-NEXT: ds_read_b128 a[0:3], v3 ; GCN-ILP-NEXT: ds_write_b128 v2, a[4:7] offset:24592 -; GCN-ILP-NEXT: ds_read_b128 a[4:7], v3 offset:57360 +; GCN-ILP-NEXT: ds_read_b128 a[4:7], v3 offset:16 ; GCN-ILP-NEXT: ds_write_b128 v2, a[8:11] offset:24608 -; GCN-ILP-NEXT: ds_read_b128 a[8:11], v3 offset:57376 +; GCN-ILP-NEXT: ds_read_b128 a[8:11], v3 offset:32 ; GCN-ILP-NEXT: ds_write_b128 v2, a[12:15] offset:24624 -; GCN-ILP-NEXT: ds_read_b128 a[12:15], v3 offset:57392 +; GCN-ILP-NEXT: ds_read_b128 a[12:15], v3 offset:48 ; GCN-ILP-NEXT: ds_write_b128 v2, a[16:19] offset:24640 -; GCN-ILP-NEXT: ds_read_b128 a[16:19], v3 offset:57408 +; GCN-ILP-NEXT: ds_read_b128 a[16:19], v3 offset:64 ; GCN-ILP-NEXT: ds_write_b128 v2, a[20:23] offset:24656 -; GCN-ILP-NEXT: ds_read_b128 a[20:23], v3 offset:57424 +; GCN-ILP-NEXT: ds_read_b128 a[20:23], v3 offset:80 ; GCN-ILP-NEXT: ds_write_b128 v2, a[24:27] offset:24672 -; GCN-ILP-NEXT: ds_read_b128 a[24:27], v3 offset:57440 +; GCN-ILP-NEXT: ds_read_b128 a[24:27], v3 offset:96 ; GCN-ILP-NEXT: ds_write_b128 v2, a[28:31] offset:24688 -; GCN-ILP-NEXT: ds_read_b128 a[28:31], v3 offset:57456 +; GCN-ILP-NEXT: ds_read_b128 a[28:31], v3 offset:112 ; GCN-ILP-NEXT: s_waitcnt lgkmcnt(0) ; GCN-ILP-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v0, v1, a[0:31] ; GCN-ILP-NEXT: ; sched_group_barrier 
mask(0x00000100) size(8) SyncID(0) diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sched.group.barrier.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sched.group.barrier.ll index 73586b1243376..218e1b513c097 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sched.group.barrier.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sched.group.barrier.ll @@ -623,6 +623,7 @@ define amdgpu_kernel void @test_sched_group_barrier_pipeline_MFMA_cluster(ptr ad ; GCN-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 ; GCN-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GCN-NEXT: v_lshlrev_b32_e32 v0, 7, v0 +; GCN-NEXT: v_mov_b32_e32 v2, 1.0 ; GCN-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NEXT: v_add_u32_e32 v1, s0, v0 ; GCN-NEXT: ds_read_b128 a[156:159], v1 offset:112 @@ -633,51 +634,50 @@ define amdgpu_kernel void @test_sched_group_barrier_pipeline_MFMA_cluster(ptr ad ; GCN-NEXT: ds_read_b128 a[132:135], v1 offset:16 ; GCN-NEXT: ds_read_b128 a[136:139], v1 offset:32 ; GCN-NEXT: ds_read_b128 a[140:143], v1 offset:48 -; GCN-NEXT: ds_read_b128 a[28:31], v1 offset:8304 -; GCN-NEXT: ds_read_b128 a[24:27], v1 offset:8288 -; GCN-NEXT: ds_read_b128 a[20:23], v1 offset:8272 -; GCN-NEXT: ds_read_b128 a[16:19], v1 offset:8256 -; GCN-NEXT: ds_read_b128 a[12:15], v1 offset:8240 -; GCN-NEXT: ds_read_b128 a[8:11], v1 offset:8224 -; GCN-NEXT: ds_read_b128 a[4:7], v1 offset:8208 -; GCN-NEXT: ds_read_b128 a[0:3], v1 offset:8192 -; GCN-NEXT: v_add_u32_e32 v2, 0x6000, v1 -; GCN-NEXT: ds_read_b128 a[124:127], v1 offset:24688 -; GCN-NEXT: ds_read_b128 a[120:123], v1 offset:24672 -; GCN-NEXT: ds_read_b128 a[116:119], v1 offset:24656 -; GCN-NEXT: ds_read_b128 a[112:115], v1 offset:24640 -; GCN-NEXT: ds_read_b128 a[108:111], v1 offset:24624 -; GCN-NEXT: ds_read_b128 a[104:107], v1 offset:24608 -; GCN-NEXT: ds_read_b128 a[100:103], v1 offset:24592 -; GCN-NEXT: ds_read_b128 a[96:99], v1 offset:24576 -; GCN-NEXT: ds_read_b128 a[92:95], v1 offset:49264 -; GCN-NEXT: ds_read_b128 a[88:91], v1 offset:49248 -; GCN-NEXT: ds_read_b128 a[84:87], v1 offset:49232 -; GCN-NEXT: ds_read_b128 a[80:83], v1 offset:49216 -; GCN-NEXT: ds_read_b128 a[76:79], v1 offset:49200 -; GCN-NEXT: ds_read_b128 a[72:75], v1 offset:49184 -; GCN-NEXT: ds_read_b128 a[68:71], v1 offset:49168 -; GCN-NEXT: ds_read_b128 a[64:67], v1 offset:49152 -; GCN-NEXT: v_mov_b32_e32 v1, 1.0 -; GCN-NEXT: ds_read_b128 a[60:63], v2 offset:57456 -; GCN-NEXT: ds_read_b128 a[56:59], v2 offset:57440 -; GCN-NEXT: ds_read_b128 a[52:55], v2 offset:57424 -; GCN-NEXT: ds_read_b128 a[48:51], v2 offset:57408 -; GCN-NEXT: ds_read_b128 a[32:35], v2 offset:57344 -; GCN-NEXT: ds_read_b128 a[36:39], v2 offset:57360 -; GCN-NEXT: ds_read_b128 a[40:43], v2 offset:57376 -; GCN-NEXT: ds_read_b128 a[44:47], v2 offset:57392 -; GCN-NEXT: v_mov_b32_e32 v2, 2.0 +; GCN-NEXT: ds_read_b128 a[92:95], v1 offset:8304 +; GCN-NEXT: ds_read_b128 a[88:91], v1 offset:8288 +; GCN-NEXT: ds_read_b128 a[84:87], v1 offset:8272 +; GCN-NEXT: ds_read_b128 a[80:83], v1 offset:8256 +; GCN-NEXT: ds_read_b128 a[76:79], v1 offset:8240 +; GCN-NEXT: ds_read_b128 a[72:75], v1 offset:8224 +; GCN-NEXT: ds_read_b128 a[68:71], v1 offset:8208 +; GCN-NEXT: ds_read_b128 a[64:67], v1 offset:8192 +; GCN-NEXT: ds_read_b128 a[60:63], v1 offset:24688 +; GCN-NEXT: ds_read_b128 a[56:59], v1 offset:24672 +; GCN-NEXT: ds_read_b128 a[52:55], v1 offset:24656 +; GCN-NEXT: ds_read_b128 a[48:51], v1 offset:24640 +; GCN-NEXT: ds_read_b128 a[44:47], v1 offset:24624 +; GCN-NEXT: ds_read_b128 a[40:43], v1 offset:24608 +; GCN-NEXT: ds_read_b128 a[36:39], v1 offset:24592 +; GCN-NEXT: 
ds_read_b128 a[32:35], v1 offset:24576 +; GCN-NEXT: ds_read_b128 a[28:31], v1 offset:49264 +; GCN-NEXT: ds_read_b128 a[24:27], v1 offset:49248 +; GCN-NEXT: ds_read_b128 a[20:23], v1 offset:49232 +; GCN-NEXT: ds_read_b128 a[16:19], v1 offset:49216 +; GCN-NEXT: ds_read_b128 a[12:15], v1 offset:49200 +; GCN-NEXT: ds_read_b128 a[8:11], v1 offset:49184 +; GCN-NEXT: ds_read_b128 a[4:7], v1 offset:49168 +; GCN-NEXT: ds_read_b128 a[0:3], v1 offset:49152 +; GCN-NEXT: v_add_u32_e32 v1, 0x14000, v1 +; GCN-NEXT: ds_read_b128 a[124:127], v1 offset:112 +; GCN-NEXT: ds_read_b128 a[120:123], v1 offset:96 +; GCN-NEXT: ds_read_b128 a[116:119], v1 offset:80 +; GCN-NEXT: ds_read_b128 a[112:115], v1 offset:64 +; GCN-NEXT: ds_read_b128 a[96:99], v1 +; GCN-NEXT: ds_read_b128 a[100:103], v1 offset:16 +; GCN-NEXT: ds_read_b128 a[104:107], v1 offset:32 +; GCN-NEXT: ds_read_b128 a[108:111], v1 offset:48 +; GCN-NEXT: v_mov_b32_e32 v1, 2.0 ; GCN-NEXT: v_add_u32_e32 v0, s1, v0 ; GCN-NEXT: ; sched_group_barrier mask(0x00000100) size(40) SyncID(0) ; GCN-NEXT: s_waitcnt lgkmcnt(14) -; GCN-NEXT: v_mfma_f32_32x32x1f32 a[128:159], v1, v2, a[128:159] -; GCN-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v1, v2, a[0:31] -; GCN-NEXT: v_mfma_f32_32x32x1f32 a[96:127], v1, v2, a[96:127] +; GCN-NEXT: v_mfma_f32_32x32x1f32 a[128:159], v2, v1, a[128:159] +; GCN-NEXT: v_mfma_f32_32x32x1f32 a[64:95], v2, v1, a[64:95] +; GCN-NEXT: v_mfma_f32_32x32x1f32 a[32:63], v2, v1, a[32:63] ; GCN-NEXT: s_waitcnt lgkmcnt(8) -; GCN-NEXT: v_mfma_f32_32x32x1f32 a[64:95], v1, v2, a[64:95] +; GCN-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v2, v1, a[0:31] ; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: v_mfma_f32_32x32x1f32 a[32:63], v1, v2, a[32:63] +; GCN-NEXT: v_mfma_f32_32x32x1f32 a[96:127], v2, v1, a[96:127] ; GCN-NEXT: s_nop 7 ; GCN-NEXT: s_nop 4 ; GCN-NEXT: ds_write_b128 v0, a[156:159] offset:112 @@ -689,38 +689,38 @@ define amdgpu_kernel void @test_sched_group_barrier_pipeline_MFMA_cluster(ptr ad ; GCN-NEXT: ds_write_b128 v0, a[132:135] offset:16 ; GCN-NEXT: ds_write_b128 v0, a[128:131] ; GCN-NEXT: v_mov_b32_e32 v0, s1 -; GCN-NEXT: ds_write_b128 v0, a[24:27] offset:8288 -; GCN-NEXT: ds_write_b128 v0, a[28:31] offset:8304 -; GCN-NEXT: ds_write_b128 v0, a[16:19] offset:8256 -; GCN-NEXT: ds_write_b128 v0, a[20:23] offset:8272 -; GCN-NEXT: ds_write_b128 v0, a[8:11] offset:8224 -; GCN-NEXT: ds_write_b128 v0, a[12:15] offset:8240 -; GCN-NEXT: ds_write_b128 v0, a[0:3] offset:8192 -; GCN-NEXT: ds_write_b128 v0, a[4:7] offset:8208 -; GCN-NEXT: ds_write_b128 v0, a[120:123] offset:16480 -; GCN-NEXT: ds_write_b128 v0, a[124:127] offset:16496 -; GCN-NEXT: ds_write_b128 v0, a[112:115] offset:16448 -; GCN-NEXT: ds_write_b128 v0, a[116:119] offset:16464 -; GCN-NEXT: ds_write_b128 v0, a[104:107] offset:16416 -; GCN-NEXT: ds_write_b128 v0, a[108:111] offset:16432 -; GCN-NEXT: ds_write_b128 v0, a[96:99] offset:16384 -; GCN-NEXT: ds_write_b128 v0, a[100:103] offset:16400 -; GCN-NEXT: ds_write_b128 v0, a[88:91] offset:24672 -; GCN-NEXT: ds_write_b128 v0, a[92:95] offset:24688 -; GCN-NEXT: ds_write_b128 v0, a[80:83] offset:24640 -; GCN-NEXT: ds_write_b128 v0, a[84:87] offset:24656 -; GCN-NEXT: ds_write_b128 v0, a[72:75] offset:24608 -; GCN-NEXT: ds_write_b128 v0, a[76:79] offset:24624 -; GCN-NEXT: ds_write_b128 v0, a[64:67] offset:24576 -; GCN-NEXT: ds_write_b128 v0, a[68:71] offset:24592 -; GCN-NEXT: ds_write_b128 v0, a[56:59] offset:32864 -; GCN-NEXT: ds_write_b128 v0, a[60:63] offset:32880 -; GCN-NEXT: ds_write_b128 v0, a[48:51] offset:32832 -; GCN-NEXT: ds_write_b128 v0, a[52:55] 
offset:32848 -; GCN-NEXT: ds_write_b128 v0, a[40:43] offset:32800 -; GCN-NEXT: ds_write_b128 v0, a[44:47] offset:32816 -; GCN-NEXT: ds_write_b128 v0, a[32:35] offset:32768 -; GCN-NEXT: ds_write_b128 v0, a[36:39] offset:32784 +; GCN-NEXT: ds_write_b128 v0, a[88:91] offset:8288 +; GCN-NEXT: ds_write_b128 v0, a[92:95] offset:8304 +; GCN-NEXT: ds_write_b128 v0, a[80:83] offset:8256 +; GCN-NEXT: ds_write_b128 v0, a[84:87] offset:8272 +; GCN-NEXT: ds_write_b128 v0, a[72:75] offset:8224 +; GCN-NEXT: ds_write_b128 v0, a[76:79] offset:8240 +; GCN-NEXT: ds_write_b128 v0, a[64:67] offset:8192 +; GCN-NEXT: ds_write_b128 v0, a[68:71] offset:8208 +; GCN-NEXT: ds_write_b128 v0, a[56:59] offset:16480 +; GCN-NEXT: ds_write_b128 v0, a[60:63] offset:16496 +; GCN-NEXT: ds_write_b128 v0, a[48:51] offset:16448 +; GCN-NEXT: ds_write_b128 v0, a[52:55] offset:16464 +; GCN-NEXT: ds_write_b128 v0, a[40:43] offset:16416 +; GCN-NEXT: ds_write_b128 v0, a[44:47] offset:16432 +; GCN-NEXT: ds_write_b128 v0, a[32:35] offset:16384 +; GCN-NEXT: ds_write_b128 v0, a[36:39] offset:16400 +; GCN-NEXT: ds_write_b128 v0, a[24:27] offset:24672 +; GCN-NEXT: ds_write_b128 v0, a[28:31] offset:24688 +; GCN-NEXT: ds_write_b128 v0, a[16:19] offset:24640 +; GCN-NEXT: ds_write_b128 v0, a[20:23] offset:24656 +; GCN-NEXT: ds_write_b128 v0, a[8:11] offset:24608 +; GCN-NEXT: ds_write_b128 v0, a[12:15] offset:24624 +; GCN-NEXT: ds_write_b128 v0, a[0:3] offset:24576 +; GCN-NEXT: ds_write_b128 v0, a[4:7] offset:24592 +; GCN-NEXT: ds_write_b128 v0, a[120:123] offset:32864 +; GCN-NEXT: ds_write_b128 v0, a[124:127] offset:32880 +; GCN-NEXT: ds_write_b128 v0, a[112:115] offset:32832 +; GCN-NEXT: ds_write_b128 v0, a[116:119] offset:32848 +; GCN-NEXT: ds_write_b128 v0, a[104:107] offset:32800 +; GCN-NEXT: ds_write_b128 v0, a[108:111] offset:32816 +; GCN-NEXT: ds_write_b128 v0, a[96:99] offset:32768 +; GCN-NEXT: ds_write_b128 v0, a[100:103] offset:32784 ; GCN-NEXT: ; sched_group_barrier mask(0x00000008) size(5) SyncID(0) ; GCN-NEXT: ; sched_group_barrier mask(0x00000200) size(40) SyncID(0) ; GCN-NEXT: s_endpgm @@ -730,6 +730,7 @@ define amdgpu_kernel void @test_sched_group_barrier_pipeline_MFMA_cluster(ptr ad ; EXACTCUTOFF-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 ; EXACTCUTOFF-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; EXACTCUTOFF-NEXT: v_lshlrev_b32_e32 v0, 7, v0 +; EXACTCUTOFF-NEXT: v_mov_b32_e32 v2, 1.0 ; EXACTCUTOFF-NEXT: s_waitcnt lgkmcnt(0) ; EXACTCUTOFF-NEXT: v_add_u32_e32 v1, s0, v0 ; EXACTCUTOFF-NEXT: ds_read_b128 a[156:159], v1 offset:112 @@ -740,51 +741,50 @@ define amdgpu_kernel void @test_sched_group_barrier_pipeline_MFMA_cluster(ptr ad ; EXACTCUTOFF-NEXT: ds_read_b128 a[132:135], v1 offset:16 ; EXACTCUTOFF-NEXT: ds_read_b128 a[136:139], v1 offset:32 ; EXACTCUTOFF-NEXT: ds_read_b128 a[140:143], v1 offset:48 -; EXACTCUTOFF-NEXT: ds_read_b128 a[28:31], v1 offset:8304 -; EXACTCUTOFF-NEXT: ds_read_b128 a[24:27], v1 offset:8288 -; EXACTCUTOFF-NEXT: ds_read_b128 a[20:23], v1 offset:8272 -; EXACTCUTOFF-NEXT: ds_read_b128 a[16:19], v1 offset:8256 -; EXACTCUTOFF-NEXT: ds_read_b128 a[12:15], v1 offset:8240 -; EXACTCUTOFF-NEXT: ds_read_b128 a[8:11], v1 offset:8224 -; EXACTCUTOFF-NEXT: ds_read_b128 a[4:7], v1 offset:8208 -; EXACTCUTOFF-NEXT: ds_read_b128 a[0:3], v1 offset:8192 -; EXACTCUTOFF-NEXT: v_add_u32_e32 v2, 0x6000, v1 -; EXACTCUTOFF-NEXT: ds_read_b128 a[124:127], v1 offset:24688 -; EXACTCUTOFF-NEXT: ds_read_b128 a[120:123], v1 offset:24672 -; EXACTCUTOFF-NEXT: ds_read_b128 a[116:119], v1 offset:24656 -; EXACTCUTOFF-NEXT: ds_read_b128 a[112:115], v1 
offset:24640 -; EXACTCUTOFF-NEXT: ds_read_b128 a[108:111], v1 offset:24624 -; EXACTCUTOFF-NEXT: ds_read_b128 a[104:107], v1 offset:24608 -; EXACTCUTOFF-NEXT: ds_read_b128 a[100:103], v1 offset:24592 -; EXACTCUTOFF-NEXT: ds_read_b128 a[96:99], v1 offset:24576 -; EXACTCUTOFF-NEXT: ds_read_b128 a[92:95], v1 offset:49264 -; EXACTCUTOFF-NEXT: ds_read_b128 a[88:91], v1 offset:49248 -; EXACTCUTOFF-NEXT: ds_read_b128 a[84:87], v1 offset:49232 -; EXACTCUTOFF-NEXT: ds_read_b128 a[80:83], v1 offset:49216 -; EXACTCUTOFF-NEXT: ds_read_b128 a[76:79], v1 offset:49200 -; EXACTCUTOFF-NEXT: ds_read_b128 a[72:75], v1 offset:49184 -; EXACTCUTOFF-NEXT: ds_read_b128 a[68:71], v1 offset:49168 -; EXACTCUTOFF-NEXT: ds_read_b128 a[64:67], v1 offset:49152 -; EXACTCUTOFF-NEXT: v_mov_b32_e32 v1, 1.0 -; EXACTCUTOFF-NEXT: ds_read_b128 a[60:63], v2 offset:57456 -; EXACTCUTOFF-NEXT: ds_read_b128 a[56:59], v2 offset:57440 -; EXACTCUTOFF-NEXT: ds_read_b128 a[52:55], v2 offset:57424 -; EXACTCUTOFF-NEXT: ds_read_b128 a[48:51], v2 offset:57408 -; EXACTCUTOFF-NEXT: ds_read_b128 a[32:35], v2 offset:57344 -; EXACTCUTOFF-NEXT: ds_read_b128 a[36:39], v2 offset:57360 -; EXACTCUTOFF-NEXT: ds_read_b128 a[40:43], v2 offset:57376 -; EXACTCUTOFF-NEXT: ds_read_b128 a[44:47], v2 offset:57392 -; EXACTCUTOFF-NEXT: v_mov_b32_e32 v2, 2.0 +; EXACTCUTOFF-NEXT: ds_read_b128 a[92:95], v1 offset:8304 +; EXACTCUTOFF-NEXT: ds_read_b128 a[88:91], v1 offset:8288 +; EXACTCUTOFF-NEXT: ds_read_b128 a[84:87], v1 offset:8272 +; EXACTCUTOFF-NEXT: ds_read_b128 a[80:83], v1 offset:8256 +; EXACTCUTOFF-NEXT: ds_read_b128 a[76:79], v1 offset:8240 +; EXACTCUTOFF-NEXT: ds_read_b128 a[72:75], v1 offset:8224 +; EXACTCUTOFF-NEXT: ds_read_b128 a[68:71], v1 offset:8208 +; EXACTCUTOFF-NEXT: ds_read_b128 a[64:67], v1 offset:8192 +; EXACTCUTOFF-NEXT: ds_read_b128 a[60:63], v1 offset:24688 +; EXACTCUTOFF-NEXT: ds_read_b128 a[56:59], v1 offset:24672 +; EXACTCUTOFF-NEXT: ds_read_b128 a[52:55], v1 offset:24656 +; EXACTCUTOFF-NEXT: ds_read_b128 a[48:51], v1 offset:24640 +; EXACTCUTOFF-NEXT: ds_read_b128 a[44:47], v1 offset:24624 +; EXACTCUTOFF-NEXT: ds_read_b128 a[40:43], v1 offset:24608 +; EXACTCUTOFF-NEXT: ds_read_b128 a[36:39], v1 offset:24592 +; EXACTCUTOFF-NEXT: ds_read_b128 a[32:35], v1 offset:24576 +; EXACTCUTOFF-NEXT: ds_read_b128 a[28:31], v1 offset:49264 +; EXACTCUTOFF-NEXT: ds_read_b128 a[24:27], v1 offset:49248 +; EXACTCUTOFF-NEXT: ds_read_b128 a[20:23], v1 offset:49232 +; EXACTCUTOFF-NEXT: ds_read_b128 a[16:19], v1 offset:49216 +; EXACTCUTOFF-NEXT: ds_read_b128 a[12:15], v1 offset:49200 +; EXACTCUTOFF-NEXT: ds_read_b128 a[8:11], v1 offset:49184 +; EXACTCUTOFF-NEXT: ds_read_b128 a[4:7], v1 offset:49168 +; EXACTCUTOFF-NEXT: ds_read_b128 a[0:3], v1 offset:49152 +; EXACTCUTOFF-NEXT: v_add_u32_e32 v1, 0x14000, v1 +; EXACTCUTOFF-NEXT: ds_read_b128 a[124:127], v1 offset:112 +; EXACTCUTOFF-NEXT: ds_read_b128 a[120:123], v1 offset:96 +; EXACTCUTOFF-NEXT: ds_read_b128 a[116:119], v1 offset:80 +; EXACTCUTOFF-NEXT: ds_read_b128 a[112:115], v1 offset:64 +; EXACTCUTOFF-NEXT: ds_read_b128 a[96:99], v1 +; EXACTCUTOFF-NEXT: ds_read_b128 a[100:103], v1 offset:16 +; EXACTCUTOFF-NEXT: ds_read_b128 a[104:107], v1 offset:32 +; EXACTCUTOFF-NEXT: ds_read_b128 a[108:111], v1 offset:48 +; EXACTCUTOFF-NEXT: v_mov_b32_e32 v1, 2.0 ; EXACTCUTOFF-NEXT: v_add_u32_e32 v0, s1, v0 ; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000100) size(40) SyncID(0) ; EXACTCUTOFF-NEXT: s_waitcnt lgkmcnt(14) -; EXACTCUTOFF-NEXT: v_mfma_f32_32x32x1f32 a[128:159], v1, v2, a[128:159] -; EXACTCUTOFF-NEXT: 
v_mfma_f32_32x32x1f32 a[0:31], v1, v2, a[0:31] -; EXACTCUTOFF-NEXT: v_mfma_f32_32x32x1f32 a[96:127], v1, v2, a[96:127] +; EXACTCUTOFF-NEXT: v_mfma_f32_32x32x1f32 a[128:159], v2, v1, a[128:159] +; EXACTCUTOFF-NEXT: v_mfma_f32_32x32x1f32 a[64:95], v2, v1, a[64:95] +; EXACTCUTOFF-NEXT: v_mfma_f32_32x32x1f32 a[32:63], v2, v1, a[32:63] ; EXACTCUTOFF-NEXT: s_waitcnt lgkmcnt(8) -; EXACTCUTOFF-NEXT: v_mfma_f32_32x32x1f32 a[64:95], v1, v2, a[64:95] +; EXACTCUTOFF-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v2, v1, a[0:31] ; EXACTCUTOFF-NEXT: s_waitcnt lgkmcnt(0) -; EXACTCUTOFF-NEXT: v_mfma_f32_32x32x1f32 a[32:63], v1, v2, a[32:63] +; EXACTCUTOFF-NEXT: v_mfma_f32_32x32x1f32 a[96:127], v2, v1, a[96:127] ; EXACTCUTOFF-NEXT: s_nop 7 ; EXACTCUTOFF-NEXT: s_nop 4 ; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[156:159] offset:112 @@ -796,38 +796,38 @@ define amdgpu_kernel void @test_sched_group_barrier_pipeline_MFMA_cluster(ptr ad ; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[132:135] offset:16 ; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[128:131] ; EXACTCUTOFF-NEXT: v_mov_b32_e32 v0, s1 -; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[24:27] offset:8288 -; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[28:31] offset:8304 -; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[16:19] offset:8256 -; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[20:23] offset:8272 -; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[8:11] offset:8224 -; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[12:15] offset:8240 -; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[0:3] offset:8192 -; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[4:7] offset:8208 -; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[120:123] offset:16480 -; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[124:127] offset:16496 -; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[112:115] offset:16448 -; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[116:119] offset:16464 -; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[104:107] offset:16416 -; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[108:111] offset:16432 -; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[96:99] offset:16384 -; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[100:103] offset:16400 -; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[88:91] offset:24672 -; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[92:95] offset:24688 -; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[80:83] offset:24640 -; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[84:87] offset:24656 -; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[72:75] offset:24608 -; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[76:79] offset:24624 -; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[64:67] offset:24576 -; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[68:71] offset:24592 -; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[56:59] offset:32864 -; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[60:63] offset:32880 -; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[48:51] offset:32832 -; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[52:55] offset:32848 -; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[40:43] offset:32800 -; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[44:47] offset:32816 -; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[32:35] offset:32768 -; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[36:39] offset:32784 +; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[88:91] offset:8288 +; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[92:95] offset:8304 +; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[80:83] offset:8256 +; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[84:87] offset:8272 +; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[72:75] offset:8224 +; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[76:79] offset:8240 +; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[64:67] offset:8192 +; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[68:71] offset:8208 +; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[56:59] 
offset:16480 +; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[60:63] offset:16496 +; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[48:51] offset:16448 +; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[52:55] offset:16464 +; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[40:43] offset:16416 +; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[44:47] offset:16432 +; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[32:35] offset:16384 +; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[36:39] offset:16400 +; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[24:27] offset:24672 +; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[28:31] offset:24688 +; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[16:19] offset:24640 +; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[20:23] offset:24656 +; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[8:11] offset:24608 +; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[12:15] offset:24624 +; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[0:3] offset:24576 +; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[4:7] offset:24592 +; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[120:123] offset:32864 +; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[124:127] offset:32880 +; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[112:115] offset:32832 +; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[116:119] offset:32848 +; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[104:107] offset:32800 +; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[108:111] offset:32816 +; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[96:99] offset:32768 +; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[100:103] offset:32784 ; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000008) size(5) SyncID(0) ; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000200) size(40) SyncID(0) ; EXACTCUTOFF-NEXT: s_endpgm @@ -960,7 +960,7 @@ define amdgpu_kernel void @test_sched_group_barrier_pipeline_MFMA_interleave(ptr ; GCN-NEXT: ds_read_b128 a[0:3], v1 offset:49152 ; GCN-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v2, v3, a[0:31] -; GCN-NEXT: v_add_u32_e32 v1, 0x6000, v1 +; GCN-NEXT: v_add_u32_e32 v1, 0x14000, v1 ; GCN-NEXT: ; sched_group_barrier mask(0x00000200) size(8) SyncID(0) ; GCN-NEXT: ; sched_group_barrier mask(0x00000100) size(8) SyncID(0) ; GCN-NEXT: ; sched_group_barrier mask(0x00000008) size(1) SyncID(0) @@ -975,14 +975,14 @@ define amdgpu_kernel void @test_sched_group_barrier_pipeline_MFMA_interleave(ptr ; GCN-NEXT: ds_write_b128 v0, a[12:15] offset:24624 ; GCN-NEXT: ds_write_b128 v0, a[0:3] offset:24576 ; GCN-NEXT: ds_write_b128 v0, a[4:7] offset:24592 -; GCN-NEXT: ds_read_b128 a[28:31], v1 offset:57456 -; GCN-NEXT: ds_read_b128 a[24:27], v1 offset:57440 -; GCN-NEXT: ds_read_b128 a[20:23], v1 offset:57424 -; GCN-NEXT: ds_read_b128 a[16:19], v1 offset:57408 -; GCN-NEXT: ds_read_b128 a[0:3], v1 offset:57344 -; GCN-NEXT: ds_read_b128 a[4:7], v1 offset:57360 -; GCN-NEXT: ds_read_b128 a[8:11], v1 offset:57376 -; GCN-NEXT: ds_read_b128 a[12:15], v1 offset:57392 +; GCN-NEXT: ds_read_b128 a[28:31], v1 offset:112 +; GCN-NEXT: ds_read_b128 a[24:27], v1 offset:96 +; GCN-NEXT: ds_read_b128 a[20:23], v1 offset:80 +; GCN-NEXT: ds_read_b128 a[16:19], v1 offset:64 +; GCN-NEXT: ds_read_b128 a[0:3], v1 +; GCN-NEXT: ds_read_b128 a[4:7], v1 offset:16 +; GCN-NEXT: ds_read_b128 a[8:11], v1 offset:32 +; GCN-NEXT: ds_read_b128 a[12:15], v1 offset:48 ; GCN-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v2, v3, a[0:31] ; GCN-NEXT: ; sched_group_barrier mask(0x00000200) size(8) SyncID(0) @@ -1094,7 +1094,7 @@ define amdgpu_kernel void @test_sched_group_barrier_pipeline_MFMA_interleave(ptr ; EXACTCUTOFF-NEXT: ds_read_b128 a[0:3], v1 offset:49152 ; EXACTCUTOFF-NEXT: s_waitcnt lgkmcnt(0) ; 
EXACTCUTOFF-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v2, v3, a[0:31] -; EXACTCUTOFF-NEXT: v_add_u32_e32 v1, 0x6000, v1 +; EXACTCUTOFF-NEXT: v_add_u32_e32 v1, 0x14000, v1 ; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000200) size(8) SyncID(0) ; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000100) size(8) SyncID(0) ; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000008) size(1) SyncID(0) @@ -1109,14 +1109,14 @@ define amdgpu_kernel void @test_sched_group_barrier_pipeline_MFMA_interleave(ptr ; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[12:15] offset:24624 ; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[0:3] offset:24576 ; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[4:7] offset:24592 -; EXACTCUTOFF-NEXT: ds_read_b128 a[28:31], v1 offset:57456 -; EXACTCUTOFF-NEXT: ds_read_b128 a[24:27], v1 offset:57440 -; EXACTCUTOFF-NEXT: ds_read_b128 a[20:23], v1 offset:57424 -; EXACTCUTOFF-NEXT: ds_read_b128 a[16:19], v1 offset:57408 -; EXACTCUTOFF-NEXT: ds_read_b128 a[0:3], v1 offset:57344 -; EXACTCUTOFF-NEXT: ds_read_b128 a[4:7], v1 offset:57360 -; EXACTCUTOFF-NEXT: ds_read_b128 a[8:11], v1 offset:57376 -; EXACTCUTOFF-NEXT: ds_read_b128 a[12:15], v1 offset:57392 +; EXACTCUTOFF-NEXT: ds_read_b128 a[28:31], v1 offset:112 +; EXACTCUTOFF-NEXT: ds_read_b128 a[24:27], v1 offset:96 +; EXACTCUTOFF-NEXT: ds_read_b128 a[20:23], v1 offset:80 +; EXACTCUTOFF-NEXT: ds_read_b128 a[16:19], v1 offset:64 +; EXACTCUTOFF-NEXT: ds_read_b128 a[0:3], v1 +; EXACTCUTOFF-NEXT: ds_read_b128 a[4:7], v1 offset:16 +; EXACTCUTOFF-NEXT: ds_read_b128 a[8:11], v1 offset:32 +; EXACTCUTOFF-NEXT: ds_read_b128 a[12:15], v1 offset:48 ; EXACTCUTOFF-NEXT: s_waitcnt lgkmcnt(0) ; EXACTCUTOFF-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v2, v3, a[0:31] ; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000200) size(8) SyncID(0) @@ -1199,19 +1199,19 @@ define amdgpu_kernel void @test_sched_group_barrier_pipeline_interleave_EXP_MFMA ; GCN-LABEL: test_sched_group_barrier_pipeline_interleave_EXP_MFMA: ; GCN: ; %bb.0: ; %entry ; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x44 -; GCN-NEXT: v_mov_b32_e32 v3, 0x3fb8aa3b +; GCN-NEXT: v_mov_b32_e32 v2, 0x3fb8aa3b ; GCN-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x24 -; GCN-NEXT: v_mov_b32_e32 v7, 0x32a5705f +; GCN-NEXT: v_mov_b32_e32 v6, 0x32a5705f ; GCN-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: v_mul_f32_e32 v4, s0, v3 -; GCN-NEXT: v_rndne_f32_e32 v5, v4 -; GCN-NEXT: v_sub_f32_e32 v6, v4, v5 -; GCN-NEXT: v_fma_f32 v4, s0, v3, -v4 -; GCN-NEXT: v_fmac_f32_e32 v4, s0, v7 -; GCN-NEXT: v_add_f32_e32 v4, v6, v4 -; GCN-NEXT: v_exp_f32_e32 v4, v4 -; GCN-NEXT: v_cvt_i32_f32_e32 v5, v5 +; GCN-NEXT: v_mul_f32_e32 v3, s0, v2 +; GCN-NEXT: v_rndne_f32_e32 v4, v3 +; GCN-NEXT: v_sub_f32_e32 v5, v3, v4 +; GCN-NEXT: v_fma_f32 v3, s0, v2, -v3 +; GCN-NEXT: v_fmac_f32_e32 v3, s0, v6 +; GCN-NEXT: v_add_f32_e32 v3, v5, v3 +; GCN-NEXT: v_exp_f32_e32 v3, v3 +; GCN-NEXT: v_cvt_i32_f32_e32 v4, v4 ; GCN-NEXT: v_lshlrev_b32_e32 v0, 7, v0 ; GCN-NEXT: v_add_u32_e32 v1, s6, v0 ; GCN-NEXT: ds_read_b128 a[124:127], v1 offset:112 @@ -1222,112 +1222,113 @@ define amdgpu_kernel void @test_sched_group_barrier_pipeline_interleave_EXP_MFMA ; GCN-NEXT: ds_read_b128 a[100:103], v1 offset:16 ; GCN-NEXT: ds_read_b128 a[104:107], v1 offset:32 ; GCN-NEXT: ds_read_b128 a[108:111], v1 offset:48 -; GCN-NEXT: v_mov_b32_e32 v9, 1.0 -; GCN-NEXT: v_ldexp_f32 v4, v4, v5 -; GCN-NEXT: v_mov_b32_e32 v5, 0xc2ce8ed0 -; GCN-NEXT: v_mul_f32_e32 v10, s1, v3 -; GCN-NEXT: v_cmp_nlt_f32_e32 vcc, s0, v5 -; GCN-NEXT: v_mov_b32_e32 v6, 0x42b17218 -; GCN-NEXT: 
v_rndne_f32_e32 v11, v10 -; GCN-NEXT: v_cndmask_b32_e32 v4, 0, v4, vcc -; GCN-NEXT: v_cmp_ngt_f32_e32 vcc, s0, v6 -; GCN-NEXT: v_mov_b32_e32 v8, 0x7f800000 -; GCN-NEXT: v_sub_f32_e32 v12, v10, v11 -; GCN-NEXT: v_fma_f32 v10, s1, v3, -v10 -; GCN-NEXT: v_cndmask_b32_e32 v4, v8, v4, vcc -; GCN-NEXT: v_fmac_f32_e32 v10, s1, v7 -; GCN-NEXT: ds_read_b128 a[28:31], v1 offset:8304 +; GCN-NEXT: v_mov_b32_e32 v8, 1.0 +; GCN-NEXT: v_ldexp_f32 v3, v3, v4 +; GCN-NEXT: v_mov_b32_e32 v4, 0xc2ce8ed0 +; GCN-NEXT: v_mul_f32_e32 v9, s1, v2 +; GCN-NEXT: v_cmp_nlt_f32_e32 vcc, s0, v4 +; GCN-NEXT: v_mov_b32_e32 v5, 0x42b17218 +; GCN-NEXT: v_rndne_f32_e32 v10, v9 +; GCN-NEXT: v_cndmask_b32_e32 v3, 0, v3, vcc +; GCN-NEXT: v_cmp_ngt_f32_e32 vcc, s0, v5 +; GCN-NEXT: v_mov_b32_e32 v7, 0x7f800000 +; GCN-NEXT: v_sub_f32_e32 v11, v9, v10 +; GCN-NEXT: v_fma_f32 v9, s1, v2, -v9 +; GCN-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc +; GCN-NEXT: v_fmac_f32_e32 v9, s1, v6 +; GCN-NEXT: ds_read_b128 a[92:95], v1 offset:8304 ; GCN-NEXT: s_waitcnt lgkmcnt(1) -; GCN-NEXT: v_mfma_f32_32x32x1f32 a[96:127], v9, v4, a[96:127] -; GCN-NEXT: v_add_f32_e32 v4, v12, v10 -; GCN-NEXT: v_exp_f32_e32 v4, v4 -; GCN-NEXT: v_cvt_i32_f32_e32 v10, v11 -; GCN-NEXT: ds_read_b128 a[24:27], v1 offset:8288 -; GCN-NEXT: ds_read_b128 a[20:23], v1 offset:8272 -; GCN-NEXT: ds_read_b128 a[16:19], v1 offset:8256 -; GCN-NEXT: ds_read_b128 a[12:15], v1 offset:8240 -; GCN-NEXT: ds_read_b128 a[8:11], v1 offset:8224 -; GCN-NEXT: ds_read_b128 a[4:7], v1 offset:8208 -; GCN-NEXT: ds_read_b128 a[0:3], v1 offset:8192 -; GCN-NEXT: v_ldexp_f32 v4, v4, v10 -; GCN-NEXT: v_cmp_nlt_f32_e32 vcc, s1, v5 -; GCN-NEXT: v_cndmask_b32_e32 v4, 0, v4, vcc -; GCN-NEXT: v_cmp_ngt_f32_e32 vcc, s1, v6 -; GCN-NEXT: v_cndmask_b32_e32 v4, v8, v4, vcc -; GCN-NEXT: v_mul_f32_e32 v10, s2, v3 -; GCN-NEXT: v_rndne_f32_e32 v11, v10 +; GCN-NEXT: v_mfma_f32_32x32x1f32 a[96:127], v8, v3, a[96:127] +; GCN-NEXT: v_add_f32_e32 v3, v11, v9 +; GCN-NEXT: v_exp_f32_e32 v3, v3 +; GCN-NEXT: v_cvt_i32_f32_e32 v9, v10 +; GCN-NEXT: ds_read_b128 a[88:91], v1 offset:8288 +; GCN-NEXT: ds_read_b128 a[84:87], v1 offset:8272 +; GCN-NEXT: ds_read_b128 a[80:83], v1 offset:8256 +; GCN-NEXT: ds_read_b128 a[76:79], v1 offset:8240 +; GCN-NEXT: ds_read_b128 a[72:75], v1 offset:8224 +; GCN-NEXT: ds_read_b128 a[68:71], v1 offset:8208 +; GCN-NEXT: ds_read_b128 a[64:67], v1 offset:8192 +; GCN-NEXT: v_ldexp_f32 v3, v3, v9 +; GCN-NEXT: v_cmp_nlt_f32_e32 vcc, s1, v4 +; GCN-NEXT: v_cndmask_b32_e32 v3, 0, v3, vcc +; GCN-NEXT: v_cmp_ngt_f32_e32 vcc, s1, v5 +; GCN-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc +; GCN-NEXT: v_mul_f32_e32 v9, s2, v2 +; GCN-NEXT: v_rndne_f32_e32 v10, v9 ; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v9, v4, a[0:31] -; GCN-NEXT: v_fma_f32 v4, s2, v3, -v10 -; GCN-NEXT: v_sub_f32_e32 v12, v10, v11 -; GCN-NEXT: v_fmac_f32_e32 v4, s2, v7 -; GCN-NEXT: v_add_f32_e32 v4, v12, v4 -; GCN-NEXT: v_exp_f32_e32 v4, v4 -; GCN-NEXT: v_cvt_i32_f32_e32 v10, v11 -; GCN-NEXT: ds_read_b128 a[92:95], v1 offset:24688 -; GCN-NEXT: ds_read_b128 a[88:91], v1 offset:24672 -; GCN-NEXT: ds_read_b128 a[84:87], v1 offset:24656 -; GCN-NEXT: ds_read_b128 a[80:83], v1 offset:24640 -; GCN-NEXT: ds_read_b128 a[76:79], v1 offset:24624 -; GCN-NEXT: ds_read_b128 a[72:75], v1 offset:24608 -; GCN-NEXT: ds_read_b128 a[68:71], v1 offset:24592 -; GCN-NEXT: ds_read_b128 a[64:67], v1 offset:24576 -; GCN-NEXT: v_add_u32_e32 v2, 0x6000, v1 -; GCN-NEXT: ds_read_b128 a[60:63], v1 offset:49264 -; GCN-NEXT: ds_read_b128 a[56:59], v1 
offset:49248 -; GCN-NEXT: ds_read_b128 a[52:55], v1 offset:49232 -; GCN-NEXT: ds_read_b128 a[48:51], v1 offset:49216 -; GCN-NEXT: ds_read_b128 a[44:47], v1 offset:49200 -; GCN-NEXT: ds_read_b128 a[40:43], v1 offset:49184 -; GCN-NEXT: ds_read_b128 a[36:39], v1 offset:49168 -; GCN-NEXT: ds_read_b128 a[32:35], v1 offset:49152 -; GCN-NEXT: v_ldexp_f32 v1, v4, v10 -; GCN-NEXT: v_cmp_nlt_f32_e32 vcc, s2, v5 -; GCN-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc -; GCN-NEXT: v_cmp_ngt_f32_e32 vcc, s2, v6 -; GCN-NEXT: v_mul_f32_e32 v4, s3, v3 -; GCN-NEXT: v_cndmask_b32_e32 v1, v8, v1, vcc -; GCN-NEXT: v_rndne_f32_e32 v10, v4 +; GCN-NEXT: v_mfma_f32_32x32x1f32 a[64:95], v8, v3, a[64:95] +; GCN-NEXT: v_fma_f32 v3, s2, v2, -v9 +; GCN-NEXT: v_sub_f32_e32 v11, v9, v10 +; GCN-NEXT: v_fmac_f32_e32 v3, s2, v6 +; GCN-NEXT: v_add_f32_e32 v3, v11, v3 +; GCN-NEXT: v_exp_f32_e32 v3, v3 +; GCN-NEXT: v_cvt_i32_f32_e32 v9, v10 +; GCN-NEXT: ds_read_b128 a[60:63], v1 offset:24688 +; GCN-NEXT: ds_read_b128 a[56:59], v1 offset:24672 +; GCN-NEXT: ds_read_b128 a[52:55], v1 offset:24656 +; GCN-NEXT: ds_read_b128 a[48:51], v1 offset:24640 +; GCN-NEXT: ds_read_b128 a[44:47], v1 offset:24624 +; GCN-NEXT: ds_read_b128 a[40:43], v1 offset:24608 +; GCN-NEXT: ds_read_b128 a[36:39], v1 offset:24592 +; GCN-NEXT: ds_read_b128 a[32:35], v1 offset:24576 +; GCN-NEXT: v_ldexp_f32 v3, v3, v9 +; GCN-NEXT: v_cmp_nlt_f32_e32 vcc, s2, v4 +; GCN-NEXT: v_cndmask_b32_e32 v3, 0, v3, vcc +; GCN-NEXT: v_cmp_ngt_f32_e32 vcc, s2, v5 +; GCN-NEXT: v_mul_f32_e32 v9, s3, v2 +; GCN-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc +; GCN-NEXT: v_rndne_f32_e32 v10, v9 ; GCN-NEXT: s_load_dword s8, s[4:5], 0x54 ; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: v_mfma_f32_32x32x1f32 a[64:95], v9, v1, a[64:95] -; GCN-NEXT: v_sub_f32_e32 v1, v4, v10 -; GCN-NEXT: v_fma_f32 v4, s3, v3, -v4 -; GCN-NEXT: v_fmac_f32_e32 v4, s3, v7 -; GCN-NEXT: v_add_f32_e32 v1, v1, v4 -; GCN-NEXT: v_exp_f32_e32 v1, v1 -; GCN-NEXT: v_cvt_i32_f32_e32 v4, v10 -; GCN-NEXT: v_cmp_nlt_f32_e32 vcc, s3, v5 -; GCN-NEXT: ds_read_b128 a[156:159], v2 offset:57456 -; GCN-NEXT: ds_read_b128 a[152:155], v2 offset:57440 -; GCN-NEXT: v_ldexp_f32 v1, v1, v4 -; GCN-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc -; GCN-NEXT: v_cmp_ngt_f32_e32 vcc, s3, v6 -; GCN-NEXT: v_cndmask_b32_e32 v1, v8, v1, vcc -; GCN-NEXT: v_mul_f32_e32 v4, s8, v3 -; GCN-NEXT: v_fma_f32 v3, s8, v3, -v4 -; GCN-NEXT: v_mfma_f32_32x32x1f32 a[32:63], v9, v1, a[32:63] -; GCN-NEXT: v_rndne_f32_e32 v1, v4 -; GCN-NEXT: v_sub_f32_e32 v10, v4, v1 -; GCN-NEXT: v_fmac_f32_e32 v3, s8, v7 -; GCN-NEXT: v_add_f32_e32 v3, v10, v3 +; GCN-NEXT: v_mfma_f32_32x32x1f32 a[32:63], v8, v3, a[32:63] +; GCN-NEXT: v_sub_f32_e32 v3, v9, v10 +; GCN-NEXT: v_fma_f32 v9, s3, v2, -v9 +; GCN-NEXT: v_fmac_f32_e32 v9, s3, v6 +; GCN-NEXT: v_add_f32_e32 v3, v3, v9 ; GCN-NEXT: v_exp_f32_e32 v3, v3 -; GCN-NEXT: v_cvt_i32_f32_e32 v1, v1 -; GCN-NEXT: ds_read_b128 a[148:151], v2 offset:57424 -; GCN-NEXT: ds_read_b128 a[144:147], v2 offset:57408 -; GCN-NEXT: ds_read_b128 a[128:131], v2 offset:57344 -; GCN-NEXT: ds_read_b128 a[132:135], v2 offset:57360 -; GCN-NEXT: ds_read_b128 a[136:139], v2 offset:57376 -; GCN-NEXT: ds_read_b128 a[140:143], v2 offset:57392 -; GCN-NEXT: v_ldexp_f32 v1, v3, v1 -; GCN-NEXT: v_cmp_nlt_f32_e32 vcc, s8, v5 +; GCN-NEXT: v_cvt_i32_f32_e32 v9, v10 +; GCN-NEXT: ds_read_b128 a[28:31], v1 offset:49264 +; GCN-NEXT: ds_read_b128 a[24:27], v1 offset:49248 +; GCN-NEXT: ds_read_b128 a[20:23], v1 offset:49232 +; GCN-NEXT: ds_read_b128 a[16:19], v1 offset:49216 +; GCN-NEXT: 
ds_read_b128 a[12:15], v1 offset:49200 +; GCN-NEXT: ds_read_b128 a[8:11], v1 offset:49184 +; GCN-NEXT: ds_read_b128 a[4:7], v1 offset:49168 +; GCN-NEXT: ds_read_b128 a[0:3], v1 offset:49152 +; GCN-NEXT: v_ldexp_f32 v3, v3, v9 +; GCN-NEXT: v_cmp_nlt_f32_e32 vcc, s3, v4 +; GCN-NEXT: v_cndmask_b32_e32 v3, 0, v3, vcc +; GCN-NEXT: v_cmp_ngt_f32_e32 vcc, s3, v5 +; GCN-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc +; GCN-NEXT: v_mul_f32_e32 v9, s8, v2 +; GCN-NEXT: v_fma_f32 v2, s8, v2, -v9 +; GCN-NEXT: s_waitcnt lgkmcnt(0) +; GCN-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v8, v3, a[0:31] +; GCN-NEXT: v_rndne_f32_e32 v3, v9 +; GCN-NEXT: v_sub_f32_e32 v10, v9, v3 +; GCN-NEXT: v_fmac_f32_e32 v2, s8, v6 +; GCN-NEXT: v_add_f32_e32 v2, v10, v2 +; GCN-NEXT: v_exp_f32_e32 v2, v2 +; GCN-NEXT: v_cvt_i32_f32_e32 v3, v3 +; GCN-NEXT: v_add_u32_e32 v1, 0x14000, v1 +; GCN-NEXT: ds_read_b128 a[156:159], v1 offset:112 +; GCN-NEXT: ds_read_b128 a[152:155], v1 offset:96 +; GCN-NEXT: ds_read_b128 a[148:151], v1 offset:80 +; GCN-NEXT: ds_read_b128 a[144:147], v1 offset:64 +; GCN-NEXT: ds_read_b128 a[128:131], v1 +; GCN-NEXT: ds_read_b128 a[132:135], v1 offset:16 +; GCN-NEXT: ds_read_b128 a[136:139], v1 offset:32 +; GCN-NEXT: ds_read_b128 a[140:143], v1 offset:48 +; GCN-NEXT: v_ldexp_f32 v1, v2, v3 +; GCN-NEXT: v_cmp_nlt_f32_e32 vcc, s8, v4 ; GCN-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc -; GCN-NEXT: v_cmp_ngt_f32_e32 vcc, s8, v6 -; GCN-NEXT: v_cndmask_b32_e32 v1, v8, v1, vcc +; GCN-NEXT: v_cmp_ngt_f32_e32 vcc, s8, v5 +; GCN-NEXT: v_cndmask_b32_e32 v1, v7, v1, vcc ; GCN-NEXT: v_add_u32_e32 v0, s7, v0 ; GCN-NEXT: ds_write_b128 v0, a[124:127] offset:112 ; GCN-NEXT: s_waitcnt lgkmcnt(1) -; GCN-NEXT: v_mfma_f32_32x32x1f32 a[128:159], v9, v1, a[128:159] +; GCN-NEXT: v_mfma_f32_32x32x1f32 a[128:159], v8, v1, a[128:159] ; GCN-NEXT: ds_write_b128 v0, a[120:123] offset:96 ; GCN-NEXT: ds_write_b128 v0, a[116:119] offset:80 ; GCN-NEXT: ds_write_b128 v0, a[112:115] offset:64 @@ -1347,30 +1348,30 @@ define amdgpu_kernel void @test_sched_group_barrier_pipeline_interleave_EXP_MFMA ; GCN-NEXT: ; sched_group_barrier mask(0x00000008) size(1) SyncID(0) ; GCN-NEXT: ; sched_group_barrier mask(0x00000400) size(1) SyncID(0) ; GCN-NEXT: ; sched_group_barrier mask(0x00000008) size(1) SyncID(0) -; GCN-NEXT: ds_write_b128 v0, a[24:27] offset:8288 -; GCN-NEXT: ds_write_b128 v0, a[28:31] offset:8304 -; GCN-NEXT: ds_write_b128 v0, a[16:19] offset:8256 -; GCN-NEXT: ds_write_b128 v0, a[20:23] offset:8272 -; GCN-NEXT: ds_write_b128 v0, a[8:11] offset:8224 -; GCN-NEXT: ds_write_b128 v0, a[12:15] offset:8240 -; GCN-NEXT: ds_write_b128 v0, a[0:3] offset:8192 -; GCN-NEXT: ds_write_b128 v0, a[4:7] offset:8208 -; GCN-NEXT: ds_write_b128 v0, a[88:91] offset:16480 -; GCN-NEXT: ds_write_b128 v0, a[92:95] offset:16496 -; GCN-NEXT: ds_write_b128 v0, a[80:83] offset:16448 -; GCN-NEXT: ds_write_b128 v0, a[84:87] offset:16464 -; GCN-NEXT: ds_write_b128 v0, a[72:75] offset:16416 -; GCN-NEXT: ds_write_b128 v0, a[76:79] offset:16432 -; GCN-NEXT: ds_write_b128 v0, a[64:67] offset:16384 -; GCN-NEXT: ds_write_b128 v0, a[68:71] offset:16400 -; GCN-NEXT: ds_write_b128 v0, a[56:59] offset:24672 -; GCN-NEXT: ds_write_b128 v0, a[60:63] offset:24688 -; GCN-NEXT: ds_write_b128 v0, a[48:51] offset:24640 -; GCN-NEXT: ds_write_b128 v0, a[52:55] offset:24656 -; GCN-NEXT: ds_write_b128 v0, a[40:43] offset:24608 -; GCN-NEXT: ds_write_b128 v0, a[44:47] offset:24624 -; GCN-NEXT: ds_write_b128 v0, a[32:35] offset:24576 -; GCN-NEXT: ds_write_b128 v0, a[36:39] offset:24592 +; GCN-NEXT: ds_write_b128 
v0, a[88:91] offset:8288 +; GCN-NEXT: ds_write_b128 v0, a[92:95] offset:8304 +; GCN-NEXT: ds_write_b128 v0, a[80:83] offset:8256 +; GCN-NEXT: ds_write_b128 v0, a[84:87] offset:8272 +; GCN-NEXT: ds_write_b128 v0, a[72:75] offset:8224 +; GCN-NEXT: ds_write_b128 v0, a[76:79] offset:8240 +; GCN-NEXT: ds_write_b128 v0, a[64:67] offset:8192 +; GCN-NEXT: ds_write_b128 v0, a[68:71] offset:8208 +; GCN-NEXT: ds_write_b128 v0, a[56:59] offset:16480 +; GCN-NEXT: ds_write_b128 v0, a[60:63] offset:16496 +; GCN-NEXT: ds_write_b128 v0, a[48:51] offset:16448 +; GCN-NEXT: ds_write_b128 v0, a[52:55] offset:16464 +; GCN-NEXT: ds_write_b128 v0, a[40:43] offset:16416 +; GCN-NEXT: ds_write_b128 v0, a[44:47] offset:16432 +; GCN-NEXT: ds_write_b128 v0, a[32:35] offset:16384 +; GCN-NEXT: ds_write_b128 v0, a[36:39] offset:16400 +; GCN-NEXT: ds_write_b128 v0, a[24:27] offset:24672 +; GCN-NEXT: ds_write_b128 v0, a[28:31] offset:24688 +; GCN-NEXT: ds_write_b128 v0, a[16:19] offset:24640 +; GCN-NEXT: ds_write_b128 v0, a[20:23] offset:24656 +; GCN-NEXT: ds_write_b128 v0, a[8:11] offset:24608 +; GCN-NEXT: ds_write_b128 v0, a[12:15] offset:24624 +; GCN-NEXT: ds_write_b128 v0, a[0:3] offset:24576 +; GCN-NEXT: ds_write_b128 v0, a[4:7] offset:24592 ; GCN-NEXT: ds_write_b128 v0, a[152:155] offset:32864 ; GCN-NEXT: ds_write_b128 v0, a[156:159] offset:32880 ; GCN-NEXT: ds_write_b128 v0, a[144:147] offset:32832 @@ -1384,19 +1385,19 @@ define amdgpu_kernel void @test_sched_group_barrier_pipeline_interleave_EXP_MFMA ; EXACTCUTOFF-LABEL: test_sched_group_barrier_pipeline_interleave_EXP_MFMA: ; EXACTCUTOFF: ; %bb.0: ; %entry ; EXACTCUTOFF-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x44 -; EXACTCUTOFF-NEXT: v_mov_b32_e32 v3, 0x3fb8aa3b +; EXACTCUTOFF-NEXT: v_mov_b32_e32 v2, 0x3fb8aa3b ; EXACTCUTOFF-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x24 -; EXACTCUTOFF-NEXT: v_mov_b32_e32 v7, 0x32a5705f +; EXACTCUTOFF-NEXT: v_mov_b32_e32 v6, 0x32a5705f ; EXACTCUTOFF-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; EXACTCUTOFF-NEXT: s_waitcnt lgkmcnt(0) -; EXACTCUTOFF-NEXT: v_mul_f32_e32 v4, s0, v3 -; EXACTCUTOFF-NEXT: v_rndne_f32_e32 v5, v4 -; EXACTCUTOFF-NEXT: v_sub_f32_e32 v6, v4, v5 -; EXACTCUTOFF-NEXT: v_fma_f32 v4, s0, v3, -v4 -; EXACTCUTOFF-NEXT: v_fmac_f32_e32 v4, s0, v7 -; EXACTCUTOFF-NEXT: v_add_f32_e32 v4, v6, v4 -; EXACTCUTOFF-NEXT: v_exp_f32_e32 v4, v4 -; EXACTCUTOFF-NEXT: v_cvt_i32_f32_e32 v5, v5 +; EXACTCUTOFF-NEXT: v_mul_f32_e32 v3, s0, v2 +; EXACTCUTOFF-NEXT: v_rndne_f32_e32 v4, v3 +; EXACTCUTOFF-NEXT: v_sub_f32_e32 v5, v3, v4 +; EXACTCUTOFF-NEXT: v_fma_f32 v3, s0, v2, -v3 +; EXACTCUTOFF-NEXT: v_fmac_f32_e32 v3, s0, v6 +; EXACTCUTOFF-NEXT: v_add_f32_e32 v3, v5, v3 +; EXACTCUTOFF-NEXT: v_exp_f32_e32 v3, v3 +; EXACTCUTOFF-NEXT: v_cvt_i32_f32_e32 v4, v4 ; EXACTCUTOFF-NEXT: v_lshlrev_b32_e32 v0, 7, v0 ; EXACTCUTOFF-NEXT: v_add_u32_e32 v1, s6, v0 ; EXACTCUTOFF-NEXT: ds_read_b128 a[124:127], v1 offset:112 @@ -1407,112 +1408,113 @@ define amdgpu_kernel void @test_sched_group_barrier_pipeline_interleave_EXP_MFMA ; EXACTCUTOFF-NEXT: ds_read_b128 a[100:103], v1 offset:16 ; EXACTCUTOFF-NEXT: ds_read_b128 a[104:107], v1 offset:32 ; EXACTCUTOFF-NEXT: ds_read_b128 a[108:111], v1 offset:48 -; EXACTCUTOFF-NEXT: v_mov_b32_e32 v9, 1.0 -; EXACTCUTOFF-NEXT: v_ldexp_f32 v4, v4, v5 -; EXACTCUTOFF-NEXT: v_mov_b32_e32 v5, 0xc2ce8ed0 -; EXACTCUTOFF-NEXT: v_mul_f32_e32 v10, s1, v3 -; EXACTCUTOFF-NEXT: v_cmp_nlt_f32_e32 vcc, s0, v5 -; EXACTCUTOFF-NEXT: v_mov_b32_e32 v6, 0x42b17218 -; EXACTCUTOFF-NEXT: v_rndne_f32_e32 v11, v10 -; EXACTCUTOFF-NEXT: v_cndmask_b32_e32 v4, 0, v4, vcc 
-; EXACTCUTOFF-NEXT: v_cmp_ngt_f32_e32 vcc, s0, v6 -; EXACTCUTOFF-NEXT: v_mov_b32_e32 v8, 0x7f800000 -; EXACTCUTOFF-NEXT: v_sub_f32_e32 v12, v10, v11 -; EXACTCUTOFF-NEXT: v_fma_f32 v10, s1, v3, -v10 -; EXACTCUTOFF-NEXT: v_cndmask_b32_e32 v4, v8, v4, vcc -; EXACTCUTOFF-NEXT: v_fmac_f32_e32 v10, s1, v7 -; EXACTCUTOFF-NEXT: ds_read_b128 a[28:31], v1 offset:8304 +; EXACTCUTOFF-NEXT: v_mov_b32_e32 v8, 1.0 +; EXACTCUTOFF-NEXT: v_ldexp_f32 v3, v3, v4 +; EXACTCUTOFF-NEXT: v_mov_b32_e32 v4, 0xc2ce8ed0 +; EXACTCUTOFF-NEXT: v_mul_f32_e32 v9, s1, v2 +; EXACTCUTOFF-NEXT: v_cmp_nlt_f32_e32 vcc, s0, v4 +; EXACTCUTOFF-NEXT: v_mov_b32_e32 v5, 0x42b17218 +; EXACTCUTOFF-NEXT: v_rndne_f32_e32 v10, v9 +; EXACTCUTOFF-NEXT: v_cndmask_b32_e32 v3, 0, v3, vcc +; EXACTCUTOFF-NEXT: v_cmp_ngt_f32_e32 vcc, s0, v5 +; EXACTCUTOFF-NEXT: v_mov_b32_e32 v7, 0x7f800000 +; EXACTCUTOFF-NEXT: v_sub_f32_e32 v11, v9, v10 +; EXACTCUTOFF-NEXT: v_fma_f32 v9, s1, v2, -v9 +; EXACTCUTOFF-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc +; EXACTCUTOFF-NEXT: v_fmac_f32_e32 v9, s1, v6 +; EXACTCUTOFF-NEXT: ds_read_b128 a[92:95], v1 offset:8304 ; EXACTCUTOFF-NEXT: s_waitcnt lgkmcnt(1) -; EXACTCUTOFF-NEXT: v_mfma_f32_32x32x1f32 a[96:127], v9, v4, a[96:127] -; EXACTCUTOFF-NEXT: v_add_f32_e32 v4, v12, v10 -; EXACTCUTOFF-NEXT: v_exp_f32_e32 v4, v4 -; EXACTCUTOFF-NEXT: v_cvt_i32_f32_e32 v10, v11 -; EXACTCUTOFF-NEXT: ds_read_b128 a[24:27], v1 offset:8288 -; EXACTCUTOFF-NEXT: ds_read_b128 a[20:23], v1 offset:8272 -; EXACTCUTOFF-NEXT: ds_read_b128 a[16:19], v1 offset:8256 -; EXACTCUTOFF-NEXT: ds_read_b128 a[12:15], v1 offset:8240 -; EXACTCUTOFF-NEXT: ds_read_b128 a[8:11], v1 offset:8224 -; EXACTCUTOFF-NEXT: ds_read_b128 a[4:7], v1 offset:8208 -; EXACTCUTOFF-NEXT: ds_read_b128 a[0:3], v1 offset:8192 -; EXACTCUTOFF-NEXT: v_ldexp_f32 v4, v4, v10 -; EXACTCUTOFF-NEXT: v_cmp_nlt_f32_e32 vcc, s1, v5 -; EXACTCUTOFF-NEXT: v_cndmask_b32_e32 v4, 0, v4, vcc -; EXACTCUTOFF-NEXT: v_cmp_ngt_f32_e32 vcc, s1, v6 -; EXACTCUTOFF-NEXT: v_cndmask_b32_e32 v4, v8, v4, vcc -; EXACTCUTOFF-NEXT: v_mul_f32_e32 v10, s2, v3 -; EXACTCUTOFF-NEXT: v_rndne_f32_e32 v11, v10 +; EXACTCUTOFF-NEXT: v_mfma_f32_32x32x1f32 a[96:127], v8, v3, a[96:127] +; EXACTCUTOFF-NEXT: v_add_f32_e32 v3, v11, v9 +; EXACTCUTOFF-NEXT: v_exp_f32_e32 v3, v3 +; EXACTCUTOFF-NEXT: v_cvt_i32_f32_e32 v9, v10 +; EXACTCUTOFF-NEXT: ds_read_b128 a[88:91], v1 offset:8288 +; EXACTCUTOFF-NEXT: ds_read_b128 a[84:87], v1 offset:8272 +; EXACTCUTOFF-NEXT: ds_read_b128 a[80:83], v1 offset:8256 +; EXACTCUTOFF-NEXT: ds_read_b128 a[76:79], v1 offset:8240 +; EXACTCUTOFF-NEXT: ds_read_b128 a[72:75], v1 offset:8224 +; EXACTCUTOFF-NEXT: ds_read_b128 a[68:71], v1 offset:8208 +; EXACTCUTOFF-NEXT: ds_read_b128 a[64:67], v1 offset:8192 +; EXACTCUTOFF-NEXT: v_ldexp_f32 v3, v3, v9 +; EXACTCUTOFF-NEXT: v_cmp_nlt_f32_e32 vcc, s1, v4 +; EXACTCUTOFF-NEXT: v_cndmask_b32_e32 v3, 0, v3, vcc +; EXACTCUTOFF-NEXT: v_cmp_ngt_f32_e32 vcc, s1, v5 +; EXACTCUTOFF-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc +; EXACTCUTOFF-NEXT: v_mul_f32_e32 v9, s2, v2 +; EXACTCUTOFF-NEXT: v_rndne_f32_e32 v10, v9 ; EXACTCUTOFF-NEXT: s_waitcnt lgkmcnt(0) -; EXACTCUTOFF-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v9, v4, a[0:31] -; EXACTCUTOFF-NEXT: v_fma_f32 v4, s2, v3, -v10 -; EXACTCUTOFF-NEXT: v_sub_f32_e32 v12, v10, v11 -; EXACTCUTOFF-NEXT: v_fmac_f32_e32 v4, s2, v7 -; EXACTCUTOFF-NEXT: v_add_f32_e32 v4, v12, v4 -; EXACTCUTOFF-NEXT: v_exp_f32_e32 v4, v4 -; EXACTCUTOFF-NEXT: v_cvt_i32_f32_e32 v10, v11 -; EXACTCUTOFF-NEXT: ds_read_b128 a[92:95], v1 offset:24688 -; EXACTCUTOFF-NEXT: 
ds_read_b128 a[88:91], v1 offset:24672 -; EXACTCUTOFF-NEXT: ds_read_b128 a[84:87], v1 offset:24656 -; EXACTCUTOFF-NEXT: ds_read_b128 a[80:83], v1 offset:24640 -; EXACTCUTOFF-NEXT: ds_read_b128 a[76:79], v1 offset:24624 -; EXACTCUTOFF-NEXT: ds_read_b128 a[72:75], v1 offset:24608 -; EXACTCUTOFF-NEXT: ds_read_b128 a[68:71], v1 offset:24592 -; EXACTCUTOFF-NEXT: ds_read_b128 a[64:67], v1 offset:24576 -; EXACTCUTOFF-NEXT: v_add_u32_e32 v2, 0x6000, v1 -; EXACTCUTOFF-NEXT: ds_read_b128 a[60:63], v1 offset:49264 -; EXACTCUTOFF-NEXT: ds_read_b128 a[56:59], v1 offset:49248 -; EXACTCUTOFF-NEXT: ds_read_b128 a[52:55], v1 offset:49232 -; EXACTCUTOFF-NEXT: ds_read_b128 a[48:51], v1 offset:49216 -; EXACTCUTOFF-NEXT: ds_read_b128 a[44:47], v1 offset:49200 -; EXACTCUTOFF-NEXT: ds_read_b128 a[40:43], v1 offset:49184 -; EXACTCUTOFF-NEXT: ds_read_b128 a[36:39], v1 offset:49168 -; EXACTCUTOFF-NEXT: ds_read_b128 a[32:35], v1 offset:49152 -; EXACTCUTOFF-NEXT: v_ldexp_f32 v1, v4, v10 -; EXACTCUTOFF-NEXT: v_cmp_nlt_f32_e32 vcc, s2, v5 -; EXACTCUTOFF-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc -; EXACTCUTOFF-NEXT: v_cmp_ngt_f32_e32 vcc, s2, v6 -; EXACTCUTOFF-NEXT: v_mul_f32_e32 v4, s3, v3 -; EXACTCUTOFF-NEXT: v_cndmask_b32_e32 v1, v8, v1, vcc -; EXACTCUTOFF-NEXT: v_rndne_f32_e32 v10, v4 +; EXACTCUTOFF-NEXT: v_mfma_f32_32x32x1f32 a[64:95], v8, v3, a[64:95] +; EXACTCUTOFF-NEXT: v_fma_f32 v3, s2, v2, -v9 +; EXACTCUTOFF-NEXT: v_sub_f32_e32 v11, v9, v10 +; EXACTCUTOFF-NEXT: v_fmac_f32_e32 v3, s2, v6 +; EXACTCUTOFF-NEXT: v_add_f32_e32 v3, v11, v3 +; EXACTCUTOFF-NEXT: v_exp_f32_e32 v3, v3 +; EXACTCUTOFF-NEXT: v_cvt_i32_f32_e32 v9, v10 +; EXACTCUTOFF-NEXT: ds_read_b128 a[60:63], v1 offset:24688 +; EXACTCUTOFF-NEXT: ds_read_b128 a[56:59], v1 offset:24672 +; EXACTCUTOFF-NEXT: ds_read_b128 a[52:55], v1 offset:24656 +; EXACTCUTOFF-NEXT: ds_read_b128 a[48:51], v1 offset:24640 +; EXACTCUTOFF-NEXT: ds_read_b128 a[44:47], v1 offset:24624 +; EXACTCUTOFF-NEXT: ds_read_b128 a[40:43], v1 offset:24608 +; EXACTCUTOFF-NEXT: ds_read_b128 a[36:39], v1 offset:24592 +; EXACTCUTOFF-NEXT: ds_read_b128 a[32:35], v1 offset:24576 +; EXACTCUTOFF-NEXT: v_ldexp_f32 v3, v3, v9 +; EXACTCUTOFF-NEXT: v_cmp_nlt_f32_e32 vcc, s2, v4 +; EXACTCUTOFF-NEXT: v_cndmask_b32_e32 v3, 0, v3, vcc +; EXACTCUTOFF-NEXT: v_cmp_ngt_f32_e32 vcc, s2, v5 +; EXACTCUTOFF-NEXT: v_mul_f32_e32 v9, s3, v2 +; EXACTCUTOFF-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc +; EXACTCUTOFF-NEXT: v_rndne_f32_e32 v10, v9 ; EXACTCUTOFF-NEXT: s_load_dword s8, s[4:5], 0x54 ; EXACTCUTOFF-NEXT: s_waitcnt lgkmcnt(0) -; EXACTCUTOFF-NEXT: v_mfma_f32_32x32x1f32 a[64:95], v9, v1, a[64:95] -; EXACTCUTOFF-NEXT: v_sub_f32_e32 v1, v4, v10 -; EXACTCUTOFF-NEXT: v_fma_f32 v4, s3, v3, -v4 -; EXACTCUTOFF-NEXT: v_fmac_f32_e32 v4, s3, v7 -; EXACTCUTOFF-NEXT: v_add_f32_e32 v1, v1, v4 -; EXACTCUTOFF-NEXT: v_exp_f32_e32 v1, v1 -; EXACTCUTOFF-NEXT: v_cvt_i32_f32_e32 v4, v10 -; EXACTCUTOFF-NEXT: v_cmp_nlt_f32_e32 vcc, s3, v5 -; EXACTCUTOFF-NEXT: ds_read_b128 a[156:159], v2 offset:57456 -; EXACTCUTOFF-NEXT: ds_read_b128 a[152:155], v2 offset:57440 -; EXACTCUTOFF-NEXT: v_ldexp_f32 v1, v1, v4 -; EXACTCUTOFF-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc -; EXACTCUTOFF-NEXT: v_cmp_ngt_f32_e32 vcc, s3, v6 -; EXACTCUTOFF-NEXT: v_cndmask_b32_e32 v1, v8, v1, vcc -; EXACTCUTOFF-NEXT: v_mul_f32_e32 v4, s8, v3 -; EXACTCUTOFF-NEXT: v_fma_f32 v3, s8, v3, -v4 -; EXACTCUTOFF-NEXT: v_mfma_f32_32x32x1f32 a[32:63], v9, v1, a[32:63] -; EXACTCUTOFF-NEXT: v_rndne_f32_e32 v1, v4 -; EXACTCUTOFF-NEXT: v_sub_f32_e32 v10, v4, v1 -; EXACTCUTOFF-NEXT: 
v_fmac_f32_e32 v3, s8, v7 -; EXACTCUTOFF-NEXT: v_add_f32_e32 v3, v10, v3 +; EXACTCUTOFF-NEXT: v_mfma_f32_32x32x1f32 a[32:63], v8, v3, a[32:63] +; EXACTCUTOFF-NEXT: v_sub_f32_e32 v3, v9, v10 +; EXACTCUTOFF-NEXT: v_fma_f32 v9, s3, v2, -v9 +; EXACTCUTOFF-NEXT: v_fmac_f32_e32 v9, s3, v6 +; EXACTCUTOFF-NEXT: v_add_f32_e32 v3, v3, v9 ; EXACTCUTOFF-NEXT: v_exp_f32_e32 v3, v3 -; EXACTCUTOFF-NEXT: v_cvt_i32_f32_e32 v1, v1 -; EXACTCUTOFF-NEXT: ds_read_b128 a[148:151], v2 offset:57424 -; EXACTCUTOFF-NEXT: ds_read_b128 a[144:147], v2 offset:57408 -; EXACTCUTOFF-NEXT: ds_read_b128 a[128:131], v2 offset:57344 -; EXACTCUTOFF-NEXT: ds_read_b128 a[132:135], v2 offset:57360 -; EXACTCUTOFF-NEXT: ds_read_b128 a[136:139], v2 offset:57376 -; EXACTCUTOFF-NEXT: ds_read_b128 a[140:143], v2 offset:57392 -; EXACTCUTOFF-NEXT: v_ldexp_f32 v1, v3, v1 -; EXACTCUTOFF-NEXT: v_cmp_nlt_f32_e32 vcc, s8, v5 +; EXACTCUTOFF-NEXT: v_cvt_i32_f32_e32 v9, v10 +; EXACTCUTOFF-NEXT: ds_read_b128 a[28:31], v1 offset:49264 +; EXACTCUTOFF-NEXT: ds_read_b128 a[24:27], v1 offset:49248 +; EXACTCUTOFF-NEXT: ds_read_b128 a[20:23], v1 offset:49232 +; EXACTCUTOFF-NEXT: ds_read_b128 a[16:19], v1 offset:49216 +; EXACTCUTOFF-NEXT: ds_read_b128 a[12:15], v1 offset:49200 +; EXACTCUTOFF-NEXT: ds_read_b128 a[8:11], v1 offset:49184 +; EXACTCUTOFF-NEXT: ds_read_b128 a[4:7], v1 offset:49168 +; EXACTCUTOFF-NEXT: ds_read_b128 a[0:3], v1 offset:49152 +; EXACTCUTOFF-NEXT: v_ldexp_f32 v3, v3, v9 +; EXACTCUTOFF-NEXT: v_cmp_nlt_f32_e32 vcc, s3, v4 +; EXACTCUTOFF-NEXT: v_cndmask_b32_e32 v3, 0, v3, vcc +; EXACTCUTOFF-NEXT: v_cmp_ngt_f32_e32 vcc, s3, v5 +; EXACTCUTOFF-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc +; EXACTCUTOFF-NEXT: v_mul_f32_e32 v9, s8, v2 +; EXACTCUTOFF-NEXT: v_fma_f32 v2, s8, v2, -v9 +; EXACTCUTOFF-NEXT: s_waitcnt lgkmcnt(0) +; EXACTCUTOFF-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v8, v3, a[0:31] +; EXACTCUTOFF-NEXT: v_rndne_f32_e32 v3, v9 +; EXACTCUTOFF-NEXT: v_sub_f32_e32 v10, v9, v3 +; EXACTCUTOFF-NEXT: v_fmac_f32_e32 v2, s8, v6 +; EXACTCUTOFF-NEXT: v_add_f32_e32 v2, v10, v2 +; EXACTCUTOFF-NEXT: v_exp_f32_e32 v2, v2 +; EXACTCUTOFF-NEXT: v_cvt_i32_f32_e32 v3, v3 +; EXACTCUTOFF-NEXT: v_add_u32_e32 v1, 0x14000, v1 +; EXACTCUTOFF-NEXT: ds_read_b128 a[156:159], v1 offset:112 +; EXACTCUTOFF-NEXT: ds_read_b128 a[152:155], v1 offset:96 +; EXACTCUTOFF-NEXT: ds_read_b128 a[148:151], v1 offset:80 +; EXACTCUTOFF-NEXT: ds_read_b128 a[144:147], v1 offset:64 +; EXACTCUTOFF-NEXT: ds_read_b128 a[128:131], v1 +; EXACTCUTOFF-NEXT: ds_read_b128 a[132:135], v1 offset:16 +; EXACTCUTOFF-NEXT: ds_read_b128 a[136:139], v1 offset:32 +; EXACTCUTOFF-NEXT: ds_read_b128 a[140:143], v1 offset:48 +; EXACTCUTOFF-NEXT: v_ldexp_f32 v1, v2, v3 +; EXACTCUTOFF-NEXT: v_cmp_nlt_f32_e32 vcc, s8, v4 ; EXACTCUTOFF-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc -; EXACTCUTOFF-NEXT: v_cmp_ngt_f32_e32 vcc, s8, v6 -; EXACTCUTOFF-NEXT: v_cndmask_b32_e32 v1, v8, v1, vcc +; EXACTCUTOFF-NEXT: v_cmp_ngt_f32_e32 vcc, s8, v5 +; EXACTCUTOFF-NEXT: v_cndmask_b32_e32 v1, v7, v1, vcc ; EXACTCUTOFF-NEXT: v_add_u32_e32 v0, s7, v0 ; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[124:127] offset:112 ; EXACTCUTOFF-NEXT: s_waitcnt lgkmcnt(1) -; EXACTCUTOFF-NEXT: v_mfma_f32_32x32x1f32 a[128:159], v9, v1, a[128:159] +; EXACTCUTOFF-NEXT: v_mfma_f32_32x32x1f32 a[128:159], v8, v1, a[128:159] ; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[120:123] offset:96 ; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[116:119] offset:80 ; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[112:115] offset:64 @@ -1532,30 +1534,30 @@ define amdgpu_kernel void 
@test_sched_group_barrier_pipeline_interleave_EXP_MFMA ; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000008) size(1) SyncID(0) ; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000400) size(1) SyncID(0) ; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000008) size(1) SyncID(0) -; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[24:27] offset:8288 -; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[28:31] offset:8304 -; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[16:19] offset:8256 -; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[20:23] offset:8272 -; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[8:11] offset:8224 -; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[12:15] offset:8240 -; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[0:3] offset:8192 -; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[4:7] offset:8208 -; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[88:91] offset:16480 -; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[92:95] offset:16496 -; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[80:83] offset:16448 -; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[84:87] offset:16464 -; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[72:75] offset:16416 -; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[76:79] offset:16432 -; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[64:67] offset:16384 -; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[68:71] offset:16400 -; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[56:59] offset:24672 -; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[60:63] offset:24688 -; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[48:51] offset:24640 -; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[52:55] offset:24656 -; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[40:43] offset:24608 -; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[44:47] offset:24624 -; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[32:35] offset:24576 -; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[36:39] offset:24592 +; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[88:91] offset:8288 +; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[92:95] offset:8304 +; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[80:83] offset:8256 +; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[84:87] offset:8272 +; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[72:75] offset:8224 +; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[76:79] offset:8240 +; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[64:67] offset:8192 +; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[68:71] offset:8208 +; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[56:59] offset:16480 +; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[60:63] offset:16496 +; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[48:51] offset:16448 +; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[52:55] offset:16464 +; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[40:43] offset:16416 +; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[44:47] offset:16432 +; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[32:35] offset:16384 +; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[36:39] offset:16400 +; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[24:27] offset:24672 +; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[28:31] offset:24688 +; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[16:19] offset:24640 +; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[20:23] offset:24656 +; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[8:11] offset:24608 +; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[12:15] offset:24624 +; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[0:3] offset:24576 +; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[4:7] offset:24592 ; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[152:155] offset:32864 ; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[156:159] offset:32880 ; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[144:147] offset:32832 diff --git a/llvm/test/CodeGen/AMDGPU/machine-sink-temporal-divergence-swdev407790.ll b/llvm/test/CodeGen/AMDGPU/machine-sink-temporal-divergence-swdev407790.ll index 
553d7e09390fd..680942fcb4d4b 100644 --- a/llvm/test/CodeGen/AMDGPU/machine-sink-temporal-divergence-swdev407790.ll +++ b/llvm/test/CodeGen/AMDGPU/machine-sink-temporal-divergence-swdev407790.ll @@ -279,11 +279,11 @@ define protected amdgpu_kernel void @kernel_round1(ptr addrspace(1) nocapture no ; CHECK-NEXT: v_lshlrev_b32_e32 v0, 2, v0 ; CHECK-NEXT: ds_write_b32 v0, v58 ; CHECK-NEXT: s_branch .LBB0_7 -; CHECK-NEXT: .LBB0_16: ; %Flow45 +; CHECK-NEXT: .LBB0_16: ; %Flow43 ; CHECK-NEXT: ; in Loop: Header=BB0_5 Depth=1 ; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s69 ; CHECK-NEXT: v_mov_b32_e32 v57, v0 -; CHECK-NEXT: .LBB0_17: ; %Flow46 +; CHECK-NEXT: .LBB0_17: ; %Flow44 ; CHECK-NEXT: ; in Loop: Header=BB0_5 Depth=1 ; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s68 ; CHECK-NEXT: s_mov_b32 s55, exec_lo @@ -330,11 +330,11 @@ define protected amdgpu_kernel void @kernel_round1(ptr addrspace(1) nocapture no ; CHECK-NEXT: v_lshlrev_b32_e32 v0, 2, v0 ; CHECK-NEXT: ds_write_b32 v0, v57 ; CHECK-NEXT: s_branch .LBB0_19 -; CHECK-NEXT: .LBB0_22: ; %Flow43 +; CHECK-NEXT: .LBB0_22: ; %Flow41 ; CHECK-NEXT: ; in Loop: Header=BB0_5 Depth=1 ; CHECK-NEXT: s_inst_prefetch 0x2 ; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s68 -; CHECK-NEXT: .LBB0_23: ; %Flow44 +; CHECK-NEXT: .LBB0_23: ; %Flow42 ; CHECK-NEXT: ; in Loop: Header=BB0_5 Depth=1 ; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s55 ; CHECK-NEXT: ; %bb.24: ; in Loop: Header=BB0_5 Depth=1 @@ -347,7 +347,7 @@ define protected amdgpu_kernel void @kernel_round1(ptr addrspace(1) nocapture no ; CHECK-NEXT: s_or_b32 s53, s4, s53 ; CHECK-NEXT: s_andn2_b32 exec_lo, exec_lo, s53 ; CHECK-NEXT: s_cbranch_execnz .LBB0_5 -; CHECK-NEXT: .LBB0_25: ; %Flow51 +; CHECK-NEXT: .LBB0_25: ; %Flow49 ; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s52 ; CHECK-NEXT: v_mov_b32_e32 v31, v40 ; CHECK-NEXT: v_mov_b32_e32 v0, 1 diff --git a/llvm/test/CodeGen/AMDGPU/schedule-amdgpu-trackers.ll b/llvm/test/CodeGen/AMDGPU/schedule-amdgpu-trackers.ll index c5732531f5423..83ce91108f7f8 100644 --- a/llvm/test/CodeGen/AMDGPU/schedule-amdgpu-trackers.ll +++ b/llvm/test/CodeGen/AMDGPU/schedule-amdgpu-trackers.ll @@ -75,9 +75,9 @@ define amdgpu_kernel void @constant_zextload_v64i16_to_v64i32(ptr addrspace(1) % ; CHECK-LABEL: {{^}}excess_soft_clause_reg_pressure: ; GFX908: NumSgprs: 64 ; GFX908-GCNTRACKERS: NumSgprs: 64 -; GFX908: NumVgprs: 43 +; GFX908: NumVgprs: 39 ; GFX908-GCNTRACKERS: NumVgprs: 39 -; GFX908: Occupancy: 5 +; GFX908: Occupancy: 6 ; GFX908-GCNTRACKERS: Occupancy: 6 diff --git a/llvm/test/Transforms/SinkGEPConstOffset/AMDGPU/sink-gep-const-offset.ll b/llvm/test/Transforms/SinkGEPConstOffset/AMDGPU/sink-gep-const-offset.ll new file mode 100644 index 0000000000000..1721a17467b28 --- /dev/null +++ b/llvm/test/Transforms/SinkGEPConstOffset/AMDGPU/sink-gep-const-offset.ll @@ -0,0 +1,106 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt < %s -mtriple=amdgcn-amd-amdhsa \ +; RUN: -passes=sink-gep-const-offset -S | FileCheck %s + +define void @kernel__0(ptr addrspace(5) noalias %pout, ptr addrspace(3) noalias %pin, i32 %num, i32 %ofst0, i32 %ofst1, i32 %ofst2, i32 %ofst3, i32 %ofst4) { +; CHECK-LABEL: @kernel__0( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = srem i32 [[NUM:%.*]], 1024 +; CHECK-NEXT: [[TMP1:%.*]] = add nsw i32 [[TMP0]], 3 +; CHECK-NEXT: [[TMP2:%.*]] = add nsw i32 [[TMP0]], [[OFST0:%.*]] +; CHECK-NEXT: [[TMP3:%.*]] = add nsw i32 [[TMP0]], [[OFST1:%.*]] +; CHECK-NEXT: [[TMP4:%.*]] = add nsw i32 [[TMP0]], [[OFST2:%.*]] +; CHECK-NEXT: [[TMP5:%.*]] = add nsw i32 
[[TMP0]], [[OFST3:%.*]] +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds half, ptr addrspace(3) [[PIN:%.*]], i32 [[TMP2]] +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds half, ptr addrspace(3) [[TMP6]], i32 111 +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds half, ptr addrspace(3) [[PIN]], i32 [[TMP3]] +; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds half, ptr addrspace(3) [[TMP8]], i32 111 +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds half, ptr addrspace(3) [[PIN]], i32 [[TMP2]] +; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds half, ptr addrspace(3) [[PIN]], i32 [[TMP3]] +; CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds half, ptr addrspace(3) [[TMP10]], i32 555 +; CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds half, ptr addrspace(3) [[TMP11]], i32 666 +; CHECK-NEXT: [[TMP12:%.*]] = mul nsw i32 [[TMP0]], [[OFST0]] +; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds half, ptr addrspace(5) [[POUT:%.*]], i32 [[TMP12]] +; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds half, ptr addrspace(5) [[TMP13]], i32 [[TMP4]] +; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds half, ptr addrspace(5) [[TMP14]], i32 555 +; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds half, ptr addrspace(5) [[TMP13]], i32 [[TMP5]] +; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds half, ptr addrspace(5) [[TMP16]], i32 555 +; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds half, ptr addrspace(5) [[POUT]], i32 [[TMP12]] +; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds half, ptr addrspace(5) [[TMP18]], i32 [[TMP4]] +; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds half, ptr addrspace(5) [[TMP18]], i32 [[TMP5]] +; CHECK-NEXT: [[T2:%.*]] = getelementptr inbounds half, ptr addrspace(5) [[TMP19]], i32 1443 +; CHECK-NEXT: [[T3:%.*]] = getelementptr inbounds half, ptr addrspace(5) [[TMP20]], i32 1554 +; CHECK-NEXT: br label [[LOOP:%.*]] +; CHECK: loop: +; CHECK-NEXT: [[TMP21:%.*]] = load half, ptr addrspace(3) [[TMP7]], align 2 +; CHECK-NEXT: [[TMP22:%.*]] = load half, ptr addrspace(3) [[TMP9]], align 2 +; CHECK-NEXT: [[TMP23:%.*]] = load half, ptr addrspace(3) [[T0]], align 2 +; CHECK-NEXT: [[TMP24:%.*]] = load half, ptr addrspace(3) [[T1]], align 2 +; CHECK-NEXT: store half [[TMP21]], ptr addrspace(5) [[TMP15]], align 2 +; CHECK-NEXT: store half [[TMP22]], ptr addrspace(5) [[TMP17]], align 2 +; CHECK-NEXT: store half [[TMP23]], ptr addrspace(5) [[T2]], align 2 +; CHECK-NEXT: store half [[TMP24]], ptr addrspace(5) [[T3]], align 2 +; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds i32, ptr addrspace(3) [[PIN]], i32 [[OFST4:%.*]] +; CHECK-NEXT: [[TMP26:%.*]] = load i32, ptr addrspace(3) [[TMP25]], align 4 +; CHECK-NEXT: [[TMP27:%.*]] = getelementptr inbounds i32, ptr addrspace(5) [[POUT]], i32 [[OFST4]] +; CHECK-NEXT: [[TMP28:%.*]] = load i32, ptr addrspace(5) [[TMP27]], align 4 +; CHECK-NEXT: store i32 [[TMP28]], ptr addrspace(3) [[PIN]], align 4 +; CHECK-NEXT: [[COND:%.*]] = icmp eq i32 [[TMP26]], [[TMP28]] +; CHECK-NEXT: br i1 [[COND]], label [[LOOP]], label [[EXIT:%.*]] +; CHECK: exit: +; CHECK-NEXT: ret void +; +entry: + %0 = srem i32 %num, 1024 + %1 = add nsw i32 %0, 3 + %2 = add nsw i32 %0, %ofst0 + %3 = add nsw i32 %0, %ofst1 + %4 = add nsw i32 %0, %ofst2 + %5 = add nsw i32 %0, %ofst3 + + %6 = getelementptr inbounds half, ptr addrspace(3) %pin, i32 111 + %7 = getelementptr inbounds half, ptr addrspace(3) %6, i32 %2 + %8 = getelementptr inbounds half, ptr addrspace(3) %6, i32 %3 + %9 = getelementptr inbounds half, ptr addrspace(3) %pin, i32 222 + %10 = 
getelementptr inbounds half, ptr addrspace(3) %9, i32 %2 + %11 = getelementptr inbounds half, ptr addrspace(3) %9, i32 %3 + %t0 = getelementptr inbounds half, ptr addrspace(3) %10, i32 333 + %t1 = getelementptr inbounds half, ptr addrspace(3) %11, i32 444 + + %12 = mul nsw i32 %0, %ofst0 + + %13 = getelementptr inbounds half, ptr addrspace(5) %pout, i32 555 + %14 = getelementptr inbounds half, ptr addrspace(5) %13, i32 %12 + %15 = getelementptr inbounds half, ptr addrspace(5) %14, i32 %4 + %16 = getelementptr inbounds half, ptr addrspace(5) %14, i32 %5 + %17 = getelementptr inbounds half, ptr addrspace(5) %pout, i32 666 + %18 = getelementptr inbounds half, ptr addrspace(5) %17, i32 %12 + %19 = getelementptr inbounds half, ptr addrspace(5) %18, i32 %4 + %20 = getelementptr inbounds half, ptr addrspace(5) %18, i32 %5 + %t2 = getelementptr inbounds half, ptr addrspace(5) %19, i32 777 + %t3 = getelementptr inbounds half, ptr addrspace(5) %20, i32 888 + + br label %loop + +loop: ; loop + %21 = load half, ptr addrspace(3) %7, align 2 + %22 = load half, ptr addrspace(3) %8, align 2 + %23 = load half, ptr addrspace(3) %t0, align 2 + %24 = load half, ptr addrspace(3) %t1, align 2 + + store half %21, ptr addrspace(5) %15, align 2 + store half %22, ptr addrspace(5) %16, align 2 + store half %23, ptr addrspace(5) %t2, align 2 + store half %24, ptr addrspace(5) %t3, align 2 + + %25 = getelementptr inbounds i32, ptr addrspace(3) %pin, i32 %ofst4 + %26 = load i32, ptr addrspace(3) %25, align 4 + %27 = getelementptr inbounds i32, ptr addrspace(5) %pout, i32 %ofst4 + %28 = load i32, ptr addrspace(5) %27, align 4 + store i32 %28, ptr addrspace(3) %pin, align 4 + %cond = icmp eq i32 %26, %28 + br i1 %cond, label %loop, label %exit + +exit: + ret void +} diff --git a/llvm/utils/gn/secondary/llvm/lib/Transforms/Scalar/BUILD.gn b/llvm/utils/gn/secondary/llvm/lib/Transforms/Scalar/BUILD.gn index 28efe0db6a82c..5310a0eaf4f92 100644 --- a/llvm/utils/gn/secondary/llvm/lib/Transforms/Scalar/BUILD.gn +++ b/llvm/utils/gn/secondary/llvm/lib/Transforms/Scalar/BUILD.gn @@ -82,6 +82,7 @@ static_library("Scalar") { "ScalarizeMaskedMemIntrin.cpp", "Scalarizer.cpp", "SeparateConstOffsetFromGEP.cpp", + "SinkGEPConstOffset.cpp", "SimpleLoopUnswitch.cpp", "SimplifyCFGPass.cpp", "Sink.cpp",