diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp
index 06a92bde4fde1..cbef18555c4ba 100644
--- a/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -91,6 +91,7 @@
 #include <cstring>
 #include <iterator>
 #include <memory>
+#include <queue>
 #include <string>
 #include <tuple>
 #include <utility>
@@ -2667,6 +2668,90 @@ static Value *insertVector(IRBuilderTy &IRB, Value *Old, Value *V,
   return V;
 }
 
+/// This function takes two vector values and combines them into a single vector
+/// by concatenating their elements. The function handles:
+///
+/// 1. Element type mismatch: If either vector's element type differs from
+///    NewAIEltType, the function bitcasts the vector to use NewAIEltType while
+///    preserving the total bit width (adjusting the number of elements
+///    accordingly).
+///
+/// 2. Size mismatch: After transforming the vectors to have the desired element
+///    type, if the two vectors have different numbers of elements, the smaller
+///    vector is extended with poison values to match the size of the larger
+///    vector before concatenation.
+///
+/// 3. Concatenation: The vectors are merged using a shuffle operation that
+///    places all elements of V0 first, followed by all elements of V1.
+///
+/// \param V0 The first vector to merge (must be a vector type)
+/// \param V1 The second vector to merge (must be a vector type)
+/// \param DL The data layout for size calculations
+/// \param NewAIEltTy The desired element type for the result vector
+/// \param Builder IRBuilder for creating new instructions
+/// \return A new vector containing all elements from V0 followed by all
+///         elements from V1
+static Value *mergeTwoVectors(Value *V0, Value *V1, const DataLayout &DL,
+                              Type *NewAIEltTy, IRBuilder<> &Builder) {
+  // V0 and V1 are vectors
+  // Create a new vector type with combined elements
+  // Use ShuffleVector to concatenate the vectors
+  auto *VecType0 = cast<FixedVectorType>(V0->getType());
+  auto *VecType1 = cast<FixedVectorType>(V1->getType());
+
+  // If V0/V1 element types are different from NewAllocaElementType,
+  // we need to introduce bitcasts before merging them
+  auto BitcastIfNeeded = [&](Value *&V, FixedVectorType *&VecType,
+                             const char *DebugName) {
+    Type *EltType = VecType->getElementType();
+    if (EltType != NewAIEltTy) {
+      // Calculate new number of elements to maintain same bit width
+      unsigned TotalBits =
+          VecType->getNumElements() * DL.getTypeSizeInBits(EltType);
+      unsigned NewNumElts = TotalBits / DL.getTypeSizeInBits(NewAIEltTy);
+
+      auto *NewVecType = FixedVectorType::get(NewAIEltTy, NewNumElts);
+      V = Builder.CreateBitCast(V, NewVecType);
+      VecType = NewVecType;
+      LLVM_DEBUG(dbgs() << "    bitcast " << DebugName << ": " << *V << "\n");
+    }
+  };
+
+  BitcastIfNeeded(V0, VecType0, "V0");
+  BitcastIfNeeded(V1, VecType1, "V1");
+
+  unsigned NumElts0 = VecType0->getNumElements();
+  unsigned NumElts1 = VecType1->getNumElements();
+
+  SmallVector<int> ShuffleMask;
+
+  if (NumElts0 == NumElts1) {
+    for (unsigned i = 0; i < NumElts0 + NumElts1; ++i)
+      ShuffleMask.push_back(i);
+  } else {
+    // If two vectors have different sizes, we need to extend
+    // the smaller vector to the size of the larger vector.
+    unsigned SmallSize = std::min(NumElts0, NumElts1);
+    unsigned LargeSize = std::max(NumElts0, NumElts1);
+    bool IsV0Smaller = NumElts0 < NumElts1;
+    Value *&ExtendedVec = IsV0Smaller ? V0 : V1;
+    SmallVector<int> ExtendMask;
+    for (unsigned i = 0; i < SmallSize; ++i)
+      ExtendMask.push_back(i);
+    for (unsigned i = SmallSize; i < LargeSize; ++i)
+      ExtendMask.push_back(PoisonMaskElem);
+    ExtendedVec = Builder.CreateShuffleVector(
+        ExtendedVec, PoisonValue::get(ExtendedVec->getType()), ExtendMask);
+    LLVM_DEBUG(dbgs() << "    shufflevector: " << *ExtendedVec << "\n");
+    for (unsigned i = 0; i < NumElts0; ++i)
+      ShuffleMask.push_back(i);
+    for (unsigned i = 0; i < NumElts1; ++i)
+      ShuffleMask.push_back(LargeSize + i);
+  }
+
+  return Builder.CreateShuffleVector(V0, V1, ShuffleMask);
+}
+
 namespace {
 
 /// Visitor to rewrite instructions using p particular slice of an alloca
@@ -2811,6 +2896,213 @@ class AllocaSliceRewriter : public InstVisitor<AllocaSliceRewriter, bool> {
     return CanSROA;
   }
 
+  /// Attempts to rewrite a partition using tree-structured merge optimization.
+  ///
+  /// This function analyzes a partition to determine if it can be optimized
+  /// using a tree-structured merge pattern, where multiple non-overlapping
+  /// stores completely fill an alloca. And there is no load from the alloca in
+  /// the middle of the stores. Such patterns can be optimized by eliminating
+  /// the intermediate stores and directly constructing the final vector by
+  /// using shufflevectors.
+  ///
+  /// Example transformation:
+  /// Before: (stores do not have to be in order)
+  ///   %alloca = alloca <8 x float>
+  ///   store <2 x float> %val0, ptr %alloca     ; offset 0-1
+  ///   store <2 x float> %val2, ptr %alloca+16  ; offset 4-5
+  ///   store <2 x float> %val1, ptr %alloca+8   ; offset 2-3
+  ///   store <2 x float> %val3, ptr %alloca+24  ; offset 6-7
+  ///
+  /// After:
+  ///   %alloca = alloca <8 x float>
+  ///   %shuffle0 = shufflevector %val0, %val1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  ///   %shuffle1 = shufflevector %val2, %val3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  ///   %shuffle2 = shufflevector %shuffle0, %shuffle1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  ///   store %shuffle2, ptr %alloca
+  ///
+  /// The optimization looks for partitions that:
+  /// 1. Have no overlapping split slice tails
+  /// 2. Contain non-overlapping stores that cover the entire alloca
+  /// 3. Have exactly one load that reads the complete alloca structure and not
+  ///    in the middle of the stores (TODO: maybe we can relax the constraint
+  ///    about reading the entire alloca structure)
+  ///
+  /// \param P The partition to analyze and potentially rewrite
+  /// \return An optional vector of values that were deleted during the rewrite
+  ///         process, or std::nullopt if the partition cannot be optimized
+  ///         using tree-structured merge
+  std::optional<SmallVector<Value *, 4>>
+  rewriteTreeStructuredMerge(Partition &P) {
+    // No tail slices that overlap with the partition
+    if (P.splitSliceTails().size() > 0)
+      return std::nullopt;
+
+    SmallVector<Value *, 4> DeletedValues;
+    LoadInst *TheLoad = nullptr;
+
+    // Structure to hold store information
+    struct StoreInfo {
+      StoreInst *Store;
+      uint64_t BeginOffset;
+      uint64_t EndOffset;
+      Value *StoredValue;
+      StoreInfo(StoreInst *SI, uint64_t Begin, uint64_t End, Value *Val)
+          : Store(SI), BeginOffset(Begin), EndOffset(End), StoredValue(Val) {}
+    };
+
+    SmallVector<StoreInfo, 4> StoreInfos;
+
+    // If the new alloca is a fixed vector type, we use its element type as the
+    // allocated element type, otherwise we use i8 as the allocated element
+    Type *AllocatedEltTy =
+        isa<FixedVectorType>(NewAI.getAllocatedType())
+            ? cast<FixedVectorType>(NewAI.getAllocatedType())->getElementType()
+            : Type::getInt8Ty(NewAI.getContext());
+
+    // Helper to check if a type is
+    // 1. A fixed vector type
+    // 2. The element type is not a pointer
+    // 3. The element type size is byte-aligned
+    // We only handle the cases that the ld/st meet these conditions
+    auto IsTypeValidForTreeStructuredMerge = [&](Type *Ty) -> bool {
+      auto *FixedVecTy = dyn_cast<FixedVectorType>(Ty);
+      return FixedVecTy &&
+             DL.getTypeSizeInBits(FixedVecTy->getElementType()) % 8 == 0 &&
+             !FixedVecTy->getElementType()->isPointerTy();
+    };
+
+    for (Slice &S : P) {
+      auto *User = cast<Instruction>(S.getUse()->getUser());
+      if (auto *LI = dyn_cast<LoadInst>(User)) {
+        // Do not handle the case if
+        // 1. There is more than one load
+        // 2. The load is volatile
+        // 3. The load does not read the entire alloca structure
+        // 4. The load does not meet the conditions in the helper function
+        if (TheLoad || !IsTypeValidForTreeStructuredMerge(LI->getType()) ||
+            S.beginOffset() != NewAllocaBeginOffset ||
+            S.endOffset() != NewAllocaEndOffset || LI->isVolatile())
+          return std::nullopt;
+        TheLoad = LI;
+      } else if (auto *SI = dyn_cast<StoreInst>(User)) {
+        // Do not handle the case if
+        // 1. The store does not meet the conditions in the helper function
+        // 2. The store is volatile
+        if (!IsTypeValidForTreeStructuredMerge(
+                SI->getValueOperand()->getType()) ||
+            SI->isVolatile())
+          return std::nullopt;
+        StoreInfos.emplace_back(SI, S.beginOffset(), S.endOffset(),
+                                SI->getValueOperand());
+      } else {
+        // If we have instructions other than load and store, we cannot do the
+        // tree structured merge
+        return std::nullopt;
+      }
+    }
+    // If we do not have any load, we cannot do the tree structured merge
+    if (!TheLoad)
+      return std::nullopt;
+
+    // If we do not have multiple stores, we cannot do the tree structured merge
+    if (StoreInfos.size() < 2)
+      return std::nullopt;
+
+    // Stores should not overlap and should cover the whole alloca
+    // Sort by begin offset
+    llvm::sort(StoreInfos, [](const StoreInfo &A, const StoreInfo &B) {
+      return A.BeginOffset < B.BeginOffset;
+    });
+
+    // Check for overlaps and coverage
+    uint64_t ExpectedStart = NewAllocaBeginOffset;
+    for (auto &StoreInfo : StoreInfos) {
+      uint64_t BeginOff = StoreInfo.BeginOffset;
+      uint64_t EndOff = StoreInfo.EndOffset;
+
+      // Check for gap or overlap
+      if (BeginOff != ExpectedStart)
+        return std::nullopt;
+
+      ExpectedStart = EndOff;
+    }
+    // Check that stores cover the entire alloca
+    if (ExpectedStart != NewAllocaEndOffset)
+      return std::nullopt;
+
+    // Stores should be in the same basic block
+    // The load should not be in the middle of the stores
+    // Note:
+    // If the load is in a different basic block with the stores, we can still
+    // do the tree structured merge. This is because we do not have the
+    // store->load forwarding here. The merged vector will be stored back to
+    // NewAI and the new load will load from NewAI. The forwarding will be
+    // handled later when we try to promote NewAI.
+    BasicBlock *LoadBB = TheLoad->getParent();
+    BasicBlock *StoreBB = StoreInfos[0].Store->getParent();
+
+    for (auto &StoreInfo : StoreInfos) {
+      if (StoreInfo.Store->getParent() != StoreBB)
+        return std::nullopt;
+      if (LoadBB == StoreBB && !StoreInfo.Store->comesBefore(TheLoad))
+        return std::nullopt;
+    }
+
+    // If we reach here, the partition can be merged with a tree structured
+    // merge
+    LLVM_DEBUG({
+      dbgs() << "Tree structured merge rewrite:\n  Load: " << *TheLoad
+             << "\n  Ordered stores:\n";
+      for (auto [i, Info] : enumerate(StoreInfos))
+        dbgs() << "    [" << i << "] Range[" << Info.BeginOffset << ", "
+               << Info.EndOffset << ") \tStore: " << *Info.Store
+               << "\tValue: " << *Info.StoredValue << "\n";
+    });
+
+    // Instead of having these stores, we merge all the stored values into a
+    // vector and store the merged value into the alloca
+    std::queue<Value *> VecElements;
+    IRBuilder<> Builder(StoreInfos.back().Store);
+    for (const auto &Info : StoreInfos) {
+      DeletedValues.push_back(Info.Store);
+      VecElements.push(Info.StoredValue);
+    }
+
+    LLVM_DEBUG(dbgs() << "  Rewrite stores into shufflevectors:\n");
+    while (VecElements.size() > 1) {
+      const auto NumElts = VecElements.size();
+      for ([[maybe_unused]] const auto _ : llvm::seq(NumElts / 2)) {
+        Value *V0 = VecElements.front();
+        VecElements.pop();
+        Value *V1 = VecElements.front();
+        VecElements.pop();
+        Value *Merged = mergeTwoVectors(V0, V1, DL, AllocatedEltTy, Builder);
+        LLVM_DEBUG(dbgs() << "    shufflevector: " << *Merged << "\n");
+        VecElements.push(Merged);
+      }
+      if (NumElts % 2 == 1) {
+        Value *V = VecElements.front();
+        VecElements.pop();
+        VecElements.push(V);
+      }
+    }
+
+    // Store the merged value into the alloca
+    Value *MergedValue = VecElements.front();
+    Builder.CreateAlignedStore(MergedValue, &NewAI, getSliceAlign());
+
+    IRBuilder<> LoadBuilder(TheLoad);
+    TheLoad->replaceAllUsesWith(LoadBuilder.CreateAlignedLoad(
+        TheLoad->getType(), &NewAI, getSliceAlign(), TheLoad->isVolatile(),
+        TheLoad->getName() + ".sroa.new.load"));
+    DeletedValues.push_back(TheLoad);
+
+    return DeletedValues;
+  }
+
 private:
   // Make sure the other visit overloads are visible.
   using Base::visit;
@@ -4981,13 +5273,20 @@ AllocaInst *SROA::rewritePartition(AllocaInst &AI, AllocaSlices &AS,
                                P.endOffset(), IsIntegerPromotable, VecTy,
                                PHIUsers, SelectUsers);
   bool Promotable = true;
-  for (Slice *S : P.splitSliceTails()) {
-    Promotable &= Rewriter.visit(S);
-    ++NumUses;
-  }
-  for (Slice &S : P) {
-    Promotable &= Rewriter.visit(&S);
-    ++NumUses;
+  // Check whether we can have tree-structured merge.
+  if (auto DeletedValues = Rewriter.rewriteTreeStructuredMerge(P)) {
+    NumUses += DeletedValues->size() + 1;
+    for (Value *V : *DeletedValues)
+      DeadInsts.push_back(V);
+  } else {
+    for (Slice *S : P.splitSliceTails()) {
+      Promotable &= Rewriter.visit(S);
+      ++NumUses;
+    }
+    for (Slice &S : P) {
+      Promotable &= Rewriter.visit(&S);
+      ++NumUses;
+    }
   }
 
   NumAllocaPartitionUses += NumUses;
diff --git a/llvm/test/Transforms/SROA/vector-promotion-cannot-tree-structure-merge.ll b/llvm/test/Transforms/SROA/vector-promotion-cannot-tree-structure-merge.ll
new file mode 100644
index 0000000000000..c858d071451e8
--- /dev/null
+++ b/llvm/test/Transforms/SROA/vector-promotion-cannot-tree-structure-merge.ll
@@ -0,0 +1,222 @@
+; REQUIRES: asserts
+; RUN: opt < %s -passes='sroa<preserve-cfg>' -disable-output -debug-only=sroa 2>&1 | FileCheck %s
+; RUN: opt < %s -passes='sroa<modify-cfg>' -disable-output -debug-only=sroa 2>&1 | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n8:16:32:64"
+
+; CHECK-NOT: Tree structured merge rewrite
+define i32 @test_alloca_not_fixed_vector() {
+entry:
+  %alloca = alloca [4 x float]
+
+  %ptr0 = getelementptr inbounds [4 x float], ptr %alloca, i32 0, i32 0
+  store float 1.0, ptr %ptr0
+
+  %ptr1 = getelementptr inbounds [4 x float], ptr %alloca, i32 0, i32 1
+  store float 2.0, ptr %ptr1
+
+  %result = load i32, ptr %alloca
ret i32 %result +} + +define <4 x float> @test_more_than_one_load(<2 x float> %a, <2 x float> %b) { +entry: + %alloca = alloca [4 x float] + + %ptr0 = getelementptr inbounds [4 x float], ptr %alloca, i32 0, i32 0 + store <2 x float> %a, ptr %ptr0 + + %ptr1 = getelementptr inbounds [4 x float], ptr %alloca, i32 0, i32 2 + store <2 x float> %b, ptr %ptr1 + + %result1 = load <4 x float>, ptr %alloca + %result2 = load <4 x float>, ptr %alloca + + %final = fadd <4 x float> %result1, %result2 + ret <4 x float> %final +} + +define void @test_no_load(<4 x float> %a) { +entry: + %alloca = alloca [4 x float] + store <4 x float> %a, ptr %alloca + ret void +} + +define i32 @test_load_not_fixed_vector(<2 x float> %a, <2 x float> %b) { +entry: + %alloca = alloca [4 x float] + + %ptr0 = getelementptr inbounds [4 x float], ptr %alloca, i32 0, i32 0 + store <2 x float> %a, ptr %ptr0 + + %ptr1 = getelementptr inbounds [4 x float], ptr %alloca, i32 0, i32 2 + store <2 x float> %b, ptr %ptr1 + + %result = load i32, ptr %alloca + ret i32 %result +} + +define <3 x float> @test_load_not_covering_alloca(<2 x float> %a, <2 x float> %b) { +entry: + %alloca = alloca [4 x float] + + %ptr0 = getelementptr inbounds [4 x float], ptr %alloca, i32 0, i32 0 + store <2 x float> %a, ptr %ptr0 + + %ptr1 = getelementptr inbounds [4 x float], ptr %alloca, i32 0, i32 2 + store <2 x float> %b, ptr %ptr1 + + %result = load <3 x float>, ptr %ptr0 + ret <3 x float> %result +} + +define <4 x float> @test_store_not_fixed_vector( %a) { +entry: + %alloca = alloca [4 x float] + + %ptr0 = getelementptr inbounds [4 x float], ptr %alloca, i32 0, i32 0 + %fixed = extractelement %a, i32 0 + store float %fixed, ptr %ptr0 + + %result = load <4 x float>, ptr %alloca + ret <4 x float> %result +} + + +define <4 x float> @test_no_stores() { +entry: + %alloca = alloca [4 x float] + + %result = load <4 x float>, ptr %alloca + ret <4 x float> %result +} + +define <4 x float> @test_stores_overlapping(<2 x float> %a, <2 x float> 
%b, <2 x float> %c) { +entry: + %alloca = alloca [4 x float] + + %ptr0 = getelementptr inbounds [4 x float], ptr %alloca, i32 0, i32 0 + store <2 x float> %a, ptr %ptr0 + + %ptr1 = getelementptr inbounds [4 x float], ptr %alloca, i32 0, i32 1 + store <2 x float> %b, ptr %ptr1 + + %ptr2 = getelementptr inbounds [4 x float], ptr %alloca, i32 0, i32 2 + store <2 x float> %c, ptr %ptr2 + + %result = load <4 x float>, ptr %alloca + ret <4 x float> %result +} + +define <4 x float> @test_stores_not_covering_alloca(<2 x float> %a) { +entry: + %alloca = alloca [4 x float] + + %ptr0 = getelementptr inbounds [4 x float], ptr %alloca, i32 0, i32 0 + store <2 x float> %a, ptr %ptr0 + + %result = load <4 x float>, ptr %alloca + ret <4 x float> %result +} + +define <4 x float> @test_stores_not_same_basic_block(<2 x float> %a, <2 x float> %b, i1 %cond) { +entry: + %alloca = alloca [4 x float] + + %ptr0 = getelementptr inbounds [4 x float], ptr %alloca, i32 0, i32 0 + store <2 x float> %a, ptr %ptr0 + + br i1 %cond, label %then, label %else + +then: + %ptr1 = getelementptr inbounds [4 x float], ptr %alloca, i32 0, i32 2 + store <2 x float> %b, ptr %ptr1 + br label %merge + +else: + br label %merge + +merge: + %result = load <4 x float>, ptr %alloca + ret <4 x float> %result +} + +define <4 x float> @test_load_before_stores(<2 x float> %a, <2 x float> %b) { +entry: + %alloca = alloca [4 x float] + + %ptr0 = getelementptr inbounds [4 x float], ptr %alloca, i32 0, i32 0 + store <2 x float> %a, ptr %ptr0 + + %intermediate = load <4 x float>, ptr %alloca + + %ptr1 = getelementptr inbounds [4 x float], ptr %alloca, i32 0, i32 2 + store <2 x float> %b, ptr %ptr1 + + ret <4 x float> %intermediate +} + +define <4 x float> @test_other_instructions(<2 x float> %a, <2 x float> %b) { +entry: + %alloca = alloca [4 x float] + + ; Store first vector + %ptr0 = getelementptr inbounds [4 x float], ptr %alloca, i32 0, i32 0 + store <2 x float> %a, ptr %ptr0 + + ; Other instruction (memset) that's not 
a simple load/store + call void @llvm.memset.p0.i64(ptr %alloca, i8 0, i64 8, i1 false) + + ; Store second vector + %ptr1 = getelementptr inbounds [4 x float], ptr %alloca, i32 0, i32 2 + store <2 x float> %b, ptr %ptr1 + + %result = load <4 x float>, ptr %alloca + ret <4 x float> %result +} + +define <4 x float> @volatile_stores(<2 x i32> %a, <2 x i32> %b) { +entry: + %alloca = alloca [4 x float] + + %ptr0 = getelementptr inbounds [4 x float], ptr %alloca, i32 0, i32 0 + store volatile <2 x i32> %a, ptr %ptr0 + + %ptr1 = getelementptr inbounds [4 x float], ptr %alloca, i32 0, i32 2 + store volatile <2 x i32> %b, ptr %ptr1 + + %result = load <4 x float>, ptr %alloca + ret <4 x float> %result +} + +define <4 x float> @volatile_loads(<2 x i32> %a, <2 x i32> %b) { +entry: + %alloca = alloca [4 x float] + + %ptr0 = getelementptr inbounds [4 x float], ptr %alloca, i32 0, i32 0 + store <2 x i32> %a, ptr %ptr0 + + %ptr1 = getelementptr inbounds [4 x float], ptr %alloca, i32 0, i32 2 + store <2 x i32> %b, ptr %ptr1 + + %result = load volatile <4 x float>, ptr %alloca + ret <4 x float> %result +} + +define <4 x i15> @non_byte_aligned_alloca(<2 x i15> %a, <2 x i15> %b) { +entry: + %alloca = alloca [4 x i15] + + %ptr0 = getelementptr inbounds [4 x i15], ptr %alloca, i32 0, i32 0 + store <2 x i15> %a, ptr %ptr0 + + %ptr1 = getelementptr inbounds [4 x i15], ptr %alloca, i32 0, i32 2 + store <2 x i15> %b, ptr %ptr1 + + %result = load <4 x i15>, ptr %alloca + ret <4 x i15> %result + +} + +declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg) diff --git a/llvm/test/Transforms/SROA/vector-promotion-via-tree-structure-merge.ll b/llvm/test/Transforms/SROA/vector-promotion-via-tree-structure-merge.ll new file mode 100644 index 0000000000000..8bfe0bb83051e --- /dev/null +++ b/llvm/test/Transforms/SROA/vector-promotion-via-tree-structure-merge.ll @@ -0,0 +1,301 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; 
+; RUN: opt < %s -passes='sroa<preserve-cfg>' -S | FileCheck %s --check-prefixes=CHECK,CHECK-PRESERVE-CFG
+; RUN: opt < %s -passes='sroa<modify-cfg>' -S | FileCheck %s --check-prefixes=CHECK,CHECK-MODIFY-CFG
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n8:16:32:64"
+
+; Basic tree-structured merge: 4 stores of <2 x float> into <8 x float>
+define <8 x float> @basic_tree_merge(<2 x float> %a, <2 x float> %b, <2 x float> %c, <2 x float> %d) {
+; CHECK-LABEL: define <8 x float> @basic_tree_merge(
+; CHECK-SAME: <2 x float> [[A:%.*]], <2 x float> [[B:%.*]], <2 x float> [[C:%.*]], <2 x float> [[D:%.*]]) {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = shufflevector <2 x float> [[A]], <2 x float> [[B]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <2 x float> [[C]], <2 x float> [[D]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <4 x float> [[TMP0]], <4 x float> [[TMP1]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; CHECK-NEXT:    ret <8 x float> [[TMP2]]
+;
+entry:
+  %alloca = alloca [8 x float]
+
+  %ptr0 = getelementptr inbounds [8 x float], ptr %alloca, i32 0, i32 0
+  store <2 x float> %a, ptr %ptr0
+
+  %ptr1 = getelementptr inbounds [8 x float], ptr %alloca, i32 0, i32 2
+  store <2 x float> %b, ptr %ptr1
+
+  %ptr2 = getelementptr inbounds [8 x float], ptr %alloca, i32 0, i32 4
+  store <2 x float> %c, ptr %ptr2
+
+  %ptr3 = getelementptr inbounds [8 x float], ptr %alloca, i32 0, i32 6
+  store <2 x float> %d, ptr %ptr3
+
+  %result = load <8 x float>, ptr %alloca
+  ret <8 x float> %result
+}
+
+define void @multiple_partitions(<2 x float> %a, <2 x float> %b, <2 x float> %c, <2 x float> %d, ptr %e, ptr %f) {
+; CHECK-LABEL: define void @multiple_partitions(
+; CHECK-SAME: <2 x float> [[A:%.*]], <2 x float> [[B:%.*]], <2 x float> [[C:%.*]], <2 x float> [[D:%.*]], ptr [[E:%.*]], ptr [[F:%.*]]) {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = shufflevector <2 x float> [[A]], <2 x float> [[B]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <2 x float> [[C]], <2 x float> [[D]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT:    store <4 x float> [[TMP0]], ptr [[E]], align 16
+; CHECK-NEXT:    store <4 x float> [[TMP1]], ptr [[F]], align 16
+; CHECK-NEXT:    ret void
+;
+entry:
+  %alloca = alloca [8 x float]
+
+  %ptr0 = getelementptr inbounds [8 x float], ptr %alloca, i32 0, i32 0
+  store <2 x float> %a, ptr %ptr0
+
+  %ptr1 = getelementptr inbounds [8 x float], ptr %alloca, i32 0, i32 2
+  store <2 x float> %b, ptr %ptr1
+
+  %ptr2 = getelementptr inbounds [8 x float], ptr %alloca, i32 0, i32 4
+  store <2 x float> %c, ptr %ptr2
+
+  %ptr3 = getelementptr inbounds [8 x float], ptr %alloca, i32 0, i32 6
+  store <2 x float> %d, ptr %ptr3
+
+  %result1 = load <4 x float>, ptr %alloca
+
+  %ptr_offset4 = getelementptr inbounds [8 x float], ptr %alloca, i32 0, i32 4
+  %result2 = load <4 x float>, ptr %ptr_offset4
+
+  store <4 x float> %result1, ptr %e
+  store <4 x float> %result2, ptr %f
+
+  ret void
+}
+
+; Out-of-order stores: stores happen in non-sequential order
+define <8 x i32> @out_of_order_stores(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x i32> %d) {
+; CHECK-LABEL: define <8 x i32> @out_of_order_stores(
+; CHECK-SAME: <2 x i32> [[A:%.*]], <2 x i32> [[B:%.*]], <2 x i32> [[C:%.*]], <2 x i32> [[D:%.*]]) {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = shufflevector <2 x i32> [[A]], <2 x i32> [[B]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <2 x i32> [[C]], <2 x i32> [[D]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <4 x i32> [[TMP0]], <4 x i32> [[TMP1]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; CHECK-NEXT:    ret <8 x i32> [[TMP2]]
+;
+entry:
+  %alloca = alloca [8 x i32]
+
+  %ptr2 = getelementptr inbounds [8 x i32], ptr %alloca, i32 0, i32 4
+  store <2 x i32> %c, ptr %ptr2
+
+  %ptr0 = getelementptr inbounds [8 x i32], ptr %alloca, i32 0, i32 0
+  store <2 x i32> %a, ptr %ptr0
+
+  %ptr3 = getelementptr inbounds [8 x i32], ptr %alloca, i32 0, i32 6
+  store <2 x i32> %d, ptr %ptr3
+
+  %ptr1 = getelementptr inbounds [8 x i32], ptr %alloca, i32 0, i32 2
+  store <2 x i32> %b, ptr %ptr1
+
+  %result = load <8 x i32>, ptr %alloca
+  ret <8 x i32> %result
+}
+
+; Single element stores: 8 stores of <1 x i16> into <8 x i16>
+define <8 x i16> @single_element_stores(<1 x i16> %a, <1 x i16> %b, <1 x i16> %c, <1 x i16> %d, <1 x i16> %e, <1 x i16> %f, <1 x i16> %g, <1 x i16> %h) {
+; CHECK-LABEL: define <8 x i16> @single_element_stores(
+; CHECK-SAME: <1 x i16> [[A:%.*]], <1 x i16> [[B:%.*]], <1 x i16> [[C:%.*]], <1 x i16> [[D:%.*]], <1 x i16> [[E:%.*]], <1 x i16> [[F:%.*]], <1 x i16> [[G:%.*]], <1 x i16> [[H:%.*]]) {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = shufflevector <1 x i16> [[A]], <1 x i16> [[B]], <2 x i32> <i32 0, i32 1>
+; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <1 x i16> [[C]], <1 x i16> [[D]], <2 x i32> <i32 0, i32 1>
+; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <1 x i16> [[E]], <1 x i16> [[F]], <2 x i32> <i32 0, i32 1>
+; CHECK-NEXT:    [[TMP3:%.*]] = shufflevector <1 x i16> [[G]], <1 x i16> [[H]], <2 x i32> <i32 0, i32 1>
+; CHECK-NEXT:    [[TMP4:%.*]] = shufflevector <2 x i16> [[TMP0]], <2 x i16> [[TMP1]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <2 x i16> [[TMP2]], <2 x i16> [[TMP3]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <4 x i16> [[TMP4]], <4 x i16> [[TMP5]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; CHECK-NEXT:    ret <8 x i16> [[TMP6]]
+;
+entry:
+  %alloca = alloca [8 x i16]
+
+  %ptr0 = getelementptr inbounds [8 x i16], ptr %alloca, i32 0, i32 0
+  store <1 x i16> %a, ptr %ptr0
+  %ptr1 = getelementptr inbounds [8 x i16], ptr %alloca, i32 0, i32 1
+  store <1 x i16> %b, ptr %ptr1
+  %ptr2 = getelementptr inbounds [8 x i16], ptr %alloca, i32 0, i32 2
+  store <1 x i16> %c, ptr %ptr2
+  %ptr3 = getelementptr inbounds [8 x i16], ptr %alloca, i32 0, i32 3
+  store <1 x i16> %d, ptr %ptr3
+  %ptr4 = getelementptr inbounds [8 x i16], ptr %alloca, i32 0, i32 4
+  store <1 x i16> %e, ptr %ptr4
+  %ptr5 = getelementptr inbounds [8 x i16], ptr %alloca, i32 0, i32 5
+  store <1 x i16> %f, ptr %ptr5
+  %ptr6 = getelementptr inbounds [8 x i16], ptr %alloca, i32 0, i32 6
+  store <1 x i16> %g, ptr %ptr6
+  %ptr7 = getelementptr inbounds [8 x i16], ptr %alloca, i32 0, i32 7
+  store <1 x i16> %h, ptr %ptr7
+
+  %result = load <8 x i16>, ptr %alloca
+  ret <8 x i16> %result
+}
+
+; Non-power-of-2: 3 stores of <2 x float> into <6 x float>
+define <6 x float> @non_power_of_2(<2 x float> %a, <2 x float> %b, <2 x float> %c) {
+; CHECK-LABEL: define <6 x float> @non_power_of_2(
+; CHECK-SAME: <2 x float> [[A:%.*]], <2 x float> [[B:%.*]], <2 x float> [[C:%.*]]) {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = shufflevector <2 x float> [[A]], <2 x float> [[B]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <2 x float> [[C]], <2 x float> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
+; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <4 x float> [[TMP0]], <4 x float> [[TMP1]], <6 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5>
+; CHECK-NEXT:    ret <6 x float> [[TMP2]]
+;
+entry:
+  %alloca = alloca [6 x float]
+
+  %ptr0 = getelementptr inbounds [6 x float], ptr %alloca, i32 0, i32 0
+  store <2 x float> %a, ptr %ptr0
+
+  %ptr1 = getelementptr inbounds [6 x float], ptr %alloca, i32 0, i32 2
+  store <2 x float> %b, ptr %ptr1
+
+  %ptr2 = getelementptr inbounds [6 x float], ptr %alloca, i32 0, i32 4
+  store <2 x float> %c, ptr %ptr2
+
+  %result = load <6 x float>, ptr %alloca
+  ret <6 x float> %result
+}
+
+; Store with different size of vectors
+define <7 x float> @store_with_different_size_of_vectors(<1 x float> %a, <4 x float> %b, <2 x float> %c) {
+; CHECK-LABEL: define <7 x float> @store_with_different_size_of_vectors(
+; CHECK-SAME: <1 x float> [[A:%.*]], <4 x float> [[B:%.*]], <2 x float> [[C:%.*]]) {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = shufflevector <1 x float> [[A]], <1 x float> poison, <4 x i32> <i32 0, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <4 x float> [[TMP0]], <4 x float> [[B]], <5 x i32> <i32 0, i32 4, i32 5, i32 6, i32 7>
+; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <2 x float> [[C]], <2 x float> poison, <5 x i32> <i32 0, i32 1, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT:    [[TMP3:%.*]] = shufflevector <5 x float> [[TMP1]], <5 x float> [[TMP2]], <7 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6>
+; CHECK-NEXT:    ret <7 x float> [[TMP3]]
+;
+entry:
+  %alloca = alloca [7 x float]
+
+  %ptr0 = getelementptr inbounds [7 x float], ptr %alloca, i32 0, i32 0
+  store <1 x float> %a, ptr %ptr0
+
+  %ptr1 = getelementptr inbounds [7 x float], ptr %alloca, i32 0, i32 1
+  store <4 x float> %b, ptr %ptr1
+
+  %ptr2 = getelementptr inbounds [7 x float], ptr %alloca, i32 0, i32 5
+  store <2 x float> %c, ptr %ptr2
+
+  %result = load <7 x float>, ptr %alloca
+  ret <7 x float> %result
+}
+
+; Load and store with different element type
+define <4 x double> @load_store_different_element_type(<2 x i32> %a, <2 x float> %b, <2 x float> %c, <2 x i32> %d) {
+; CHECK-LABEL: define <4 x double> @load_store_different_element_type(
+; CHECK-SAME: <2 x i32> [[A:%.*]], <2 x float> [[B:%.*]], <2 x float> [[C:%.*]], <2 x i32> [[D:%.*]]) {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <1 x double>
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <2 x float> [[B]] to <1 x double>
+; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <1 x double> [[TMP0]], <1 x double> [[TMP1]], <2 x i32> <i32 0, i32 1>
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x float> [[C]] to <1 x double>
+; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <2 x i32> [[D]] to <1 x double>
+; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <1 x double> [[TMP3]], <1 x double> [[TMP4]], <2 x i32> <i32 0, i32 1>
+; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <2 x double> [[TMP2]], <2 x double> [[TMP5]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT:    ret <4 x double> [[TMP6]]
+;
+entry:
+  %alloca = alloca [8 x float]
+
+  %ptr0 = getelementptr inbounds [8 x float], ptr %alloca, i32 0, i32 0
+  store <2 x i32> %a, ptr %ptr0
+
+  %ptr1 = getelementptr inbounds [8 x float], ptr %alloca, i32 0, i32 2
+  store <2 x float> %b, ptr %ptr1
+
+  %ptr2 = getelementptr inbounds [8 x float], ptr %alloca, i32 0, i32 4
+  store <2 x float> %c, ptr %ptr2
+
+  %ptr3 = getelementptr inbounds [8 x float], ptr %alloca, i32 0, i32 6
+  store <2 x i32> %d, ptr %ptr3
+
+  %result = load <4 x double>, ptr %alloca
+  ret <4 x double> %result
+}
+
+define <8 x float> @bitcast_needed(<2 x i32> %a, <2 x i16> %b, <12 x i8> %c, <1 x i64> %d) {
+; CHECK-LABEL: define <8 x float> @bitcast_needed(
+; CHECK-SAME: <2 x i32> [[A:%.*]], <2 x i16> [[B:%.*]], <12 x i8> [[C:%.*]], <1 x i64> [[D:%.*]]) {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <2 x float>
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <2 x i16> [[B]] to <1 x float>
+; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <1 x float> [[TMP1]], <1 x float> poison, <2 x i32> <i32 0, i32 poison>
+; CHECK-NEXT:    [[TMP3:%.*]] = shufflevector <2 x float> [[TMP0]], <2 x float> [[TMP2]], <3 x i32> <i32 0, i32 1, i32 2>
+; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <12 x i8> [[C]] to <3 x float>
+; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <1 x i64> [[D]] to <2 x float>
+; CHECK-NEXT:    [[TMP9:%.*]] = shufflevector <2 x float> [[TMP4]], <2 x float> poison, <3 x i32> <i32 0, i32 1, i32 poison>
+; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <3 x float> [[TMP5]], <3 x float> [[TMP9]], <5 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4>
+; CHECK-NEXT:    [[TMP8:%.*]] = shufflevector <3 x float> [[TMP3]], <3 x float> poison, <5 x i32> <i32 0, i32 1, i32 2, i32 poison, i32 poison>
+; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <5 x float> [[TMP8]], <5 x float> [[TMP7]], <8 x i32> <i32 0, i32 1, i32 2, i32 5, i32 6, i32 7, i32 8, i32 9>
+; CHECK-NEXT:    ret <8 x float> [[TMP6]]
+;
+entry:
+  %alloca = alloca [8 x float]
+
+  %ptr0 = getelementptr inbounds [8 x float], ptr %alloca, i32 0, i32 0
+  store <2 x i32> %a, ptr %ptr0
+
+  %ptr1 = getelementptr inbounds [8 x float], ptr %alloca, i32 0, i32 2
+  store <2 x i16> %b, ptr %ptr1
+
+  %ptr2 = getelementptr inbounds [8 x float], ptr %alloca, i32 0, i32 3
+  store <12 x i8> %c, ptr %ptr2
+
+  %ptr3 = getelementptr inbounds [8 x float], ptr %alloca, i32 0, i32 6
+  store <1 x i64> %d, ptr %ptr3
+
+  %result = load <8 x float>, ptr %alloca
+  ret <8 x float> %result
+}
+
+define <8 x float> @load_in_different_blocks(<2 x float> %a, <2 x float> %b, <2 x float> %c, <2 x float> %d, i1 %cond) {
+; CHECK-LABEL: define <8 x float> @load_in_different_blocks(
+; CHECK-SAME: <2 x float> [[A:%.*]], <2 x float> [[B:%.*]], <2 x float> [[C:%.*]], <2 x float> [[D:%.*]], i1 [[COND:%.*]]) {
+; CHECK-NEXT:  [[ENTRY:.*]]:
+; CHECK-NEXT:    [[TMP0:%.*]] = shufflevector <2 x float> [[A]], <2 x float> [[B]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <2 x float> [[C]], <2 x float> [[D]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <4 x float> [[TMP0]], <4 x float> [[TMP1]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; CHECK-NEXT:    br i1 [[COND]], label %[[TRUEBRANCH:.*]], label %[[FALSEBRANCH:.*]]
+; CHECK:       [[TRUEBRANCH]]:
+; CHECK-NEXT:    br label %[[FALSEBRANCH]]
+; CHECK:       [[FALSEBRANCH]]:
+; CHECK-NEXT:    [[RESULT:%.*]] = phi <8 x float> [ poison, %[[ENTRY]] ], [ [[TMP2]], %[[TRUEBRANCH]] ]
+; CHECK-NEXT:    ret <8 x float> [[RESULT]]
+;
+entry:
+  %alloca = alloca [8 x float]
+
+  %ptr0 = getelementptr inbounds [8 x float], ptr %alloca, i32 0, i32 0
+  store <2 x float> %a, ptr %ptr0
+
+  %ptr1 = getelementptr inbounds [8 x float], ptr %alloca, i32 0, i32 2
+  store <2 x float> %b, ptr %ptr1
+
+  %ptr2 = getelementptr inbounds [8 x float], ptr %alloca, i32 0, i32 4
+  store <2 x float> %c, ptr %ptr2
+
+  %ptr3 = getelementptr inbounds [8 x float], ptr %alloca, i32 0, i32 6
+  store <2 x float> %d, ptr %ptr3
+
+  br i1 %cond, label %TrueBranch, label %FalseBranch
+
+TrueBranch:
+  %load1 = load <8 x float>, ptr %alloca
+  br label %FalseBranch
+
+FalseBranch:
+  %result = phi <8 x float> [ poison, %entry ], [ %load1, %TrueBranch ]
+  ret <8 x float> %result
+}
+
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK-MODIFY-CFG: {{.*}}
+; CHECK-PRESERVE-CFG: {{.*}}