Skip to content

Commit 78d6491

Browse files
authored
[InterleavedAccess] Construct interleaved access store with shuffles
Interleaved stores with factors of 8 and 16 are cheaper on AArch64 when lowered with additional interleave (shuffle) instructions.
1 parent f76c132 commit 78d6491

File tree

9 files changed

+416
-18
lines changed

9 files changed

+416
-18
lines changed

llvm/include/llvm/CodeGen/TargetLowering.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3233,6 +3233,11 @@ class LLVM_ABI TargetLoweringBase {
32333233
/// Default to be the minimum interleave factor: 2.
32343234
virtual unsigned getMaxSupportedInterleaveFactor() const { return 2; }
32353235

3236+
/// Return true if the target interleave with shuffles are cheaper
3237+
virtual bool isProfitableToInterleaveWithGatherScatter() const {
3238+
return false;
3239+
}
3240+
32363241
/// Lower an interleaved load to target specific intrinsics. Return
32373242
/// true on success.
32383243
///

llvm/lib/CodeGen/InterleavedAccessPass.cpp

Lines changed: 11 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -239,7 +239,8 @@ static bool isDeInterleaveMask(ArrayRef<int> Mask, unsigned &Factor,
239239
/// I.e. <0, LaneLen, ... , LaneLen*(Factor - 1), 1, LaneLen + 1, ...>
240240
/// E.g. For a Factor of 2 (LaneLen=4): <0, 4, 1, 5, 2, 6, 3, 7>
241241
static bool isReInterleaveMask(ShuffleVectorInst *SVI, unsigned &Factor,
242-
unsigned MaxFactor) {
242+
unsigned MaxFactor,
243+
bool InterleaveWithShuffles) {
243244
unsigned NumElts = SVI->getShuffleMask().size();
244245
if (NumElts < 4)
245246
return false;
@@ -250,6 +251,13 @@ static bool isReInterleaveMask(ShuffleVectorInst *SVI, unsigned &Factor,
250251
return true;
251252
}
252253

254+
if (InterleaveWithShuffles) {
255+
for (unsigned i = 1; MaxFactor * i <= 16; i *= 2) {
256+
Factor = i * MaxFactor;
257+
if (SVI->isInterleave(Factor))
258+
return true;
259+
}
260+
}
253261
return false;
254262
}
255263

@@ -528,7 +536,8 @@ bool InterleavedAccessImpl::lowerInterleavedStore(
528536
cast<FixedVectorType>(SVI->getType())->getNumElements();
529537
// Check if the shufflevector is RE-interleave shuffle.
530538
unsigned Factor;
531-
if (!isReInterleaveMask(SVI, Factor, MaxFactor))
539+
if (!isReInterleaveMask(SVI, Factor, MaxFactor,
540+
TLI->isProfitableToInterleaveWithGatherScatter()))
532541
return false;
533542
assert(NumStoredElements % Factor == 0 &&
534543
"number of stored element should be a multiple of Factor");

llvm/lib/Target/AArch64/AArch64ISelLowering.cpp

Lines changed: 129 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -96,6 +96,7 @@
9696
#include <cctype>
9797
#include <cstdint>
9898
#include <cstdlib>
99+
#include <deque>
99100
#include <iterator>
100101
#include <limits>
101102
#include <optional>
@@ -17989,11 +17990,17 @@ bool AArch64TargetLowering::lowerInterleavedStore(Instruction *Store,
1798917990
unsigned Factor,
1799017991
const APInt &GapMask) const {
1799117992

17992-
assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
17993-
"Invalid interleave factor");
1799417993
auto *SI = dyn_cast<StoreInst>(Store);
1799517994
if (!SI)
1799617995
return false;
17996+
17997+
if (isProfitableToInterleaveWithGatherScatter() &&
17998+
Factor > getMaxSupportedInterleaveFactor())
17999+
return lowerInterleavedStoreWithShuffle(SI, SVI, Factor);
18000+
18001+
assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
18002+
"Invalid interleave factor");
18003+
1799718004
assert(!LaneMask && GapMask.popcount() == Factor &&
1799818005
"Unexpected mask on store");
1799918006

@@ -18139,6 +18146,126 @@ bool AArch64TargetLowering::lowerInterleavedStore(Instruction *Store,
1813918146
return true;
1814018147
}
1814118148

18149+
/// If the interleaved vector elements are greater than supported MaxFactor,
18150+
/// interleaving the data with additional shuffles can be used to
18151+
/// achieve the same.
18152+
///
18153+
/// Consider the following data with 8 interleaves which are shuffled to store
18154+
/// stN instructions. Data needs to be stored in this order:
18155+
/// [v0, v1, v2, v3, v4, v5, v6, v7]
18156+
///
18157+
/// v0 v4 v2 v6 v1 v5 v3 v7
18158+
/// | | | | | | | |
18159+
/// \ / \ / \ / \ /
18160+
/// [zip v0,v4] [zip v2,v6] [zip v1,v5] [zip v3,v7] ==> stN = 4
18161+
/// | | | |
18162+
/// \ / \ /
18163+
/// \ / \ /
18164+
/// \ / \ /
18165+
/// [zip [v0,v2,v4,v6]] [zip [v1,v3,v5,v7]] ==> stN = 2
18166+
///
18167+
/// For stN = 4, upper half of interleaved data V0, V1, V2, V3 is stored
18168+
/// with one st4 instruction. Lower half, i.e, V4, V5, V6, V7 is stored with
18169+
/// another st4.
18170+
///
18171+
/// For stN = 2, upper half of interleaved data V0, V1 is stored
18172+
/// with one st2 instruction. Second set V2, V3 is stored with another st2.
18173+
/// Total of 4 st2's are required here.
18174+
bool AArch64TargetLowering::lowerInterleavedStoreWithShuffle(
18175+
StoreInst *SI, ShuffleVectorInst *SVI, unsigned Factor) const {
18176+
unsigned MaxSupportedFactor = getMaxSupportedInterleaveFactor();
18177+
18178+
auto *VecTy = cast<FixedVectorType>(SVI->getType());
18179+
assert(VecTy->getNumElements() % Factor == 0 && "Invalid interleaved store");
18180+
18181+
unsigned LaneLen = VecTy->getNumElements() / Factor;
18182+
Type *EltTy = VecTy->getElementType();
18183+
auto *SubVecTy = FixedVectorType::get(EltTy, Factor);
18184+
18185+
const DataLayout &DL = SI->getModule()->getDataLayout();
18186+
bool UseScalable;
18187+
18188+
// Skip if we do not have NEON and skip illegal vector types. We can
18189+
// "legalize" wide vector types into multiple interleaved accesses as long as
18190+
// the vector types are divisible by 128.
18191+
if (!Subtarget->hasNEON() ||
18192+
!isLegalInterleavedAccessType(SubVecTy, DL, UseScalable))
18193+
return false;
18194+
18195+
if (UseScalable)
18196+
return false;
18197+
18198+
std::deque<Value *> Shuffles;
18199+
Shuffles.push_back(SVI);
18200+
unsigned ConcatLevel = Factor;
18201+
// Getting all the interleaved operands.
18202+
while (ConcatLevel > 1) {
18203+
unsigned InterleavedOperands = Shuffles.size();
18204+
for (unsigned i = 0; i < InterleavedOperands; i++) {
18205+
ShuffleVectorInst *SFL = dyn_cast<ShuffleVectorInst>(Shuffles.front());
18206+
if (!SFL)
18207+
return false;
18208+
Shuffles.pop_front();
18209+
18210+
Value *Op0 = SFL->getOperand(0);
18211+
Value *Op1 = SFL->getOperand(1);
18212+
18213+
Shuffles.push_back(dyn_cast<Value>(Op0));
18214+
Shuffles.push_back(dyn_cast<Value>(Op1));
18215+
}
18216+
ConcatLevel >>= 1;
18217+
}
18218+
18219+
IRBuilder<> Builder(SI);
18220+
auto Mask = createInterleaveMask(LaneLen, 2);
18221+
SmallVector<int, 16> UpperHalfMask(LaneLen), LowerHalfMask(LaneLen);
18222+
for (unsigned i = 0; i < LaneLen; i++) {
18223+
LowerHalfMask[i] = Mask[i];
18224+
UpperHalfMask[i] = Mask[i + LaneLen];
18225+
}
18226+
18227+
unsigned InterleaveFactor = Factor >> 1;
18228+
while (InterleaveFactor >= MaxSupportedFactor) {
18229+
std::deque<Value *> ShufflesIntermediate;
18230+
ShufflesIntermediate.resize(Factor);
18231+
for (unsigned j = 0; j < Factor; j += (InterleaveFactor * 2)) {
18232+
for (unsigned i = 0; i < InterleaveFactor; i++) {
18233+
auto *Shuffle = Builder.CreateShuffleVector(
18234+
Shuffles[i + j], Shuffles[i + j + InterleaveFactor], LowerHalfMask);
18235+
ShufflesIntermediate[i + j] = Shuffle;
18236+
Shuffle = Builder.CreateShuffleVector(
18237+
Shuffles[i + j], Shuffles[i + j + InterleaveFactor], UpperHalfMask);
18238+
ShufflesIntermediate[i + j + InterleaveFactor] = Shuffle;
18239+
}
18240+
}
18241+
Shuffles = ShufflesIntermediate;
18242+
InterleaveFactor >>= 1;
18243+
}
18244+
18245+
Type *PtrTy = SI->getPointerOperandType();
18246+
auto *STVTy = FixedVectorType::get(SubVecTy->getElementType(), LaneLen);
18247+
18248+
Value *BaseAddr = SI->getPointerOperand();
18249+
Function *StNFunc = getStructuredStoreFunction(
18250+
SI->getModule(), MaxSupportedFactor, UseScalable, STVTy, PtrTy);
18251+
for (unsigned i = 0; i < (Factor / MaxSupportedFactor); i++) {
18252+
SmallVector<Value *, 5> Ops;
18253+
for (unsigned j = 0; j < MaxSupportedFactor; j++)
18254+
Ops.push_back(Shuffles[i * MaxSupportedFactor + j]);
18255+
18256+
if (i > 0) {
18257+
// We will compute the pointer operand of each store from the original
18258+
// base address using GEPs. Cast the base address to a pointer to the
18259+
// scalar element type.
18260+
BaseAddr = Builder.CreateConstGEP1_32(
18261+
SubVecTy->getElementType(), BaseAddr, LaneLen * MaxSupportedFactor);
18262+
}
18263+
Ops.push_back(Builder.CreateBitCast(BaseAddr, PtrTy));
18264+
Builder.CreateCall(StNFunc, Ops);
18265+
}
18266+
return true;
18267+
}
18268+
1814218269
bool AArch64TargetLowering::lowerDeinterleaveIntrinsicToLoad(
1814318270
Instruction *Load, Value *Mask, IntrinsicInst *DI) const {
1814418271
const unsigned Factor = getDeinterleaveIntrinsicFactor(DI->getIntrinsicID());

llvm/lib/Target/AArch64/AArch64ISelLowering.h

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -229,6 +229,10 @@ class AArch64TargetLowering : public TargetLowering {
229229

230230
bool hasPairedLoad(EVT LoadedType, Align &RequiredAlignment) const override;
231231

232+
bool isProfitableToInterleaveWithGatherScatter() const override {
233+
return true;
234+
}
235+
232236
unsigned getMaxSupportedInterleaveFactor() const override { return 4; }
233237

234238
bool lowerInterleavedLoad(Instruction *Load, Value *Mask,
@@ -239,6 +243,9 @@ class AArch64TargetLowering : public TargetLowering {
239243
ShuffleVectorInst *SVI, unsigned Factor,
240244
const APInt &GapMask) const override;
241245

246+
bool lowerInterleavedStoreWithShuffle(StoreInst *SI, ShuffleVectorInst *SVI,
247+
unsigned Factor) const;
248+
242249
bool lowerDeinterleaveIntrinsicToLoad(Instruction *Load, Value *Mask,
243250
IntrinsicInst *DI) const override;
244251

llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp

Lines changed: 33 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -4922,19 +4922,47 @@ InstructionCost AArch64TTIImpl::getInterleavedMemoryOpCost(
49224922
if (!VecTy->isScalableTy() && (UseMaskForCond || UseMaskForGaps))
49234923
return InstructionCost::getInvalid();
49244924

4925-
if (!UseMaskForGaps && Factor <= TLI->getMaxSupportedInterleaveFactor()) {
4925+
unsigned NumLoadStores = 1;
4926+
InstructionCost ShuffleCost = 0;
4927+
bool isInterleaveWithShuffle = false;
4928+
unsigned MaxSupportedFactor = TLI->getMaxSupportedInterleaveFactor();
4929+
4930+
auto *SubVecTy =
4931+
VectorType::get(VecVTy->getElementType(),
4932+
VecVTy->getElementCount().divideCoefficientBy(Factor));
4933+
4934+
if (TLI->isProfitableToInterleaveWithGatherScatter() &&
4935+
Opcode == Instruction::Store && (0 == Factor % MaxSupportedFactor) &&
4936+
Factor > MaxSupportedFactor) {
4937+
isInterleaveWithShuffle = true;
4938+
SmallVector<int, 16> Mask;
4939+
// preparing interleave Mask.
4940+
for (unsigned i = 0; i < VecVTy->getElementCount().getKnownMinValue() / 2;
4941+
i++) {
4942+
for (unsigned j = 0; j < 2; j++)
4943+
Mask.push_back(j * Factor + i);
4944+
}
4945+
4946+
NumLoadStores = Factor / MaxSupportedFactor;
4947+
ShuffleCost =
4948+
(Factor * getShuffleCost(TargetTransformInfo::SK_Splice, VecVTy, VecVTy,
4949+
Mask, CostKind, 0, SubVecTy));
4950+
}
4951+
4952+
if (!UseMaskForGaps &&
4953+
(Factor <= MaxSupportedFactor || isInterleaveWithShuffle)) {
49264954
unsigned MinElts = VecVTy->getElementCount().getKnownMinValue();
4927-
auto *SubVecTy =
4928-
VectorType::get(VecVTy->getElementType(),
4929-
VecVTy->getElementCount().divideCoefficientBy(Factor));
49304955

49314956
// ldN/stN only support legal vector types of size 64 or 128 in bits.
49324957
// Accesses having vector types that are a multiple of 128 bits can be
49334958
// matched to more than one ldN/stN instruction.
49344959
bool UseScalable;
49354960
if (MinElts % Factor == 0 &&
49364961
TLI->isLegalInterleavedAccessType(SubVecTy, DL, UseScalable))
4937-
return Factor * TLI->getNumInterleavedAccesses(SubVecTy, DL, UseScalable);
4962+
return (Factor *
4963+
TLI->getNumInterleavedAccesses(SubVecTy, DL, UseScalable) *
4964+
NumLoadStores) +
4965+
ShuffleCost;
49384966
}
49394967

49404968
return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,

llvm/test/CodeGen/AArch64/vldn_shuffle.ll

Lines changed: 105 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -730,6 +730,111 @@ entry:
730730
ret void
731731
}
732732

733+
define void @store_factor8(ptr %ptr, <4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2, <4 x i32> %a3,
734+
<4 x i32> %a4, <4 x i32> %a5, <4 x i32> %a6, <4 x i32> %a7) {
735+
; CHECK-LABEL: store_factor8:
736+
; CHECK: .Lfunc_begin17:
737+
; CHECK-NEXT: .cfi_startproc
738+
; CHECK-NEXT: // %bb.0:
739+
; CHECK: zip1 [[V1:.*s]], [[I1:.*s]], [[I5:.*s]]
740+
; CHECK-NEXT: zip2 [[V5:.*s]], [[I1]], [[I5]]
741+
; CHECK-NEXT: zip1 [[V2:.*s]], [[I2:.*s]], [[I6:.*s]]
742+
; CHECK-NEXT: zip2 [[V6:.*s]], [[I2]], [[I6]]
743+
; CHECK-NEXT: zip1 [[V3:.*s]], [[I3:.*s]], [[I7:.*s]]
744+
; CHECK-NEXT: zip2 [[V7:.*s]], [[I3]], [[I7]]
745+
; CHECK-NEXT: zip1 [[V4:.*s]], [[I4:.*s]], [[I8:.*s]]
746+
; CHECK-NEXT: zip2 [[V8:.*s]], [[I4]], [[I8]]
747+
; CHECK-NEXT: st4 { [[V1]], [[V2]], [[V3]], [[V4]] }, [x0], #64
748+
; CHECK-NEXT: st4 { [[V5]], [[V6]], [[V7]], [[V8]] }, [x0]
749+
; CHECK-NEXT: ret
750+
751+
%v0 = shufflevector <4 x i32> %a0, <4 x i32> %a1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
752+
%v1 = shufflevector <4 x i32> %a2, <4 x i32> %a3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
753+
%v2 = shufflevector <4 x i32> %a4, <4 x i32> %a5, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
754+
%v3 = shufflevector <4 x i32> %a6, <4 x i32> %a7, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
755+
756+
%s0 = shufflevector <8 x i32> %v0, <8 x i32> %v1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
757+
%s1 = shufflevector <8 x i32> %v2, <8 x i32> %v3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
758+
759+
%interleaved.vec = shufflevector <16 x i32> %s0, <16 x i32> %s1, <32 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28, i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29, i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30, i32 3, i32 7, i32 11, i32 15, i32 19, i32 23, i32 27, i32 31>
760+
store <32 x i32> %interleaved.vec, ptr %ptr, align 4
761+
ret void
762+
}
763+
764+
define void @store_factor16(ptr %ptr, <4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2, <4 x i32> %a3,
765+
<4 x i32> %a4, <4 x i32> %a5, <4 x i32> %a6, <4 x i32> %a7,
766+
<4 x i32> %a8, <4 x i32> %a9, <4 x i32> %a10, <4 x i32> %a11,
767+
<4 x i32> %a12, <4 x i32> %a13, <4 x i32> %a14, <4 x i32> %a15) {
768+
; CHECK-LABEL: store_factor16:
769+
; CHECK: .Lfunc_begin18:
770+
; CHECK-NEXT: .cfi_startproc
771+
; CHECK-NEXT: // %bb.0:
772+
; CHECK: zip1 [[V05:.*s]], [[I05:.*s]], [[I13:.*s]]
773+
; CHECK-NEXT: zip1 [[V01:.*s]], [[I01:.*s]], [[I09:.*s]]
774+
; CHECK-NEXT: zip1 [[V02:.*s]], [[I02:.*s]], [[I10:.*s]]
775+
; CHECK-NEXT: zip1 [[V06:.*s]], [[I06:.*s]], [[I14:.*s]]
776+
; CHECK-NEXT: zip1 [[V07:.*s]], [[I07:.*s]], [[I15:.*s]]
777+
; CHECK-NEXT: zip2 [[V09:.*s]], [[I01]], [[I09]]
778+
; CHECK-NEXT: zip2 [[V13:.*s]], [[I05]], [[I13]]
779+
; CHECK-NEXT: zip1 [[V03:.*s]], [[I03:.*s]], [[I11:.*s]]
780+
; CHECK-NEXT: zip1 [[V04:.*s]], [[I04:.*s]], [[I12:.*s]]
781+
; CHECK-NEXT: zip1 [[V08:.*s]], [[I08:.*s]], [[I16:.*s]]
782+
; CHECK-NEXT: zip2 [[V10:.*s]], [[I02]], [[I10]]
783+
; CHECK-NEXT: zip2 [[V14:.*s]], [[I06]], [[I14]]
784+
; CHECK-NEXT: zip2 [[V11:.*s]], [[I03]], [[I11]]
785+
; CHECK-NEXT: zip1 [[V17:.*s]], [[V01]], [[V05]]
786+
; CHECK-NEXT: zip2 [[V15:.*s]], [[I07]], [[I15]]
787+
; CHECK-NEXT: zip2 [[V21:.*s]], [[V01]], [[V05]]
788+
; CHECK-NEXT: zip1 [[V18:.*s]], [[V02]], [[V06]]
789+
; CHECK-NEXT: zip2 [[V12:.*s]], [[I04]], [[I12]]
790+
; CHECK-NEXT: zip2 [[V16:.*s]], [[I08]], [[I16]]
791+
; CHECK-NEXT: zip1 [[V19:.*s]], [[V03]], [[V07]]
792+
; CHECK-NEXT: zip2 [[V22:.*s]], [[V02]], [[V06]]
793+
; CHECK-NEXT: zip1 [[V25:.*s]], [[V09]], [[V13]]
794+
; CHECK-NEXT: zip1 [[V20:.*s]], [[V04]], [[V08]]
795+
; CHECK-NEXT: zip2 [[V23:.*s]], [[V03]], [[V07]]
796+
; CHECK-NEXT: zip1 [[V26:.*s]], [[V10]], [[V14]]
797+
; CHECK-NEXT: zip2 [[V29:.*s]], [[V09]], [[V13]]
798+
; CHECK-NEXT: zip2 [[V24:.*s]], [[V04]], [[V08]]
799+
; CHECK-NEXT: zip1 [[V27:.*s]], [[V11]], [[V15]]
800+
; CHECK-NEXT: zip2 [[V30:.*s]], [[V10]], [[V14]]
801+
; CHECK-NEXT: zip1 [[V28:.*s]], [[V12]], [[V16]]
802+
; CHECK-NEXT: zip2 [[V31:.*s]], [[V11]], [[V15]]
803+
; CHECK-NEXT: zip2 [[V32:.*s]], [[V12]], [[V16]]
804+
; CHECK-NEXT: st4 { [[V17]], [[V18]], [[V19]], [[V20]] }, [x8], #64
805+
; CHECK-NEXT: ldp d9, d8, [sp, #48] // 16-byte Folded Reload
806+
; CHECK-NEXT: ldp d11, d10, [sp, #32] // 16-byte Folded Reload
807+
; CHECK-NEXT: st4 { [[V21]], [[V22]], [[V23]], [[V24]] }, [x8]
808+
; CHECK-NEXT: add x8, x0, #128
809+
; CHECK-NEXT: ldp d13, d12, [sp, #16] // 16-byte Folded Reload
810+
; CHECK-NEXT: st4 { [[V25]], [[V26]], [[V27]], [[V28]] }, [x8]
811+
; CHECK-NEXT: add x8, x0, #192
812+
; CHECK-NEXT: st4 { [[V29]], [[V30]], [[V31]], [[V32]] }, [x8]
813+
; CHECK-NEXT: ldp d15, d14, [sp], #64 // 16-byte Folded Reload
814+
; CHECK-NEXT: ret
815+
816+
%v0 = shufflevector <4 x i32> %a0, <4 x i32> %a1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
817+
%v1 = shufflevector <4 x i32> %a2, <4 x i32> %a3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
818+
%v2 = shufflevector <4 x i32> %a4, <4 x i32> %a5, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
819+
%v3 = shufflevector <4 x i32> %a6, <4 x i32> %a7, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
820+
%v4 = shufflevector <4 x i32> %a8, <4 x i32> %a9, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
821+
%v5 = shufflevector <4 x i32> %a10, <4 x i32> %a11, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
822+
%v6 = shufflevector <4 x i32> %a12, <4 x i32> %a13, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
823+
%v7 = shufflevector <4 x i32> %a14, <4 x i32> %a15, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
824+
825+
%s0 = shufflevector <8 x i32> %v0, <8 x i32> %v1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
826+
%s1 = shufflevector <8 x i32> %v2, <8 x i32> %v3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
827+
%s2 = shufflevector <8 x i32> %v4, <8 x i32> %v5, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
828+
%s3 = shufflevector <8 x i32> %v6, <8 x i32> %v7, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
829+
830+
%d0 = shufflevector <16 x i32> %s0, <16 x i32> %s1, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
831+
%d1 = shufflevector <16 x i32> %s2, <16 x i32> %s3, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
832+
833+
%interleaved.vec = shufflevector <32 x i32> %d0, <32 x i32> %d1, <64 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28, i32 32, i32 36, i32 40, i32 44, i32 48, i32 52, i32 56, i32 60, i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29, i32 33, i32 37, i32 41, i32 45, i32 49, i32 53, i32 57, i32 61, i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30, i32 34, i32 38, i32 42, i32 46, i32 50, i32 54, i32 58, i32 62, i32 3, i32 7, i32 11, i32 15, i32 19, i32 23, i32 27, i32 31, i32 35, i32 39, i32 43, i32 47, i32 51, i32 55, i32 59, i32 63>
834+
store <64 x i32> %interleaved.vec, ptr %ptr, align 4
835+
ret void
836+
}
837+
733838
declare void @llvm.dbg.value(metadata, metadata, metadata)
734839

735840
!llvm.dbg.cu = !{!0}

0 commit comments

Comments
 (0)