From d91acb89a537d7022fd8cac98cbd4048291f1ddd Mon Sep 17 00:00:00 2001 From: hanbeom Date: Sun, 9 Feb 2025 17:15:53 +0900 Subject: [PATCH 01/10] [VectorCombine] support mismatching extract/insert indices for foldInsExtFNeg insertelt DestVec, (fneg (extractelt SrcVec, Index)), Index -> shuffle DestVec, (shuffle (fneg SrcVec), poison, SrcMask), Mask In previous patches, the above transform was only possible if the Extract/Insert Index was the same; this patch makes the above transform possible even if the two indexes are different. Proof: https://alive2.llvm.org/ce/z/aDfdyG Fixes: https://github.com/llvm/llvm-project/issues/125675 --- .../Transforms/Vectorize/VectorCombine.cpp | 53 +++++++------- .../PhaseOrdering/X86/addsub-inseltpoison.ll | 6 +- .../Transforms/PhaseOrdering/X86/addsub.ll | 6 +- .../VectorCombine/X86/extract-fneg-insert.ll | 69 ++++++++++--------- 4 files changed, 69 insertions(+), 65 deletions(-) diff --git a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp index d6eb00da11dc8..d391dea85143e 100644 --- a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp +++ b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp @@ -696,11 +696,11 @@ bool VectorCombine::foldExtractExtract(Instruction &I) { /// shuffle. bool VectorCombine::foldInsExtFNeg(Instruction &I) { // Match an insert (op (extract)) pattern. - Value *DestVec; - uint64_t Index; + Value *DstVec; + uint64_t ExtIdx, InsIdx; Instruction *FNeg; - if (!match(&I, m_InsertElt(m_Value(DestVec), m_OneUse(m_Instruction(FNeg)), - m_ConstantInt(Index)))) + if (!match(&I, m_InsertElt(m_Value(DstVec), m_OneUse(m_Instruction(FNeg)), + m_ConstantInt(InsIdx)))) return false; // Note: This handles the canonical fneg instruction and "fsub -0.0, X". @@ -708,48 +708,49 @@ bool VectorCombine::foldInsExtFNeg(Instruction &I) { Instruction *Extract; if (!match(FNeg, m_FNeg(m_CombineAnd( m_Instruction(Extract), - m_ExtractElt(m_Value(SrcVec), m_SpecificInt(Index)))))) + m_ExtractElt(m_Value(SrcVec), m_ConstantInt(ExtIdx)))))) return false; - auto *VecTy = cast(I.getType()); - auto *ScalarTy = VecTy->getScalarType(); + auto *DstVecTy = cast(DstVec->getType()); + auto *DstVecScalarTy = DstVecTy->getScalarType(); auto *SrcVecTy = dyn_cast(SrcVec->getType()); - if (!SrcVecTy || ScalarTy != SrcVecTy->getScalarType()) + if (!SrcVecTy || DstVecScalarTy != SrcVecTy->getScalarType()) return false; // Ignore bogus insert/extract index. - unsigned NumElts = VecTy->getNumElements(); - if (Index >= NumElts) + unsigned NumDstElts = DstVecTy->getNumElements(); + unsigned NumSrcElts = SrcVecTy->getNumElements(); + if (InsIdx >= NumDstElts || ExtIdx >= NumSrcElts || NumDstElts == 1) return false; // We are inserting the negated element into the same lane that we extracted // from. This is equivalent to a select-shuffle that chooses all but the // negated element from the destination vector. - SmallVector Mask(NumElts); + SmallVector Mask(NumDstElts); std::iota(Mask.begin(), Mask.end(), 0); - Mask[Index] = Index + NumElts; + Mask[InsIdx] = (ExtIdx % NumDstElts) + NumDstElts; InstructionCost OldCost = - TTI.getArithmeticInstrCost(Instruction::FNeg, ScalarTy, CostKind) + - TTI.getVectorInstrCost(I, VecTy, CostKind, Index); + TTI.getArithmeticInstrCost(Instruction::FNeg, DstVecScalarTy, CostKind) + + TTI.getVectorInstrCost(I, DstVecTy, CostKind, InsIdx); // If the extract has one use, it will be eliminated, so count it in the // original cost. 
If it has more than one use, ignore the cost because it will // be the same before/after. if (Extract->hasOneUse()) - OldCost += TTI.getVectorInstrCost(*Extract, VecTy, CostKind, Index); + OldCost += TTI.getVectorInstrCost(*Extract, SrcVecTy, CostKind, ExtIdx); InstructionCost NewCost = - TTI.getArithmeticInstrCost(Instruction::FNeg, VecTy, CostKind) + - TTI.getShuffleCost(TargetTransformInfo::SK_PermuteTwoSrc, VecTy, VecTy, - Mask, CostKind); + TTI.getArithmeticInstrCost(Instruction::FNeg, SrcVecTy, CostKind) + + TTI.getShuffleCost(TargetTransformInfo::SK_PermuteTwoSrc, DstVecTy, Mask, + CostKind); - bool NeedLenChg = SrcVecTy->getNumElements() != NumElts; + bool NeedLenChg = SrcVecTy->getNumElements() != NumDstElts; // If the lengths of the two vectors are not equal, // we need to add a length-change vector. Add this cost. SmallVector SrcMask; if (NeedLenChg) { - SrcMask.assign(NumElts, PoisonMaskElem); - SrcMask[Index] = Index; + SrcMask.assign(NumDstElts, PoisonMaskElem); + SrcMask[(ExtIdx % NumDstElts)] = ExtIdx; NewCost += TTI.getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, VecTy, SrcVecTy, SrcMask, CostKind); } @@ -758,15 +759,15 @@ bool VectorCombine::foldInsExtFNeg(Instruction &I) { return false; Value *NewShuf; - // insertelt DestVec, (fneg (extractelt SrcVec, Index)), Index + // insertelt DstVec, (fneg (extractelt SrcVec, Index)), Index Value *VecFNeg = Builder.CreateFNegFMF(SrcVec, FNeg); if (NeedLenChg) { - // shuffle DestVec, (shuffle (fneg SrcVec), poison, SrcMask), Mask + // shuffle DstVec, (shuffle (fneg SrcVec), poison, SrcMask), Mask Value *LenChgShuf = Builder.CreateShuffleVector(VecFNeg, SrcMask); - NewShuf = Builder.CreateShuffleVector(DestVec, LenChgShuf, Mask); + NewShuf = Builder.CreateShuffleVector(DstVec, LenChgShuf, Mask); } else { - // shuffle DestVec, (fneg SrcVec), Mask - NewShuf = Builder.CreateShuffleVector(DestVec, VecFNeg, Mask); + // shuffle DstVec, (fneg SrcVec), Mask + NewShuf = Builder.CreateShuffleVector(DstVec, VecFNeg, Mask); } replaceValue(I, *NewShuf); diff --git a/llvm/test/Transforms/PhaseOrdering/X86/addsub-inseltpoison.ll b/llvm/test/Transforms/PhaseOrdering/X86/addsub-inseltpoison.ll index 2c1d73eaafc5e..9f3244ded92ff 100644 --- a/llvm/test/Transforms/PhaseOrdering/X86/addsub-inseltpoison.ll +++ b/llvm/test/Transforms/PhaseOrdering/X86/addsub-inseltpoison.ll @@ -498,11 +498,9 @@ define void @add_aggregate_store(<2 x float> %a0, <2 x float> %a1, <2 x float> % ; PR58139 define <2 x double> @_mm_complexmult_pd_naive(<2 x double> %a, <2 x double> %b) { ; SSE-LABEL: @_mm_complexmult_pd_naive( -; SSE-NEXT: [[B1:%.*]] = extractelement <2 x double> [[B:%.*]], i64 1 -; SSE-NEXT: [[TMP1:%.*]] = fneg double [[B1]] ; SSE-NEXT: [[TMP2:%.*]] = shufflevector <2 x double> [[A:%.*]], <2 x double> poison, <2 x i32> -; SSE-NEXT: [[TMP3:%.*]] = shufflevector <2 x double> [[B]], <2 x double> poison, <2 x i32> -; SSE-NEXT: [[TMP4:%.*]] = insertelement <2 x double> [[TMP3]], double [[TMP1]], i64 0 +; SSE-NEXT: [[TMP3:%.*]] = fneg <2 x double> [[B:%.*]] +; SSE-NEXT: [[TMP4:%.*]] = shufflevector <2 x double> [[TMP3]], <2 x double> [[B]], <2 x i32> ; SSE-NEXT: [[TMP5:%.*]] = fmul <2 x double> [[TMP2]], [[TMP4]] ; SSE-NEXT: [[TMP6:%.*]] = shufflevector <2 x double> [[A]], <2 x double> poison, <2 x i32> zeroinitializer ; SSE-NEXT: [[TMP7:%.*]] = tail call <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[TMP6]], <2 x double> [[B]], <2 x double> [[TMP5]]) diff --git a/llvm/test/Transforms/PhaseOrdering/X86/addsub.ll 
b/llvm/test/Transforms/PhaseOrdering/X86/addsub.ll index fa6403f3d4267..de64bf2657f72 100644 --- a/llvm/test/Transforms/PhaseOrdering/X86/addsub.ll +++ b/llvm/test/Transforms/PhaseOrdering/X86/addsub.ll @@ -502,11 +502,9 @@ define void @add_aggregate_store(<2 x float> %a0, <2 x float> %a1, <2 x float> % ; PR58139 define <2 x double> @_mm_complexmult_pd_naive(<2 x double> %a, <2 x double> %b) { ; SSE-LABEL: @_mm_complexmult_pd_naive( -; SSE-NEXT: [[B1:%.*]] = extractelement <2 x double> [[B:%.*]], i64 1 -; SSE-NEXT: [[TMP1:%.*]] = fneg double [[B1]] ; SSE-NEXT: [[TMP2:%.*]] = shufflevector <2 x double> [[A:%.*]], <2 x double> poison, <2 x i32> -; SSE-NEXT: [[TMP3:%.*]] = shufflevector <2 x double> [[B]], <2 x double> poison, <2 x i32> -; SSE-NEXT: [[TMP4:%.*]] = insertelement <2 x double> [[TMP3]], double [[TMP1]], i64 0 +; SSE-NEXT: [[TMP3:%.*]] = fneg <2 x double> [[B:%.*]] +; SSE-NEXT: [[TMP4:%.*]] = shufflevector <2 x double> [[TMP3]], <2 x double> [[B]], <2 x i32> ; SSE-NEXT: [[TMP5:%.*]] = fmul <2 x double> [[TMP2]], [[TMP4]] ; SSE-NEXT: [[TMP6:%.*]] = shufflevector <2 x double> [[A]], <2 x double> poison, <2 x i32> zeroinitializer ; SSE-NEXT: [[TMP7:%.*]] = tail call <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[TMP6]], <2 x double> [[B]], <2 x double> [[TMP5]]) diff --git a/llvm/test/Transforms/VectorCombine/X86/extract-fneg-insert.ll b/llvm/test/Transforms/VectorCombine/X86/extract-fneg-insert.ll index 5358e0419e7a7..cbf33b908a0e2 100644 --- a/llvm/test/Transforms/VectorCombine/X86/extract-fneg-insert.ll +++ b/llvm/test/Transforms/VectorCombine/X86/extract-fneg-insert.ll @@ -73,17 +73,11 @@ define <2 x double> @ext1_v2f64(<2 x double> %x, <2 x double> %y) { } define <4 x double> @ext1_v2f64v4f64(<2 x double> %x, <4 x double> %y) { -; SSE-LABEL: @ext1_v2f64v4f64( -; SSE-NEXT: [[E:%.*]] = extractelement <2 x double> [[X:%.*]], i32 1 -; SSE-NEXT: [[N:%.*]] = fneg nsz double [[E]] -; SSE-NEXT: [[R:%.*]] = insertelement <4 x double> [[Y:%.*]], double [[N]], i32 1 -; SSE-NEXT: ret <4 x double> [[R]] -; -; AVX-LABEL: @ext1_v2f64v4f64( -; AVX-NEXT: [[TMP1:%.*]] = fneg nsz <2 x double> [[X:%.*]] -; AVX-NEXT: [[TMP2:%.*]] = shufflevector <2 x double> [[TMP1]], <2 x double> poison, <4 x i32> -; AVX-NEXT: [[R:%.*]] = shufflevector <4 x double> [[Y:%.*]], <4 x double> [[TMP2]], <4 x i32> -; AVX-NEXT: ret <4 x double> [[R]] +; CHECK-LABEL: @ext1_v2f64v4f64( +; CHECK-NEXT: [[TMP1:%.*]] = fneg nsz <2 x double> [[X:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <2 x double> [[TMP1]], <2 x double> poison, <4 x i32> +; CHECK-NEXT: [[R:%.*]] = shufflevector <4 x double> [[Y:%.*]], <4 x double> [[TMP2]], <4 x i32> +; CHECK-NEXT: ret <4 x double> [[R]] ; %e = extractelement <2 x double> %x, i32 1 %n = fneg nsz double %e @@ -105,9 +99,9 @@ define <8 x float> @ext7_v8f32(<8 x float> %x, <8 x float> %y) { define <8 x float> @ext7_v4f32v8f32(<4 x float> %x, <8 x float> %y) { ; CHECK-LABEL: @ext7_v4f32v8f32( -; CHECK-NEXT: [[E:%.*]] = extractelement <4 x float> [[X:%.*]], i32 3 -; CHECK-NEXT: [[N:%.*]] = fneg float [[E]] -; CHECK-NEXT: [[R:%.*]] = insertelement <8 x float> [[Y:%.*]], float [[N]], i32 7 +; CHECK-NEXT: [[TMP1:%.*]] = fneg <4 x float> [[X:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <4 x float> [[TMP1]], <4 x float> poison, <8 x i32> +; CHECK-NEXT: [[R:%.*]] = shufflevector <8 x float> [[Y:%.*]], <8 x float> [[TMP2]], <8 x i32> ; CHECK-NEXT: ret <8 x float> [[R]] ; %e = extractelement <4 x float> %x, i32 3 @@ -141,12 +135,20 @@ define <8 x float> @ext7_v8f32_use1(<8 x float> %x, <8 x 
float> %y) { } define <8 x float> @ext7_v4f32v8f32_use1(<4 x float> %x, <8 x float> %y) { -; CHECK-LABEL: @ext7_v4f32v8f32_use1( -; CHECK-NEXT: [[E:%.*]] = extractelement <4 x float> [[X:%.*]], i32 3 -; CHECK-NEXT: call void @use(float [[E]]) -; CHECK-NEXT: [[N:%.*]] = fneg float [[E]] -; CHECK-NEXT: [[R:%.*]] = insertelement <8 x float> [[Y:%.*]], float [[N]], i32 3 -; CHECK-NEXT: ret <8 x float> [[R]] +; SSE-LABEL: @ext7_v4f32v8f32_use1( +; SSE-NEXT: [[E:%.*]] = extractelement <4 x float> [[X:%.*]], i32 3 +; SSE-NEXT: call void @use(float [[E]]) +; SSE-NEXT: [[TMP1:%.*]] = fneg <4 x float> [[X]] +; SSE-NEXT: [[TMP2:%.*]] = shufflevector <4 x float> [[TMP1]], <4 x float> poison, <8 x i32> +; SSE-NEXT: [[R:%.*]] = shufflevector <8 x float> [[Y:%.*]], <8 x float> [[TMP2]], <8 x i32> +; SSE-NEXT: ret <8 x float> [[R]] +; +; AVX-LABEL: @ext7_v4f32v8f32_use1( +; AVX-NEXT: [[E:%.*]] = extractelement <4 x float> [[X:%.*]], i32 3 +; AVX-NEXT: call void @use(float [[E]]) +; AVX-NEXT: [[N:%.*]] = fneg float [[E]] +; AVX-NEXT: [[R:%.*]] = insertelement <8 x float> [[Y:%.*]], float [[N]], i32 3 +; AVX-NEXT: ret <8 x float> [[R]] ; %e = extractelement <4 x float> %x, i32 3 call void @use(float %e) @@ -220,9 +222,8 @@ define <4 x double> @ext_index_var_v2f64v4f64(<2 x double> %x, <4 x double> %y, define <2 x double> @ext1_v2f64_ins0(<2 x double> %x, <2 x double> %y) { ; CHECK-LABEL: @ext1_v2f64_ins0( -; CHECK-NEXT: [[E:%.*]] = extractelement <2 x double> [[X:%.*]], i32 1 -; CHECK-NEXT: [[N:%.*]] = fneg nsz double [[E]] -; CHECK-NEXT: [[R:%.*]] = insertelement <2 x double> [[Y:%.*]], double [[N]], i32 0 +; CHECK-NEXT: [[TMP1:%.*]] = fneg nsz <2 x double> [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = shufflevector <2 x double> [[Y:%.*]], <2 x double> [[TMP1]], <2 x i32> ; CHECK-NEXT: ret <2 x double> [[R]] ; %e = extractelement <2 x double> %x, i32 1 @@ -234,9 +235,9 @@ define <2 x double> @ext1_v2f64_ins0(<2 x double> %x, <2 x double> %y) { ; Negative test - extract from an index greater than the vector width of the destination define <2 x double> @ext3_v4f64v2f64(<4 x double> %x, <2 x double> %y) { ; CHECK-LABEL: @ext3_v4f64v2f64( -; CHECK-NEXT: [[E:%.*]] = extractelement <4 x double> [[X:%.*]], i32 3 -; CHECK-NEXT: [[N:%.*]] = fneg nsz double [[E]] -; CHECK-NEXT: [[R:%.*]] = insertelement <2 x double> [[Y:%.*]], double [[N]], i32 1 +; CHECK-NEXT: [[TMP1:%.*]] = fneg nsz <4 x double> [[X:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <4 x double> [[TMP1]], <4 x double> poison, <2 x i32> +; CHECK-NEXT: [[R:%.*]] = shufflevector <2 x double> [[Y:%.*]], <2 x double> [[TMP2]], <2 x i32> ; CHECK-NEXT: ret <2 x double> [[R]] ; %e = extractelement <4 x double> %x, i32 3 @@ -246,11 +247,17 @@ define <2 x double> @ext3_v4f64v2f64(<4 x double> %x, <2 x double> %y) { } define <4 x double> @ext1_v2f64v4f64_ins0(<2 x double> %x, <4 x double> %y) { -; CHECK-LABEL: @ext1_v2f64v4f64_ins0( -; CHECK-NEXT: [[E:%.*]] = extractelement <2 x double> [[X:%.*]], i32 1 -; CHECK-NEXT: [[N:%.*]] = fneg nsz double [[E]] -; CHECK-NEXT: [[R:%.*]] = insertelement <4 x double> [[Y:%.*]], double [[N]], i32 0 -; CHECK-NEXT: ret <4 x double> [[R]] +; SSE-LABEL: @ext1_v2f64v4f64_ins0( +; SSE-NEXT: [[TMP1:%.*]] = fneg nsz <2 x double> [[X:%.*]] +; SSE-NEXT: [[TMP2:%.*]] = shufflevector <2 x double> [[TMP1]], <2 x double> poison, <4 x i32> +; SSE-NEXT: [[R:%.*]] = shufflevector <4 x double> [[Y:%.*]], <4 x double> [[TMP2]], <4 x i32> +; SSE-NEXT: ret <4 x double> [[R]] +; +; AVX-LABEL: @ext1_v2f64v4f64_ins0( +; AVX-NEXT: [[E:%.*]] = 
extractelement <2 x double> [[X:%.*]], i32 1 +; AVX-NEXT: [[N:%.*]] = fneg nsz double [[E]] +; AVX-NEXT: [[R:%.*]] = insertelement <4 x double> [[Y:%.*]], double [[N]], i32 0 +; AVX-NEXT: ret <4 x double> [[R]] ; %e = extractelement <2 x double> %x, i32 1 %n = fneg nsz double %e From ea149f5c6d492d449cc902bd5e42e886b82b9fc7 Mon Sep 17 00:00:00 2001 From: hanbeom Date: Thu, 27 Feb 2025 03:53:56 +0900 Subject: [PATCH 02/10] add debug message --- llvm/lib/Transforms/Vectorize/VectorCombine.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp index d391dea85143e..20a2019533ff7 100644 --- a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp +++ b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp @@ -755,6 +755,9 @@ bool VectorCombine::foldInsExtFNeg(Instruction &I) { VecTy, SrcVecTy, SrcMask, CostKind); } + LLVM_DEBUG(dbgs() << "Found an insertion of (extract)fneg : " << I + << "\n OldCost: " << OldCost << " vs NewCost: " << NewCost + << "\n"); if (NewCost > OldCost) return false; From 05d486f6cb1f5c1165c56208b534dcbf2b3fb198 Mon Sep 17 00:00:00 2001 From: hanbeom Date: Thu, 27 Feb 2025 03:54:36 +0900 Subject: [PATCH 03/10] add new instructions to worklist --- llvm/lib/Transforms/Vectorize/VectorCombine.cpp | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp index 20a2019533ff7..536c7f545b44a 100644 --- a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp +++ b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp @@ -761,18 +761,24 @@ bool VectorCombine::foldInsExtFNeg(Instruction &I) { if (NewCost > OldCost) return false; - Value *NewShuf; + Value *NewShuf, *LenChgShuf = nullptr; // insertelt DstVec, (fneg (extractelt SrcVec, Index)), Index Value *VecFNeg = Builder.CreateFNegFMF(SrcVec, FNeg); if (NeedLenChg) { // shuffle DstVec, (shuffle (fneg SrcVec), poison, SrcMask), Mask - Value *LenChgShuf = Builder.CreateShuffleVector(VecFNeg, SrcMask); + LenChgShuf = Builder.CreateShuffleVector(VecFNeg, SrcMask); NewShuf = Builder.CreateShuffleVector(DstVec, LenChgShuf, Mask); + Worklist.pushValue(LenChgShuf); } else { // shuffle DstVec, (fneg SrcVec), Mask NewShuf = Builder.CreateShuffleVector(DstVec, VecFNeg, Mask); } + if (LenChgShuf) + Worklist.pushValue(LenChgShuf); + + Worklist.pushValue(VecFNeg); + Worklist.pushValue(NewShuf); replaceValue(I, *NewShuf); return true; } From b15fc1fc63888395cbcbc6bb125567d614a3a368 Mon Sep 17 00:00:00 2001 From: hanbeom Date: Fri, 28 Feb 2025 04:04:15 +0900 Subject: [PATCH 04/10] update comment --- llvm/lib/Transforms/Vectorize/VectorCombine.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp index 536c7f545b44a..2773a60648525 100644 --- a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp +++ b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp @@ -717,7 +717,8 @@ bool VectorCombine::foldInsExtFNeg(Instruction &I) { if (!SrcVecTy || DstVecScalarTy != SrcVecTy->getScalarType()) return false; - // Ignore bogus insert/extract index. 
+ // Ignore if insert/extract index is out of bounds or destination vector has + // one element unsigned NumDstElts = DstVecTy->getNumElements(); unsigned NumSrcElts = SrcVecTy->getNumElements(); if (InsIdx >= NumDstElts || ExtIdx >= NumSrcElts || NumDstElts == 1) From 1a7640cea73c6cbab3f9d47c3c32f986d704447a Mon Sep 17 00:00:00 2001 From: hanbeom Date: Fri, 28 Feb 2025 04:04:54 +0900 Subject: [PATCH 05/10] remove unnecessary parentheses --- llvm/lib/Transforms/Vectorize/VectorCombine.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp index 2773a60648525..e0ba2a9a29f1b 100644 --- a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp +++ b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp @@ -751,7 +751,7 @@ bool VectorCombine::foldInsExtFNeg(Instruction &I) { SmallVector SrcMask; if (NeedLenChg) { SrcMask.assign(NumDstElts, PoisonMaskElem); - SrcMask[(ExtIdx % NumDstElts)] = ExtIdx; + SrcMask[ExtIdx % NumDstElts] = ExtIdx; NewCost += TTI.getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, VecTy, SrcVecTy, SrcMask, CostKind); } From c315e4efd7bccb439dcd2fb0302d1be3cf765570 Mon Sep 17 00:00:00 2001 From: hanbeom Date: Fri, 28 Feb 2025 04:07:10 +0900 Subject: [PATCH 06/10] Fix miss & misused Worklist --- llvm/lib/Transforms/Vectorize/VectorCombine.cpp | 4 ---- 1 file changed, 4 deletions(-) diff --git a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp index e0ba2a9a29f1b..92730e54217d1 100644 --- a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp +++ b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp @@ -775,11 +775,7 @@ bool VectorCombine::foldInsExtFNeg(Instruction &I) { NewShuf = Builder.CreateShuffleVector(DstVec, VecFNeg, Mask); } - if (LenChgShuf) - Worklist.pushValue(LenChgShuf); - Worklist.pushValue(VecFNeg); - Worklist.pushValue(NewShuf); replaceValue(I, *NewShuf); return true; } From 6e08857a2840cfb2ec11c56fe70bfd5b8d60cc4f Mon Sep 17 00:00:00 2001 From: Hanbum Park Date: Tue, 4 Nov 2025 23:30:30 +0900 Subject: [PATCH 07/10] fix wrong boundary check --- llvm/lib/Transforms/Vectorize/VectorCombine.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp index 92730e54217d1..f61e29e78fe0e 100644 --- a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp +++ b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp @@ -721,7 +721,8 @@ bool VectorCombine::foldInsExtFNeg(Instruction &I) { // one element unsigned NumDstElts = DstVecTy->getNumElements(); unsigned NumSrcElts = SrcVecTy->getNumElements(); - if (InsIdx >= NumDstElts || ExtIdx >= NumSrcElts || NumDstElts == 1) + if (ExtIdx > NumSrcElts || InsIdx >= NumDstElts || + NumDstElts == 1) return false; // We are inserting the negated element into the same lane that we extracted From 87f432c7402d5f2d6894c64554cc96a4d702ede3 Mon Sep 17 00:00:00 2001 From: Hanbum Park Date: Wed, 5 Nov 2025 11:43:34 +0900 Subject: [PATCH 08/10] Apply the modified shufflecost parameter --- llvm/lib/Transforms/Vectorize/VectorCombine.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp index f61e29e78fe0e..444238c8e4872 100644 --- a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp +++ b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp @@ -743,7 +743,7 @@ bool 
VectorCombine::foldInsExtFNeg(Instruction &I) { InstructionCost NewCost = TTI.getArithmeticInstrCost(Instruction::FNeg, SrcVecTy, CostKind) + - TTI.getShuffleCost(TargetTransformInfo::SK_PermuteTwoSrc, DstVecTy, Mask, + TTI.getShuffleCost(TargetTransformInfo::SK_PermuteTwoSrc, DstVecTy, DstVecTy, Mask, CostKind); bool NeedLenChg = SrcVecTy->getNumElements() != NumDstElts; @@ -754,7 +754,7 @@ bool VectorCombine::foldInsExtFNeg(Instruction &I) { SrcMask.assign(NumDstElts, PoisonMaskElem); SrcMask[ExtIdx % NumDstElts] = ExtIdx; NewCost += TTI.getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, - VecTy, SrcVecTy, SrcMask, CostKind); + DstVecTy, SrcVecTy, SrcMask, CostKind); } LLVM_DEBUG(dbgs() << "Found an insertion of (extract)fneg : " << I From 61bc3e039ea9ea455da565c9c2c3f7c8fa156005 Mon Sep 17 00:00:00 2001 From: Hanbum Park Date: Wed, 5 Nov 2025 13:04:06 +0900 Subject: [PATCH 09/10] formatting --- llvm/lib/Transforms/Vectorize/VectorCombine.cpp | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp index 444238c8e4872..49c1f63209541 100644 --- a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp +++ b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp @@ -721,8 +721,7 @@ bool VectorCombine::foldInsExtFNeg(Instruction &I) { // one element unsigned NumDstElts = DstVecTy->getNumElements(); unsigned NumSrcElts = SrcVecTy->getNumElements(); - if (ExtIdx > NumSrcElts || InsIdx >= NumDstElts || - NumDstElts == 1) + if (ExtIdx > NumSrcElts || InsIdx >= NumDstElts || NumDstElts == 1) return false; // We are inserting the negated element into the same lane that we extracted @@ -743,8 +742,8 @@ bool VectorCombine::foldInsExtFNeg(Instruction &I) { InstructionCost NewCost = TTI.getArithmeticInstrCost(Instruction::FNeg, SrcVecTy, CostKind) + - TTI.getShuffleCost(TargetTransformInfo::SK_PermuteTwoSrc, DstVecTy, DstVecTy, Mask, - CostKind); + TTI.getShuffleCost(TargetTransformInfo::SK_PermuteTwoSrc, DstVecTy, + DstVecTy, Mask, CostKind); bool NeedLenChg = SrcVecTy->getNumElements() != NumDstElts; // If the lengths of the two vectors are not equal, From 4ca25c8b7242b24884ed010a9059b10a442d0276 Mon Sep 17 00:00:00 2001 From: Hanbum Park Date: Thu, 6 Nov 2025 14:51:51 +0900 Subject: [PATCH 10/10] Add tests for vectors that NumElement of Dst is bigger than Src --- .../VectorCombine/X86/extract-fneg-insert.ll | 81 ++++++++++++++++++- 1 file changed, 77 insertions(+), 4 deletions(-) diff --git a/llvm/test/Transforms/VectorCombine/X86/extract-fneg-insert.ll b/llvm/test/Transforms/VectorCombine/X86/extract-fneg-insert.ll index cbf33b908a0e2..88fcf359f7c8e 100644 --- a/llvm/test/Transforms/VectorCombine/X86/extract-fneg-insert.ll +++ b/llvm/test/Transforms/VectorCombine/X86/extract-fneg-insert.ll @@ -58,6 +58,19 @@ define <4 x float> @ext2_v2f32v4f32(<2 x float> %x, <4 x float> %y) { ret <4 x float> %r } +define <2 x float> @ext2_v4f32v2f32(<4 x float> %x, <2 x float> %y) { +; CHECK-LABEL: @ext2_v4f32v2f32( +; CHECK-NEXT: [[TMP1:%.*]] = fneg <4 x float> [[X:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <4 x float> [[TMP1]], <4 x float> poison, <2 x i32> +; CHECK-NEXT: [[R:%.*]] = shufflevector <2 x float> [[Y:%.*]], <2 x float> [[TMP2]], <2 x i32> +; CHECK-NEXT: ret <2 x float> [[R]] +; + %e = extractelement <4 x float> %x, i32 3 + %n = fneg float %e + %r = insertelement <2 x float> %y, float %n, i32 1 + ret <2 x float> %r +} + ; Eliminating extract/insert is still profitable. Flags propagate. 
define <2 x double> @ext1_v2f64(<2 x double> %x, <2 x double> %y) { @@ -85,6 +98,19 @@ define <4 x double> @ext1_v2f64v4f64(<2 x double> %x, <4 x double> %y) { ret <4 x double> %r } +define <2 x double> @ext1_v4f64v2f64(<4 x double> %x, <2 x double> %y) { +; CHECK-LABEL: @ext1_v4f64v2f64( +; CHECK-NEXT: [[TMP1:%.*]] = fneg nsz <4 x double> [[X:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <4 x double> [[TMP1]], <4 x double> poison, <2 x i32> +; CHECK-NEXT: [[R:%.*]] = shufflevector <2 x double> [[Y:%.*]], <2 x double> [[TMP2]], <2 x i32> +; CHECK-NEXT: ret <2 x double> [[R]] +; + %e = extractelement <4 x double> %x, i32 3 + %n = fneg nsz double %e + %r = insertelement <2 x double> %y, double %n, i32 1 + ret <2 x double> %r +} + define <8 x float> @ext7_v8f32(<8 x float> %x, <8 x float> %y) { ; CHECK-LABEL: @ext7_v8f32( ; CHECK-NEXT: [[TMP1:%.*]] = fneg <8 x float> [[X:%.*]] @@ -110,6 +136,19 @@ define <8 x float> @ext7_v4f32v8f32(<4 x float> %x, <8 x float> %y) { ret <8 x float> %r } +define <4 x float> @ext7_v8f32v4f32(<8 x float> %x, <4 x float> %y) { +; CHECK-LABEL: @ext7_v8f32v4f32( +; CHECK-NEXT: [[TMP1:%.*]] = fneg <8 x float> [[X:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <8 x float> [[TMP1]], <8 x float> poison, <4 x i32> +; CHECK-NEXT: [[R:%.*]] = shufflevector <4 x float> [[Y:%.*]], <4 x float> [[TMP2]], <4 x i32> +; CHECK-NEXT: ret <4 x float> [[R]] +; + %e = extractelement <8 x float> %x, i32 7 + %n = fneg float %e + %r = insertelement <4 x float> %y, float %n, i32 3 + ret <4 x float> %r +} + ; Same as above with an extra use of the extracted element. define <8 x float> @ext7_v8f32_use1(<8 x float> %x, <8 x float> %y) { @@ -157,6 +196,29 @@ define <8 x float> @ext7_v4f32v8f32_use1(<4 x float> %x, <8 x float> %y) { ret <8 x float> %r } +define <4 x float> @ext7_v8f32v4f32_use1(<8 x float> %x, <4 x float> %y) { +; SSE-LABEL: @ext7_v8f32v4f32_use1( +; SSE-NEXT: [[E:%.*]] = extractelement <8 x float> [[X:%.*]], i32 7 +; SSE-NEXT: call void @use(float [[E]]) +; SSE-NEXT: [[TMP1:%.*]] = fneg <8 x float> [[X]] +; SSE-NEXT: [[TMP2:%.*]] = shufflevector <8 x float> [[TMP1]], <8 x float> poison, <4 x i32> +; SSE-NEXT: [[R:%.*]] = shufflevector <4 x float> [[Y:%.*]], <4 x float> [[TMP2]], <4 x i32> +; SSE-NEXT: ret <4 x float> [[R]] +; +; AVX-LABEL: @ext7_v8f32v4f32_use1( +; AVX-NEXT: [[E:%.*]] = extractelement <8 x float> [[X:%.*]], i32 7 +; AVX-NEXT: call void @use(float [[E]]) +; AVX-NEXT: [[N:%.*]] = fneg float [[E]] +; AVX-NEXT: [[R:%.*]] = insertelement <4 x float> [[Y:%.*]], float [[N]], i32 3 +; AVX-NEXT: ret <4 x float> [[R]] +; + %e = extractelement <8 x float> %x, i32 7 + call void @use(float %e) + %n = fneg float %e + %r = insertelement <4 x float> %y, float %n, i32 3 + ret <4 x float> %r +} + ; Negative test - the transform is likely not profitable if the fneg has another use. 
define <8 x float> @ext7_v8f32_use2(<8 x float> %x, <8 x float> %y) { @@ -189,6 +251,21 @@ define <8 x float> @ext7_v4f32v8f32_use2(<4 x float> %x, <8 x float> %y) { ret <8 x float> %r } +define <4 x float> @ext7_v8f32v4f32_use2(<8 x float> %x, <4 x float> %y) { +; CHECK-LABEL: @ext7_v8f32v4f32_use2( +; CHECK-NEXT: [[E:%.*]] = extractelement <8 x float> [[X:%.*]], i32 7 +; CHECK-NEXT: [[N:%.*]] = fneg float [[E]] +; CHECK-NEXT: call void @use(float [[N]]) +; CHECK-NEXT: [[R:%.*]] = insertelement <4 x float> [[Y:%.*]], float [[N]], i32 3 +; CHECK-NEXT: ret <4 x float> [[R]] +; + %e = extractelement <8 x float> %x, i32 7 + %n = fneg float %e + call void @use(float %n) + %r = insertelement <4 x float> %y, float %n, i32 3 + ret <4 x float> %r +} + ; Negative test - can't convert variable index to a shuffle. define <2 x double> @ext_index_var_v2f64(<2 x double> %x, <2 x double> %y, i32 %index) { @@ -217,9 +294,6 @@ define <4 x double> @ext_index_var_v2f64v4f64(<2 x double> %x, <4 x double> %y, ret <4 x double> %r } -; Negative test - require same extract/insert index for simple shuffle. -; TODO: We could handle this by adjusting the cost calculation. - define <2 x double> @ext1_v2f64_ins0(<2 x double> %x, <2 x double> %y) { ; CHECK-LABEL: @ext1_v2f64_ins0( ; CHECK-NEXT: [[TMP1:%.*]] = fneg nsz <2 x double> [[X:%.*]] @@ -232,7 +306,6 @@ define <2 x double> @ext1_v2f64_ins0(<2 x double> %x, <2 x double> %y) { ret <2 x double> %r } -; Negative test - extract from an index greater than the vector width of the destination define <2 x double> @ext3_v4f64v2f64(<4 x double> %x, <2 x double> %y) { ; CHECK-LABEL: @ext3_v4f64v2f64( ; CHECK-NEXT: [[TMP1:%.*]] = fneg nsz <4 x double> [[X:%.*]]