diff --git a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
index 454fe5a91d375..76878ce4ff88f 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
@@ -205,9 +205,9 @@ Instruction *InstCombinerImpl::foldBitcastExtElt(ExtractElementInst &Ext) {
     if (IsBigEndian)
       ExtIndexC = NumElts.getKnownMinValue() - 1 - ExtIndexC;
     unsigned ShiftAmountC = ExtIndexC * DestWidth;
-    if (!ShiftAmountC ||
-        (isDesirableIntType(X->getType()->getPrimitiveSizeInBits()) &&
-         Ext.getVectorOperand()->hasOneUse())) {
+    if ((!ShiftAmountC ||
+         isDesirableIntType(X->getType()->getPrimitiveSizeInBits())) &&
+        Ext.getVectorOperand()->hasOneUse()) {
       if (ShiftAmountC)
         X = Builder.CreateLShr(X, ShiftAmountC, "extelt.offset");
       if (DestTy->isFloatingPointTy()) {
diff --git a/llvm/test/Transforms/InstCombine/extractelement.ll b/llvm/test/Transforms/InstCombine/extractelement.ll
index 28a4702559c46..2bd719e236137 100644
--- a/llvm/test/Transforms/InstCombine/extractelement.ll
+++ b/llvm/test/Transforms/InstCombine/extractelement.ll
@@ -722,20 +722,14 @@ define i8 @bitcast_scalar_index_variable(i32 %x, i64 %y) {
   ret i8 %r
 }
 
-; extra use is ok if we don't need a shift
+; extra use is not ok, even if we don't need a shift
 define i8 @bitcast_scalar_index0_use(i64 %x) {
-; ANYLE-LABEL: @bitcast_scalar_index0_use(
-; ANYLE-NEXT:    [[V:%.*]] = bitcast i64 [[X:%.*]] to <8 x i8>
-; ANYLE-NEXT:    call void @use(<8 x i8> [[V]])
-; ANYLE-NEXT:    [[R:%.*]] = trunc i64 [[X]] to i8
-; ANYLE-NEXT:    ret i8 [[R]]
-;
-; ANYBE-LABEL: @bitcast_scalar_index0_use(
-; ANYBE-NEXT:    [[V:%.*]] = bitcast i64 [[X:%.*]] to <8 x i8>
-; ANYBE-NEXT:    call void @use(<8 x i8> [[V]])
-; ANYBE-NEXT:    [[R:%.*]] = extractelement <8 x i8> [[V]], i64 0
-; ANYBE-NEXT:    ret i8 [[R]]
+; ANY-LABEL: @bitcast_scalar_index0_use(
+; ANY-NEXT:    [[V:%.*]] = bitcast i64 [[X:%.*]] to <8 x i8>
+; ANY-NEXT:    call void @use(<8 x i8> [[V]])
+; ANY-NEXT:    [[R:%.*]] = extractelement <8 x i8> [[V]], i64 0
+; ANY-NEXT:    ret i8 [[R]]
 ;
   %v = bitcast i64 %x to <8 x i8>
diff --git a/llvm/test/Transforms/InstCombine/vector_insertelt_shuffle.ll b/llvm/test/Transforms/InstCombine/vector_insertelt_shuffle.ll
index f745a40364211..ab2a7faa107c7 100644
--- a/llvm/test/Transforms/InstCombine/vector_insertelt_shuffle.ll
+++ b/llvm/test/Transforms/InstCombine/vector_insertelt_shuffle.ll
@@ -90,4 +90,26 @@ define <4 x float> @bazzzzzz(<4 x float> %x, i32 %a) {
   ret <4 x float> %ins1
 }
 
+; test that foldBitcastExtElt doesn't interfere with shuffle folding
+
+define <4 x half> @bitcast_extract_insert_to_shuffle(i32 %a, i32 %b) {
+; CHECK-LABEL: @bitcast_extract_insert_to_shuffle(
+; CHECK-NEXT:    [[AVEC:%.*]] = bitcast i32 [[A:%.*]] to <2 x half>
+; CHECK-NEXT:    [[BVEC:%.*]] = bitcast i32 [[B:%.*]] to <2 x half>
+; CHECK-NEXT:    [[INS3:%.*]] = shufflevector <2 x half> [[AVEC]], <2 x half> [[BVEC]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT:    ret <4 x half> [[INS3]]
+;
+  %avec = bitcast i32 %a to <2 x half>
+  %a0 = extractelement <2 x half> %avec, i32 0
+  %a1 = extractelement <2 x half> %avec, i32 1
+  %bvec = bitcast i32 %b to <2 x half>
+  %b0 = extractelement <2 x half> %bvec, i32 0
+  %b1 = extractelement <2 x half> %bvec, i32 1
+  %ins0 = insertelement <4 x half> undef, half %a0, i32 0
+  %ins1 = insertelement <4 x half> %ins0, half %a1, i32 1
+  %ins2 = insertelement <4 x half> %ins1, half %b0, i32 2
+  %ins3 = insertelement <4 x half> %ins2, half %b1, i32 3
+  ret <4 x half> %ins3
+}
+