17 changes: 17 additions & 0 deletions llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -16944,6 +16944,23 @@ SDValue DAGCombiner::visitFREEZE(SDNode *N) {
  if (N0.getOpcode() == ISD::SRA || N0.getOpcode() == ISD::SRL)
    return SDValue();

  // fold: bitcast(freeze(load)) -> freeze(bitcast(load))
Collaborator:

Why didn't you try to do this in visitBITCAST and add FREEZE handling into the existing BITCAST(LOAD) fold?

My concern is that we have already had problems with ReplaceAllUsesWith on frozen/unfrozen uses of a node, and I worry this might add to them.

Contributor Author:

It would create a conflict with visitFREEZE and enter an infinite loop: visitBITCAST would hoist the freeze above the bitcast, and the generic freeze(op(x)) -> op(freeze(x)) push in visitFREEZE would immediately sink it back. See the PR description.

Contributor Author:

Also, this will fold only when the FREEZE node has a single use.
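
For instance (a minimal IR sketch mirroring the test_sext_freeze_load_multiuse test added below), when the freeze has two users the combine must not fire, since rewriting one user would still leave the other depending on the frozen value:

define i32 @multiuse_example(ptr %p) {
  %v = load i8, ptr %p
  %f = freeze i8 %v
  ; two users of %f: N->hasOneUse() is false, so the fold is skipped
  %e = sext i8 %f to i32
  %z = zext i8 %f to i32
  %r = add i32 %e, %z
  ret i32 %r
}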

Contributor Author:

@RKSimon Regarding #164618, how does it scale w.r.t. extensions?
I tried to address your comment with the last commit.

And a general question about FREEZE operations: is there a point in the lowering where we should strip them entirely? It seems quite cumbersome that every optimization has to potentially peel them in every use case.

Contributor:

No, these are not simply droppable.
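
For context, a minimal IR illustration of why (standard freeze/poison semantics, not specific to this patch): branching on a poison value is immediate undefined behavior, and the freeze is what pins the value down, so it cannot be dropped without changing semantics:

define i32 @why_freeze_matters(i1 %c) {
  ; if %c is poison, "br i1 %c" would be immediate UB;
  ; freeze turns poison into an arbitrary but fixed i1,
  ; making the branch well-defined
  %f = freeze i1 %c
  br i1 %f, label %t, label %e
t:
  ret i32 1
e:
  ret i32 0
}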

Collaborator:

#164618 only handles BITCASTs - we need to handle the EXT cases separately.

  // fold: sext(freeze(load)) -> freeze(sext(load))
  // fold: zext(freeze(load)) -> freeze(zext(load))
  // This allows the conversion to potentially fold into the load.
  if (N0.getOpcode() == ISD::LOAD && N->hasOneUse()) {
    SDNode *User = *N->user_begin();
    unsigned UserOpcode = User->getOpcode();
    if (UserOpcode == ISD::BITCAST || UserOpcode == ISD::SIGN_EXTEND ||
        UserOpcode == ISD::ZERO_EXTEND) {
      SDValue NewConv =
          DAG.getNode(UserOpcode, SDLoc(User), User->getValueType(0), N0);
      SDValue FrozenConv = DAG.getFreeze(NewConv);
      DAG.ReplaceAllUsesWith(User, FrozenConv.getNode());
      return SDValue(N, 0);
    }
  }

  // Fold freeze(op(x, ...)) -> op(freeze(x), ...).
  // Try to push freeze through instructions that propagate but don't produce
  // poison as far as possible. If an operand of freeze follows three
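
To illustrate the intended effect of the fold above (the new AArch64 tests below exercise exactly this pattern): with the freeze sitting between the load and the extension, ISel cannot select an extending load; once the freeze is hoisted past the sext/zext, the extension folds into the load:

define i32 @sext_frozen_load(ptr %p) {
  %v = load i8, ptr %p
  %f = freeze i8 %v   ; previously blocked the sext from reaching the load
  %e = sext i8 %f to i32
  ret i32 %e
}
; before: a plain byte load followed by a separate sign-extend
; after sext(freeze(load)) -> freeze(sext(load)): a single
; sign-extending load, e.g. "ldrsb w0, [x0]" on AArch64
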
14 changes: 14 additions & 0 deletions llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -3448,6 +3448,20 @@ bool X86TargetLowering::isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT,
  if (!Subtarget.hasDQI() && BitcastVT == MVT::v8i1 && LoadVT == MVT::i8)
    return false;

  // With low alignment, don't convert integer vectors to large scalar loads,
  // because otherwise they get broken into many small scalar loads.
  if (LoadVT.isVector() && LoadVT.isInteger() && !BitcastVT.isVector() &&
      BitcastVT.isInteger()) {
    const DataLayout &DL = DAG.getDataLayout();
    unsigned MinAlign = DL.getPointerSize();
    // Aligned well enough: this will legalize into a clean sequence of loads.
    if (MMO.getAlign() >= MinAlign)
      return true;
    // Aligned too poorly for a scalar this large: keep the vector load.
    if (BitcastVT.getSizeInBits() > 2 * DL.getPointerSizeInBits())
      return false;
  }

  // If both types are legal vectors, it's always ok to convert them.
  if (LoadVT.isVector() && BitcastVT.isVector() &&
      isTypeLegal(LoadVT) && isTypeLegal(BitcastVT))
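
A worked example of the new heuristic (illustrative only, assuming the x86-64 data layout where pointers are 8 bytes / 64 bits): a 256-bit integer bitcast satisfies BitcastVT.getSizeInBits() = 256 > 2 * 64, so the conversion is only treated as beneficial when the load is at least pointer-aligned:

; assumes x86-64; hypothetical function for illustration
define i256 @underaligned_vec_to_scalar(ptr %p) {
  ; align 1 < 8 bytes and 256 > 2 * 64 bits, so isLoadBitCastBeneficial
  ; returns false here and the vector load is kept, rather than being
  ; broken into many small scalar loads
  %v = load <8 x i32>, ptr %p, align 1
  %b = bitcast <8 x i32> %v to i256
  ret i256 %b
}
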
119 changes: 119 additions & 0 deletions llvm/test/CodeGen/AArch64/freeze-bitcast-ext-load.ll
@@ -0,0 +1,119 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=aarch64 | FileCheck %s

define double @test_bitcast_freeze_load(ptr %p) {
; CHECK-LABEL: test_bitcast_freeze_load:
; CHECK: // %bb.0:
; CHECK-NEXT: ldr d0, [x0]
; CHECK-NEXT: ret
%v = load <2 x float>, ptr %p
%f = freeze <2 x float> %v
%b = bitcast <2 x float> %f to double
ret double %b
}

define i32 @test_sext_freeze_load_i8(ptr %p) {
; CHECK-LABEL: test_sext_freeze_load_i8:
; CHECK: // %bb.0:
; CHECK-NEXT: ldrsb w0, [x0]
; CHECK-NEXT: ret
%v = load i8, ptr %p
%f = freeze i8 %v
%e = sext i8 %f to i32
ret i32 %e
}

define i64 @test_sext_freeze_load_i32(ptr %p) {
; CHECK-LABEL: test_sext_freeze_load_i32:
; CHECK: // %bb.0:
; CHECK-NEXT: ldr w8, [x0]
; CHECK-NEXT: sxtw x0, w8
; CHECK-NEXT: ret
%v = load i32, ptr %p
%f = freeze i32 %v
%e = sext i32 %f to i64
ret i64 %e
}

define i64 @test_sext_freeze_load_i16(ptr %p) {
; CHECK-LABEL: test_sext_freeze_load_i16:
; CHECK: // %bb.0:
; CHECK-NEXT: ldrsh x0, [x0]
; CHECK-NEXT: ret
%v = load i16, ptr %p
%f = freeze i16 %v
%e = sext i16 %f to i64
ret i64 %e
}

define i32 @test_zext_freeze_load_i8(ptr %p) {
; CHECK-LABEL: test_zext_freeze_load_i8:
; CHECK: // %bb.0:
; CHECK-NEXT: ldrb w0, [x0]
; CHECK-NEXT: ret
%v = load i8, ptr %p
%f = freeze i8 %v
%e = zext i8 %f to i32
ret i32 %e
}

define i64 @test_zext_freeze_load_i32(ptr %p) {
; CHECK-LABEL: test_zext_freeze_load_i32:
; CHECK: // %bb.0:
; CHECK-NEXT: ldr w0, [x0]
; CHECK-NEXT: ret
%v = load i32, ptr %p
%f = freeze i32 %v
%e = zext i32 %f to i64
ret i64 %e
}

define i64 @test_zext_freeze_load_i16(ptr %p) {
; CHECK-LABEL: test_zext_freeze_load_i16:
; CHECK: // %bb.0:
; CHECK-NEXT: ldrh w0, [x0]
; CHECK-NEXT: ret
%v = load i16, ptr %p
%f = freeze i16 %v
%e = zext i16 %f to i64
ret i64 %e
}

define i32 @test_sext_freeze_load_multiuse(ptr %p) {
; CHECK-LABEL: test_sext_freeze_load_multiuse:
; CHECK: // %bb.0:
; CHECK-NEXT: ldrb w8, [x0]
; CHECK-NEXT: sxtb w9, w8
; CHECK-NEXT: add w0, w9, w8, uxtb
; CHECK-NEXT: ret
%v = load i8, ptr %p
%f = freeze i8 %v
%e = sext i8 %f to i32
%z = zext i8 %f to i32
%r = add i32 %e, %z
ret i32 %r
}

define <4 x i32> @test_sext_freeze_load_v4i16(ptr %p) {
; CHECK-LABEL: test_sext_freeze_load_v4i16:
; CHECK: // %bb.0:
; CHECK-NEXT: ldr d0, [x0]
; CHECK-NEXT: sshll v0.4s, v0.4h, #0
; CHECK-NEXT: ret
%v = load <4 x i16>, ptr %p
%f = freeze <4 x i16> %v
%e = sext <4 x i16> %f to <4 x i32>
ret <4 x i32> %e
}

define <4 x i32> @test_zext_freeze_load_v4i16(ptr %p) {
; CHECK-LABEL: test_zext_freeze_load_v4i16:
; CHECK: // %bb.0:
; CHECK-NEXT: ldr d0, [x0]
; CHECK-NEXT: ushll v0.4s, v0.4h, #0
; CHECK-NEXT: ret
%v = load <4 x i16>, ptr %p
%f = freeze <4 x i16> %v
%e = zext <4 x i16> %f to <4 x i32>
ret <4 x i32> %e
}
2 changes: 1 addition & 1 deletion llvm/test/CodeGen/X86/avx10_2_512bf16-arith.ll
@@ -94,8 +94,8 @@ define <32 x bfloat> @test_int_x86_avx10_maskz_sub_bf16_512(<32 x bfloat> %src,
;
; X86-LABEL: test_int_x86_avx10_maskz_sub_bf16_512:
; X86: # %bb.0:
-; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08]
+; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
; X86-NEXT: vsubbf16 %zmm2, %zmm1, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x75,0xc9,0x5c,0xc2]
; X86-NEXT: vsubbf16 (%eax), %zmm1, %zmm1 # encoding: [0x62,0xf5,0x75,0x48,0x5c,0x08]
; X86-NEXT: vsubbf16 %zmm1, %zmm0, %zmm0 {%k1} # encoding: [0x62,0xf5,0x7d,0x49,0x5c,0xc1]
4 changes: 2 additions & 2 deletions llvm/test/CodeGen/X86/avx10_2bf16-arith.ll
@@ -147,8 +147,8 @@ define <16 x bfloat> @test_int_x86_avx10_maskz_sub_bf16_256(<16 x bfloat> %src,
;
; X86-LABEL: test_int_x86_avx10_maskz_sub_bf16_256:
; X86: # %bb.0:
-; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08]
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
; X86-NEXT: vsubbf16 %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x75,0xa9,0x5c,0xc2]
; X86-NEXT: vsubbf16 (%eax), %ymm1, %ymm1 # encoding: [0x62,0xf5,0x75,0x28,0x5c,0x08]
; X86-NEXT: vsubbf16 %ymm1, %ymm0, %ymm0 {%k1} # encoding: [0x62,0xf5,0x7d,0x29,0x5c,0xc1]
@@ -201,8 +201,8 @@ define <8 x bfloat> @test_int_x86_avx10_maskz_sub_bf16_128(<8 x bfloat> %src, <8
;
; X86-LABEL: test_int_x86_avx10_maskz_sub_bf16_128:
; X86: # %bb.0:
-; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08]
+; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
; X86-NEXT: vsubbf16 %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x75,0x89,0x5c,0xc2]
; X86-NEXT: vsubbf16 (%eax), %xmm1, %xmm1 # encoding: [0x62,0xf5,0x75,0x08,0x5c,0x08]
; X86-NEXT: vsubbf16 %xmm1, %xmm0, %xmm0 {%k1} # encoding: [0x62,0xf5,0x7d,0x09,0x5c,0xc1]
32 changes: 12 additions & 20 deletions llvm/test/CodeGen/X86/avx512-ext.ll
@@ -212,11 +212,9 @@ define <32 x i16> @zext_32x8mem_to_32x16(ptr%i , <32 x i1> %mask) nounwind readn
; KNL-NEXT: vextracti128 $1, %ymm0, %xmm1
; KNL-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; KNL-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; KNL-NEXT: vmovdqu (%rdi), %ymm2
-; KNL-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
-; KNL-NEXT: vextracti128 $1, %ymm2, %xmm2
-; KNL-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
-; KNL-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm2
+; KNL-NEXT: vpmovzxbw {{.*#+}} ymm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
+; KNL-NEXT: vpmovzxbw {{.*#+}} ymm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
+; KNL-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm2
; KNL-NEXT: vpsllw $15, %ymm0, %ymm0
; KNL-NEXT: vpsraw $15, %ymm0, %ymm0
; KNL-NEXT: vpsllw $15, %ymm1, %ymm1
@@ -237,11 +235,9 @@ define <32 x i16> @zext_32x8mem_to_32x16(ptr%i , <32 x i1> %mask) nounwind readn
; AVX512DQNOBW-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512DQNOBW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX512DQNOBW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX512DQNOBW-NEXT: vmovdqu (%rdi), %ymm2
-; AVX512DQNOBW-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
-; AVX512DQNOBW-NEXT: vextracti128 $1, %ymm2, %xmm2
-; AVX512DQNOBW-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
-; AVX512DQNOBW-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm2
+; AVX512DQNOBW-NEXT: vpmovzxbw {{.*#+}} ymm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
+; AVX512DQNOBW-NEXT: vpmovzxbw {{.*#+}} ymm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
+; AVX512DQNOBW-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm2
; AVX512DQNOBW-NEXT: vpsllw $15, %ymm0, %ymm0
; AVX512DQNOBW-NEXT: vpsraw $15, %ymm0, %ymm0
; AVX512DQNOBW-NEXT: vpsllw $15, %ymm1, %ymm1
@@ -261,11 +257,9 @@ define <32 x i16> @sext_32x8mem_to_32x16(ptr%i , <32 x i1> %mask) nounwind readn
; KNL-NEXT: vextracti128 $1, %ymm0, %xmm1
; KNL-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; KNL-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; KNL-NEXT: vmovdqu (%rdi), %ymm2
-; KNL-NEXT: vpmovsxbw %xmm2, %ymm3
-; KNL-NEXT: vextracti128 $1, %ymm2, %xmm2
-; KNL-NEXT: vpmovsxbw %xmm2, %ymm2
-; KNL-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm2
+; KNL-NEXT: vpmovsxbw (%rdi), %ymm2
+; KNL-NEXT: vpmovsxbw 16(%rdi), %ymm3
+; KNL-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm2
; KNL-NEXT: vpsllw $15, %ymm0, %ymm0
; KNL-NEXT: vpsraw $15, %ymm0, %ymm0
; KNL-NEXT: vpsllw $15, %ymm1, %ymm1
@@ -286,11 +280,9 @@ define <32 x i16> @sext_32x8mem_to_32x16(ptr%i , <32 x i1> %mask) nounwind readn
; AVX512DQNOBW-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512DQNOBW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX512DQNOBW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX512DQNOBW-NEXT: vmovdqu (%rdi), %ymm2
-; AVX512DQNOBW-NEXT: vpmovsxbw %xmm2, %ymm3
-; AVX512DQNOBW-NEXT: vextracti128 $1, %ymm2, %xmm2
-; AVX512DQNOBW-NEXT: vpmovsxbw %xmm2, %ymm2
-; AVX512DQNOBW-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm2
+; AVX512DQNOBW-NEXT: vpmovsxbw (%rdi), %ymm2
+; AVX512DQNOBW-NEXT: vpmovsxbw 16(%rdi), %ymm3
+; AVX512DQNOBW-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm2
; AVX512DQNOBW-NEXT: vpsllw $15, %ymm0, %ymm0
; AVX512DQNOBW-NEXT: vpsraw $15, %ymm0, %ymm0
; AVX512DQNOBW-NEXT: vpsllw $15, %ymm1, %ymm1
@@ -171,17 +171,17 @@ define void @load_2byte_chunk_of_4byte_alloca_with_zero_upper_half(ptr %src, i64
define void @load_1byte_chunk_of_8byte_alloca_with_zero_upper_half(ptr %src, i64 %byteOff, ptr %dst) nounwind {
; X64-NO-BMI2-LABEL: load_1byte_chunk_of_8byte_alloca_with_zero_upper_half:
; X64-NO-BMI2: # %bb.0:
-; X64-NO-BMI2-NEXT: leal (,%rsi,8), %ecx
 ; X64-NO-BMI2-NEXT: movl (%rdi), %eax
+; X64-NO-BMI2-NEXT: leal (,%rsi,8), %ecx
; X64-NO-BMI2-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-NO-BMI2-NEXT: shrq %cl, %rax
; X64-NO-BMI2-NEXT: movb %al, (%rdx)
; X64-NO-BMI2-NEXT: retq
;
; X64-BMI2-LABEL: load_1byte_chunk_of_8byte_alloca_with_zero_upper_half:
; X64-BMI2: # %bb.0:
-; X64-BMI2-NEXT: shll $3, %esi
 ; X64-BMI2-NEXT: movl (%rdi), %eax
+; X64-BMI2-NEXT: shll $3, %esi
; X64-BMI2-NEXT: shrxq %rsi, %rax, %rax
; X64-BMI2-NEXT: movb %al, (%rdx)
; X64-BMI2-NEXT: retq
@@ -248,17 +248,17 @@ define void @load_1byte_chunk_of_8byte_alloca_with_zero_upper_half(ptr %src, i64
define void @load_2byte_chunk_of_8byte_alloca_with_zero_upper_half(ptr %src, i64 %byteOff, ptr %dst) nounwind {
; X64-NO-BMI2-LABEL: load_2byte_chunk_of_8byte_alloca_with_zero_upper_half:
; X64-NO-BMI2: # %bb.0:
-; X64-NO-BMI2-NEXT: leal (,%rsi,8), %ecx
 ; X64-NO-BMI2-NEXT: movl (%rdi), %eax
+; X64-NO-BMI2-NEXT: leal (,%rsi,8), %ecx
; X64-NO-BMI2-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-NO-BMI2-NEXT: shrq %cl, %rax
; X64-NO-BMI2-NEXT: movw %ax, (%rdx)
; X64-NO-BMI2-NEXT: retq
;
; X64-BMI2-LABEL: load_2byte_chunk_of_8byte_alloca_with_zero_upper_half:
; X64-BMI2: # %bb.0:
-; X64-BMI2-NEXT: shll $3, %esi
 ; X64-BMI2-NEXT: movl (%rdi), %eax
+; X64-BMI2-NEXT: shll $3, %esi
; X64-BMI2-NEXT: shrxq %rsi, %rax, %rax
; X64-BMI2-NEXT: movw %ax, (%rdx)
; X64-BMI2-NEXT: retq
@@ -324,17 +324,17 @@ define void @load_4byte_chunk_of_8byte_alloca_with_zero_upper_half(ptr %src, i64
define void @load_4byte_chunk_of_8byte_alloca_with_zero_upper_half(ptr %src, i64 %byteOff, ptr %dst) nounwind {
; X64-NO-BMI2-LABEL: load_4byte_chunk_of_8byte_alloca_with_zero_upper_half:
; X64-NO-BMI2: # %bb.0:
-; X64-NO-BMI2-NEXT: leal (,%rsi,8), %ecx
 ; X64-NO-BMI2-NEXT: movl (%rdi), %eax
+; X64-NO-BMI2-NEXT: leal (,%rsi,8), %ecx
; X64-NO-BMI2-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-NO-BMI2-NEXT: shrq %cl, %rax
; X64-NO-BMI2-NEXT: movl %eax, (%rdx)
; X64-NO-BMI2-NEXT: retq
;
; X64-BMI2-LABEL: load_4byte_chunk_of_8byte_alloca_with_zero_upper_half:
; X64-BMI2: # %bb.0:
-; X64-BMI2-NEXT: shll $3, %esi
 ; X64-BMI2-NEXT: movl (%rdi), %eax
+; X64-BMI2-NEXT: shll $3, %esi
; X64-BMI2-NEXT: shrxq %rsi, %rax, %rax
; X64-BMI2-NEXT: movl %eax, (%rdx)
; X64-BMI2-NEXT: retq