Skip to content

Conversation

@arsenm
Copy link
Contributor

@arsenm arsenm commented Nov 1, 2025

No description provided.

Copy link
Contributor Author

arsenm commented Nov 1, 2025

Warning

This pull request is not mergeable via GitHub because a downstack PR (a pull request earlier in the same stack, which this one depends on) is still open. Once all requirements are satisfied, merge this PR as a stack on Graphite.
Learn more

This stack of pull requests is managed by Graphite. Learn more about stacking.

@llvmbot
Copy link
Member

llvmbot commented Nov 1, 2025

@llvm/pr-subscribers-llvm-transforms
@llvm/pr-subscribers-llvm-regalloc

@llvm/pr-subscribers-backend-x86

Author: Matt Arsenault (arsenm)

Changes

Patch is 612.50 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/165957.diff

37 Files Affected:

  • (modified) llvm/lib/Target/X86/X86Subtarget.h (+2)
  • (modified) llvm/test/CodeGen/X86/3addr-16bit.ll (+24-24)
  • (modified) llvm/test/CodeGen/X86/atomic-rm-bit-test.ll (+13-9)
  • (modified) llvm/test/CodeGen/X86/atomicrmw-fadd-fp-vector.ll (+2-1)
  • (modified) llvm/test/CodeGen/X86/bitcast-vector-bool.ll (+16-16)
  • (modified) llvm/test/CodeGen/X86/coalescer-dead-flag-verifier-error.ll (+2-2)
  • (modified) llvm/test/CodeGen/X86/fold-loop-of-urem.ll (+38-43)
  • (modified) llvm/test/CodeGen/X86/freeze-binary.ll (+14-12)
  • (modified) llvm/test/CodeGen/X86/i128-mul.ll (+87-91)
  • (modified) llvm/test/CodeGen/X86/icmp-abs-C.ll (+11-11)
  • (modified) llvm/test/CodeGen/X86/masked_gather_scatter.ll (+6-6)
  • (modified) llvm/test/CodeGen/X86/midpoint-int.ll (+14-14)
  • (modified) llvm/test/CodeGen/X86/mmx-arith.ll (+1-2)
  • (modified) llvm/test/CodeGen/X86/mul-constant-i16.ll (+4-4)
  • (modified) llvm/test/CodeGen/X86/mul-constant-i32.ll (+8-8)
  • (modified) llvm/test/CodeGen/X86/mul-constant-i8.ll (+2-2)
  • (modified) llvm/test/CodeGen/X86/optimize-max-0.ll (+107-104)
  • (modified) llvm/test/CodeGen/X86/parity.ll (+15-15)
  • (modified) llvm/test/CodeGen/X86/rotate-extract.ll (+2-2)
  • (modified) llvm/test/CodeGen/X86/smul_fix.ll (+4-4)
  • (modified) llvm/test/CodeGen/X86/sshl_sat.ll (+20-20)
  • (modified) llvm/test/CodeGen/X86/sshl_sat_vec.ll (+56-57)
  • (modified) llvm/test/CodeGen/X86/stackmap.ll (+6-3)
  • (modified) llvm/test/CodeGen/X86/subvectorwise-store-of-vector-splat.ll (+105-105)
  • (modified) llvm/test/CodeGen/X86/twoaddr-lea.ll (+1-1)
  • (modified) llvm/test/CodeGen/X86/umul_fix.ll (+4-4)
  • (modified) llvm/test/CodeGen/X86/ushl_sat.ll (+14-14)
  • (modified) llvm/test/CodeGen/X86/ushl_sat_vec.ll (+55-56)
  • (modified) llvm/test/CodeGen/X86/vector-mulfix-legalize.ll (+17-17)
  • (modified) llvm/test/CodeGen/X86/vector-reduce-xor-bool.ll (+80-80)
  • (modified) llvm/test/CodeGen/X86/wide-scalar-shift-by-byte-multiple-legalization.ll (+3023-3058)
  • (modified) llvm/test/CodeGen/X86/wide-scalar-shift-legalization.ll (+668-676)
  • (modified) llvm/test/CodeGen/X86/widen-load-of-small-alloca-with-zero-upper-half.ll (+165-163)
  • (modified) llvm/test/CodeGen/X86/widen-load-of-small-alloca.ll (+49-46)
  • (modified) llvm/test/CodeGen/X86/x86-shrink-wrapping.ll (+9-9)
  • (modified) llvm/test/CodeGen/X86/xor.ll (+66-66)
  • (modified) llvm/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll (+11-10)
diff --git a/llvm/lib/Target/X86/X86Subtarget.h b/llvm/lib/Target/X86/X86Subtarget.h
index 868f41375b96b..4f5aadca361fe 100644
--- a/llvm/lib/Target/X86/X86Subtarget.h
+++ b/llvm/lib/Target/X86/X86Subtarget.h
@@ -419,6 +419,8 @@ class X86Subtarget final : public X86GenSubtargetInfo {
   /// Enable the MachineScheduler pass for all X86 subtargets.
   bool enableMachineScheduler() const override { return true; }
 
+  bool enableTerminalRule() const override { return true; }
+
   bool enableEarlyIfConversion() const override;
 
   void getPostRAMutations(std::vector<std::unique_ptr<ScheduleDAGMutation>>
diff --git a/llvm/test/CodeGen/X86/3addr-16bit.ll b/llvm/test/CodeGen/X86/3addr-16bit.ll
index c9390d91d59c2..2b692bff0461e 100644
--- a/llvm/test/CodeGen/X86/3addr-16bit.ll
+++ b/llvm/test/CodeGen/X86/3addr-16bit.ll
@@ -10,27 +10,27 @@ define zeroext i16 @test1(i16 zeroext %c, i16 zeroext %k) nounwind ssp {
 ; X64-LABEL: test1:
 ; X64:       ## %bb.0: ## %entry
 ; X64-NEXT:    movl %esi, %eax
-; X64-NEXT:    incl %eax
-; X64-NEXT:    cmpw %di, %si
+; X64-NEXT:    incl %esi
+; X64-NEXT:    cmpw %di, %ax
 ; X64-NEXT:    jne LBB0_2
 ; X64-NEXT:  ## %bb.1: ## %bb
 ; X64-NEXT:    pushq %rbx
-; X64-NEXT:    movzwl %ax, %ebx
+; X64-NEXT:    movzwl %si, %ebx
 ; X64-NEXT:    movl %ebx, %edi
 ; X64-NEXT:    callq _foo
 ; X64-NEXT:    movl %ebx, %eax
 ; X64-NEXT:    popq %rbx
 ; X64-NEXT:    retq
 ; X64-NEXT:  LBB0_2: ## %bb1
-; X64-NEXT:    movzwl %ax, %eax
+; X64-NEXT:    movzwl %si, %eax
 ; X64-NEXT:    retq
 ;
 ; X86-LABEL: test1:
 ; X86:       ## %bb.0: ## %entry
 ; X86-NEXT:    pushl %esi
 ; X86-NEXT:    subl $8, %esp
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl %eax, %ecx
 ; X86-NEXT:    incl %eax
 ; X86-NEXT:    cmpw {{[0-9]+}}(%esp), %cx
 ; X86-NEXT:    jne LBB0_2
@@ -63,27 +63,27 @@ define zeroext i16 @test2(i16 zeroext %c, i16 zeroext %k) nounwind ssp {
 ; X64-LABEL: test2:
 ; X64:       ## %bb.0: ## %entry
 ; X64-NEXT:    movl %esi, %eax
-; X64-NEXT:    decl %eax
-; X64-NEXT:    cmpw %di, %si
+; X64-NEXT:    decl %esi
+; X64-NEXT:    cmpw %di, %ax
 ; X64-NEXT:    jne LBB1_2
 ; X64-NEXT:  ## %bb.1: ## %bb
 ; X64-NEXT:    pushq %rbx
-; X64-NEXT:    movzwl %ax, %ebx
+; X64-NEXT:    movzwl %si, %ebx
 ; X64-NEXT:    movl %ebx, %edi
 ; X64-NEXT:    callq _foo
 ; X64-NEXT:    movl %ebx, %eax
 ; X64-NEXT:    popq %rbx
 ; X64-NEXT:    retq
 ; X64-NEXT:  LBB1_2: ## %bb1
-; X64-NEXT:    movzwl %ax, %eax
+; X64-NEXT:    movzwl %si, %eax
 ; X64-NEXT:    retq
 ;
 ; X86-LABEL: test2:
 ; X86:       ## %bb.0: ## %entry
 ; X86-NEXT:    pushl %esi
 ; X86-NEXT:    subl $8, %esp
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl %eax, %ecx
 ; X86-NEXT:    decl %eax
 ; X86-NEXT:    cmpw {{[0-9]+}}(%esp), %cx
 ; X86-NEXT:    jne LBB1_2
@@ -118,27 +118,27 @@ define zeroext i16 @test3(i16 zeroext %c, i16 zeroext %k) nounwind ssp {
 ; X64-LABEL: test3:
 ; X64:       ## %bb.0: ## %entry
 ; X64-NEXT:    movl %esi, %eax
-; X64-NEXT:    addl $2, %eax
-; X64-NEXT:    cmpw %di, %si
+; X64-NEXT:    addl $2, %esi
+; X64-NEXT:    cmpw %di, %ax
 ; X64-NEXT:    jne LBB2_2
 ; X64-NEXT:  ## %bb.1: ## %bb
 ; X64-NEXT:    pushq %rbx
-; X64-NEXT:    movzwl %ax, %ebx
+; X64-NEXT:    movzwl %si, %ebx
 ; X64-NEXT:    movl %ebx, %edi
 ; X64-NEXT:    callq _foo
 ; X64-NEXT:    movl %ebx, %eax
 ; X64-NEXT:    popq %rbx
 ; X64-NEXT:    retq
 ; X64-NEXT:  LBB2_2: ## %bb1
-; X64-NEXT:    movzwl %ax, %eax
+; X64-NEXT:    movzwl %si, %eax
 ; X64-NEXT:    retq
 ;
 ; X86-LABEL: test3:
 ; X86:       ## %bb.0: ## %entry
 ; X86-NEXT:    pushl %esi
 ; X86-NEXT:    subl $8, %esp
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl %eax, %ecx
 ; X86-NEXT:    addl $2, %eax
 ; X86-NEXT:    cmpw {{[0-9]+}}(%esp), %cx
 ; X86-NEXT:    jne LBB2_2
@@ -171,19 +171,19 @@ define zeroext i16 @test4(i16 zeroext %c, i16 zeroext %k) nounwind ssp {
 ; X64-LABEL: test4:
 ; X64:       ## %bb.0: ## %entry
 ; X64-NEXT:    movl %esi, %eax
-; X64-NEXT:    addl %edi, %eax
-; X64-NEXT:    cmpw %di, %si
+; X64-NEXT:    addl %edi, %esi
+; X64-NEXT:    cmpw %di, %ax
 ; X64-NEXT:    jne LBB3_2
 ; X64-NEXT:  ## %bb.1: ## %bb
 ; X64-NEXT:    pushq %rbx
-; X64-NEXT:    movzwl %ax, %ebx
+; X64-NEXT:    movzwl %si, %ebx
 ; X64-NEXT:    movl %ebx, %edi
 ; X64-NEXT:    callq _foo
 ; X64-NEXT:    movl %ebx, %eax
 ; X64-NEXT:    popq %rbx
 ; X64-NEXT:    retq
 ; X64-NEXT:  LBB3_2: ## %bb1
-; X64-NEXT:    movzwl %ax, %eax
+; X64-NEXT:    movzwl %si, %eax
 ; X64-NEXT:    retq
 ;
 ; X86-LABEL: test4:
@@ -191,8 +191,8 @@ define zeroext i16 @test4(i16 zeroext %c, i16 zeroext %k) nounwind ssp {
 ; X86-NEXT:    pushl %esi
 ; X86-NEXT:    subl $8, %esp
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT:    movl %edx, %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl %eax, %edx
 ; X86-NEXT:    addl %ecx, %eax
 ; X86-NEXT:    cmpw %cx, %dx
 ; X86-NEXT:    jne LBB3_2
diff --git a/llvm/test/CodeGen/X86/atomic-rm-bit-test.ll b/llvm/test/CodeGen/X86/atomic-rm-bit-test.ll
index b4d40fee01e41..71887e369bd18 100644
--- a/llvm/test/CodeGen/X86/atomic-rm-bit-test.ll
+++ b/llvm/test/CodeGen/X86/atomic-rm-bit-test.ll
@@ -2156,15 +2156,17 @@ define zeroext i16 @atomic_shl1_mask01_xor_16_gpr_brz(ptr %v, i16 zeroext %c) no
 ; X64-LABEL: atomic_shl1_mask01_xor_16_gpr_brz:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movl %esi, %ecx
+; X64-NEXT:    movl %ecx, %edx
 ; X64-NEXT:    andb $15, %cl
-; X64-NEXT:    movl $1, %edx
-; X64-NEXT:    shll %cl, %edx
+; X64-NEXT:    movl $1, %esi
+; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X64-NEXT:    shll %cl, %esi
 ; X64-NEXT:    movzwl (%rdi), %eax
 ; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB34_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %ecx
-; X64-NEXT:    xorl %edx, %ecx
+; X64-NEXT:    xorl %esi, %ecx
 ; X64-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X64-NEXT:    lock cmpxchgw %cx, (%rdi)
 ; X64-NEXT:    # kill: def $ax killed $ax def $eax
@@ -2172,12 +2174,12 @@ define zeroext i16 @atomic_shl1_mask01_xor_16_gpr_brz(ptr %v, i16 zeroext %c) no
 ; X64-NEXT:  # %bb.2: # %atomicrmw.end
 ; X64-NEXT:    movzwl %ax, %ecx
 ; X64-NEXT:    movw $123, %ax
-; X64-NEXT:    testl %ecx, %edx
+; X64-NEXT:    testl %ecx, %esi
 ; X64-NEXT:    je .LBB34_3
 ; X64-NEXT:  # %bb.4: # %return
 ; X64-NEXT:    retq
 ; X64-NEXT:  .LBB34_3: # %if.then
-; X64-NEXT:    movzwl %si, %eax
+; X64-NEXT:    movzwl %dx, %eax
 ; X64-NEXT:    movzwl (%rdi,%rax,2), %eax
 ; X64-NEXT:    retq
 entry:
@@ -3398,10 +3400,12 @@ define zeroext i16 @atomic_shl1_mask01_and_16_gpr_brnz(ptr %v, i16 zeroext %c) n
 ; X64-LABEL: atomic_shl1_mask01_and_16_gpr_brnz:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movl %esi, %ecx
+; X64-NEXT:    movl %ecx, %edx
 ; X64-NEXT:    andb $15, %cl
-; X64-NEXT:    movl $1, %edx
-; X64-NEXT:    shll %cl, %edx
+; X64-NEXT:    movl $1, %esi
+; X64-NEXT:    shll %cl, %esi
 ; X64-NEXT:    movl $-2, %r8d
+; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
 ; X64-NEXT:    roll %cl, %r8d
 ; X64-NEXT:    movzwl (%rdi), %eax
 ; X64-NEXT:    .p2align 4
@@ -3415,10 +3419,10 @@ define zeroext i16 @atomic_shl1_mask01_and_16_gpr_brnz(ptr %v, i16 zeroext %c) n
 ; X64-NEXT:    jne .LBB52_1
 ; X64-NEXT:  # %bb.2: # %atomicrmw.end
 ; X64-NEXT:    movzwl %ax, %eax
-; X64-NEXT:    testl %eax, %edx
+; X64-NEXT:    testl %eax, %esi
 ; X64-NEXT:    je .LBB52_3
 ; X64-NEXT:  # %bb.4: # %if.then
-; X64-NEXT:    movzwl %si, %eax
+; X64-NEXT:    movzwl %dx, %eax
 ; X64-NEXT:    movzwl (%rdi,%rax,2), %eax
 ; X64-NEXT:    retq
 ; X64-NEXT:  .LBB52_3:
diff --git a/llvm/test/CodeGen/X86/atomicrmw-fadd-fp-vector.ll b/llvm/test/CodeGen/X86/atomicrmw-fadd-fp-vector.ll
index 105ee7f82ee79..e118f5dbc1534 100644
--- a/llvm/test/CodeGen/X86/atomicrmw-fadd-fp-vector.ll
+++ b/llvm/test/CodeGen/X86/atomicrmw-fadd-fp-vector.ll
@@ -46,8 +46,9 @@ define <2 x half> @test_atomicrmw_fadd_v2f16_align4(ptr addrspace(1) %ptr, <2 x
 ; CHECK-NEXT:    orl %edx, %eax
 ; CHECK-NEXT:    lock cmpxchgl %ecx, (%rbx)
 ; CHECK-NEXT:    setne %cl
-; CHECK-NEXT:    pinsrw $0, %eax, %xmm0
+; CHECK-NEXT:    movl %eax, %edx
 ; CHECK-NEXT:    shrl $16, %eax
+; CHECK-NEXT:    pinsrw $0, %edx, %xmm0
 ; CHECK-NEXT:    pinsrw $0, %eax, %xmm1
 ; CHECK-NEXT:    testb %cl, %cl
 ; CHECK-NEXT:    jne .LBB0_1
diff --git a/llvm/test/CodeGen/X86/bitcast-vector-bool.ll b/llvm/test/CodeGen/X86/bitcast-vector-bool.ll
index 86d7df0c2d648..fae1ff90dd8d5 100644
--- a/llvm/test/CodeGen/X86/bitcast-vector-bool.ll
+++ b/llvm/test/CodeGen/X86/bitcast-vector-bool.ll
@@ -216,8 +216,8 @@ define i1 @trunc_v8i16_cmp(<8 x i16> %a0) nounwind {
 define i8 @bitcast_v16i8_to_v2i8(<16 x i8> %a0) nounwind {
 ; SSE-LABEL: bitcast_v16i8_to_v2i8:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    pmovmskb %xmm0, %ecx
-; SSE-NEXT:    movl %ecx, %eax
+; SSE-NEXT:    pmovmskb %xmm0, %eax
+; SSE-NEXT:    movl %eax, %ecx
 ; SSE-NEXT:    shrl $8, %eax
 ; SSE-NEXT:    addb %cl, %al
 ; SSE-NEXT:    # kill: def $al killed $al killed $eax
@@ -225,8 +225,8 @@ define i8 @bitcast_v16i8_to_v2i8(<16 x i8> %a0) nounwind {
 ;
 ; AVX12-LABEL: bitcast_v16i8_to_v2i8:
 ; AVX12:       # %bb.0:
-; AVX12-NEXT:    vpmovmskb %xmm0, %ecx
-; AVX12-NEXT:    movl %ecx, %eax
+; AVX12-NEXT:    vpmovmskb %xmm0, %eax
+; AVX12-NEXT:    movl %eax, %ecx
 ; AVX12-NEXT:    shrl $8, %eax
 ; AVX12-NEXT:    addb %cl, %al
 ; AVX12-NEXT:    # kill: def $al killed $al killed $eax
@@ -441,8 +441,8 @@ define i8 @bitcast_v16i16_to_v2i8(<16 x i16> %a0) nounwind {
 ; SSE-LABEL: bitcast_v16i16_to_v2i8:
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    packsswb %xmm1, %xmm0
-; SSE-NEXT:    pmovmskb %xmm0, %ecx
-; SSE-NEXT:    movl %ecx, %eax
+; SSE-NEXT:    pmovmskb %xmm0, %eax
+; SSE-NEXT:    movl %eax, %ecx
 ; SSE-NEXT:    shrl $8, %eax
 ; SSE-NEXT:    addb %cl, %al
 ; SSE-NEXT:    # kill: def $al killed $al killed $eax
@@ -452,8 +452,8 @@ define i8 @bitcast_v16i16_to_v2i8(<16 x i16> %a0) nounwind {
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpmovmskb %xmm0, %ecx
-; AVX1-NEXT:    movl %ecx, %eax
+; AVX1-NEXT:    vpmovmskb %xmm0, %eax
+; AVX1-NEXT:    movl %eax, %ecx
 ; AVX1-NEXT:    shrl $8, %eax
 ; AVX1-NEXT:    addb %cl, %al
 ; AVX1-NEXT:    # kill: def $al killed $al killed $eax
@@ -464,8 +464,8 @@ define i8 @bitcast_v16i16_to_v2i8(<16 x i16> %a0) nounwind {
 ; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpmovmskb %xmm0, %ecx
-; AVX2-NEXT:    movl %ecx, %eax
+; AVX2-NEXT:    vpmovmskb %xmm0, %eax
+; AVX2-NEXT:    movl %eax, %ecx
 ; AVX2-NEXT:    shrl $8, %eax
 ; AVX2-NEXT:    addb %cl, %al
 ; AVX2-NEXT:    # kill: def $al killed $al killed $eax
@@ -762,8 +762,8 @@ define i8 @bitcast_v16i32_to_v2i8(<16 x i32> %a0) nounwind {
 ; SSE-NEXT:    packssdw %xmm3, %xmm2
 ; SSE-NEXT:    packssdw %xmm1, %xmm0
 ; SSE-NEXT:    packsswb %xmm2, %xmm0
-; SSE-NEXT:    pmovmskb %xmm0, %ecx
-; SSE-NEXT:    movl %ecx, %eax
+; SSE-NEXT:    pmovmskb %xmm0, %eax
+; SSE-NEXT:    movl %eax, %ecx
 ; SSE-NEXT:    shrl $8, %eax
 ; SSE-NEXT:    addb %cl, %al
 ; SSE-NEXT:    # kill: def $al killed $al killed $eax
@@ -776,8 +776,8 @@ define i8 @bitcast_v16i32_to_v2i8(<16 x i32> %a0) nounwind {
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; AVX1-NEXT:    vpackssdw %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpmovmskb %xmm0, %ecx
-; AVX1-NEXT:    movl %ecx, %eax
+; AVX1-NEXT:    vpmovmskb %xmm0, %eax
+; AVX1-NEXT:    movl %eax, %ecx
 ; AVX1-NEXT:    shrl $8, %eax
 ; AVX1-NEXT:    addb %cl, %al
 ; AVX1-NEXT:    # kill: def $al killed $al killed $eax
@@ -793,8 +793,8 @@ define i8 @bitcast_v16i32_to_v2i8(<16 x i32> %a0) nounwind {
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
-; AVX2-NEXT:    vpmovmskb %xmm0, %ecx
-; AVX2-NEXT:    movl %ecx, %eax
+; AVX2-NEXT:    vpmovmskb %xmm0, %eax
+; AVX2-NEXT:    movl %eax, %ecx
 ; AVX2-NEXT:    shrl $8, %eax
 ; AVX2-NEXT:    addb %cl, %al
 ; AVX2-NEXT:    # kill: def $al killed $al killed $eax
diff --git a/llvm/test/CodeGen/X86/coalescer-dead-flag-verifier-error.ll b/llvm/test/CodeGen/X86/coalescer-dead-flag-verifier-error.ll
index 4d41c8406f6e0..a42a715bdc6ab 100644
--- a/llvm/test/CodeGen/X86/coalescer-dead-flag-verifier-error.ll
+++ b/llvm/test/CodeGen/X86/coalescer-dead-flag-verifier-error.ll
@@ -7,8 +7,8 @@
 define void @_ZNK4llvm5APInt21multiplicativeInverseERKS0_(ptr %r) {
 ; CHECK-LABEL: _ZNK4llvm5APInt21multiplicativeInverseERKS0_:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    xorl %edx, %edx
+; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    xorl %ecx, %ecx
 ; CHECK-NEXT:    jmp .LBB0_1
 ; CHECK-NEXT:    .p2align 4
@@ -68,8 +68,8 @@ _ZNK4llvm5APInt13getActiveBitsEv.exit.i.i:        ; preds = %for.body.i.i.i.i.i
 define void @_ZNK4llvm5APInt21multiplicativeInverseERKS0__assert(ptr %r) {
 ; CHECK-LABEL: _ZNK4llvm5APInt21multiplicativeInverseERKS0__assert:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    xorl %edx, %edx
+; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    xorl %ecx, %ecx
 ; CHECK-NEXT:    jmp .LBB1_1
 ; CHECK-NEXT:    .p2align 4
diff --git a/llvm/test/CodeGen/X86/fold-loop-of-urem.ll b/llvm/test/CodeGen/X86/fold-loop-of-urem.ll
index c1beb7c803b2b..c9c88f7258435 100644
--- a/llvm/test/CodeGen/X86/fold-loop-of-urem.ll
+++ b/llvm/test/CodeGen/X86/fold-loop-of-urem.ll
@@ -1031,31 +1031,30 @@ define void @simple_urem_fail_intermediate_inc(i32 %N, i32 %rem_amt) nounwind {
 ; CHECK-NEXT:    testl %edi, %edi
 ; CHECK-NEXT:    je .LBB17_4
 ; CHECK-NEXT:  # %bb.1: # %for.body.preheader
-; CHECK-NEXT:    pushq %r15
+; CHECK-NEXT:    pushq %rbp
 ; CHECK-NEXT:    pushq %r14
 ; CHECK-NEXT:    pushq %rbx
 ; CHECK-NEXT:    movl %esi, %ebx
 ; CHECK-NEXT:    movl %edi, %r14d
 ; CHECK-NEXT:    negl %r14d
-; CHECK-NEXT:    movl $1, %r15d
+; CHECK-NEXT:    movl $1, %ebp
 ; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB17_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    movl %r15d, %eax
+; CHECK-NEXT:    movl %ebp, %eax
 ; CHECK-NEXT:    xorl %edx, %edx
 ; CHECK-NEXT:    divl %ebx
 ; CHECK-NEXT:    movl %edx, %edi
 ; CHECK-NEXT:    callq use.i32@PLT
-; CHECK-NEXT:    leal 1(%r14,%r15), %eax
-; CHECK-NEXT:    movl %r15d, %ecx
-; CHECK-NEXT:    incl %ecx
+; CHECK-NEXT:    movl %ebp, %eax
+; CHECK-NEXT:    incl %ebp
+; CHECK-NEXT:    leal 1(%r14,%rax), %eax
 ; CHECK-NEXT:    cmpl $1, %eax
-; CHECK-NEXT:    movl %ecx, %r15d
 ; CHECK-NEXT:    jne .LBB17_2
 ; CHECK-NEXT:  # %bb.3:
 ; CHECK-NEXT:    popq %rbx
 ; CHECK-NEXT:    popq %r14
-; CHECK-NEXT:    popq %r15
+; CHECK-NEXT:    popq %rbp
 ; CHECK-NEXT:  .LBB17_4: # %for.cond.cleanup
 ; CHECK-NEXT:    retq
 entry:
@@ -1199,32 +1198,31 @@ define void @simple_urem_to_sel_non_zero_start_through_add(i32 %N, i32 %rem_amt_
 ; CHECK-NEXT:    cmpl $3, %edi
 ; CHECK-NEXT:    jb .LBB21_4
 ; CHECK-NEXT:  # %bb.1: # %for.body.preheader
-; CHECK-NEXT:    pushq %r15
+; CHECK-NEXT:    pushq %rbp
 ; CHECK-NEXT:    pushq %r14
 ; CHECK-NEXT:    pushq %rbx
 ; CHECK-NEXT:    movl %esi, %ebx
 ; CHECK-NEXT:    movl %edi, %r14d
 ; CHECK-NEXT:    orl $16, %ebx
 ; CHECK-NEXT:    negl %r14d
-; CHECK-NEXT:    movl $7, %r15d
+; CHECK-NEXT:    movl $7, %ebp
 ; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB21_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    movl %r15d, %eax
+; CHECK-NEXT:    movl %ebp, %eax
 ; CHECK-NEXT:    xorl %edx, %edx
 ; CHECK-NEXT:    divl %ebx
 ; CHECK-NEXT:    movl %edx, %edi
 ; CHECK-NEXT:    callq use.i32@PLT
-; CHECK-NEXT:    leal 1(%r14,%r15), %eax
-; CHECK-NEXT:    movl %r15d, %ecx
-; CHECK-NEXT:    incl %ecx
+; CHECK-NEXT:    movl %ebp, %eax
+; CHECK-NEXT:    incl %ebp
+; CHECK-NEXT:    leal 1(%r14,%rax), %eax
 ; CHECK-NEXT:    cmpl $5, %eax
-; CHECK-NEXT:    movl %ecx, %r15d
 ; CHECK-NEXT:    jne .LBB21_2
 ; CHECK-NEXT:  # %bb.3:
 ; CHECK-NEXT:    popq %rbx
 ; CHECK-NEXT:    popq %r14
-; CHECK-NEXT:    popq %r15
+; CHECK-NEXT:    popq %rbp
 ; CHECK-NEXT:  .LBB21_4: # %for.cond.cleanup
 ; CHECK-NEXT:    retq
 entry:
@@ -1251,32 +1249,31 @@ define void @simple_urem_to_sel_non_zero_start_through_add_fail_missing_nuw(i32
 ; CHECK-NEXT:    cmpl $3, %edi
 ; CHECK-NEXT:    jb .LBB22_4
 ; CHECK-NEXT:  # %bb.1: # %for.body.preheader
-; CHECK-NEXT:    pushq %r15
+; CHECK-NEXT:    pushq %rbp
 ; CHECK-NEXT:    pushq %r14
 ; CHECK-NEXT:    pushq %rbx
 ; CHECK-NEXT:    movl %esi, %ebx
 ; CHECK-NEXT:    movl %edi, %r14d
 ; CHECK-NEXT:    orl $16, %ebx
 ; CHECK-NEXT:    negl %r14d
-; CHECK-NEXT:    movl $7, %r15d
+; CHECK-NEXT:    movl $7, %ebp
 ; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB22_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    movl %r15d, %eax
+; CHECK-NEXT:    movl %ebp, %eax
 ; CHECK-NEXT:    xorl %edx, %edx
 ; CHECK-NEXT:    divl %ebx
 ; CHECK-NEXT:    movl %edx, %edi
 ; CHECK-NEXT:    callq use.i32@PLT
-; CHECK-NEXT:    leal 1(%r14,%r15), %eax
-; CHECK-NEXT:    movl %r15d, %ecx
-; CHECK-NEXT:    incl %ecx
+; CHECK-NEXT:    movl %ebp, %eax
+; CHECK-NEXT:    incl %ebp
+; CHECK-NEXT:    leal 1(%r14,%rax), %eax
 ; CHECK-NEXT:    cmpl $5, %eax
-; CHECK-NEXT:    movl %ecx, %r15d
 ; CHECK-NEXT:    jne .LBB22_2
 ; CHECK-NEXT:  # %bb.3:
 ; CHECK-NEXT:    popq %rbx
 ; CHECK-NEXT:    popq %r14
-; CHECK-NEXT:    popq %r15
+; CHECK-NEXT:    popq %rbp
 ; CHECK-NEXT:  .LBB22_4: # %for.cond.cleanup
 ; CHECK-NEXT:    retq
 entry:
@@ -1303,31 +1300,30 @@ define void @simple_urem_to_sel_non_zero_start_through_add_fail_no_simplify_rem(
 ; CHECK-NEXT:    cmpl $3, %edi
 ; CHECK-NEXT:    jb .LBB23_4
 ; CHECK-NEXT:  # %bb.1: # %for.body.preheader
-; CHECK-NEXT:    pushq %r15
+; CHECK-NEXT:    pushq %rbp
 ; CHECK-NEXT:    pushq %r14
 ; CHECK-NEXT:    pushq %rbx
 ; CHECK-NEXT:    movl %esi, %ebx
 ; CHECK-NEXT:    movl %edi, %r14d
 ; CHECK-NEXT:    negl %r14d
-; CHECK-NEXT:    movl $7, %r15d
+; CHECK-NEXT:    movl $7, %ebp
 ; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB23_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    movl %r15d, %eax
+; CHECK-NEXT:    movl %ebp, %eax
 ; CHECK-NEXT:    xorl %edx, %edx
 ; CHECK-NEXT:    divl %ebx
 ; CHECK-NEXT:    movl %edx, %edi
 ; CHECK-NEXT:    callq use.i32@PLT
-; CHECK-NEXT:    leal 1(%r14,%r15), %eax
-; CHECK-NEXT:    movl %r15d, %ecx
-; CHECK-NEXT:    incl %ecx
+; CHECK-NEXT:    movl %ebp, %eax
+; CHECK-NEXT:    incl %ebp
+; CHECK-NEXT:    leal 1(%r14,%rax), %eax
 ; CHECK-NEXT:    cmpl $5, %eax
-; CHECK-NEXT:    movl %ecx, %r15d
 ; CHECK-NEXT:    jne .LBB23_2
 ; CHECK-NEXT:  # %bb.3:
 ; CHECK-NEXT:    popq %rbx
 ; CHECK-NEXT:    popq %r14
-; CHECK-NEXT:    popq %r15
+; CHECK-NEXT:    popq %rbp
 ; CHECK-NEXT:  .LBB23_4: # %for.cond.cleanup
 ; CHECK-NEXT:    retq
 entry:
@@ -1404,32 +1400,31 @@ define void @simple_urem_to_sel_non_zero_start_through_sub_no_simplfy(i32 %N, i3
 ; CHECK-NEXT:    cmpl %edx, %edi
 ; CHECK-NEXT:    jbe .LBB25_4
 ; CHECK-NEXT:  # %bb.1: # %for.body.preheader
-; CHECK-NEXT:    pushq %r15
+; CHECK-NEXT:    pushq %rbp
 ; CHECK-NEXT:    pushq %r14
 ; CHECK-NEXT:    pushq %rbx
-; CHECK-NEXT:    movl %edx, %r15d
-; CHECK-NEXT:    movl %esi, %ebx
+; CHECK-NEXT:    movl %edx, %ebx
+; CHECK-NEXT:    movl %esi, %ebp
 ; CHECK-NEXT:    movl %edi, %r14d
 ; CHECK-NEXT:    negl %r14d
-; CHECK-NEXT:    addl $-2, %r15d
+; CHECK-NEXT:    addl $-2, %ebx
 ; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB25_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    movl %r15d, %eax
+; CHECK-NEXT:    movl %ebx, %eax
 ; CHECK-NEXT:    xorl %edx, %edx
-; CHECK-NEXT:    divl %ebx
+; CHECK-NEXT:    divl %ebp
 ; CHECK-NEXT:    movl %edx, %edi
 ; CHECK-NEXT:    callq use.i32@PLT
-; CHECK-NEXT:    leal 1(%r14,%r15), %eax
-; CHECK-NEXT:    movl %r15d, %ecx
-; CHECK-NEXT:    incl %ecx
+; CHECK-NEXT:    movl %ebx, %eax
+; CHECK-NEXT:    incl %ebx
+; CHECK-NEXT:    leal 1(%r14,%rax), %ea...
[truncated]

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment

Projects

None yet

Development

Successfully merging this pull request may close these issues.

3 participants