
Commit c5be0e0

[X86] Fix tile register spill issue.
The tile register spill needs 2 instructions:

  %46:gr64_nosp = MOV64ri 64
  TILESTORED %stack.2, 1, killed %46:gr64_nosp, 0, $noreg, %43:tile

The first instruction loads the stride into a GPR, and the second stores the tile register to the stack slot. The optimization that merges spill instructions runs after register allocation, and spilling a tile register requires creating a new virtual register for the stride, so we can't hoist the tile spill instruction in postOptimization() of register allocation. We can't hoist TILESTORED alone, and we can't hoist the 2 instructions together because MOV64ri would clobber some GPR. This patch disables the spill merge for any spill that needs 2 instructions.

Differential Revision: https://reviews.llvm.org/D93898
1 parent 1677c86 commit c5be0e0
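Condensed, the fix wraps each spill-insertion point in a MachineInstrSpan, indexes everything storeRegToStackSlot emitted, creates live intervals for any virtual registers those instructions define (the stride GPR in the AMX case), and keeps the spill on the mergeable list only when a single instruction was emitted. A minimal sketch of the pattern, paraphrasing the insertSpill hunk below (IsRealSpill handling omitted; it relies on InlineSpiller's members and is not a standalone compilable unit):

    MachineInstrSpan MIS(MI, &MBB);                       // span over the code inserted after MI
    // Target hook: for an AMX tile this expands to two instructions,
    // MOV64ri (stride) + TILESTORED, instead of a single store.
    TII.storeRegToStackSlot(MBB, std::next(MI), NewVReg, isKill, StackSlot,
                            MRI.getRegClass(NewVReg), &TRI);
    MachineBasicBlock::iterator Spill = std::next(MI);    // first emitted instruction
    LIS.InsertMachineInstrRangeInMaps(Spill, MIS.end());  // give every new instruction a slot index
    for (const MachineInstr &EmittedMI : make_range(Spill, MIS.end()))
      getVDefInterval(EmittedMI, LIS);                    // build intervals for new vregs (e.g. the stride GPR)
    // A spill that needed more than one instruction (the AMX tile case) stays
    // off the mergeable list, so the later spill-merge step leaves it alone.
    if (std::distance(Spill, MIS.end()) <= 1)
      HSpiller.addToMergeableSpills(*Spill, StackSlot, Original);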

2 files changed: +166 −5 lines

llvm/lib/CodeGen/InlineSpiller.cpp

Lines changed: 33 additions & 5 deletions
@@ -269,6 +269,14 @@ static Register isFullCopyOf(const MachineInstr &MI, Register Reg) {
   return Register();
 }
 
+static void getVDefInterval(const MachineInstr &MI, LiveIntervals &LIS) {
+  for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
+    const MachineOperand &MO = MI.getOperand(I);
+    if (MO.isReg() && MO.isDef() && Register::isVirtualRegister(MO.getReg()))
+      LIS.getInterval(MO.getReg());
+  }
+}
+
 /// isSnippet - Identify if a live interval is a snippet that should be spilled.
 /// It is assumed that SnipLI is a virtual register with the same original as
 /// Edit->getReg().
@@ -410,14 +418,21 @@ bool InlineSpiller::hoistSpillInsideBB(LiveInterval &SpillLI,
     MII = DefMI;
     ++MII;
   }
+  MachineInstrSpan MIS(MII, MBB);
   // Insert spill without kill flag immediately after def.
   TII.storeRegToStackSlot(*MBB, MII, SrcReg, false, StackSlot,
                           MRI.getRegClass(SrcReg), &TRI);
+  LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MII);
+  for (const MachineInstr &MI : make_range(MIS.begin(), MII))
+    getVDefInterval(MI, LIS);
   --MII; // Point to store instruction.
-  LIS.InsertMachineInstrInMaps(*MII);
   LLVM_DEBUG(dbgs() << "\thoisted: " << SrcVNI->def << '\t' << *MII);
 
-  HSpiller.addToMergeableSpills(*MII, StackSlot, Original);
+  // If there is only 1 store instruction is required for spill, add it
+  // to mergeable list. In X86 AMX, 2 intructions are required to store.
+  // We disable the merge for this case.
+  if (std::distance(MIS.begin(), MII) <= 1)
+    HSpiller.addToMergeableSpills(*MII, StackSlot, Original);
   ++NumSpills;
   return true;
 }
@@ -918,7 +933,11 @@ foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>> Ops,
     ++NumFolded;
   else if (Ops.front().second == 0) {
     ++NumSpills;
-    HSpiller.addToMergeableSpills(*FoldMI, StackSlot, Original);
+    // If there is only 1 store instruction is required for spill, add it
+    // to mergeable list. In X86 AMX, 2 intructions are required to store.
+    // We disable the merge for this case.
+    if (std::distance(MIS.begin(), MIS.end()) <= 1)
+      HSpiller.addToMergeableSpills(*FoldMI, StackSlot, Original);
   } else
     ++NumReloads;
   return true;
@@ -965,6 +984,7 @@ void InlineSpiller::insertSpill(Register NewVReg, bool isKill,
   MachineInstrSpan MIS(MI, &MBB);
   MachineBasicBlock::iterator SpillBefore = std::next(MI);
   bool IsRealSpill = isRealSpill(*MI);
+
   if (IsRealSpill)
     TII.storeRegToStackSlot(MBB, SpillBefore, NewVReg, isKill, StackSlot,
                             MRI.getRegClass(NewVReg), &TRI);
@@ -978,11 +998,16 @@ void InlineSpiller::insertSpill(Register NewVReg, bool isKill,
 
   MachineBasicBlock::iterator Spill = std::next(MI);
   LIS.InsertMachineInstrRangeInMaps(Spill, MIS.end());
+  for (const MachineInstr &MI : make_range(Spill, MIS.end()))
+    getVDefInterval(MI, LIS);
 
   LLVM_DEBUG(
       dumpMachineInstrRangeWithSlotIndex(Spill, MIS.end(), LIS, "spill"));
   ++NumSpills;
-  if (IsRealSpill)
+  // If there is only 1 store instruction is required for spill, add it
+  // to mergeable list. In X86 AMX, 2 intructions are required to store.
+  // We disable the merge for this case.
+  if (IsRealSpill && std::distance(Spill, MIS.end()) <= 1)
     HSpiller.addToMergeableSpills(*Spill, StackSlot, Original);
 }
 
@@ -1529,9 +1554,12 @@ void HoistSpillHelper::hoistAllSpills() {
     MachineBasicBlock *BB = Insert.first;
     Register LiveReg = Insert.second;
     MachineBasicBlock::iterator MI = IPA.getLastInsertPointIter(OrigLI, *BB);
+    MachineInstrSpan MIS(MI, BB);
    TII.storeRegToStackSlot(*BB, MI, LiveReg, false, Slot,
                            MRI.getRegClass(LiveReg), &TRI);
-    LIS.InsertMachineInstrRangeInMaps(std::prev(MI), MI);
+    LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MI);
+    for (const MachineInstr &MI : make_range(MIS.begin(), MI))
+      getVDefInterval(MI, LIS);
     ++NumSpills;
   }
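For context on the getVDefInterval helper added above: querying LiveIntervals::getInterval for a virtual register computes the interval on demand when it does not exist yet, which is why a bare query is enough to register the freshly created stride vreg with the analysis. Roughly (paraphrased from LiveIntervals.h, not a verbatim copy):

    // Approximate shape of LiveIntervals::getInterval:
    LiveInterval &getInterval(Register Reg) {
      if (hasInterval(Reg))
        return *VirtRegIntervals[Reg.id()];
      // First query for a new virtual register: build its interval now.
      return createAndComputeVirtRegInterval(Reg);
    }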

Lines changed: 133 additions & 0 deletions
@@ -0,0 +1,133 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+amx-int8 -mattr=+avx512f -verify-machineinstrs | FileCheck %s
+
+@buf = dso_local global [3072 x i8] zeroinitializer, align 16
+
+define dso_local void @test_api(i16 signext %0, i16 signext %1) local_unnamed_addr {
+; CHECK-LABEL: test_api:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pushq %rbp
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: pushq %r15
+; CHECK-NEXT: .cfi_def_cfa_offset 24
+; CHECK-NEXT: pushq %r14
+; CHECK-NEXT: .cfi_def_cfa_offset 32
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: .cfi_def_cfa_offset 40
+; CHECK-NEXT: subq $4056, %rsp # imm = 0xFD8
+; CHECK-NEXT: .cfi_def_cfa_offset 4096
+; CHECK-NEXT: .cfi_offset %rbx, -40
+; CHECK-NEXT: .cfi_offset %r14, -32
+; CHECK-NEXT: .cfi_offset %r15, -24
+; CHECK-NEXT: .cfi_offset %rbp, -16
+; CHECK-NEXT: movl %esi, %ebx
+; CHECK-NEXT: movl %edi, %ebp
+; CHECK-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; CHECK-NEXT: vmovdqu64 %zmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT: movb $1, {{[0-9]+}}(%rsp)
+; CHECK-NEXT: movb %bpl, {{[0-9]+}}(%rsp)
+; CHECK-NEXT: movw $8, {{[0-9]+}}(%rsp)
+; CHECK-NEXT: movb $8, {{[0-9]+}}(%rsp)
+; CHECK-NEXT: movw %bx, {{[0-9]+}}(%rsp)
+; CHECK-NEXT: movb %bpl, {{[0-9]+}}(%rsp)
+; CHECK-NEXT: movw $8, {{[0-9]+}}(%rsp)
+; CHECK-NEXT: movb $8, {{[0-9]+}}(%rsp)
+; CHECK-NEXT: movw %bx, {{[0-9]+}}(%rsp)
+; CHECK-NEXT: movb %bpl, {{[0-9]+}}(%rsp)
+; CHECK-NEXT: movw $8, {{[0-9]+}}(%rsp)
+; CHECK-NEXT: movb %bpl, {{[0-9]+}}(%rsp)
+; CHECK-NEXT: movw %bx, {{[0-9]+}}(%rsp)
+; CHECK-NEXT: movb %bpl, {{[0-9]+}}(%rsp)
+; CHECK-NEXT: movw %bx, {{[0-9]+}}(%rsp)
+; CHECK-NEXT: ldtilecfg {{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl $32, %r14d
+; CHECK-NEXT: movl $buf+2048, %r15d
+; CHECK-NEXT: tileloadd (%r15,%r14), %tmm5
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: testb %al, %al
+; CHECK-NEXT: sttilecfg {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Folded Spill
+; CHECK-NEXT: movl $buf, %eax
+; CHECK-NEXT: movw $8, %cx
+; CHECK-NEXT: jne .LBB0_2
+; CHECK-NEXT: # %bb.1: # %if.true
+; CHECK-NEXT: tileloadd (%rax,%r14), %tmm0
+; CHECK-NEXT: movl $buf+1024, %eax
+; CHECK-NEXT: tileloadd (%rax,%r14), %tmm1
+; CHECK-NEXT: movabsq $64, %rax
+; CHECK-NEXT: tilestored %tmm5, 2048(%rsp,%rax) # 1024-byte Folded Spill
+; CHECK-NEXT: tdpbssd %tmm1, %tmm0, %tmm5
+; CHECK-NEXT: movabsq $64, %rax
+; CHECK-NEXT: tilestored %tmm5, 1024(%rsp,%rax) # 1024-byte Folded Spill
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: callq foo
+; CHECK-NEXT: ldtilecfg {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Folded Reload
+; CHECK-NEXT: movabsq $64, %rax
+; CHECK-NEXT: tileloadd 1024(%rsp,%rax), %tmm6 # 1024-byte Folded Reload
+; CHECK-NEXT: jmp .LBB0_3
+; CHECK-NEXT: .LBB0_2: # %if.false
+; CHECK-NEXT: tileloadd (%rax,%r14), %tmm2
+; CHECK-NEXT: movl $buf+1024, %eax
+; CHECK-NEXT: tileloadd (%rax,%r14), %tmm3
+; CHECK-NEXT: movabsq $64, %rax
+; CHECK-NEXT: tilestored %tmm5, 2048(%rsp,%rax) # 1024-byte Folded Spill
+; CHECK-NEXT: tdpbssd %tmm3, %tmm2, %tmm5
+; CHECK-NEXT: movabsq $64, %rax
+; CHECK-NEXT: tilestored %tmm5, 1024(%rsp,%rax) # 1024-byte Folded Spill
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: callq foo
+; CHECK-NEXT: ldtilecfg {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Folded Reload
+; CHECK-NEXT: movabsq $64, %rax
+; CHECK-NEXT: tileloadd 1024(%rsp,%rax), %tmm6 # 1024-byte Folded Reload
+; CHECK-NEXT: tilestored %tmm6, (%r15,%r14)
+; CHECK-NEXT: .LBB0_3: # %exit
+; CHECK-NEXT: movl $buf, %eax
+; CHECK-NEXT: movl $32, %ecx
+; CHECK-NEXT: movw $8, %dx
+; CHECK-NEXT: tileloadd (%rax,%rcx), %tmm4
+; CHECK-NEXT: movabsq $64, %rax
+; CHECK-NEXT: tileloadd 2048(%rsp,%rax), %tmm5 # 1024-byte Folded Reload
+; CHECK-NEXT: tdpbssd %tmm4, %tmm6, %tmm5
+; CHECK-NEXT: movl $buf+2048, %eax
+; CHECK-NEXT: tilestored %tmm5, (%rax,%rcx)
+; CHECK-NEXT: addq $4056, %rsp # imm = 0xFD8
+; CHECK-NEXT: .cfi_def_cfa_offset 40
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: .cfi_def_cfa_offset 32
+; CHECK-NEXT: popq %r14
+; CHECK-NEXT: .cfi_def_cfa_offset 24
+; CHECK-NEXT: popq %r15
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: popq %rbp
+; CHECK-NEXT: .cfi_def_cfa_offset 8
+; CHECK-NEXT: tilerelease
+; CHECK-NEXT: retq
+  %c = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 %0, i16 %1, i8* getelementptr inbounds ([3072 x i8], [3072 x i8]* @buf, i64 0, i64 2048), i64 32)
+  br i1 undef, label %if.true, label %if.false
+if.true:
+  %a1 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 %0, i16 8, i8* getelementptr inbounds ([3072 x i8], [3072 x i8]* @buf, i64 0, i64 0), i64 32)
+  %b1 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 8, i16 %1, i8* getelementptr inbounds ([3072 x i8], [3072 x i8]* @buf, i64 0, i64 1024), i64 32)
+  %d1 = tail call x86_amx @llvm.x86.tdpbssd.internal(i16 %0, i16 %1, i16 8, x86_amx %c, x86_amx %a1, x86_amx %b1)
+  tail call void (...) @foo()
+  br label %exit
+if.false:
+  %a2 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 %0, i16 8, i8* getelementptr inbounds ([3072 x i8], [3072 x i8]* @buf, i64 0, i64 0), i64 32)
+  %b2 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 8, i16 %1, i8* getelementptr inbounds ([3072 x i8], [3072 x i8]* @buf, i64 0, i64 1024), i64 32)
+  %d2 = tail call x86_amx @llvm.x86.tdpbssd.internal(i16 %0, i16 %1, i16 8, x86_amx %c, x86_amx %a2, x86_amx %b2)
+  tail call void (...) @foo()
+  tail call void @llvm.x86.tilestored64.internal(i16 %0, i16 %1, i8* getelementptr inbounds ([3072 x i8], [3072 x i8]* @buf, i64 0, i64 2048), i64 32, x86_amx %d2)
+  br label %exit
+exit:
+  %d = phi x86_amx [ %d1, %if.true ], [ %d2, %if.false ]
+  %a = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 %0, i16 8, i8* getelementptr inbounds ([3072 x i8], [3072 x i8]* @buf, i64 0, i64 0), i64 32)
+  %res = tail call x86_amx @llvm.x86.tdpbssd.internal(i16 %0, i16 %1, i16 8, x86_amx %c, x86_amx %d, x86_amx %a)
+  tail call void @llvm.x86.tilestored64.internal(i16 %0, i16 %1, i8* getelementptr inbounds ([3072 x i8], [3072 x i8]* @buf, i64 0, i64 2048), i64 32, x86_amx %res)
+  ret void
+}
+
+declare dso_local void @foo(...) local_unnamed_addr
+
+declare x86_amx @llvm.x86.tileloadd64.internal(i16, i16, i8*, i64)
+declare x86_amx @llvm.x86.tdpbssd.internal(i16, i16, i16, x86_amx, x86_amx, x86_amx)
+declare void @llvm.x86.tilestored64.internal(i16, i16, i8*, i64, x86_amx)
