
Commit e3eb142

Author: Vasileios Porpodas
[Spill2Reg] Use AVX opcodes when available
This patch updates the vector spill/reload instructions to use the AVX opcodes by default when the target supports them. This can be turned off with the -spill2reg-no-avx flag. Original review: https://reviews.llvm.org/D118951
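For reference, the RUN lines in the updated tests below exercise both paths. A sketch of the two invocations (the input filename here is hypothetical; the flags are taken from this commit):

  llc spill.ll -o - -mtriple=x86_64-unknown-linux -enable-spill2reg -mattr=+avx
  llc spill.ll -o - -mtriple=x86_64-unknown-linux -enable-spill2reg -mattr=+avx -spill2reg-no-avx

With AVX available the spill/reload moves are emitted as vmovd (see the AVX check lines in the tests below); with -spill2reg-no-avx the pass falls back to the SSE movd forms.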
1 parent a765fe3 commit e3eb142

12 files changed: +424 −25 lines

llvm/include/llvm/CodeGen/TargetInstrInfo.h
Lines changed: 9 additions & 10 deletions

@@ -2307,6 +2307,7 @@ class TargetInstrInfo : public MCInstrInfo {
 
   virtual const TargetRegisterClass *
   getVectorRegisterClassForSpill2Reg(const TargetRegisterInfo *TRI,
+                                     const TargetSubtargetInfo *STI,
                                      Register SpilledReg) const {
     llvm_unreachable(
         "Target didn't implement "
@@ -2322,21 +2323,19 @@ class TargetInstrInfo : public MCInstrInfo {
   }
 
   /// Inserts \p SrcReg into the first lane of \p DstReg.
-  virtual MachineInstr *
-  spill2RegInsertToVectorReg(Register DstReg, Register SrcReg,
-                             int OperationBits, MachineBasicBlock *MBB,
-                             MachineBasicBlock::iterator InsertBeforeIt,
-                             const TargetRegisterInfo *TRI) const {
+  virtual MachineInstr *spill2RegInsertToVectorReg(
+      Register DstReg, Register SrcReg, int OperationBits,
+      MachineBasicBlock *MBB, MachineBasicBlock::iterator InsertBeforeIt,
+      const TargetRegisterInfo *TRI, const TargetSubtargetInfo *STI) const {
     llvm_unreachable(
         "Target didn't implement TargetInstrInfo::spill2RegInsertToVectorReg!");
   }
 
   /// Extracts the first lane of \p SrcReg into \p DstReg.
-  virtual MachineInstr *
-  spill2RegExtractFromVectorReg(Register DstReg, Register SrcReg,
-                                int OperationBits, MachineBasicBlock *InsertMBB,
-                                MachineBasicBlock::iterator InsertBeforeIt,
-                                const TargetRegisterInfo *TRI) const {
+  virtual MachineInstr *spill2RegExtractFromVectorReg(
+      Register DstReg, Register SrcReg, int OperationBits,
+      MachineBasicBlock *InsertMBB, MachineBasicBlock::iterator InsertBeforeIt,
+      const TargetRegisterInfo *TRI, const TargetSubtargetInfo *STI) const {
     llvm_unreachable("Target didn't implement "
                      "TargetInstrInfo::spill2RegExtractFromVectorReg!");
   }

llvm/lib/CodeGen/Spill2Reg.cpp
Lines changed: 5 additions & 3 deletions

@@ -345,7 +345,7 @@ void Spill2Reg::replaceStackWithReg(StackSlotDataEntry &Entry,
 
     TII->spill2RegInsertToVectorReg(
         VectorReg, OldReg, SpillData.SpillBits, StackSpill->getParent(),
-        /*InsertBeforeIt=*/StackSpill->getIterator(), TRI);
+        /*InsertBeforeIt=*/StackSpill->getIterator(), TRI, &MF->getSubtarget());
 
     // Mark VectorReg as live in the instr's BB.
     LRUs[StackSpill->getParent()].addReg(VectorReg);
@@ -362,7 +362,8 @@ void Spill2Reg::replaceStackWithReg(StackSlotDataEntry &Entry,
 
     TII->spill2RegExtractFromVectorReg(
         OldReg, VectorReg, ReloadData.SpillBits, StackReload->getParent(),
-        /*InsertBeforeIt=*/StackReload->getIterator(), TRI);
+        /*InsertBeforeIt=*/StackReload->getIterator(), TRI,
+        &MF->getSubtarget());
 
     // Mark VectorReg as live in the instr's BB.
     LRUs[StackReload->getParent()].addReg(VectorReg);
@@ -473,7 +474,8 @@ void Spill2Reg::generateCode() {
 
     // Look for a physical register that in LRU.
     std::optional<MCRegister> PhysVectorRegOpt = tryGetFreePhysicalReg(
-        TII->getVectorRegisterClassForSpill2Reg(TRI, Entry.getSpilledReg()),
+        TII->getVectorRegisterClassForSpill2Reg(TRI, &MF->getSubtarget(),
+                                                Entry.getSpilledReg()),
         LRU);
     if (!PhysVectorRegOpt)
       continue;

llvm/lib/Target/X86/X86InstrInfo.cpp
Lines changed: 29 additions & 10 deletions

@@ -94,6 +94,10 @@ static cl::opt<int> Spill2RegExplorationDst(
     cl::desc("When checking for profitability, explore nearby instructions "
              "at this maximum distance."));
 
+static cl::opt<bool> Spill2RegNoAVX(
+    "spill2reg-no-avx", cl::Hidden, cl::init(false),
+    cl::desc("Don't use AVX instructions even if the target supports them."));
+
 // Pin the vtable to this file.
 void X86InstrInfo::anchor() {}
 
@@ -10967,11 +10971,18 @@ bool X86InstrInfo::targetSupportsSpill2Reg(
   return X86STI->hasSSE41();
 }
 
+static inline bool useAVX(const TargetSubtargetInfo *STI) {
+  const X86Subtarget *X86STI = static_cast<const X86Subtarget *>(STI);
+  bool UseAVX = X86STI->hasAVX() && !Spill2RegNoAVX;
+  return UseAVX;
+}
+
 const TargetRegisterClass *
 X86InstrInfo::getVectorRegisterClassForSpill2Reg(const TargetRegisterInfo *TRI,
+                                                 const TargetSubtargetInfo *STI,
                                                  Register SpilledReg) const {
-  const TargetRegisterClass *VecRegClass =
-      TRI->getRegClass(X86::VR128RegClassID);
+  const TargetRegisterClass *VecRegClass = TRI->getRegClass(
+      useAVX(STI) ? X86::VR128XRegClassID : X86::VR128RegClassID);
   return VecRegClass;
 }
 
@@ -11049,14 +11060,22 @@ bool X86InstrInfo::isSpill2RegProfitable(const MachineInstr *MI,
   return MemHeuristic && VecHeuristic;
 }
 
-static unsigned getInsertOrExtractOpcode(unsigned Bits, bool Insert) {
+static unsigned getInsertOrExtractOpcode(unsigned Bits, bool Insert,
+                                         const TargetSubtargetInfo *STI) {
+  bool UseAVX = useAVX(STI);
   switch (Bits) {
   case 8:
   case 16:
   case 32:
-    return Insert ? X86::MOVDI2PDIrr : X86::MOVPDI2DIrr;
+    if (UseAVX)
+      return Insert ? X86::VMOVDI2PDIZrr : X86::VMOVPDI2DIZrr;
+    else
+      return Insert ? X86::MOVDI2PDIrr : X86::MOVPDI2DIrr;
   case 64:
-    return Insert ? X86::MOV64toPQIrr : X86::MOVPQIto64rr;
+    if (UseAVX)
+      return Insert ? X86::VMOV64toPQIZrr : X86::VMOVPQIto64Zrr;
+    else
+      return Insert ? X86::MOV64toPQIrr : X86::MOVPQIto64rr;
   default:
     llvm_unreachable("Unsupported bits");
   }
@@ -11094,11 +11113,11 @@ X86InstrInfo::getMovdCompatibleReg(MCRegister OldReg, uint32_t OldRegBits,
 
 MachineInstr *X86InstrInfo::spill2RegInsertToVectorReg(
     Register DstReg, Register SrcReg, int OperationBits, MachineBasicBlock *MBB,
-    MachineBasicBlock::iterator InsertBeforeIt,
-    const TargetRegisterInfo *TRI) const {
+    MachineBasicBlock::iterator InsertBeforeIt, const TargetRegisterInfo *TRI,
+    const TargetSubtargetInfo *STI) const {
   DebugLoc DL;
   unsigned InsertOpcode =
-      getInsertOrExtractOpcode(OperationBits, true /*insert*/);
+      getInsertOrExtractOpcode(OperationBits, true /*insert*/, STI);
   const MCInstrDesc &InsertMCID = get(InsertOpcode);
   // `movd` does not support 8/16 bit operands. Instead, we use a 32-bit
   // register. For example:
@@ -11114,10 +11133,10 @@ MachineInstr *X86InstrInfo::spill2RegInsertToVectorReg(
 MachineInstr *X86InstrInfo::spill2RegExtractFromVectorReg(
     Register DstReg, Register SrcReg, int OperationBits,
     MachineBasicBlock *InsertMBB, MachineBasicBlock::iterator InsertBeforeIt,
-    const TargetRegisterInfo *TRI) const {
+    const TargetRegisterInfo *TRI, const TargetSubtargetInfo *STI) const {
   DebugLoc DL;
   unsigned ExtractOpcode =
-      getInsertOrExtractOpcode(OperationBits, false /*extract*/);
+      getInsertOrExtractOpcode(OperationBits, false /*extract*/, STI);
   const MCInstrDesc &ExtractMCID = get(ExtractOpcode);
   // `movd` does not support 8/16 bit operands. Instead, we use a 32-bit
   // register. For example:
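In summary, the opcode choice made by getInsertOrExtractOpcode is now per-subtarget; read off the switch above, the mapping is:

  8/16/32 bits: insert MOVDI2PDIrr, extract MOVPDI2DIrr (SSE) vs. insert VMOVDI2PDIZrr, extract VMOVPDI2DIZrr (AVX)
  64 bits:      insert MOV64toPQIrr, extract MOVPQIto64rr (SSE) vs. insert VMOV64toPQIZrr, extract VMOVPQIto64Zrr (AVX)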

llvm/lib/Target/X86/X86InstrInfo.h
Lines changed: 5 additions & 2 deletions

@@ -750,6 +750,7 @@ class X86InstrInfo final : public X86GenInstrInfo {
 
   const TargetRegisterClass *
   getVectorRegisterClassForSpill2Reg(const TargetRegisterInfo *TRI,
+                                     const TargetSubtargetInfo *STI,
                                      Register SpilledReg) const override;
 
   bool isSpill2RegProfitable(const MachineInstr *MI,
@@ -764,13 +765,15 @@ class X86InstrInfo final : public X86GenInstrInfo {
   spill2RegInsertToVectorReg(Register DstReg, Register SrcReg,
                              int OperationBits, MachineBasicBlock *MBB,
                              MachineBasicBlock::iterator InsertBeforeIt,
-                             const TargetRegisterInfo *TRI) const override;
+                             const TargetRegisterInfo *TRI,
+                             const TargetSubtargetInfo *STI) const override;
 
   MachineInstr *
   spill2RegExtractFromVectorReg(Register DstReg, Register SrcReg,
                                 int OperationBits, MachineBasicBlock *InsertMBB,
                                 MachineBasicBlock::iterator InsertBeforeIt,
-                                const TargetRegisterInfo *TRI) const override;
+                                const TargetRegisterInfo *TRI,
+                                const TargetSubtargetInfo *STI) const override;
 };
 } // namespace llvm

llvm/test/CodeGen/X86/spill2reg_end_to_end_16bit.ll
Lines changed: 85 additions & 0 deletions

@@ -1,5 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc %s -o - -mtriple=x86_64-unknown-linux -enable-spill2reg -mattr=+sse4.1 | FileCheck %s
+; RUN: llc %s -o - -mtriple=x86_64-unknown-linux -enable-spill2reg -mattr=+avx | FileCheck --check-prefix=AVX %s
 
 ; End-to-end check that Spill2Reg works with 16-bit registers.
 
@@ -130,6 +131,90 @@ define dso_local void @_Z5spillv() local_unnamed_addr #0 {
 ; CHECK-NEXT:    popq %rbp
 ; CHECK-NEXT:    .cfi_def_cfa_offset 8
 ; CHECK-NEXT:    retq
+;
+; AVX-LABEL: _Z5spillv:
+; AVX:       # %bb.0: # %entry
+; AVX-NEXT:    pushq %rbp
+; AVX-NEXT:    .cfi_def_cfa_offset 16
+; AVX-NEXT:    pushq %r15
+; AVX-NEXT:    .cfi_def_cfa_offset 24
+; AVX-NEXT:    pushq %r14
+; AVX-NEXT:    .cfi_def_cfa_offset 32
+; AVX-NEXT:    pushq %r13
+; AVX-NEXT:    .cfi_def_cfa_offset 40
+; AVX-NEXT:    pushq %r12
+; AVX-NEXT:    .cfi_def_cfa_offset 48
+; AVX-NEXT:    pushq %rbx
+; AVX-NEXT:    .cfi_def_cfa_offset 56
+; AVX-NEXT:    .cfi_offset %rbx, -56
+; AVX-NEXT:    .cfi_offset %r12, -48
+; AVX-NEXT:    .cfi_offset %r13, -40
+; AVX-NEXT:    .cfi_offset %r14, -32
+; AVX-NEXT:    .cfi_offset %r15, -24
+; AVX-NEXT:    .cfi_offset %rbp, -16
+; AVX-NEXT:    movw D0(%rip), %ax
+; AVX-NEXT:    movw %ax, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX-NEXT:    movzwl D1(%rip), %ecx
+; AVX-NEXT:    movzwl D2(%rip), %edx
+; AVX-NEXT:    movzwl D3(%rip), %esi
+; AVX-NEXT:    movzwl D4(%rip), %edi
+; AVX-NEXT:    movzwl D5(%rip), %r8d
+; AVX-NEXT:    movzwl D6(%rip), %r9d
+; AVX-NEXT:    movzwl D7(%rip), %r10d
+; AVX-NEXT:    movzwl D8(%rip), %r11d
+; AVX-NEXT:    movzwl D9(%rip), %ebx
+; AVX-NEXT:    movzwl D10(%rip), %ebp
+; AVX-NEXT:    movzwl D11(%rip), %r14d
+; AVX-NEXT:    movzwl D12(%rip), %r15d
+; AVX-NEXT:    movzwl D13(%rip), %r12d
+; AVX-NEXT:    movzwl D14(%rip), %r13d
+; AVX-NEXT:    movw D15(%rip), %ax
+; AVX-NEXT:    vmovd %eax, %xmm0
+; AVX-NEXT:    movw D16(%rip), %ax
+; AVX-NEXT:    movw %ax, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX-NEXT:    movw D17(%rip), %ax
+; AVX-NEXT:    vmovd %eax, %xmm1
+; AVX-NEXT:    movzwl D18(%rip), %eax
+; AVX-NEXT:    movw %ax, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX-NEXT:    #APP
+; AVX-NEXT:    #NO_APP
+; AVX-NEXT:    movzwl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 2-byte Folded Reload
+; AVX-NEXT:    movw %ax, U0(%rip)
+; AVX-NEXT:    movw %cx, U1(%rip)
+; AVX-NEXT:    movw %dx, U2(%rip)
+; AVX-NEXT:    movw %si, U3(%rip)
+; AVX-NEXT:    movw %di, U4(%rip)
+; AVX-NEXT:    movw %r8w, U5(%rip)
+; AVX-NEXT:    movw %r9w, U6(%rip)
+; AVX-NEXT:    movw %r10w, U7(%rip)
+; AVX-NEXT:    movw %r11w, U8(%rip)
+; AVX-NEXT:    movw %bx, U9(%rip)
+; AVX-NEXT:    movw %bp, U10(%rip)
+; AVX-NEXT:    movw %r14w, U11(%rip)
+; AVX-NEXT:    movw %r15w, U12(%rip)
+; AVX-NEXT:    movw %r12w, U13(%rip)
+; AVX-NEXT:    movw %r13w, U14(%rip)
+; AVX-NEXT:    vmovd %xmm0, %eax
+; AVX-NEXT:    movw %ax, U15(%rip)
+; AVX-NEXT:    movzwl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 2-byte Folded Reload
+; AVX-NEXT:    movw %ax, U16(%rip)
+; AVX-NEXT:    vmovd %xmm1, %eax
+; AVX-NEXT:    movw %ax, U17(%rip)
+; AVX-NEXT:    movzwl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 2-byte Folded Reload
+; AVX-NEXT:    movw %ax, U18(%rip)
+; AVX-NEXT:    popq %rbx
+; AVX-NEXT:    .cfi_def_cfa_offset 48
+; AVX-NEXT:    popq %r12
+; AVX-NEXT:    .cfi_def_cfa_offset 40
+; AVX-NEXT:    popq %r13
+; AVX-NEXT:    .cfi_def_cfa_offset 32
+; AVX-NEXT:    popq %r14
+; AVX-NEXT:    .cfi_def_cfa_offset 24
+; AVX-NEXT:    popq %r15
+; AVX-NEXT:    .cfi_def_cfa_offset 16
+; AVX-NEXT:    popq %rbp
+; AVX-NEXT:    .cfi_def_cfa_offset 8
+; AVX-NEXT:    retq
 entry:
   %0 = load i16, i16* @D0
   %1 = load i16, i16* @D1

llvm/test/CodeGen/X86/spill2reg_end_to_end_32bit.ll
Lines changed: 85 additions & 0 deletions

@@ -1,5 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc %s -o - -mtriple=x86_64-unknown-linux -enable-spill2reg -mattr=+sse4.1 | FileCheck %s
+; RUN: llc %s -o - -mtriple=x86_64-unknown-linux -enable-spill2reg -mattr=+avx | FileCheck --check-prefix=AVX %s
 
 ; End-to-end check that Spill2Reg works with 32-bit registers.
 
@@ -130,6 +131,90 @@ define dso_local void @_Z5spillv() local_unnamed_addr #0 {
 ; CHECK-NEXT:    popq %rbp
 ; CHECK-NEXT:    .cfi_def_cfa_offset 8
 ; CHECK-NEXT:    retq
+;
+; AVX-LABEL: _Z5spillv:
+; AVX:       # %bb.0: # %entry
+; AVX-NEXT:    pushq %rbp
+; AVX-NEXT:    .cfi_def_cfa_offset 16
+; AVX-NEXT:    pushq %r15
+; AVX-NEXT:    .cfi_def_cfa_offset 24
+; AVX-NEXT:    pushq %r14
+; AVX-NEXT:    .cfi_def_cfa_offset 32
+; AVX-NEXT:    pushq %r13
+; AVX-NEXT:    .cfi_def_cfa_offset 40
+; AVX-NEXT:    pushq %r12
+; AVX-NEXT:    .cfi_def_cfa_offset 48
+; AVX-NEXT:    pushq %rbx
+; AVX-NEXT:    .cfi_def_cfa_offset 56
+; AVX-NEXT:    .cfi_offset %rbx, -56
+; AVX-NEXT:    .cfi_offset %r12, -48
+; AVX-NEXT:    .cfi_offset %r13, -40
+; AVX-NEXT:    .cfi_offset %r14, -32
+; AVX-NEXT:    .cfi_offset %r15, -24
+; AVX-NEXT:    .cfi_offset %rbp, -16
+; AVX-NEXT:    movl D0(%rip), %eax
+; AVX-NEXT:    movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; AVX-NEXT:    movl D1(%rip), %ecx
+; AVX-NEXT:    movl D2(%rip), %edx
+; AVX-NEXT:    movl D3(%rip), %esi
+; AVX-NEXT:    movl D4(%rip), %edi
+; AVX-NEXT:    movl D5(%rip), %r8d
+; AVX-NEXT:    movl D6(%rip), %r9d
+; AVX-NEXT:    movl D7(%rip), %r10d
+; AVX-NEXT:    movl D8(%rip), %r11d
+; AVX-NEXT:    movl D9(%rip), %ebx
+; AVX-NEXT:    movl D10(%rip), %ebp
+; AVX-NEXT:    movl D11(%rip), %r14d
+; AVX-NEXT:    movl D12(%rip), %r15d
+; AVX-NEXT:    movl D13(%rip), %r12d
+; AVX-NEXT:    movl D14(%rip), %r13d
+; AVX-NEXT:    movl D15(%rip), %eax
+; AVX-NEXT:    vmovd %eax, %xmm0
+; AVX-NEXT:    movl D16(%rip), %eax
+; AVX-NEXT:    movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; AVX-NEXT:    movl D17(%rip), %eax
+; AVX-NEXT:    vmovd %eax, %xmm1
+; AVX-NEXT:    movl D18(%rip), %eax
+; AVX-NEXT:    movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; AVX-NEXT:    #APP
+; AVX-NEXT:    #NO_APP
+; AVX-NEXT:    movl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 4-byte Reload
+; AVX-NEXT:    movl %eax, U0(%rip)
+; AVX-NEXT:    movl %ecx, U1(%rip)
+; AVX-NEXT:    movl %edx, U2(%rip)
+; AVX-NEXT:    movl %esi, U3(%rip)
+; AVX-NEXT:    movl %edi, U4(%rip)
+; AVX-NEXT:    movl %r8d, U5(%rip)
+; AVX-NEXT:    movl %r9d, U6(%rip)
+; AVX-NEXT:    movl %r10d, U7(%rip)
+; AVX-NEXT:    movl %r11d, U8(%rip)
+; AVX-NEXT:    movl %ebx, U9(%rip)
+; AVX-NEXT:    movl %ebp, U10(%rip)
+; AVX-NEXT:    movl %r14d, U11(%rip)
+; AVX-NEXT:    movl %r15d, U12(%rip)
+; AVX-NEXT:    movl %r12d, U13(%rip)
+; AVX-NEXT:    movl %r13d, U14(%rip)
+; AVX-NEXT:    vmovd %xmm0, %eax
+; AVX-NEXT:    movl %eax, U15(%rip)
+; AVX-NEXT:    movl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 4-byte Reload
+; AVX-NEXT:    movl %eax, U16(%rip)
+; AVX-NEXT:    vmovd %xmm1, %eax
+; AVX-NEXT:    movl %eax, U17(%rip)
+; AVX-NEXT:    movl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 4-byte Reload
+; AVX-NEXT:    movl %eax, U18(%rip)
+; AVX-NEXT:    popq %rbx
+; AVX-NEXT:    .cfi_def_cfa_offset 48
+; AVX-NEXT:    popq %r12
+; AVX-NEXT:    .cfi_def_cfa_offset 40
+; AVX-NEXT:    popq %r13
+; AVX-NEXT:    .cfi_def_cfa_offset 32
+; AVX-NEXT:    popq %r14
+; AVX-NEXT:    .cfi_def_cfa_offset 24
+; AVX-NEXT:    popq %r15
+; AVX-NEXT:    .cfi_def_cfa_offset 16
+; AVX-NEXT:    popq %rbp
+; AVX-NEXT:    .cfi_def_cfa_offset 8
+; AVX-NEXT:    retq
 entry:
   %0 = load i32, i32* @D0
   %1 = load i32, i32* @D1