Commit 3c609f3
[RISCV] Merge int_riscv_masked_atomicrmw_*_i32/i64 intrinsics using llvm_anyint_ty. (#154845)
I think having separate intrinsics for RV32 and RV64 is making some things more complicated than using type overloading. This reduces the number of isel patterns in the .td file. They're still expanded by HwMode, so it doesn't reduce the binary size. getIntrinsicForMaskedAtomicRMWBinOp no longer needs to look at XLen.
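Because intrinsic overloads are mangled into the name, the merged definitions keep the existing IR spellings: the overloaded int_riscv_masked_atomicrmw_add still appears as @llvm.riscv.masked.atomicrmw.add.i32.p0 on RV32 and @llvm.riscv.masked.atomicrmw.add.i64.p0 on RV64; the i32/i64 suffix now comes from the overload rather than from a separate record. A minimal C++ sketch of the caller-side pattern, mirroring the RISCVISelLowering.cpp change below (surrounding variables such as Subtarget, Builder, AlignedAddr, and AI are assumed to be in scope):

    // Request the overloaded declaration with explicit overload types.
    // Tys[0] picks the integer overload (iXLEN); Tys[1] picks the pointer type.
    unsigned XLen = Subtarget.getXLen();
    Type *Tys[] = {Builder.getIntNTy(XLen), AlignedAddr->getType()};
    Function *F = Intrinsic::getOrInsertDeclaration(
        AI->getModule(), Intrinsic::riscv_masked_atomicrmw_add, Tys);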
1 parent bcc27dc · commit 3c609f3

File tree: 3 files changed (+85 −146 lines)

llvm/include/llvm/IR/IntrinsicsRISCV.td

Lines changed: 22 additions & 36 deletions
@@ -15,8 +15,7 @@
 
 // Atomic Intrinsics have multiple versions for different access widths, which
 // all follow one of the following signatures (depending on how many arguments
-// they require). We carefully instantiate only specific versions of these for
-// specific integer widths, rather than using `llvm_anyint_ty`.
+// they require).
 //
 // In fact, as these intrinsics take `llvm_anyptr_ty`, the given names are the
 // canonical names, and the intrinsics used in the code will have a name
@@ -25,52 +24,39 @@
 
 let TargetPrefix = "riscv" in {
 
-  // T @llvm.<name>.T.<p>(any*, T, T, T imm);
-  class RISCVMaskedAtomicRMWFourArg<LLVMType itype>
-      : Intrinsic<[itype], [llvm_anyptr_ty, itype, itype, itype],
+  // T @llvm.<name>.<i>.<p>(any*, T, T, T imm);
+  class RISCVMaskedAtomicRMWFourArg
+      : Intrinsic<[llvm_anyint_ty], [llvm_anyptr_ty, LLVMMatchType<0>,
+                   LLVMMatchType<0>, LLVMMatchType<0>],
                   [IntrArgMemOnly, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<3>>]>;
-  // T @llvm.<name>.T.<p>(any*, T, T, T, T imm);
-  class RISCVMaskedAtomicRMWFiveArg<LLVMType itype>
-      : Intrinsic<[itype], [llvm_anyptr_ty, itype, itype, itype, itype],
+  // T @llvm.<name>.<i>.<p>(any*, T, T, T, T imm);
+  class RISCVMaskedAtomicRMWFiveArg
+      : Intrinsic<[llvm_anyint_ty], [llvm_anyptr_ty, LLVMMatchType<0>,
+                   LLVMMatchType<0>, LLVMMatchType<0>,
+                   LLVMMatchType<0>],
                  [IntrArgMemOnly, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<4>>]>;
 
-  // We define 32-bit and 64-bit variants of the above, where T stands for i32
-  // or i64 respectively:
-  multiclass RISCVMaskedAtomicRMWFourArgIntrinsics {
-    // i32 @llvm.<name>.i32.<p>(any*, i32, i32, i32 imm);
-    def _i32 : RISCVMaskedAtomicRMWFourArg<llvm_i32_ty>;
-    // i64 @llvm.<name>.i32.<p>(any*, i64, i64, i64 imm);
-    def _i64 : RISCVMaskedAtomicRMWFourArg<llvm_i64_ty>;
-  }
-
-  multiclass RISCVMaskedAtomicRMWFiveArgIntrinsics {
-    // i32 @llvm.<name>.i32.<p>(any*, i32, i32, i32, i32 imm);
-    def _i32 : RISCVMaskedAtomicRMWFiveArg<llvm_i32_ty>;
-    // i64 @llvm.<name>.i64.<p>(any*, i64, i64, i64, i64 imm);
-    def _i64 : RISCVMaskedAtomicRMWFiveArg<llvm_i64_ty>;
-  }
-
   // These intrinsics are intended only for internal compiler use (i.e. as
   // part of AtomicExpandpass via the emitMaskedAtomic*Intrinsic hooks). Their
   // names and semantics could change in the future.
 
-  // @llvm.riscv.masked.atomicrmw.*.{i32,i64}.<p>(
+  // @llvm.riscv.masked.atomicrmw.*.<i>.<p>(
   //   ptr addr, ixlen oparg, ixlen mask, ixlenimm ordering)
-  defm int_riscv_masked_atomicrmw_xchg : RISCVMaskedAtomicRMWFourArgIntrinsics;
-  defm int_riscv_masked_atomicrmw_add : RISCVMaskedAtomicRMWFourArgIntrinsics;
-  defm int_riscv_masked_atomicrmw_sub : RISCVMaskedAtomicRMWFourArgIntrinsics;
-  defm int_riscv_masked_atomicrmw_nand : RISCVMaskedAtomicRMWFourArgIntrinsics;
-  defm int_riscv_masked_atomicrmw_umax : RISCVMaskedAtomicRMWFourArgIntrinsics;
-  defm int_riscv_masked_atomicrmw_umin : RISCVMaskedAtomicRMWFourArgIntrinsics;
+  def int_riscv_masked_atomicrmw_xchg : RISCVMaskedAtomicRMWFourArg;
+  def int_riscv_masked_atomicrmw_add : RISCVMaskedAtomicRMWFourArg;
+  def int_riscv_masked_atomicrmw_sub : RISCVMaskedAtomicRMWFourArg;
+  def int_riscv_masked_atomicrmw_nand : RISCVMaskedAtomicRMWFourArg;
+  def int_riscv_masked_atomicrmw_umax : RISCVMaskedAtomicRMWFourArg;
+  def int_riscv_masked_atomicrmw_umin : RISCVMaskedAtomicRMWFourArg;
   // Signed min and max need an extra operand to do sign extension with.
-  // @llvm.riscv.masked.atomicrmw.{max,min}.{i32,i64}.<p>(
+  // @llvm.riscv.masked.atomicrmw.{max,min}.<i>.<p>(
   //   ptr addr, ixlen oparg, ixlen mask, ixlen shamt, ixlenimm ordering)
-  defm int_riscv_masked_atomicrmw_max : RISCVMaskedAtomicRMWFiveArgIntrinsics;
-  defm int_riscv_masked_atomicrmw_min : RISCVMaskedAtomicRMWFiveArgIntrinsics;
+  def int_riscv_masked_atomicrmw_max : RISCVMaskedAtomicRMWFiveArg;
+  def int_riscv_masked_atomicrmw_min : RISCVMaskedAtomicRMWFiveArg;
 
-  // @llvm.riscv.masked.cmpxchg.{i32,i64}.<p>(
+  // @llvm.riscv.masked.cmpxchg.<i>.<p>(
   //   ptr addr, ixlen cmpval, ixlen newval, ixlen mask, ixlenimm ordering)
-  defm int_riscv_masked_cmpxchg : RISCVMaskedAtomicRMWFiveArgIntrinsics;
+  def int_riscv_masked_cmpxchg : RISCVMaskedAtomicRMWFiveArg;
 
 } // TargetPrefix = "riscv"
 
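For readers less familiar with the intrinsic DSL: `llvm_anyint_ty` in the result position introduces overloaded type #0, and each `LLVMMatchType<0>` operand is constrained to be that same type, so all data operands are iXLEN; `llvm_anyptr_ty` remains a second, independent overload. A hedged C++ sketch of emitting a call against the merged definition (the operand names Incr, Mask, and Ordering are placeholders):

    // Both overload types are spelled out when building the call; the callee
    // name is mangled accordingly, e.g. .i64.p0 on RV64.
    Type *Tys[] = {Builder.getIntNTy(XLen), AlignedAddr->getType()};
    Value *Res = Builder.CreateIntrinsic(
        Intrinsic::riscv_masked_atomicrmw_xchg, Tys,
        {AlignedAddr, Incr, Mask, Ordering});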
llvm/lib/Target/RISCV/RISCVISelLowering.cpp

Lines changed: 46 additions & 70 deletions
@@ -1800,15 +1800,20 @@ bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
   switch (Intrinsic) {
   default:
     return false;
-  case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
-  case Intrinsic::riscv_masked_atomicrmw_add_i32:
-  case Intrinsic::riscv_masked_atomicrmw_sub_i32:
-  case Intrinsic::riscv_masked_atomicrmw_nand_i32:
-  case Intrinsic::riscv_masked_atomicrmw_max_i32:
-  case Intrinsic::riscv_masked_atomicrmw_min_i32:
-  case Intrinsic::riscv_masked_atomicrmw_umax_i32:
-  case Intrinsic::riscv_masked_atomicrmw_umin_i32:
-  case Intrinsic::riscv_masked_cmpxchg_i32:
+  case Intrinsic::riscv_masked_atomicrmw_xchg:
+  case Intrinsic::riscv_masked_atomicrmw_add:
+  case Intrinsic::riscv_masked_atomicrmw_sub:
+  case Intrinsic::riscv_masked_atomicrmw_nand:
+  case Intrinsic::riscv_masked_atomicrmw_max:
+  case Intrinsic::riscv_masked_atomicrmw_min:
+  case Intrinsic::riscv_masked_atomicrmw_umax:
+  case Intrinsic::riscv_masked_atomicrmw_umin:
+  case Intrinsic::riscv_masked_cmpxchg:
+    // riscv_masked_{atomicrmw_*,cmpxchg} intrinsics represent an emulated
+    // narrow atomic operation. These will be expanded to an LR/SC loop that
+    // reads/writes to/from an aligned 4 byte location. And, or, shift, etc.
+    // will be used to modify the appropriate part of the 4 byte data and
+    // preserve the rest.
     Info.opc = ISD::INTRINSIC_W_CHAIN;
     Info.memVT = MVT::i32;
    Info.ptrVal = I.getArgOperand(0);
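As background for the comment in the hunk above, a conceptual C++ model of what the LR/SC expansion computes for, say, a masked exchange. The real lowering emits LR.W/SC.W pseudo-instructions; this helper is purely illustrative and its name is hypothetical:

    #include <atomic>
    #include <cstdint>

    // Update only the masked field of an aligned 32-bit word, preserving the
    // other bytes; returns the old word so the narrow result can be extracted.
    uint32_t emulateMaskedXchg(std::atomic<uint32_t> &Word, uint32_t Mask,
                               uint32_t ShiftedNewVal) {
      uint32_t Old = Word.load();
      while (!Word.compare_exchange_weak(
          Old, (Old & ~Mask) | (ShiftedNewVal & Mask)))
        ; // Old is reloaded on failure; retry until the store succeeds.
      return Old;
    }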
@@ -21478,24 +21483,23 @@ unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
     switch (IntNo) {
     default:
       break;
-    case Intrinsic::riscv_masked_atomicrmw_xchg_i64:
-    case Intrinsic::riscv_masked_atomicrmw_add_i64:
-    case Intrinsic::riscv_masked_atomicrmw_sub_i64:
-    case Intrinsic::riscv_masked_atomicrmw_nand_i64:
-    case Intrinsic::riscv_masked_atomicrmw_max_i64:
-    case Intrinsic::riscv_masked_atomicrmw_min_i64:
-    case Intrinsic::riscv_masked_atomicrmw_umax_i64:
-    case Intrinsic::riscv_masked_atomicrmw_umin_i64:
-    case Intrinsic::riscv_masked_cmpxchg_i64:
+    case Intrinsic::riscv_masked_atomicrmw_xchg:
+    case Intrinsic::riscv_masked_atomicrmw_add:
+    case Intrinsic::riscv_masked_atomicrmw_sub:
+    case Intrinsic::riscv_masked_atomicrmw_nand:
+    case Intrinsic::riscv_masked_atomicrmw_max:
+    case Intrinsic::riscv_masked_atomicrmw_min:
+    case Intrinsic::riscv_masked_atomicrmw_umax:
+    case Intrinsic::riscv_masked_atomicrmw_umin:
+    case Intrinsic::riscv_masked_cmpxchg:
       // riscv_masked_{atomicrmw_*,cmpxchg} intrinsics represent an emulated
       // narrow atomic operation. These are implemented using atomic
       // operations at the minimum supported atomicrmw/cmpxchg width whose
       // result is then sign extended to XLEN. With +A, the minimum width is
       // 32 for both 64 and 32.
-      assert(Subtarget.getXLen() == 64);
       assert(getMinCmpXchgSizeInBits() == 32);
       assert(Subtarget.hasStdExtA());
-      return 33;
+      return Op.getValueSizeInBits() - 31;
     }
     break;
   }
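The new return value generalizes the old hard-coded 33 rather than changing it: the emulated operation's result is sign-extended from bit 31, so every bit from 31 upward is a copy of the sign bit. A small sketch of the arithmetic (helper name hypothetical):

    // Known sign bits of an N-bit value that is sign-extended from bit 31:
    // bits [31, N) all agree, i.e. N - 31 of them.
    unsigned signBitsAfterSExtFromI32(unsigned N) {
      return N - 31; // N == 64 (RV64) -> 33, the old constant;
                     // N == 32 (RV32) -> 1, now handled by the same code.
    }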
@@ -23786,53 +23790,26 @@ RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
 
 static Intrinsic::ID
 getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) {
-  if (XLen == 32) {
-    switch (BinOp) {
-    default:
-      llvm_unreachable("Unexpected AtomicRMW BinOp");
-    case AtomicRMWInst::Xchg:
-      return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
-    case AtomicRMWInst::Add:
-      return Intrinsic::riscv_masked_atomicrmw_add_i32;
-    case AtomicRMWInst::Sub:
-      return Intrinsic::riscv_masked_atomicrmw_sub_i32;
-    case AtomicRMWInst::Nand:
-      return Intrinsic::riscv_masked_atomicrmw_nand_i32;
-    case AtomicRMWInst::Max:
-      return Intrinsic::riscv_masked_atomicrmw_max_i32;
-    case AtomicRMWInst::Min:
-      return Intrinsic::riscv_masked_atomicrmw_min_i32;
-    case AtomicRMWInst::UMax:
-      return Intrinsic::riscv_masked_atomicrmw_umax_i32;
-    case AtomicRMWInst::UMin:
-      return Intrinsic::riscv_masked_atomicrmw_umin_i32;
-    }
-  }
-
-  if (XLen == 64) {
-    switch (BinOp) {
-    default:
-      llvm_unreachable("Unexpected AtomicRMW BinOp");
-    case AtomicRMWInst::Xchg:
-      return Intrinsic::riscv_masked_atomicrmw_xchg_i64;
-    case AtomicRMWInst::Add:
-      return Intrinsic::riscv_masked_atomicrmw_add_i64;
-    case AtomicRMWInst::Sub:
-      return Intrinsic::riscv_masked_atomicrmw_sub_i64;
-    case AtomicRMWInst::Nand:
-      return Intrinsic::riscv_masked_atomicrmw_nand_i64;
-    case AtomicRMWInst::Max:
-      return Intrinsic::riscv_masked_atomicrmw_max_i64;
-    case AtomicRMWInst::Min:
-      return Intrinsic::riscv_masked_atomicrmw_min_i64;
-    case AtomicRMWInst::UMax:
-      return Intrinsic::riscv_masked_atomicrmw_umax_i64;
-    case AtomicRMWInst::UMin:
-      return Intrinsic::riscv_masked_atomicrmw_umin_i64;
-    }
+  switch (BinOp) {
+  default:
+    llvm_unreachable("Unexpected AtomicRMW BinOp");
+  case AtomicRMWInst::Xchg:
+    return Intrinsic::riscv_masked_atomicrmw_xchg;
+  case AtomicRMWInst::Add:
+    return Intrinsic::riscv_masked_atomicrmw_add;
+  case AtomicRMWInst::Sub:
+    return Intrinsic::riscv_masked_atomicrmw_sub;
+  case AtomicRMWInst::Nand:
+    return Intrinsic::riscv_masked_atomicrmw_nand;
+  case AtomicRMWInst::Max:
+    return Intrinsic::riscv_masked_atomicrmw_max;
+  case AtomicRMWInst::Min:
+    return Intrinsic::riscv_masked_atomicrmw_min;
+  case AtomicRMWInst::UMax:
+    return Intrinsic::riscv_masked_atomicrmw_umax;
+  case AtomicRMWInst::UMin:
+    return Intrinsic::riscv_masked_atomicrmw_umin;
   }
-
-  llvm_unreachable("Unexpected XLen\n");
 }
 
 Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
@@ -23857,7 +23834,7 @@ Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
   unsigned XLen = Subtarget.getXLen();
   Value *Ordering =
       Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering()));
-  Type *Tys[] = {AlignedAddr->getType()};
+  Type *Tys[] = {Builder.getIntNTy(XLen), AlignedAddr->getType()};
   Function *LrwOpScwLoop = Intrinsic::getOrInsertDeclaration(
       AI->getModule(),
       getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys);
@@ -23913,14 +23890,13 @@ Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
     Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
   unsigned XLen = Subtarget.getXLen();
   Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord));
-  Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32;
+  Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg;
   if (XLen == 64) {
     CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
     NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
-    CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64;
   }
-  Type *Tys[] = {AlignedAddr->getType()};
+  Type *Tys[] = {Builder.getIntNTy(XLen), AlignedAddr->getType()};
   Value *Result = Builder.CreateIntrinsic(
       CmpXchgIntrID, Tys, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
   if (XLen == 64)
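Putting the cmpxchg pieces together, a condensed C++ sketch of the RV64 path after this change. It mirrors the function above; the final truncation back to i32 is implied by the trailing `if (XLen == 64)` context line and is an assumption here:

    // RV64: widen the i32 operands to iXLEN, call the single overloaded
    // intrinsic, then truncate the result back down.
    CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
    NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
    Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
    Type *Tys[] = {Builder.getInt64Ty(), AlignedAddr->getType()};
    Value *Result = Builder.CreateIntrinsic(
        Intrinsic::riscv_masked_cmpxchg, Tys,
        {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
    Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());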

llvm/lib/Target/RISCV/RISCVInstrInfoA.td

Lines changed: 17 additions & 40 deletions
@@ -293,12 +293,14 @@ multiclass PseudoAMOPat<string AtomicOp, Pseudo AMOInst, ValueType vt = XLenVT>
 }
 
 class PseudoMaskedAMOPat<Intrinsic intrin, Pseudo AMOInst>
-    : Pat<(intrin GPR:$addr, GPR:$incr, GPR:$mask, timm:$ordering),
+    : Pat<(XLenVT (intrin (XLenVT GPR:$addr), (XLenVT GPR:$incr),
+           (XLenVT GPR:$mask), (XLenVT timm:$ordering))),
          (AMOInst GPR:$addr, GPR:$incr, GPR:$mask, timm:$ordering)>;
 
 class PseudoMaskedAMOMinMaxPat<Intrinsic intrin, Pseudo AMOInst>
-    : Pat<(intrin GPR:$addr, GPR:$incr, GPR:$mask, GPR:$shiftamt,
-           timm:$ordering),
+    : Pat<(XLenVT (intrin (XLenVT GPR:$addr), (XLenVT GPR:$incr),
+           (XLenVT GPR:$mask), (XLenVT GPR:$shiftamt),
+           (XLenVT timm:$ordering))),
          (AMOInst GPR:$addr, GPR:$incr, GPR:$mask, GPR:$shiftamt,
           timm:$ordering)>;
 
@@ -324,49 +326,30 @@ let Size = 36 in {
 def PseudoMaskedAtomicLoadUMax32 : PseudoMaskedAMOUMinUMax;
 def PseudoMaskedAtomicLoadUMin32 : PseudoMaskedAMOUMinUMax;
 }
-} // Predicates = [HasStdExtA]
 
-let Predicates = [HasStdExtA, IsRV32] in {
-def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_xchg_i32,
+def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_xchg,
                          PseudoMaskedAtomicSwap32>;
-def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_add_i32,
+def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_add,
                          PseudoMaskedAtomicLoadAdd32>;
-def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_sub_i32,
+def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_sub,
                          PseudoMaskedAtomicLoadSub32>;
-def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_nand_i32,
+def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_nand,
                          PseudoMaskedAtomicLoadNand32>;
-def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_max_i32,
+def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_max,
                                PseudoMaskedAtomicLoadMax32>;
-def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_min_i32,
+def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_min,
                                PseudoMaskedAtomicLoadMin32>;
-def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umax_i32,
+def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umax,
                          PseudoMaskedAtomicLoadUMax32>;
-def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umin_i32,
+def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umin,
                          PseudoMaskedAtomicLoadUMin32>;
-} // Predicates = [HasStdExtA, IsRV32]
+} // Predicates = [HasStdExtA]
 
 let Predicates = [HasStdExtA, IsRV64] in {
 
 let Size = 20 in
 def PseudoAtomicLoadNand64 : PseudoAMO;
 defm : PseudoAMOPat<"atomic_load_nand_i64", PseudoAtomicLoadNand64, i64>;
-
-def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_xchg_i64,
-                         PseudoMaskedAtomicSwap32>;
-def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_add_i64,
-                         PseudoMaskedAtomicLoadAdd32>;
-def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_sub_i64,
-                         PseudoMaskedAtomicLoadSub32>;
-def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_nand_i64,
-                         PseudoMaskedAtomicLoadNand32>;
-def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_max_i64,
-                               PseudoMaskedAtomicLoadMax32>;
-def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_min_i64,
-                               PseudoMaskedAtomicLoadMin32>;
-def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umax_i64,
-                         PseudoMaskedAtomicLoadUMax32>;
-def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umin_i64,
-                         PseudoMaskedAtomicLoadUMin32>;
 } // Predicates = [HasStdExtA, IsRV64]
 
@@ -420,15 +403,9 @@ def PseudoMaskedCmpXchg32
   let Size = 32;
 }
 
-def : Pat<(int_riscv_masked_cmpxchg_i32
-               GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, timm:$ordering),
+def : Pat<(XLenVT (int_riscv_masked_cmpxchg
+               (XLenVT GPR:$addr), (XLenVT GPR:$cmpval), (XLenVT GPR:$newval),
+               (XLenVT GPR:$mask), (XLenVT timm:$ordering))),
          (PseudoMaskedCmpXchg32
               GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, timm:$ordering)>;
 } // Predicates = [HasStdExtA]
-
-let Predicates = [HasStdExtA, IsRV64] in {
-def : Pat<(int_riscv_masked_cmpxchg_i64
-              GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, timm:$ordering),
-          (PseudoMaskedCmpXchg32
-              GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, timm:$ordering)>;
-} // Predicates = [HasStdExtA, IsRV64]
