@@ -1800,15 +1800,20 @@ bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
   switch (Intrinsic) {
   default:
     return false;
-  case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
-  case Intrinsic::riscv_masked_atomicrmw_add_i32:
-  case Intrinsic::riscv_masked_atomicrmw_sub_i32:
-  case Intrinsic::riscv_masked_atomicrmw_nand_i32:
-  case Intrinsic::riscv_masked_atomicrmw_max_i32:
-  case Intrinsic::riscv_masked_atomicrmw_min_i32:
-  case Intrinsic::riscv_masked_atomicrmw_umax_i32:
-  case Intrinsic::riscv_masked_atomicrmw_umin_i32:
-  case Intrinsic::riscv_masked_cmpxchg_i32:
+  case Intrinsic::riscv_masked_atomicrmw_xchg:
+  case Intrinsic::riscv_masked_atomicrmw_add:
+  case Intrinsic::riscv_masked_atomicrmw_sub:
+  case Intrinsic::riscv_masked_atomicrmw_nand:
+  case Intrinsic::riscv_masked_atomicrmw_max:
+  case Intrinsic::riscv_masked_atomicrmw_min:
+  case Intrinsic::riscv_masked_atomicrmw_umax:
+  case Intrinsic::riscv_masked_atomicrmw_umin:
+  case Intrinsic::riscv_masked_cmpxchg:
+    // riscv_masked_{atomicrmw_*,cmpxchg} intrinsics represent an emulated
+    // narrow atomic operation. These will be expanded to an LR/SC loop that
+    // reads/writes to/from an aligned 4 byte location. And, or, shift, etc.
+    // will be used to modify the appropriate part of the 4 byte data and
+    // preserve the rest.
     Info.opc = ISD::INTRINSIC_W_CHAIN;
     Info.memVT = MVT::i32;
     Info.ptrVal = I.getArgOperand(0);
@@ -21478,24 +21483,23 @@ unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
     switch (IntNo) {
     default:
       break;
-    case Intrinsic::riscv_masked_atomicrmw_xchg_i64:
-    case Intrinsic::riscv_masked_atomicrmw_add_i64:
-    case Intrinsic::riscv_masked_atomicrmw_sub_i64:
-    case Intrinsic::riscv_masked_atomicrmw_nand_i64:
-    case Intrinsic::riscv_masked_atomicrmw_max_i64:
-    case Intrinsic::riscv_masked_atomicrmw_min_i64:
-    case Intrinsic::riscv_masked_atomicrmw_umax_i64:
-    case Intrinsic::riscv_masked_atomicrmw_umin_i64:
-    case Intrinsic::riscv_masked_cmpxchg_i64:
+    case Intrinsic::riscv_masked_atomicrmw_xchg:
+    case Intrinsic::riscv_masked_atomicrmw_add:
+    case Intrinsic::riscv_masked_atomicrmw_sub:
+    case Intrinsic::riscv_masked_atomicrmw_nand:
+    case Intrinsic::riscv_masked_atomicrmw_max:
+    case Intrinsic::riscv_masked_atomicrmw_min:
+    case Intrinsic::riscv_masked_atomicrmw_umax:
+    case Intrinsic::riscv_masked_atomicrmw_umin:
+    case Intrinsic::riscv_masked_cmpxchg:
       // riscv_masked_{atomicrmw_*,cmpxchg} intrinsics represent an emulated
       // narrow atomic operation. These are implemented using atomic
       // operations at the minimum supported atomicrmw/cmpxchg width whose
       // result is then sign extended to XLEN. With +A, the minimum width is
       // 32 for both 64 and 32.
-      assert(Subtarget.getXLen() == 64);
       assert(getMinCmpXchgSizeInBits() == 32);
       assert(Subtarget.hasStdExtA());
-      return 33;
+      return Op.getValueSizeInBits() - 31;
     }
     break;
   }
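Note on the changed return value: per the comment above, the emulated operation is performed at 32-bit width and the result is sign-extended to XLEN, so at least XLEN - 32 + 1 bits are known copies of the sign bit. On RV64 that is 64 - 31 = 33, matching the old hard-coded constant; on RV32 it is 32 - 31 = 1. Writing it as Op.getValueSizeInBits() - 31 covers both, which is why the Subtarget.getXLen() == 64 assert can be dropped now that the intrinsics are shared between RV32 and RV64.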
@@ -23786,53 +23790,26 @@ RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
 
 static Intrinsic::ID
 getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) {
-  if (XLen == 32) {
-    switch (BinOp) {
-    default:
-      llvm_unreachable("Unexpected AtomicRMW BinOp");
-    case AtomicRMWInst::Xchg:
-      return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
-    case AtomicRMWInst::Add:
-      return Intrinsic::riscv_masked_atomicrmw_add_i32;
-    case AtomicRMWInst::Sub:
-      return Intrinsic::riscv_masked_atomicrmw_sub_i32;
-    case AtomicRMWInst::Nand:
-      return Intrinsic::riscv_masked_atomicrmw_nand_i32;
-    case AtomicRMWInst::Max:
-      return Intrinsic::riscv_masked_atomicrmw_max_i32;
-    case AtomicRMWInst::Min:
-      return Intrinsic::riscv_masked_atomicrmw_min_i32;
-    case AtomicRMWInst::UMax:
-      return Intrinsic::riscv_masked_atomicrmw_umax_i32;
-    case AtomicRMWInst::UMin:
-      return Intrinsic::riscv_masked_atomicrmw_umin_i32;
-    }
-  }
-
-  if (XLen == 64) {
-    switch (BinOp) {
-    default:
-      llvm_unreachable("Unexpected AtomicRMW BinOp");
-    case AtomicRMWInst::Xchg:
-      return Intrinsic::riscv_masked_atomicrmw_xchg_i64;
-    case AtomicRMWInst::Add:
-      return Intrinsic::riscv_masked_atomicrmw_add_i64;
-    case AtomicRMWInst::Sub:
-      return Intrinsic::riscv_masked_atomicrmw_sub_i64;
-    case AtomicRMWInst::Nand:
-      return Intrinsic::riscv_masked_atomicrmw_nand_i64;
-    case AtomicRMWInst::Max:
-      return Intrinsic::riscv_masked_atomicrmw_max_i64;
-    case AtomicRMWInst::Min:
-      return Intrinsic::riscv_masked_atomicrmw_min_i64;
-    case AtomicRMWInst::UMax:
-      return Intrinsic::riscv_masked_atomicrmw_umax_i64;
-    case AtomicRMWInst::UMin:
-      return Intrinsic::riscv_masked_atomicrmw_umin_i64;
-    }
+  switch (BinOp) {
+  default:
+    llvm_unreachable("Unexpected AtomicRMW BinOp");
+  case AtomicRMWInst::Xchg:
+    return Intrinsic::riscv_masked_atomicrmw_xchg;
+  case AtomicRMWInst::Add:
+    return Intrinsic::riscv_masked_atomicrmw_add;
+  case AtomicRMWInst::Sub:
+    return Intrinsic::riscv_masked_atomicrmw_sub;
+  case AtomicRMWInst::Nand:
+    return Intrinsic::riscv_masked_atomicrmw_nand;
+  case AtomicRMWInst::Max:
+    return Intrinsic::riscv_masked_atomicrmw_max;
+  case AtomicRMWInst::Min:
+    return Intrinsic::riscv_masked_atomicrmw_min;
+  case AtomicRMWInst::UMax:
+    return Intrinsic::riscv_masked_atomicrmw_umax;
+  case AtomicRMWInst::UMin:
+    return Intrinsic::riscv_masked_atomicrmw_umin;
   }
-
-  llvm_unreachable("Unexpected XLen\n");
 }
 
 Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
@@ -23857,7 +23834,7 @@ Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
   unsigned XLen = Subtarget.getXLen();
   Value *Ordering =
       Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering()));
-  Type *Tys[] = {AlignedAddr->getType()};
+  Type *Tys[] = {Builder.getIntNTy(XLen), AlignedAddr->getType()};
   Function *LrwOpScwLoop = Intrinsic::getOrInsertDeclaration(
       AI->getModule(),
       getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys);
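For context on the Tys change here and in the cmpxchg hunk below: the former _i32/_i64 intrinsic names are folded into a single intrinsic that is overloaded on the XLEN integer type in addition to the pointer type, so Builder.getIntNTy(XLen) is passed as the first overload type and getOrInsertDeclaration resolves the right width for RV32 or RV64 (presumably mangled along the lines of llvm.riscv.masked.atomicrmw.add.i32.p0 versus .i64.p0).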
@@ -23913,14 +23890,13 @@ Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
     Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
   unsigned XLen = Subtarget.getXLen();
   Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord));
-  Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32;
+  Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg;
   if (XLen == 64) {
     CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
     NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
-    CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64;
   }
-  Type *Tys[] = {AlignedAddr->getType()};
+  Type *Tys[] = {Builder.getIntNTy(XLen), AlignedAddr->getType()};
   Value *Result = Builder.CreateIntrinsic(
       CmpXchgIntrID, Tys, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
   if (XLen == 64)