Skip to content

Conversation

@akshayrdeodhar
Copy link
Contributor

This MR adds support for cmpxchg instructions with syncscope.

Adds PatFrags for matching syncscope for 3-input atomic operations in the NVPTX backend.
Handle syncscope correctly for emulation loops in AtomicExpand, in bracketInstWithFences.
Modifies emitLeadingFence, emitTrailingFence to accept SyncScope as a parameter, and updates their implementations in the other affected backends accordingly.
Adds tests for all possible combinations of the cmpxchg instruction (with modifications to cmpxchg.py)

@llvmbot
Copy link
Member

llvmbot commented May 20, 2025

@llvm/pr-subscribers-backend-nvptx

@llvm/pr-subscribers-backend-risc-v

Author: Akshay Deodhar (akshayrdeodhar)

Changes

This MR adds support for cmpxchg instructions with syncscope.

Adds PatFrags for matching syncscope for 3-input atomic operations in the NVPTX backend.
Handle syncscope correctly for emulation loops in AtomicExpand, in bracketInstWithFences.
Modifies emitLeadingFence, emitTrailingFence to accept SyncScope as a parameter, and updates their implementations in the other affected backends accordingly.
Adds tests for all possible combinations of the cmpxchg instruction (with modifications to cmpxchg.py)


Patch is 2.76 MiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/140807.diff

18 Files Affected:

  • (modified) llvm/include/llvm/CodeGen/TargetLowering.h (+9-7)
  • (modified) llvm/lib/CodeGen/AtomicExpandPass.cpp (+13-5)
  • (modified) llvm/lib/CodeGen/TargetLoweringBase.cpp (+6-4)
  • (modified) llvm/lib/Target/ARM/ARMISelLowering.cpp (+4-2)
  • (modified) llvm/lib/Target/ARM/ARMISelLowering.h (+6-4)
  • (modified) llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp (+8-5)
  • (modified) llvm/lib/Target/NVPTX/NVPTXISelLowering.h (+8-4)
  • (modified) llvm/lib/Target/NVPTX/NVPTXIntrinsics.td (+50-17)
  • (modified) llvm/lib/Target/PowerPC/PPCISelLowering.cpp (+4-2)
  • (modified) llvm/lib/Target/PowerPC/PPCISelLowering.h (+8-4)
  • (modified) llvm/lib/Target/RISCV/RISCVISelLowering.cpp (+4-2)
  • (modified) llvm/lib/Target/RISCV/RISCVISelLowering.h (+8-4)
  • (modified) llvm/test/CodeGen/NVPTX/atomics-sm90.ll (+4-4)
  • (modified) llvm/test/CodeGen/NVPTX/cmpxchg-sm60.ll (+13746-2394)
  • (modified) llvm/test/CodeGen/NVPTX/cmpxchg-sm70.ll (+13795-2443)
  • (modified) llvm/test/CodeGen/NVPTX/cmpxchg-sm90.ll (+19480-2452)
  • (modified) llvm/test/CodeGen/NVPTX/cmpxchg.ll (+20-20)
  • (modified) llvm/test/CodeGen/NVPTX/cmpxchg.py (+9-4)
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index ac9ab7f7fd210..265f1fd724237 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -2286,13 +2286,15 @@ class TargetLoweringBase {
   ///   standard ABI uses a fence before a seq_cst load instead of after a
   ///   seq_cst store).
   /// @{
-  virtual Instruction *emitLeadingFence(IRBuilderBase &Builder,
-                                        Instruction *Inst,
-                                        AtomicOrdering Ord) const;
-
-  virtual Instruction *emitTrailingFence(IRBuilderBase &Builder,
-                                         Instruction *Inst,
-                                         AtomicOrdering Ord) const;
+  virtual Instruction *
+  emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst,
+                   AtomicOrdering Ord,
+                   SyncScope::ID SSID = SyncScope::System) const;
+
+  virtual Instruction *
+  emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst,
+                    AtomicOrdering Ord,
+                    SyncScope::ID SSID = SyncScope::System) const;
   /// @}
 
   // Emits code that executes when the comparison result in the ll/sc
diff --git a/llvm/lib/CodeGen/AtomicExpandPass.cpp b/llvm/lib/CodeGen/AtomicExpandPass.cpp
index a3e9700fa3089..1b9e0056eae74 100644
--- a/llvm/lib/CodeGen/AtomicExpandPass.cpp
+++ b/llvm/lib/CodeGen/AtomicExpandPass.cpp
@@ -65,7 +65,8 @@ class AtomicExpandImpl {
   const DataLayout *DL = nullptr;
 
 private:
-  bool bracketInstWithFences(Instruction *I, AtomicOrdering Order);
+  bool bracketInstWithFences(Instruction *I, AtomicOrdering Order,
+                             SyncScope::ID SSID = SyncScope::System);
   IntegerType *getCorrespondingIntegerType(Type *T, const DataLayout &DL);
   LoadInst *convertAtomicLoadToIntegerType(LoadInst *LI);
   bool tryExpandAtomicLoad(LoadInst *LI);
@@ -303,6 +304,7 @@ bool AtomicExpandImpl::processAtomicInstr(Instruction *I) {
 
   if (TLI->shouldInsertFencesForAtomic(I)) {
     auto FenceOrdering = AtomicOrdering::Monotonic;
+    SyncScope::ID SSID = SyncScope::System;
     if (LI && isAcquireOrStronger(LI->getOrdering())) {
       FenceOrdering = LI->getOrdering();
       LI->setOrdering(AtomicOrdering::Monotonic);
@@ -325,13 +327,18 @@ bool AtomicExpandImpl::processAtomicInstr(Instruction *I) {
       // expandAtomicCmpXchg in that case.
       FenceOrdering = CASI->getMergedOrdering();
       auto CASOrdering = TLI->atomicOperationOrderAfterFenceSplit(CASI);
+      SSID = CASI->getSyncScopeID();
 
       CASI->setSuccessOrdering(CASOrdering);
       CASI->setFailureOrdering(CASOrdering);
+      // If CAS ordering is monotonic, then the operation will
+      // take default scope. Otherwise, it will retain its scope
+      if (CASOrdering != AtomicOrdering::Monotonic)
+        CASI->setSyncScopeID(SSID);
     }
 
     if (FenceOrdering != AtomicOrdering::Monotonic) {
-      MadeChange |= bracketInstWithFences(I, FenceOrdering);
+      MadeChange |= bracketInstWithFences(I, FenceOrdering, SSID);
     }
   } else if (I->hasAtomicStore() &&
              TLI->shouldInsertTrailingFenceForAtomicStore(I)) {
@@ -432,12 +439,13 @@ PreservedAnalyses AtomicExpandPass::run(Function &F,
 }
 
 bool AtomicExpandImpl::bracketInstWithFences(Instruction *I,
-                                             AtomicOrdering Order) {
+                                             AtomicOrdering Order,
+                                             SyncScope::ID SSID) {
   ReplacementIRBuilder Builder(I, *DL);
 
-  auto LeadingFence = TLI->emitLeadingFence(Builder, I, Order);
+  auto LeadingFence = TLI->emitLeadingFence(Builder, I, Order, SSID);
 
-  auto TrailingFence = TLI->emitTrailingFence(Builder, I, Order);
+  auto TrailingFence = TLI->emitTrailingFence(Builder, I, Order, SSID);
   // We have a guard here because not every atomic operation generates a
   // trailing fence.
   if (TrailingFence)
diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
index f5ea3c0b47d6a..61d8b1de30ff7 100644
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -2320,18 +2320,20 @@ TargetLoweringBase::getAtomicMemOperandFlags(const Instruction &AI,
 
 Instruction *TargetLoweringBase::emitLeadingFence(IRBuilderBase &Builder,
                                                   Instruction *Inst,
-                                                  AtomicOrdering Ord) const {
+                                                  AtomicOrdering Ord,
+                                                  SyncScope::ID SSID) const {
   if (isReleaseOrStronger(Ord) && Inst->hasAtomicStore())
-    return Builder.CreateFence(Ord);
+    return Builder.CreateFence(Ord, SSID);
   else
     return nullptr;
 }
 
 Instruction *TargetLoweringBase::emitTrailingFence(IRBuilderBase &Builder,
                                                    Instruction *Inst,
-                                                   AtomicOrdering Ord) const {
+                                                   AtomicOrdering Ord,
+                                                   SyncScope::ID SSID) const {
   if (isAcquireOrStronger(Ord))
-    return Builder.CreateFence(Ord);
+    return Builder.CreateFence(Ord, SSID);
   else
     return nullptr;
 }
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index e3dc337bd0843..9bd5166c19c24 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -21221,7 +21221,8 @@ Instruction *ARMTargetLowering::makeDMB(IRBuilderBase &Builder,
 // Based on http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
 Instruction *ARMTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
                                                  Instruction *Inst,
-                                                 AtomicOrdering Ord) const {
+                                                 AtomicOrdering Ord,
+                                                 SyncScope::ID SSID) const {
   switch (Ord) {
   case AtomicOrdering::NotAtomic:
   case AtomicOrdering::Unordered:
@@ -21246,7 +21247,8 @@ Instruction *ARMTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
 
 Instruction *ARMTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
                                                   Instruction *Inst,
-                                                  AtomicOrdering Ord) const {
+                                                  AtomicOrdering Ord,
+                                                  SyncScope::ID SSID) const {
   switch (Ord) {
   case AtomicOrdering::NotAtomic:
   case AtomicOrdering::Unordered:
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.h b/llvm/lib/Target/ARM/ARMISelLowering.h
index 9fad056edd3f1..da09eca2b946f 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.h
+++ b/llvm/lib/Target/ARM/ARMISelLowering.h
@@ -666,10 +666,12 @@ class VectorType;
     void
     emitAtomicCmpXchgNoStoreLLBalance(IRBuilderBase &Builder) const override;
 
-    Instruction *emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst,
-                                  AtomicOrdering Ord) const override;
-    Instruction *emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst,
-                                   AtomicOrdering Ord) const override;
+    Instruction *emitLeadingFence(
+        IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord,
+        SyncScope::ID SSID = SyncScope::System) const override;
+    Instruction *emitTrailingFence(
+        IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord,
+        SyncScope::ID SSID = SyncScope::System) const override;
 
     unsigned getMaxSupportedInterleaveFactor() const override;
 
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index 3e755c25fd91a..946c44ac82abb 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -6055,7 +6055,8 @@ AtomicOrdering NVPTXTargetLowering::atomicOperationOrderAfterFenceSplit(
 
 Instruction *NVPTXTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
                                                    Instruction *Inst,
-                                                   AtomicOrdering Ord) const {
+                                                   AtomicOrdering Ord,
+                                                   SyncScope::ID SSID) const {
   if (!isa<AtomicCmpXchgInst>(Inst))
     return TargetLoweringBase::emitLeadingFence(Builder, Inst, Ord);
 
@@ -6063,15 +6064,17 @@ Instruction *NVPTXTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
   // Emit a fence.sc leading fence for cmpxchg seq_cst which are not emulated
   if (isReleaseOrStronger(Ord))
     return Ord == AtomicOrdering::SequentiallyConsistent
-               ? Builder.CreateFence(AtomicOrdering::SequentiallyConsistent)
-               : Builder.CreateFence(AtomicOrdering::Release);
+               ? Builder.CreateFence(AtomicOrdering::SequentiallyConsistent,
+                                     SSID)
+               : Builder.CreateFence(AtomicOrdering::Release, SSID);
 
   return nullptr;
 }
 
 Instruction *NVPTXTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
                                                     Instruction *Inst,
-                                                    AtomicOrdering Ord) const {
+                                                    AtomicOrdering Ord,
+                                                    SyncScope::ID SSID) const {
   // Specialize for cmpxchg
   if (!isa<AtomicCmpXchgInst>(Inst))
     return TargetLoweringBase::emitTrailingFence(Builder, Inst, Ord);
@@ -6084,7 +6087,7 @@ Instruction *NVPTXTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
   if (isAcquireOrStronger(Ord) &&
       (Ord != AtomicOrdering::SequentiallyConsistent ||
        CASWidth < STI.getMinCmpXchgSizeInBits()))
-    return Builder.CreateFence(AtomicOrdering::Acquire);
+    return Builder.CreateFence(AtomicOrdering::Acquire, SSID);
 
   return nullptr;
 }
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.h b/llvm/lib/Target/NVPTX/NVPTXISelLowering.h
index f41c569a65544..07304adf21ac2 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.h
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.h
@@ -266,10 +266,14 @@ class NVPTXTargetLowering : public TargetLowering {
   AtomicOrdering
   atomicOperationOrderAfterFenceSplit(const Instruction *I) const override;
 
-  Instruction *emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst,
-                                AtomicOrdering Ord) const override;
-  Instruction *emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst,
-                                 AtomicOrdering Ord) const override;
+  Instruction *
+  emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst,
+                   AtomicOrdering Ord,
+                   SyncScope::ID SSID = SyncScope::System) const override;
+  Instruction *
+  emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst,
+                    AtomicOrdering Ord,
+                    SyncScope::ID SSID = SyncScope::System) const override;
 
 private:
   const NVPTXSubtarget &STI; // cache the subtarget here
diff --git a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
index 7d7e69adafcd0..e02c335bc8d13 100644
--- a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
+++ b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
@@ -38,6 +38,27 @@ def AS_match {
   }];
 }
 
+multiclass nvvm_ternary_atomic_op_scoped<SDPatternOperator frag> {
+  defvar frag_pat = (frag node:$ptr, node:$cmp, node:$val);
+  def NAME#_cta: PatFrag<!setdagop(frag_pat, ops),
+      (!cast<SDPatternOperator>(NAME) node:$ptr, node:$cmp, node:$val), [{
+        return Scopes[cast<MemSDNode>(N)->getSyncScopeID()] == NVPTX::Scope::Block;
+  }]>;
+  def NAME#_cluster : PatFrag<!setdagop(frag_pat, ops),
+      (!cast<SDPatternOperator>(NAME) node:$ptr, node:$cmp, node:$val), [{
+        return Scopes[cast<MemSDNode>(N)->getSyncScopeID()] == NVPTX::Scope::Cluster;
+  }]>;
+  def NAME#_gpu: PatFrag<!setdagop(frag_pat, ops),
+      (!cast<SDPatternOperator>(NAME) node:$ptr, node:$cmp, node:$val), [{
+        return Scopes[cast<MemSDNode>(N)->getSyncScopeID()] == NVPTX::Scope::Device;
+  }]>;
+  def NAME#_sys: PatFrag<!setdagop(frag_pat, ops),
+      (!cast<SDPatternOperator>(NAME) node:$ptr, node:$cmp, node:$val), [{
+        return Scopes[cast<MemSDNode>(N)->getSyncScopeID()] == NVPTX::Scope::System;
+  }]>;
+}
+
+
 // A node that will be replaced with the current PTX version.
 class PTX {
   SDNodeXForm PTXVerXform = SDNodeXForm<imm, [{
@@ -2022,40 +2043,41 @@ multiclass F_ATOMIC_2_NEG<ValueType regT, NVPTXRegClass regclass, string SpaceSt
 
 // has 3 operands
 multiclass F_ATOMIC_3_imp<ValueType ptrT, NVPTXRegClass ptrclass,
-  ValueType regT, NVPTXRegClass regclass, string SemStr,
-  string SpaceStr, string TypeStr, string OpcStr, PatFrag IntOp,
-  Operand IMMType, list<Predicate> Pred> {
+  ValueType regT, NVPTXRegClass regclass, string SemStr, 
+  string ScopeStr, string SpaceStr, string TypeStr, string OpcStr, 
+  PatFrag IntOp, Operand IMMType, list<Predicate> Pred> {
   let mayLoad = 1, mayStore = 1, hasSideEffects = 1 in {
     def reg : NVPTXInst<(outs regclass:$dst),
       (ins ptrclass:$addr, regclass:$b, regclass:$c),
-      !strconcat("atom", SemStr, SpaceStr, OpcStr, TypeStr, " \t$dst, [$addr], $b, $c;"),
+      !strconcat("atom", SemStr, ScopeStr, SpaceStr, OpcStr, TypeStr, " \t$dst, [$addr], $b, $c;"),
       [(set (regT regclass:$dst), (IntOp (ptrT ptrclass:$addr), (regT regclass:$b), (regT regclass:$c)))]>,
     Requires<Pred>;
 
     def imm1 : NVPTXInst<(outs regclass:$dst),
       (ins ptrclass:$addr, IMMType:$b, regclass:$c),
-      !strconcat("atom", SemStr, SpaceStr, OpcStr, TypeStr, " \t$dst, [$addr], $b, $c;"),
+      !strconcat("atom", SemStr, ScopeStr, SpaceStr, OpcStr, TypeStr, " \t$dst, [$addr], $b, $c;"),
       [(set (regT regclass:$dst), (IntOp (ptrT ptrclass:$addr), imm:$b, (regT regclass:$c)))]>,
     Requires<Pred>;
 
     def imm2 : NVPTXInst<(outs regclass:$dst),
       (ins ptrclass:$addr, regclass:$b, IMMType:$c),
-      !strconcat("atom", SemStr, SpaceStr, OpcStr, TypeStr, " \t$dst, [$addr], $b, $c;", ""),
+      !strconcat("atom", SemStr, ScopeStr, SpaceStr, OpcStr, TypeStr, " \t$dst, [$addr], $b, $c;", ""),
       [(set (regT regclass:$dst), (IntOp (ptrT ptrclass:$addr), (regT regclass:$b), imm:$c))]>,
     Requires<Pred>;
 
     def imm3 : NVPTXInst<(outs regclass:$dst),
       (ins ptrclass:$addr, IMMType:$b, IMMType:$c),
-      !strconcat("atom", SemStr, SpaceStr, OpcStr, TypeStr, " \t$dst, [$addr], $b, $c;"),
+      !strconcat("atom", SemStr, ScopeStr, SpaceStr, OpcStr, TypeStr, " \t$dst, [$addr], $b, $c;"),
       [(set (regT regclass:$dst), (IntOp (ptrT ptrclass:$addr), imm:$b, imm:$c))]>,
     Requires<Pred>;
   }
 }
-multiclass F_ATOMIC_3<ValueType regT, NVPTXRegClass regclass, string SemStr, string SpaceStr,
-  string TypeStr, string OpcStr, PatFrag IntOp, Operand IMMType, list<Predicate> Pred = []> {
-  defm p32 : F_ATOMIC_3_imp<i32, Int32Regs, regT, regclass, SemStr, SpaceStr, TypeStr,
+multiclass F_ATOMIC_3<ValueType regT, NVPTXRegClass regclass, string SemStr, string ScopeStr, 
+  string SpaceStr, string TypeStr, string OpcStr, PatFrag IntOp, Operand IMMType, 
+  list<Predicate> Pred = []> {
+  defm p32 : F_ATOMIC_3_imp<i32, Int32Regs, regT, regclass, SemStr, ScopeStr, SpaceStr, TypeStr,
     OpcStr, IntOp, IMMType, Pred>;
-  defm p64 : F_ATOMIC_3_imp<i64, Int64Regs, regT, regclass, SemStr, SpaceStr, TypeStr,
+  defm p64 : F_ATOMIC_3_imp<i64, Int64Regs, regT, regclass, SemStr, ScopeStr, SpaceStr, TypeStr,
     OpcStr, IntOp, IMMType, Pred>;
 }
 
@@ -2469,10 +2491,12 @@ foreach size = ["i16", "i32", "i64"] in {
 //            ".cas", atomic_cmp_swap_i32_acquire_global, i32imm,
 //            [hasSM<70>, hasPTX<63>]>
 multiclass INT_PTX_ATOM_CAS<string atomic_cmp_swap_pat, string type,
-                           string order, string addrspace, list<Predicate> preds>
+                           string order, string scope, string addrspace, 
+                           list<Predicate> preds>
     : F_ATOMIC_3<!cast<ValueType>("i"#type),
                  !cast<NVPTXRegClass>("Int"#type#"Regs"),
                  order,
+                 scope,
                  addrspace,
                  ".b"#type,
                  ".cas",
@@ -2487,26 +2511,35 @@ foreach size = ["32", "64"] in {
     defvar cas_addrspace_string = !if(!eq(addrspace, "generic"), "", "."#addrspace);
     foreach order = ["acquire", "release", "acq_rel", "monotonic"] in {
       defvar cas_order_string = !if(!eq(order, "monotonic"), ".relaxed", "."#order);
+      defvar atomic_cmp_swap_pat = !cast<PatFrag>("atomic_cmp_swap_i"#size#_#order#_#addrspace);
+      defm atomic_cmp_swap_i#size#_#order#_#addrspace: nvvm_ternary_atomic_op_scoped<atomic_cmp_swap_pat>;
+
+      foreach scope = ["cta", "cluster", "gpu", "sys"] in {
+        defm INT_PTX_ATOM_CAS_#size#_#order#addrspace#scope
+          : INT_PTX_ATOM_CAS<"atomic_cmp_swap_i"#size#_#order#_#addrspace#_#scope, size,
+                             cas_order_string, "."#scope, cas_addrspace_string,
+                             [hasSM<70>, hasPTX<63>]>;
+      }
       // Note that AtomicExpand will convert cmpxchg seq_cst to a cmpxchg monotonic with fences around it.
       // Memory orders are only supported for SM70+, PTX63+- so we have two sets of instruction definitions-
       // for SM70+, and "old" ones which lower to "atom.cas", for earlier archs.
       defm INT_PTX_ATOM_CAS_#size#_#order#addrspace
         : INT_PTX_ATOM_CAS<"atomic_cmp_swap_i"#size#_#order#_#addrspace, size,
-                           cas_order_string, cas_addrspace_string,
+                           cas_order_string, "", cas_addrspace_string,
                            [hasSM<70>, hasPTX<63>]>;
       defm INT_PTX_ATOM_CAS_#size#_#order#_old#addrspace
         : INT_PTX_ATOM_CAS<"atomic_cmp_swap_i"#size#_#order#_#addrspace, size,
-                           "", cas_addrspace_string, []>;
+                           "", "", cas_addrspace_string, []>;
     }
   }
 }
 
 // Note that 16-bit CAS support in PTX is emulated.
-defm INT_PTX_ATOM_CAS_G_16 : F_ATOMIC_3<i16, Int16Regs, "", ".global", ".b16", ".cas",
+defm INT_PTX_ATOM_CAS_G_16 : F_ATOMIC_3<i16, Int16Regs, "", "", ".global", ".b16", ".cas",
   atomic_cmp_swap_i16_global, i16imm, [hasSM<70>, hasPTX<63>]>;
-defm INT_PTX_ATOM_CAS_S_16 : F_ATOMIC_3<i16, Int16Regs, "", ".shared", ".b16", ".cas",
+defm INT_PTX_ATOM_CAS_S_16 : F_ATOMIC_3<i16, Int16Regs, "", "", ".shared", ".b16", ".cas",
   atomic_cmp_swap_i16_shared, i16imm, [hasSM<70>, hasPTX<63>]>;
-defm INT_PTX_ATOM_CAS_GEN_16 : F_ATOMIC_3<i16, Int16Regs, "", "", ".b16", ".cas",
+defm INT_PTX_ATOM_CAS_GEN_16 : F_ATOMIC_3<i16, Int16Regs, "", "", "", ".b16", ".cas",
   atomic_cmp_swap_i16_generic, i16imm, [hasSM<70>, hasPTX<63>]>;
 
 // Support for scoped atomic operations.  Matches
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 91df5f467e59c..53a3fcc1008b7 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -12430,7 +12430,8 @@ static Instruction *callIntrinsic(IRBuilderBase &Builder, Intrinsic::ID Id) {
 // http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
 Instruction *PPCTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
                                                  Instruction *Inst,
-                                                 AtomicOrdering Ord) const {
+                                                 AtomicOrdering Ord,
+                                                 SyncScope::ID SSID) const {
   if (Ord == AtomicOrdering::SequentiallyConsistent)
     return callIntrinsic(Builder, Intrinsic::ppc_sync);
   if (isReleaseOrStronger(Ord))
@@ -12440,7 +12441,8 @@ Instruction *PPCTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
 
 Instruction *PPCTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
                                                   Instruction *Inst...
[truncated]

@llvmbot
Copy link
Member

llvmbot commented May 20, 2025

@llvm/pr-subscribers-backend-arm

Author: Akshay Deodhar (akshayrdeodhar)

Changes

This MR adds support for cmpxchg instructions with syncscope.

Adds PatFrags for matching syncscope for 3-input atomic operations in the NVPTX backend.
Handle syncscope correctly for emulation loops in AtomicExpand, in bracketInstWithFences.
Modifies emitLeadingFence, emitTrailingFence to accept SyncScope as a parameter, and updates their implementations in the other affected backends accordingly.
Adds tests for all possible combinations of the cmpxchg instruction (with modifications to cmpxchg.py)


Patch is 2.76 MiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/140807.diff

18 Files Affected:

  • (modified) llvm/include/llvm/CodeGen/TargetLowering.h (+9-7)
  • (modified) llvm/lib/CodeGen/AtomicExpandPass.cpp (+13-5)
  • (modified) llvm/lib/CodeGen/TargetLoweringBase.cpp (+6-4)
  • (modified) llvm/lib/Target/ARM/ARMISelLowering.cpp (+4-2)
  • (modified) llvm/lib/Target/ARM/ARMISelLowering.h (+6-4)
  • (modified) llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp (+8-5)
  • (modified) llvm/lib/Target/NVPTX/NVPTXISelLowering.h (+8-4)
  • (modified) llvm/lib/Target/NVPTX/NVPTXIntrinsics.td (+50-17)
  • (modified) llvm/lib/Target/PowerPC/PPCISelLowering.cpp (+4-2)
  • (modified) llvm/lib/Target/PowerPC/PPCISelLowering.h (+8-4)
  • (modified) llvm/lib/Target/RISCV/RISCVISelLowering.cpp (+4-2)
  • (modified) llvm/lib/Target/RISCV/RISCVISelLowering.h (+8-4)
  • (modified) llvm/test/CodeGen/NVPTX/atomics-sm90.ll (+4-4)
  • (modified) llvm/test/CodeGen/NVPTX/cmpxchg-sm60.ll (+13746-2394)
  • (modified) llvm/test/CodeGen/NVPTX/cmpxchg-sm70.ll (+13795-2443)
  • (modified) llvm/test/CodeGen/NVPTX/cmpxchg-sm90.ll (+19480-2452)
  • (modified) llvm/test/CodeGen/NVPTX/cmpxchg.ll (+20-20)
  • (modified) llvm/test/CodeGen/NVPTX/cmpxchg.py (+9-4)
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index ac9ab7f7fd210..265f1fd724237 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -2286,13 +2286,15 @@ class TargetLoweringBase {
   ///   standard ABI uses a fence before a seq_cst load instead of after a
   ///   seq_cst store).
   /// @{
-  virtual Instruction *emitLeadingFence(IRBuilderBase &Builder,
-                                        Instruction *Inst,
-                                        AtomicOrdering Ord) const;
-
-  virtual Instruction *emitTrailingFence(IRBuilderBase &Builder,
-                                         Instruction *Inst,
-                                         AtomicOrdering Ord) const;
+  virtual Instruction *
+  emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst,
+                   AtomicOrdering Ord,
+                   SyncScope::ID SSID = SyncScope::System) const;
+
+  virtual Instruction *
+  emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst,
+                    AtomicOrdering Ord,
+                    SyncScope::ID SSID = SyncScope::System) const;
   /// @}
 
   // Emits code that executes when the comparison result in the ll/sc
diff --git a/llvm/lib/CodeGen/AtomicExpandPass.cpp b/llvm/lib/CodeGen/AtomicExpandPass.cpp
index a3e9700fa3089..1b9e0056eae74 100644
--- a/llvm/lib/CodeGen/AtomicExpandPass.cpp
+++ b/llvm/lib/CodeGen/AtomicExpandPass.cpp
@@ -65,7 +65,8 @@ class AtomicExpandImpl {
   const DataLayout *DL = nullptr;
 
 private:
-  bool bracketInstWithFences(Instruction *I, AtomicOrdering Order);
+  bool bracketInstWithFences(Instruction *I, AtomicOrdering Order,
+                             SyncScope::ID SSID = SyncScope::System);
   IntegerType *getCorrespondingIntegerType(Type *T, const DataLayout &DL);
   LoadInst *convertAtomicLoadToIntegerType(LoadInst *LI);
   bool tryExpandAtomicLoad(LoadInst *LI);
@@ -303,6 +304,7 @@ bool AtomicExpandImpl::processAtomicInstr(Instruction *I) {
 
   if (TLI->shouldInsertFencesForAtomic(I)) {
     auto FenceOrdering = AtomicOrdering::Monotonic;
+    SyncScope::ID SSID = SyncScope::System;
     if (LI && isAcquireOrStronger(LI->getOrdering())) {
       FenceOrdering = LI->getOrdering();
       LI->setOrdering(AtomicOrdering::Monotonic);
@@ -325,13 +327,18 @@ bool AtomicExpandImpl::processAtomicInstr(Instruction *I) {
       // expandAtomicCmpXchg in that case.
       FenceOrdering = CASI->getMergedOrdering();
       auto CASOrdering = TLI->atomicOperationOrderAfterFenceSplit(CASI);
+      SSID = CASI->getSyncScopeID();
 
       CASI->setSuccessOrdering(CASOrdering);
       CASI->setFailureOrdering(CASOrdering);
+      // If CAS ordering is monotonic, then the operation will
+      // take default scope. Otherwise, it will retain its scope
+      if (CASOrdering != AtomicOrdering::Monotonic)
+        CASI->setSyncScopeID(SSID);
     }
 
     if (FenceOrdering != AtomicOrdering::Monotonic) {
-      MadeChange |= bracketInstWithFences(I, FenceOrdering);
+      MadeChange |= bracketInstWithFences(I, FenceOrdering, SSID);
     }
   } else if (I->hasAtomicStore() &&
              TLI->shouldInsertTrailingFenceForAtomicStore(I)) {
@@ -432,12 +439,13 @@ PreservedAnalyses AtomicExpandPass::run(Function &F,
 }
 
 bool AtomicExpandImpl::bracketInstWithFences(Instruction *I,
-                                             AtomicOrdering Order) {
+                                             AtomicOrdering Order,
+                                             SyncScope::ID SSID) {
   ReplacementIRBuilder Builder(I, *DL);
 
-  auto LeadingFence = TLI->emitLeadingFence(Builder, I, Order);
+  auto LeadingFence = TLI->emitLeadingFence(Builder, I, Order, SSID);
 
-  auto TrailingFence = TLI->emitTrailingFence(Builder, I, Order);
+  auto TrailingFence = TLI->emitTrailingFence(Builder, I, Order, SSID);
   // We have a guard here because not every atomic operation generates a
   // trailing fence.
   if (TrailingFence)
diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
index f5ea3c0b47d6a..61d8b1de30ff7 100644
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -2320,18 +2320,20 @@ TargetLoweringBase::getAtomicMemOperandFlags(const Instruction &AI,
 
 Instruction *TargetLoweringBase::emitLeadingFence(IRBuilderBase &Builder,
                                                   Instruction *Inst,
-                                                  AtomicOrdering Ord) const {
+                                                  AtomicOrdering Ord,
+                                                  SyncScope::ID SSID) const {
   if (isReleaseOrStronger(Ord) && Inst->hasAtomicStore())
-    return Builder.CreateFence(Ord);
+    return Builder.CreateFence(Ord, SSID);
   else
     return nullptr;
 }
 
 Instruction *TargetLoweringBase::emitTrailingFence(IRBuilderBase &Builder,
                                                    Instruction *Inst,
-                                                   AtomicOrdering Ord) const {
+                                                   AtomicOrdering Ord,
+                                                   SyncScope::ID SSID) const {
   if (isAcquireOrStronger(Ord))
-    return Builder.CreateFence(Ord);
+    return Builder.CreateFence(Ord, SSID);
   else
     return nullptr;
 }
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index e3dc337bd0843..9bd5166c19c24 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -21221,7 +21221,8 @@ Instruction *ARMTargetLowering::makeDMB(IRBuilderBase &Builder,
 // Based on http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
 Instruction *ARMTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
                                                  Instruction *Inst,
-                                                 AtomicOrdering Ord) const {
+                                                 AtomicOrdering Ord,
+                                                 SyncScope::ID SSID) const {
   switch (Ord) {
   case AtomicOrdering::NotAtomic:
   case AtomicOrdering::Unordered:
@@ -21246,7 +21247,8 @@ Instruction *ARMTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
 
 Instruction *ARMTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
                                                   Instruction *Inst,
-                                                  AtomicOrdering Ord) const {
+                                                  AtomicOrdering Ord,
+                                                  SyncScope::ID SSID) const {
   switch (Ord) {
   case AtomicOrdering::NotAtomic:
   case AtomicOrdering::Unordered:
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.h b/llvm/lib/Target/ARM/ARMISelLowering.h
index 9fad056edd3f1..da09eca2b946f 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.h
+++ b/llvm/lib/Target/ARM/ARMISelLowering.h
@@ -666,10 +666,12 @@ class VectorType;
     void
     emitAtomicCmpXchgNoStoreLLBalance(IRBuilderBase &Builder) const override;
 
-    Instruction *emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst,
-                                  AtomicOrdering Ord) const override;
-    Instruction *emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst,
-                                   AtomicOrdering Ord) const override;
+    Instruction *emitLeadingFence(
+        IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord,
+        SyncScope::ID SSID = SyncScope::System) const override;
+    Instruction *emitTrailingFence(
+        IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord,
+        SyncScope::ID SSID = SyncScope::System) const override;
 
     unsigned getMaxSupportedInterleaveFactor() const override;
 
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index 3e755c25fd91a..946c44ac82abb 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -6055,7 +6055,8 @@ AtomicOrdering NVPTXTargetLowering::atomicOperationOrderAfterFenceSplit(
 
 Instruction *NVPTXTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
                                                    Instruction *Inst,
-                                                   AtomicOrdering Ord) const {
+                                                   AtomicOrdering Ord,
+                                                   SyncScope::ID SSID) const {
   if (!isa<AtomicCmpXchgInst>(Inst))
-    return TargetLoweringBase::emitLeadingFence(Builder, Inst, Ord);
+    return TargetLoweringBase::emitLeadingFence(Builder, Inst, Ord, SSID);
 
@@ -6063,15 +6064,17 @@ Instruction *NVPTXTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
   // Emit a fence.sc leading fence for cmpxchg seq_cst which are not emulated
   if (isReleaseOrStronger(Ord))
     return Ord == AtomicOrdering::SequentiallyConsistent
-               ? Builder.CreateFence(AtomicOrdering::SequentiallyConsistent)
-               : Builder.CreateFence(AtomicOrdering::Release);
+               ? Builder.CreateFence(AtomicOrdering::SequentiallyConsistent,
+                                     SSID)
+               : Builder.CreateFence(AtomicOrdering::Release, SSID);
 
   return nullptr;
 }
 
 Instruction *NVPTXTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
                                                     Instruction *Inst,
-                                                    AtomicOrdering Ord) const {
+                                                    AtomicOrdering Ord,
+                                                    SyncScope::ID SSID) const {
   // Specialize for cmpxchg
   if (!isa<AtomicCmpXchgInst>(Inst))
-    return TargetLoweringBase::emitTrailingFence(Builder, Inst, Ord);
+    return TargetLoweringBase::emitTrailingFence(Builder, Inst, Ord, SSID);
@@ -6084,7 +6087,7 @@ Instruction *NVPTXTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
   if (isAcquireOrStronger(Ord) &&
       (Ord != AtomicOrdering::SequentiallyConsistent ||
        CASWidth < STI.getMinCmpXchgSizeInBits()))
-    return Builder.CreateFence(AtomicOrdering::Acquire);
+    return Builder.CreateFence(AtomicOrdering::Acquire, SSID);
 
   return nullptr;
 }
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.h b/llvm/lib/Target/NVPTX/NVPTXISelLowering.h
index f41c569a65544..07304adf21ac2 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.h
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.h
@@ -266,10 +266,14 @@ class NVPTXTargetLowering : public TargetLowering {
   AtomicOrdering
   atomicOperationOrderAfterFenceSplit(const Instruction *I) const override;
 
-  Instruction *emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst,
-                                AtomicOrdering Ord) const override;
-  Instruction *emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst,
-                                 AtomicOrdering Ord) const override;
+  Instruction *
+  emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst,
+                   AtomicOrdering Ord,
+                   SyncScope::ID SSID = SyncScope::System) const override;
+  Instruction *
+  emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst,
+                    AtomicOrdering Ord,
+                    SyncScope::ID SSID = SyncScope::System) const override;
 
 private:
   const NVPTXSubtarget &STI; // cache the subtarget here
diff --git a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
index 7d7e69adafcd0..e02c335bc8d13 100644
--- a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
+++ b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
@@ -38,6 +38,27 @@ def AS_match {
   }];
 }
 
+multiclass nvvm_ternary_atomic_op_scoped<SDPatternOperator frag> {
+  defvar frag_pat = (frag node:$ptr, node:$cmp, node:$val);
+  def NAME#_cta: PatFrag<!setdagop(frag_pat, ops),
+      (!cast<SDPatternOperator>(NAME) node:$ptr, node:$cmp, node:$val), [{
+        return Scopes[cast<MemSDNode>(N)->getSyncScopeID()] == NVPTX::Scope::Block;
+  }]>;
+  def NAME#_cluster : PatFrag<!setdagop(frag_pat, ops),
+      (!cast<SDPatternOperator>(NAME) node:$ptr, node:$cmp, node:$val), [{
+        return Scopes[cast<MemSDNode>(N)->getSyncScopeID()] == NVPTX::Scope::Cluster;
+  }]>;
+  def NAME#_gpu: PatFrag<!setdagop(frag_pat, ops),
+      (!cast<SDPatternOperator>(NAME) node:$ptr, node:$cmp, node:$val), [{
+        return Scopes[cast<MemSDNode>(N)->getSyncScopeID()] == NVPTX::Scope::Device;
+  }]>;
+  def NAME#_sys: PatFrag<!setdagop(frag_pat, ops),
+      (!cast<SDPatternOperator>(NAME) node:$ptr, node:$cmp, node:$val), [{
+        return Scopes[cast<MemSDNode>(N)->getSyncScopeID()] == NVPTX::Scope::System;
+  }]>;
+}
+
+
 // A node that will be replaced with the current PTX version.
 class PTX {
   SDNodeXForm PTXVerXform = SDNodeXForm<imm, [{
@@ -2022,40 +2043,41 @@ multiclass F_ATOMIC_2_NEG<ValueType regT, NVPTXRegClass regclass, string SpaceSt
 
 // has 3 operands
 multiclass F_ATOMIC_3_imp<ValueType ptrT, NVPTXRegClass ptrclass,
-  ValueType regT, NVPTXRegClass regclass, string SemStr,
-  string SpaceStr, string TypeStr, string OpcStr, PatFrag IntOp,
-  Operand IMMType, list<Predicate> Pred> {
+  ValueType regT, NVPTXRegClass regclass, string SemStr, 
+  string ScopeStr, string SpaceStr, string TypeStr, string OpcStr, 
+  PatFrag IntOp, Operand IMMType, list<Predicate> Pred> {
   let mayLoad = 1, mayStore = 1, hasSideEffects = 1 in {
     def reg : NVPTXInst<(outs regclass:$dst),
       (ins ptrclass:$addr, regclass:$b, regclass:$c),
-      !strconcat("atom", SemStr, SpaceStr, OpcStr, TypeStr, " \t$dst, [$addr], $b, $c;"),
+      !strconcat("atom", SemStr, ScopeStr, SpaceStr, OpcStr, TypeStr, " \t$dst, [$addr], $b, $c;"),
       [(set (regT regclass:$dst), (IntOp (ptrT ptrclass:$addr), (regT regclass:$b), (regT regclass:$c)))]>,
     Requires<Pred>;
 
     def imm1 : NVPTXInst<(outs regclass:$dst),
       (ins ptrclass:$addr, IMMType:$b, regclass:$c),
-      !strconcat("atom", SemStr, SpaceStr, OpcStr, TypeStr, " \t$dst, [$addr], $b, $c;"),
+      !strconcat("atom", SemStr, ScopeStr, SpaceStr, OpcStr, TypeStr, " \t$dst, [$addr], $b, $c;"),
       [(set (regT regclass:$dst), (IntOp (ptrT ptrclass:$addr), imm:$b, (regT regclass:$c)))]>,
     Requires<Pred>;
 
     def imm2 : NVPTXInst<(outs regclass:$dst),
       (ins ptrclass:$addr, regclass:$b, IMMType:$c),
-      !strconcat("atom", SemStr, SpaceStr, OpcStr, TypeStr, " \t$dst, [$addr], $b, $c;", ""),
+      !strconcat("atom", SemStr, ScopeStr, SpaceStr, OpcStr, TypeStr, " \t$dst, [$addr], $b, $c;", ""),
       [(set (regT regclass:$dst), (IntOp (ptrT ptrclass:$addr), (regT regclass:$b), imm:$c))]>,
     Requires<Pred>;
 
     def imm3 : NVPTXInst<(outs regclass:$dst),
       (ins ptrclass:$addr, IMMType:$b, IMMType:$c),
-      !strconcat("atom", SemStr, SpaceStr, OpcStr, TypeStr, " \t$dst, [$addr], $b, $c;"),
+      !strconcat("atom", SemStr, ScopeStr, SpaceStr, OpcStr, TypeStr, " \t$dst, [$addr], $b, $c;"),
       [(set (regT regclass:$dst), (IntOp (ptrT ptrclass:$addr), imm:$b, imm:$c))]>,
     Requires<Pred>;
   }
 }
-multiclass F_ATOMIC_3<ValueType regT, NVPTXRegClass regclass, string SemStr, string SpaceStr,
-  string TypeStr, string OpcStr, PatFrag IntOp, Operand IMMType, list<Predicate> Pred = []> {
-  defm p32 : F_ATOMIC_3_imp<i32, Int32Regs, regT, regclass, SemStr, SpaceStr, TypeStr,
+multiclass F_ATOMIC_3<ValueType regT, NVPTXRegClass regclass, string SemStr, string ScopeStr, 
+  string SpaceStr, string TypeStr, string OpcStr, PatFrag IntOp, Operand IMMType, 
+  list<Predicate> Pred = []> {
+  defm p32 : F_ATOMIC_3_imp<i32, Int32Regs, regT, regclass, SemStr, ScopeStr, SpaceStr, TypeStr,
     OpcStr, IntOp, IMMType, Pred>;
-  defm p64 : F_ATOMIC_3_imp<i64, Int64Regs, regT, regclass, SemStr, SpaceStr, TypeStr,
+  defm p64 : F_ATOMIC_3_imp<i64, Int64Regs, regT, regclass, SemStr, ScopeStr, SpaceStr, TypeStr,
     OpcStr, IntOp, IMMType, Pred>;
 }
 
@@ -2469,10 +2491,12 @@ foreach size = ["i16", "i32", "i64"] in {
 //            ".cas", atomic_cmp_swap_i32_acquire_global, i32imm,
 //            [hasSM<70>, hasPTX<63>]>
 multiclass INT_PTX_ATOM_CAS<string atomic_cmp_swap_pat, string type,
-                           string order, string addrspace, list<Predicate> preds>
+                           string order, string scope, string addrspace, 
+                           list<Predicate> preds>
     : F_ATOMIC_3<!cast<ValueType>("i"#type),
                  !cast<NVPTXRegClass>("Int"#type#"Regs"),
                  order,
+                 scope,
                  addrspace,
                  ".b"#type,
                  ".cas",
@@ -2487,26 +2511,35 @@ foreach size = ["32", "64"] in {
     defvar cas_addrspace_string = !if(!eq(addrspace, "generic"), "", "."#addrspace);
     foreach order = ["acquire", "release", "acq_rel", "monotonic"] in {
       defvar cas_order_string = !if(!eq(order, "monotonic"), ".relaxed", "."#order);
+      defvar atomic_cmp_swap_pat = !cast<PatFrag>("atomic_cmp_swap_i"#size#_#order#_#addrspace);
+      defm atomic_cmp_swap_i#size#_#order#_#addrspace: nvvm_ternary_atomic_op_scoped<atomic_cmp_swap_pat>;
+
+      foreach scope = ["cta", "cluster", "gpu", "sys"] in {
+        defm INT_PTX_ATOM_CAS_#size#_#order#addrspace#scope
+          : INT_PTX_ATOM_CAS<"atomic_cmp_swap_i"#size#_#order#_#addrspace#_#scope, size,
+                             cas_order_string, "."#scope, cas_addrspace_string,
+                             [hasSM<70>, hasPTX<63>]>;
+      }
       // Note that AtomicExpand will convert cmpxchg seq_cst to a cmpxchg monotonic with fences around it.
       // Memory orders are only supported for SM70+, PTX63+- so we have two sets of instruction definitions-
       // for SM70+, and "old" ones which lower to "atom.cas", for earlier archs.
       defm INT_PTX_ATOM_CAS_#size#_#order#addrspace
         : INT_PTX_ATOM_CAS<"atomic_cmp_swap_i"#size#_#order#_#addrspace, size,
-                           cas_order_string, cas_addrspace_string,
+                           cas_order_string, "", cas_addrspace_string,
                            [hasSM<70>, hasPTX<63>]>;
       defm INT_PTX_ATOM_CAS_#size#_#order#_old#addrspace
         : INT_PTX_ATOM_CAS<"atomic_cmp_swap_i"#size#_#order#_#addrspace, size,
-                           "", cas_addrspace_string, []>;
+                           "", "", cas_addrspace_string, []>;
     }
   }
 }
 
 // Note that 16-bit CAS support in PTX is emulated.
-defm INT_PTX_ATOM_CAS_G_16 : F_ATOMIC_3<i16, Int16Regs, "", ".global", ".b16", ".cas",
+defm INT_PTX_ATOM_CAS_G_16 : F_ATOMIC_3<i16, Int16Regs, "", "", ".global", ".b16", ".cas",
   atomic_cmp_swap_i16_global, i16imm, [hasSM<70>, hasPTX<63>]>;
-defm INT_PTX_ATOM_CAS_S_16 : F_ATOMIC_3<i16, Int16Regs, "", ".shared", ".b16", ".cas",
+defm INT_PTX_ATOM_CAS_S_16 : F_ATOMIC_3<i16, Int16Regs, "", "", ".shared", ".b16", ".cas",
   atomic_cmp_swap_i16_shared, i16imm, [hasSM<70>, hasPTX<63>]>;
-defm INT_PTX_ATOM_CAS_GEN_16 : F_ATOMIC_3<i16, Int16Regs, "", "", ".b16", ".cas",
+defm INT_PTX_ATOM_CAS_GEN_16 : F_ATOMIC_3<i16, Int16Regs, "", "", "", ".b16", ".cas",
   atomic_cmp_swap_i16_generic, i16imm, [hasSM<70>, hasPTX<63>]>;
 
 // Support for scoped atomic operations.  Matches
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 91df5f467e59c..53a3fcc1008b7 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -12430,7 +12430,8 @@ static Instruction *callIntrinsic(IRBuilderBase &Builder, Intrinsic::ID Id) {
 // http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
 Instruction *PPCTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
                                                  Instruction *Inst,
-                                                 AtomicOrdering Ord) const {
+                                                 AtomicOrdering Ord,
+                                                 SyncScope::ID SSID) const {
   if (Ord == AtomicOrdering::SequentiallyConsistent)
     return callIntrinsic(Builder, Intrinsic::ppc_sync);
   if (isReleaseOrStronger(Ord))
@@ -12440,7 +12441,8 @@ Instruction *PPCTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
 
 Instruction *PPCTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
                                                   Instruction *Inst...
[truncated]

@llvmbot
Copy link
Member

llvmbot commented May 20, 2025

@llvm/pr-subscribers-backend-powerpc

Author: Akshay Deodhar (akshayrdeodhar)

Changes

This MR adds support for cmpxchg instructions with syncscope.

Adds PatFrags for matching syncscope for 3-input atomic operations in the NVPTX backend.
Handle syncscope correctly for emulation loops in AtomicExpand, in bracketInstructionWithFences.
Modifies emitLeadingFence, emitTrailingFence to accept SyncScope as a parameter. Modifies implementation of these in other backends, with
Tests for all possible combinations of the cmpxchg instruction (with modifications to cmpxchg.py)


Patch is 2.76 MiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/140807.diff

18 Files Affected:

  • (modified) llvm/include/llvm/CodeGen/TargetLowering.h (+9-7)
  • (modified) llvm/lib/CodeGen/AtomicExpandPass.cpp (+13-5)
  • (modified) llvm/lib/CodeGen/TargetLoweringBase.cpp (+6-4)
  • (modified) llvm/lib/Target/ARM/ARMISelLowering.cpp (+4-2)
  • (modified) llvm/lib/Target/ARM/ARMISelLowering.h (+6-4)
  • (modified) llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp (+8-5)
  • (modified) llvm/lib/Target/NVPTX/NVPTXISelLowering.h (+8-4)
  • (modified) llvm/lib/Target/NVPTX/NVPTXIntrinsics.td (+50-17)
  • (modified) llvm/lib/Target/PowerPC/PPCISelLowering.cpp (+4-2)
  • (modified) llvm/lib/Target/PowerPC/PPCISelLowering.h (+8-4)
  • (modified) llvm/lib/Target/RISCV/RISCVISelLowering.cpp (+4-2)
  • (modified) llvm/lib/Target/RISCV/RISCVISelLowering.h (+8-4)
  • (modified) llvm/test/CodeGen/NVPTX/atomics-sm90.ll (+4-4)
  • (modified) llvm/test/CodeGen/NVPTX/cmpxchg-sm60.ll (+13746-2394)
  • (modified) llvm/test/CodeGen/NVPTX/cmpxchg-sm70.ll (+13795-2443)
  • (modified) llvm/test/CodeGen/NVPTX/cmpxchg-sm90.ll (+19480-2452)
  • (modified) llvm/test/CodeGen/NVPTX/cmpxchg.ll (+20-20)
  • (modified) llvm/test/CodeGen/NVPTX/cmpxchg.py (+9-4)
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index ac9ab7f7fd210..265f1fd724237 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -2286,13 +2286,15 @@ class TargetLoweringBase {
   ///   standard ABI uses a fence before a seq_cst load instead of after a
   ///   seq_cst store).
   /// @{
-  virtual Instruction *emitLeadingFence(IRBuilderBase &Builder,
-                                        Instruction *Inst,
-                                        AtomicOrdering Ord) const;
-
-  virtual Instruction *emitTrailingFence(IRBuilderBase &Builder,
-                                         Instruction *Inst,
-                                         AtomicOrdering Ord) const;
+  virtual Instruction *
+  emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst,
+                   AtomicOrdering Ord,
+                   SyncScope::ID SSID = SyncScope::System) const;
+
+  virtual Instruction *
+  emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst,
+                    AtomicOrdering Ord,
+                    SyncScope::ID SSID = SyncScope::System) const;
   /// @}
 
   // Emits code that executes when the comparison result in the ll/sc
diff --git a/llvm/lib/CodeGen/AtomicExpandPass.cpp b/llvm/lib/CodeGen/AtomicExpandPass.cpp
index a3e9700fa3089..1b9e0056eae74 100644
--- a/llvm/lib/CodeGen/AtomicExpandPass.cpp
+++ b/llvm/lib/CodeGen/AtomicExpandPass.cpp
@@ -65,7 +65,8 @@ class AtomicExpandImpl {
   const DataLayout *DL = nullptr;
 
 private:
-  bool bracketInstWithFences(Instruction *I, AtomicOrdering Order);
+  bool bracketInstWithFences(Instruction *I, AtomicOrdering Order,
+                             SyncScope::ID SSID = SyncScope::System);
   IntegerType *getCorrespondingIntegerType(Type *T, const DataLayout &DL);
   LoadInst *convertAtomicLoadToIntegerType(LoadInst *LI);
   bool tryExpandAtomicLoad(LoadInst *LI);
@@ -303,6 +304,7 @@ bool AtomicExpandImpl::processAtomicInstr(Instruction *I) {
 
   if (TLI->shouldInsertFencesForAtomic(I)) {
     auto FenceOrdering = AtomicOrdering::Monotonic;
+    SyncScope::ID SSID = SyncScope::System;
     if (LI && isAcquireOrStronger(LI->getOrdering())) {
       FenceOrdering = LI->getOrdering();
       LI->setOrdering(AtomicOrdering::Monotonic);
@@ -325,13 +327,18 @@ bool AtomicExpandImpl::processAtomicInstr(Instruction *I) {
       // expandAtomicCmpXchg in that case.
       FenceOrdering = CASI->getMergedOrdering();
       auto CASOrdering = TLI->atomicOperationOrderAfterFenceSplit(CASI);
+      SSID = CASI->getSyncScopeID();
 
       CASI->setSuccessOrdering(CASOrdering);
       CASI->setFailureOrdering(CASOrdering);
+      // If CAS ordering is monotonic, then the operation will
+      // take default scope. Otherwise, it will retain its scope
+      if (CASOrdering != AtomicOrdering::Monotonic)
+        CASI->setSyncScopeID(SSID);
     }
 
     if (FenceOrdering != AtomicOrdering::Monotonic) {
-      MadeChange |= bracketInstWithFences(I, FenceOrdering);
+      MadeChange |= bracketInstWithFences(I, FenceOrdering, SSID);
     }
   } else if (I->hasAtomicStore() &&
              TLI->shouldInsertTrailingFenceForAtomicStore(I)) {
@@ -432,12 +439,13 @@ PreservedAnalyses AtomicExpandPass::run(Function &F,
 }
 
 bool AtomicExpandImpl::bracketInstWithFences(Instruction *I,
-                                             AtomicOrdering Order) {
+                                             AtomicOrdering Order,
+                                             SyncScope::ID SSID) {
   ReplacementIRBuilder Builder(I, *DL);
 
-  auto LeadingFence = TLI->emitLeadingFence(Builder, I, Order);
+  auto LeadingFence = TLI->emitLeadingFence(Builder, I, Order, SSID);
 
-  auto TrailingFence = TLI->emitTrailingFence(Builder, I, Order);
+  auto TrailingFence = TLI->emitTrailingFence(Builder, I, Order, SSID);
   // We have a guard here because not every atomic operation generates a
   // trailing fence.
   if (TrailingFence)
diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
index f5ea3c0b47d6a..61d8b1de30ff7 100644
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -2320,18 +2320,20 @@ TargetLoweringBase::getAtomicMemOperandFlags(const Instruction &AI,
 
 Instruction *TargetLoweringBase::emitLeadingFence(IRBuilderBase &Builder,
                                                   Instruction *Inst,
-                                                  AtomicOrdering Ord) const {
+                                                  AtomicOrdering Ord,
+                                                  SyncScope::ID SSID) const {
   if (isReleaseOrStronger(Ord) && Inst->hasAtomicStore())
-    return Builder.CreateFence(Ord);
+    return Builder.CreateFence(Ord, SSID);
   else
     return nullptr;
 }
 
 Instruction *TargetLoweringBase::emitTrailingFence(IRBuilderBase &Builder,
                                                    Instruction *Inst,
-                                                   AtomicOrdering Ord) const {
+                                                   AtomicOrdering Ord,
+                                                   SyncScope::ID SSID) const {
   if (isAcquireOrStronger(Ord))
-    return Builder.CreateFence(Ord);
+    return Builder.CreateFence(Ord, SSID);
   else
     return nullptr;
 }
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index e3dc337bd0843..9bd5166c19c24 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -21221,7 +21221,8 @@ Instruction *ARMTargetLowering::makeDMB(IRBuilderBase &Builder,
 // Based on http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
 Instruction *ARMTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
                                                  Instruction *Inst,
-                                                 AtomicOrdering Ord) const {
+                                                 AtomicOrdering Ord,
+                                                 SyncScope::ID SSID) const {
   switch (Ord) {
   case AtomicOrdering::NotAtomic:
   case AtomicOrdering::Unordered:
@@ -21246,7 +21247,8 @@ Instruction *ARMTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
 
 Instruction *ARMTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
                                                   Instruction *Inst,
-                                                  AtomicOrdering Ord) const {
+                                                  AtomicOrdering Ord,
+                                                  SyncScope::ID SSID) const {
   switch (Ord) {
   case AtomicOrdering::NotAtomic:
   case AtomicOrdering::Unordered:
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.h b/llvm/lib/Target/ARM/ARMISelLowering.h
index 9fad056edd3f1..da09eca2b946f 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.h
+++ b/llvm/lib/Target/ARM/ARMISelLowering.h
@@ -666,10 +666,12 @@ class VectorType;
     void
     emitAtomicCmpXchgNoStoreLLBalance(IRBuilderBase &Builder) const override;
 
-    Instruction *emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst,
-                                  AtomicOrdering Ord) const override;
-    Instruction *emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst,
-                                   AtomicOrdering Ord) const override;
+    Instruction *emitLeadingFence(
+        IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord,
+        SyncScope::ID SSID = SyncScope::System) const override;
+    Instruction *emitTrailingFence(
+        IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord,
+        SyncScope::ID SSID = SyncScope::System) const override;
 
     unsigned getMaxSupportedInterleaveFactor() const override;
 
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index 3e755c25fd91a..946c44ac82abb 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -6055,7 +6055,8 @@ AtomicOrdering NVPTXTargetLowering::atomicOperationOrderAfterFenceSplit(
 
 Instruction *NVPTXTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
                                                    Instruction *Inst,
-                                                   AtomicOrdering Ord) const {
+                                                   AtomicOrdering Ord,
+                                                   SyncScope::ID SSID) const {
   if (!isa<AtomicCmpXchgInst>(Inst))
-    return TargetLoweringBase::emitLeadingFence(Builder, Inst, Ord);
+    return TargetLoweringBase::emitLeadingFence(Builder, Inst, Ord, SSID);
 
@@ -6063,15 +6064,17 @@ Instruction *NVPTXTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
   // Emit a fence.sc leading fence for cmpxchg seq_cst which are not emulated
   if (isReleaseOrStronger(Ord))
     return Ord == AtomicOrdering::SequentiallyConsistent
-               ? Builder.CreateFence(AtomicOrdering::SequentiallyConsistent)
-               : Builder.CreateFence(AtomicOrdering::Release);
+               ? Builder.CreateFence(AtomicOrdering::SequentiallyConsistent,
+                                     SSID)
+               : Builder.CreateFence(AtomicOrdering::Release, SSID);
 
   return nullptr;
 }
 
 Instruction *NVPTXTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
                                                     Instruction *Inst,
-                                                    AtomicOrdering Ord) const {
+                                                    AtomicOrdering Ord,
+                                                    SyncScope::ID SSID) const {
   // Specialize for cmpxchg
   if (!isa<AtomicCmpXchgInst>(Inst))
-    return TargetLoweringBase::emitTrailingFence(Builder, Inst, Ord);
+    return TargetLoweringBase::emitTrailingFence(Builder, Inst, Ord, SSID);
@@ -6084,7 +6087,7 @@ Instruction *NVPTXTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
   if (isAcquireOrStronger(Ord) &&
       (Ord != AtomicOrdering::SequentiallyConsistent ||
        CASWidth < STI.getMinCmpXchgSizeInBits()))
-    return Builder.CreateFence(AtomicOrdering::Acquire);
+    return Builder.CreateFence(AtomicOrdering::Acquire, SSID);
 
   return nullptr;
 }
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.h b/llvm/lib/Target/NVPTX/NVPTXISelLowering.h
index f41c569a65544..07304adf21ac2 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.h
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.h
@@ -266,10 +266,14 @@ class NVPTXTargetLowering : public TargetLowering {
   AtomicOrdering
   atomicOperationOrderAfterFenceSplit(const Instruction *I) const override;
 
-  Instruction *emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst,
-                                AtomicOrdering Ord) const override;
-  Instruction *emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst,
-                                 AtomicOrdering Ord) const override;
+  Instruction *
+  emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst,
+                   AtomicOrdering Ord,
+                   SyncScope::ID SSID = SyncScope::System) const override;
+  Instruction *
+  emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst,
+                    AtomicOrdering Ord,
+                    SyncScope::ID SSID = SyncScope::System) const override;
 
 private:
   const NVPTXSubtarget &STI; // cache the subtarget here
diff --git a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
index 7d7e69adafcd0..e02c335bc8d13 100644
--- a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
+++ b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
@@ -38,6 +38,27 @@ def AS_match {
   }];
 }
 
+multiclass nvvm_ternary_atomic_op_scoped<SDPatternOperator frag> {
+  defvar frag_pat = (frag node:$ptr, node:$cmp, node:$val);
+  def NAME#_cta: PatFrag<!setdagop(frag_pat, ops),
+      (!cast<SDPatternOperator>(NAME) node:$ptr, node:$cmp, node:$val), [{
+        return Scopes[cast<MemSDNode>(N)->getSyncScopeID()] == NVPTX::Scope::Block;
+  }]>;
+  def NAME#_cluster : PatFrag<!setdagop(frag_pat, ops),
+      (!cast<SDPatternOperator>(NAME) node:$ptr, node:$cmp, node:$val), [{
+        return Scopes[cast<MemSDNode>(N)->getSyncScopeID()] == NVPTX::Scope::Cluster;
+  }]>;
+  def NAME#_gpu: PatFrag<!setdagop(frag_pat, ops),
+      (!cast<SDPatternOperator>(NAME) node:$ptr, node:$cmp, node:$val), [{
+        return Scopes[cast<MemSDNode>(N)->getSyncScopeID()] == NVPTX::Scope::Device;
+  }]>;
+  def NAME#_sys: PatFrag<!setdagop(frag_pat, ops),
+      (!cast<SDPatternOperator>(NAME) node:$ptr, node:$cmp, node:$val), [{
+        return Scopes[cast<MemSDNode>(N)->getSyncScopeID()] == NVPTX::Scope::System;
+  }]>;
+}
+
+
 // A node that will be replaced with the current PTX version.
 class PTX {
   SDNodeXForm PTXVerXform = SDNodeXForm<imm, [{
@@ -2022,40 +2043,41 @@ multiclass F_ATOMIC_2_NEG<ValueType regT, NVPTXRegClass regclass, string SpaceSt
 
 // has 3 operands
 multiclass F_ATOMIC_3_imp<ValueType ptrT, NVPTXRegClass ptrclass,
-  ValueType regT, NVPTXRegClass regclass, string SemStr,
-  string SpaceStr, string TypeStr, string OpcStr, PatFrag IntOp,
-  Operand IMMType, list<Predicate> Pred> {
+  ValueType regT, NVPTXRegClass regclass, string SemStr, 
+  string ScopeStr, string SpaceStr, string TypeStr, string OpcStr, 
+  PatFrag IntOp, Operand IMMType, list<Predicate> Pred> {
   let mayLoad = 1, mayStore = 1, hasSideEffects = 1 in {
     def reg : NVPTXInst<(outs regclass:$dst),
       (ins ptrclass:$addr, regclass:$b, regclass:$c),
-      !strconcat("atom", SemStr, SpaceStr, OpcStr, TypeStr, " \t$dst, [$addr], $b, $c;"),
+      !strconcat("atom", SemStr, ScopeStr, SpaceStr, OpcStr, TypeStr, " \t$dst, [$addr], $b, $c;"),
       [(set (regT regclass:$dst), (IntOp (ptrT ptrclass:$addr), (regT regclass:$b), (regT regclass:$c)))]>,
     Requires<Pred>;
 
     def imm1 : NVPTXInst<(outs regclass:$dst),
       (ins ptrclass:$addr, IMMType:$b, regclass:$c),
-      !strconcat("atom", SemStr, SpaceStr, OpcStr, TypeStr, " \t$dst, [$addr], $b, $c;"),
+      !strconcat("atom", SemStr, ScopeStr, SpaceStr, OpcStr, TypeStr, " \t$dst, [$addr], $b, $c;"),
       [(set (regT regclass:$dst), (IntOp (ptrT ptrclass:$addr), imm:$b, (regT regclass:$c)))]>,
     Requires<Pred>;
 
     def imm2 : NVPTXInst<(outs regclass:$dst),
       (ins ptrclass:$addr, regclass:$b, IMMType:$c),
-      !strconcat("atom", SemStr, SpaceStr, OpcStr, TypeStr, " \t$dst, [$addr], $b, $c;", ""),
+      !strconcat("atom", SemStr, ScopeStr, SpaceStr, OpcStr, TypeStr, " \t$dst, [$addr], $b, $c;", ""),
       [(set (regT regclass:$dst), (IntOp (ptrT ptrclass:$addr), (regT regclass:$b), imm:$c))]>,
     Requires<Pred>;
 
     def imm3 : NVPTXInst<(outs regclass:$dst),
       (ins ptrclass:$addr, IMMType:$b, IMMType:$c),
-      !strconcat("atom", SemStr, SpaceStr, OpcStr, TypeStr, " \t$dst, [$addr], $b, $c;"),
+      !strconcat("atom", SemStr, ScopeStr, SpaceStr, OpcStr, TypeStr, " \t$dst, [$addr], $b, $c;"),
       [(set (regT regclass:$dst), (IntOp (ptrT ptrclass:$addr), imm:$b, imm:$c))]>,
     Requires<Pred>;
   }
 }
-multiclass F_ATOMIC_3<ValueType regT, NVPTXRegClass regclass, string SemStr, string SpaceStr,
-  string TypeStr, string OpcStr, PatFrag IntOp, Operand IMMType, list<Predicate> Pred = []> {
-  defm p32 : F_ATOMIC_3_imp<i32, Int32Regs, regT, regclass, SemStr, SpaceStr, TypeStr,
+multiclass F_ATOMIC_3<ValueType regT, NVPTXRegClass regclass, string SemStr, string ScopeStr, 
+  string SpaceStr, string TypeStr, string OpcStr, PatFrag IntOp, Operand IMMType, 
+  list<Predicate> Pred = []> {
+  defm p32 : F_ATOMIC_3_imp<i32, Int32Regs, regT, regclass, SemStr, ScopeStr, SpaceStr, TypeStr,
     OpcStr, IntOp, IMMType, Pred>;
-  defm p64 : F_ATOMIC_3_imp<i64, Int64Regs, regT, regclass, SemStr, SpaceStr, TypeStr,
+  defm p64 : F_ATOMIC_3_imp<i64, Int64Regs, regT, regclass, SemStr, ScopeStr, SpaceStr, TypeStr,
     OpcStr, IntOp, IMMType, Pred>;
 }
 
@@ -2469,10 +2491,12 @@ foreach size = ["i16", "i32", "i64"] in {
 //            ".cas", atomic_cmp_swap_i32_acquire_global, i32imm,
 //            [hasSM<70>, hasPTX<63>]>
 multiclass INT_PTX_ATOM_CAS<string atomic_cmp_swap_pat, string type,
-                           string order, string addrspace, list<Predicate> preds>
+                           string order, string scope, string addrspace, 
+                           list<Predicate> preds>
     : F_ATOMIC_3<!cast<ValueType>("i"#type),
                  !cast<NVPTXRegClass>("Int"#type#"Regs"),
                  order,
+                 scope,
                  addrspace,
                  ".b"#type,
                  ".cas",
@@ -2487,26 +2511,35 @@ foreach size = ["32", "64"] in {
     defvar cas_addrspace_string = !if(!eq(addrspace, "generic"), "", "."#addrspace);
     foreach order = ["acquire", "release", "acq_rel", "monotonic"] in {
       defvar cas_order_string = !if(!eq(order, "monotonic"), ".relaxed", "."#order);
+      defvar atomic_cmp_swap_pat = !cast<PatFrag>("atomic_cmp_swap_i"#size#_#order#_#addrspace);
+      defm atomic_cmp_swap_i#size#_#order#_#addrspace: nvvm_ternary_atomic_op_scoped<atomic_cmp_swap_pat>;
+
+      foreach scope = ["cta", "cluster", "gpu", "sys"] in {
+        defm INT_PTX_ATOM_CAS_#size#_#order#addrspace#scope
+          : INT_PTX_ATOM_CAS<"atomic_cmp_swap_i"#size#_#order#_#addrspace#_#scope, size,
+                             cas_order_string, "."#scope, cas_addrspace_string,
+                             [hasSM<70>, hasPTX<63>]>;
+      }
       // Note that AtomicExpand will convert cmpxchg seq_cst to a cmpxchg monotonic with fences around it.
       // Memory orders are only supported for SM70+, PTX63+- so we have two sets of instruction definitions-
       // for SM70+, and "old" ones which lower to "atom.cas", for earlier archs.
       defm INT_PTX_ATOM_CAS_#size#_#order#addrspace
         : INT_PTX_ATOM_CAS<"atomic_cmp_swap_i"#size#_#order#_#addrspace, size,
-                           cas_order_string, cas_addrspace_string,
+                           cas_order_string, "", cas_addrspace_string,
                            [hasSM<70>, hasPTX<63>]>;
       defm INT_PTX_ATOM_CAS_#size#_#order#_old#addrspace
         : INT_PTX_ATOM_CAS<"atomic_cmp_swap_i"#size#_#order#_#addrspace, size,
-                           "", cas_addrspace_string, []>;
+                           "", "", cas_addrspace_string, []>;
     }
   }
 }
 
 // Note that 16-bit CAS support in PTX is emulated.
-defm INT_PTX_ATOM_CAS_G_16 : F_ATOMIC_3<i16, Int16Regs, "", ".global", ".b16", ".cas",
+defm INT_PTX_ATOM_CAS_G_16 : F_ATOMIC_3<i16, Int16Regs, "", "", ".global", ".b16", ".cas",
   atomic_cmp_swap_i16_global, i16imm, [hasSM<70>, hasPTX<63>]>;
-defm INT_PTX_ATOM_CAS_S_16 : F_ATOMIC_3<i16, Int16Regs, "", ".shared", ".b16", ".cas",
+defm INT_PTX_ATOM_CAS_S_16 : F_ATOMIC_3<i16, Int16Regs, "", "", ".shared", ".b16", ".cas",
   atomic_cmp_swap_i16_shared, i16imm, [hasSM<70>, hasPTX<63>]>;
-defm INT_PTX_ATOM_CAS_GEN_16 : F_ATOMIC_3<i16, Int16Regs, "", "", ".b16", ".cas",
+defm INT_PTX_ATOM_CAS_GEN_16 : F_ATOMIC_3<i16, Int16Regs, "", "", "", ".b16", ".cas",
   atomic_cmp_swap_i16_generic, i16imm, [hasSM<70>, hasPTX<63>]>;
 
 // Support for scoped atomic operations.  Matches
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 91df5f467e59c..53a3fcc1008b7 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -12430,7 +12430,8 @@ static Instruction *callIntrinsic(IRBuilderBase &Builder, Intrinsic::ID Id) {
 // http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
 Instruction *PPCTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
                                                  Instruction *Inst,
-                                                 AtomicOrdering Ord) const {
+                                                 AtomicOrdering Ord,
+                                                 SyncScope::ID SSID) const {
   if (Ord == AtomicOrdering::SequentiallyConsistent)
     return callIntrinsic(Builder, Intrinsic::ppc_sync);
   if (isReleaseOrStronger(Ord))
@@ -12440,7 +12441,8 @@ Instruction *PPCTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
 
 Instruction *PPCTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
                                                   Instruction *Inst...
[truncated]

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment

Projects

None yet

Development

Successfully merging this pull request may close these issues.

2 participants