Skip to content

Commit 5d38cdd

Browse files
wenju-he and Copilot
authored
[Clang] Add __scoped_atomic_uinc_wrap and __scoped_atomic_udec_wrap builtins (llvm#168666)
This PR extends __scoped_atomic builtins with inc and dec functions. They map to LLVM IR `atomicrmw uinc_wrap` and `atomicrmw udec_wrap`. These enable implementation of OpenCL-style atomic_inc / atomic_dec with wrap semantics on targets supporting scoped atomics (e.g. GPUs). --------- Co-authored-by: Copilot <[email protected]>
1 parent 3f22ed1 commit 5d38cdd

File tree

8 files changed

+163
-4
lines changed

8 files changed

+163
-4
lines changed

clang/docs/LanguageExtensions.rst

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4854,6 +4854,14 @@ memory scope argument. These are designed to be a generic alternative to the
48544854
``__opencl_atomic_*`` builtin functions for targets that support atomic memory
48554855
scopes.
48564856
4857+
Clang provides two additional ``__scoped_atomic_*`` builtins:
4858+
4859+
* ``__scoped_atomic_uinc_wrap``
4860+
* ``__scoped_atomic_udec_wrap``
4861+
4862+
See the LLVM IR `atomicrmw <https://llvm.org/docs/LangRef.html#atomicrmw-instruction>`_
4863+
instruction for the semantics of ``uinc_wrap`` and ``udec_wrap``.
4864+
48574865
Atomic memory scopes are designed to assist optimizations for systems with
48584866
several levels of memory hierarchy like GPUs. The following memory scopes are
48594867
currently supported:

clang/docs/ReleaseNotes.rst

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -230,6 +230,8 @@ C23 Feature Support
230230

231231
Non-comprehensive list of changes in this release
232232
-------------------------------------------------
233+
- Added ``__scoped_atomic_uinc_wrap`` and ``__scoped_atomic_udec_wrap``.
234+
233235
- Removed OpenCL header-only feature macros (previously unconditionally enabled
234236
on SPIR-V and only selectively disabled via ``-D__undef_<feature>``). All
235237
OpenCL extensions and features are now centralized in OpenCLExtensions.def,

clang/include/clang/Basic/Builtins.td

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2290,6 +2290,18 @@ def ScopedAtomicMaxFetch : AtomicBuiltin {
22902290
let Prototype = "void(...)";
22912291
}
22922292

2293+
def ScopedAtomicUIncWrap : AtomicBuiltin {
2294+
let Spellings = ["__scoped_atomic_uinc_wrap"];
2295+
let Attributes = [CustomTypeChecking];
2296+
let Prototype = "void(...)";
2297+
}
2298+
2299+
def ScopedAtomicUDecWrap : AtomicBuiltin {
2300+
let Spellings = ["__scoped_atomic_udec_wrap"];
2301+
let Attributes = [CustomTypeChecking];
2302+
let Prototype = "void(...)";
2303+
}
2304+
22932305
// OpenCL 2.0 atomic builtins.
22942306
def OpenCLAtomicInit : AtomicBuiltin {
22952307
let Spellings = ["__opencl_atomic_init"];

clang/lib/AST/Expr.cpp

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5213,6 +5213,8 @@ unsigned AtomicExpr::getNumSubExprs(AtomicOp Op) {
52135213
case AO__scoped_atomic_fetch_min:
52145214
case AO__scoped_atomic_fetch_max:
52155215
case AO__scoped_atomic_exchange_n:
5216+
case AO__scoped_atomic_uinc_wrap:
5217+
case AO__scoped_atomic_udec_wrap:
52165218
case AO__hip_atomic_exchange:
52175219
case AO__hip_atomic_fetch_add:
52185220
case AO__hip_atomic_fetch_sub:

clang/lib/CodeGen/CGAtomic.cpp

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -767,6 +767,13 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
767767
Op = llvm::AtomicRMWInst::Nand;
768768
break;
769769

770+
case AtomicExpr::AO__scoped_atomic_uinc_wrap:
771+
Op = llvm::AtomicRMWInst::UIncWrap;
772+
break;
773+
case AtomicExpr::AO__scoped_atomic_udec_wrap:
774+
Op = llvm::AtomicRMWInst::UDecWrap;
775+
break;
776+
770777
case AtomicExpr::AO__atomic_test_and_set: {
771778
llvm::AtomicRMWInst *RMWI =
772779
CGF.emitAtomicRMWInst(llvm::AtomicRMWInst::Xchg, Ptr,
@@ -1071,6 +1078,8 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
10711078
case AtomicExpr::AO__scoped_atomic_xor_fetch:
10721079
case AtomicExpr::AO__scoped_atomic_store_n:
10731080
case AtomicExpr::AO__scoped_atomic_exchange_n:
1081+
case AtomicExpr::AO__scoped_atomic_uinc_wrap:
1082+
case AtomicExpr::AO__scoped_atomic_udec_wrap:
10741083
Val1 = EmitValToTemp(*this, E->getVal1());
10751084
break;
10761085
}
@@ -1269,6 +1278,8 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
12691278
case AtomicExpr::AO__opencl_atomic_fetch_max:
12701279
case AtomicExpr::AO__scoped_atomic_fetch_max:
12711280
case AtomicExpr::AO__scoped_atomic_max_fetch:
1281+
case AtomicExpr::AO__scoped_atomic_uinc_wrap:
1282+
case AtomicExpr::AO__scoped_atomic_udec_wrap:
12721283
case AtomicExpr::AO__atomic_test_and_set:
12731284
case AtomicExpr::AO__atomic_clear:
12741285
llvm_unreachable("Integral atomic operations always become atomicrmw!");

clang/lib/Sema/SemaChecking.cpp

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4482,6 +4482,8 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
44824482
case AtomicExpr::AO__scoped_atomic_or_fetch:
44834483
case AtomicExpr::AO__scoped_atomic_xor_fetch:
44844484
case AtomicExpr::AO__scoped_atomic_nand_fetch:
4485+
case AtomicExpr::AO__scoped_atomic_uinc_wrap:
4486+
case AtomicExpr::AO__scoped_atomic_udec_wrap:
44854487
Form = Arithmetic;
44864488
break;
44874489

clang/test/CodeGen/scoped-atomic-ops.c

Lines changed: 105 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4539,6 +4539,111 @@ _Bool fi7e(_Bool *c) {
45394539
return __scoped_atomic_exchange_n(c, 1, __ATOMIC_RELAXED,
45404540
__MEMORY_SCOPE_SINGLE);
45414541
}
4542+
4543+
// AMDGCN_CL_DEF-LABEL: define hidden void @fi8a(
4544+
// AMDGCN_CL_DEF-SAME: ptr noundef [[A:%.*]], ptr noundef [[B:%.*]]) #[[ATTR0]] {
4545+
// AMDGCN_CL_DEF-NEXT: [[ENTRY:.*:]]
4546+
// AMDGCN_CL_DEF-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
4547+
// AMDGCN_CL_DEF-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
4548+
// AMDGCN_CL_DEF-NEXT: [[DOTATOMICTMP:%.*]] = alloca i32, align 4, addrspace(5)
4549+
// AMDGCN_CL_DEF-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i32, align 4, addrspace(5)
4550+
// AMDGCN_CL_DEF-NEXT: [[DOTATOMICTMP1:%.*]] = alloca i32, align 4, addrspace(5)
4551+
// AMDGCN_CL_DEF-NEXT: [[ATOMIC_TEMP2:%.*]] = alloca i32, align 4, addrspace(5)
4552+
// AMDGCN_CL_DEF-NEXT: [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A_ADDR]] to ptr
4553+
// AMDGCN_CL_DEF-NEXT: [[B_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[B_ADDR]] to ptr
4554+
// AMDGCN_CL_DEF-NEXT: [[DOTATOMICTMP_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTATOMICTMP]] to ptr
4555+
// AMDGCN_CL_DEF-NEXT: [[ATOMIC_TEMP_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[ATOMIC_TEMP]] to ptr
4556+
// AMDGCN_CL_DEF-NEXT: [[DOTATOMICTMP1_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTATOMICTMP1]] to ptr
4557+
// AMDGCN_CL_DEF-NEXT: [[ATOMIC_TEMP2_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[ATOMIC_TEMP2]] to ptr
4558+
// AMDGCN_CL_DEF-NEXT: store ptr [[A]], ptr [[A_ADDR_ASCAST]], align 8
4559+
// AMDGCN_CL_DEF-NEXT: store ptr [[B]], ptr [[B_ADDR_ASCAST]], align 8
4560+
// AMDGCN_CL_DEF-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR_ASCAST]], align 8
4561+
// AMDGCN_CL_DEF-NEXT: store i32 -1, ptr [[DOTATOMICTMP_ASCAST]], align 4
4562+
// AMDGCN_CL_DEF-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTATOMICTMP_ASCAST]], align 4
4563+
// AMDGCN_CL_DEF-NEXT: [[TMP2:%.*]] = atomicrmw uinc_wrap ptr [[TMP0]], i32 [[TMP1]] syncscope("agent") monotonic, align 4, !amdgpu.no.fine.grained.memory [[META3]], !amdgpu.no.remote.memory [[META3]]
4564+
// AMDGCN_CL_DEF-NEXT: store i32 [[TMP2]], ptr [[ATOMIC_TEMP_ASCAST]], align 4
4565+
// AMDGCN_CL_DEF-NEXT: [[TMP3:%.*]] = load i32, ptr [[ATOMIC_TEMP_ASCAST]], align 4
4566+
// AMDGCN_CL_DEF-NEXT: [[TMP4:%.*]] = load ptr, ptr [[B_ADDR_ASCAST]], align 8
4567+
// AMDGCN_CL_DEF-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4
4568+
// AMDGCN_CL_DEF-NEXT: [[TMP5:%.*]] = load ptr, ptr [[A_ADDR_ASCAST]], align 8
4569+
// AMDGCN_CL_DEF-NEXT: store i32 -1, ptr [[DOTATOMICTMP1_ASCAST]], align 4
4570+
// AMDGCN_CL_DEF-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTATOMICTMP1_ASCAST]], align 4
4571+
// AMDGCN_CL_DEF-NEXT: [[TMP7:%.*]] = atomicrmw udec_wrap ptr [[TMP5]], i32 [[TMP6]] syncscope("agent") monotonic, align 4, !amdgpu.no.fine.grained.memory [[META3]], !amdgpu.no.remote.memory [[META3]]
4572+
// AMDGCN_CL_DEF-NEXT: store i32 [[TMP7]], ptr [[ATOMIC_TEMP2_ASCAST]], align 4
4573+
// AMDGCN_CL_DEF-NEXT: [[TMP8:%.*]] = load i32, ptr [[ATOMIC_TEMP2_ASCAST]], align 4
4574+
// AMDGCN_CL_DEF-NEXT: [[TMP9:%.*]] = load ptr, ptr [[A_ADDR_ASCAST]], align 8
4575+
// AMDGCN_CL_DEF-NEXT: store i32 [[TMP8]], ptr [[TMP9]], align 4
4576+
// AMDGCN_CL_DEF-NEXT: ret void
4577+
//
4578+
// AMDGCN_CL_20-LABEL: define hidden void @fi8a(
4579+
// AMDGCN_CL_20-SAME: ptr noundef [[A:%.*]], ptr noundef [[B:%.*]]) #[[ATTR0]] {
4580+
// AMDGCN_CL_20-NEXT: [[ENTRY:.*:]]
4581+
// AMDGCN_CL_20-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
4582+
// AMDGCN_CL_20-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
4583+
// AMDGCN_CL_20-NEXT: [[DOTATOMICTMP:%.*]] = alloca i32, align 4, addrspace(5)
4584+
// AMDGCN_CL_20-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i32, align 4, addrspace(5)
4585+
// AMDGCN_CL_20-NEXT: [[DOTATOMICTMP1:%.*]] = alloca i32, align 4, addrspace(5)
4586+
// AMDGCN_CL_20-NEXT: [[ATOMIC_TEMP2:%.*]] = alloca i32, align 4, addrspace(5)
4587+
// AMDGCN_CL_20-NEXT: [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A_ADDR]] to ptr
4588+
// AMDGCN_CL_20-NEXT: [[B_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[B_ADDR]] to ptr
4589+
// AMDGCN_CL_20-NEXT: [[DOTATOMICTMP_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTATOMICTMP]] to ptr
4590+
// AMDGCN_CL_20-NEXT: [[ATOMIC_TEMP_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[ATOMIC_TEMP]] to ptr
4591+
// AMDGCN_CL_20-NEXT: [[DOTATOMICTMP1_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTATOMICTMP1]] to ptr
4592+
// AMDGCN_CL_20-NEXT: [[ATOMIC_TEMP2_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[ATOMIC_TEMP2]] to ptr
4593+
// AMDGCN_CL_20-NEXT: store ptr [[A]], ptr [[A_ADDR_ASCAST]], align 8
4594+
// AMDGCN_CL_20-NEXT: store ptr [[B]], ptr [[B_ADDR_ASCAST]], align 8
4595+
// AMDGCN_CL_20-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR_ASCAST]], align 8
4596+
// AMDGCN_CL_20-NEXT: store i32 -1, ptr [[DOTATOMICTMP_ASCAST]], align 4
4597+
// AMDGCN_CL_20-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTATOMICTMP_ASCAST]], align 4
4598+
// AMDGCN_CL_20-NEXT: [[TMP2:%.*]] = atomicrmw uinc_wrap ptr [[TMP0]], i32 [[TMP1]] syncscope("agent") monotonic, align 4, !amdgpu.no.fine.grained.memory [[META4]], !amdgpu.no.remote.memory [[META4]]
4599+
// AMDGCN_CL_20-NEXT: store i32 [[TMP2]], ptr [[ATOMIC_TEMP_ASCAST]], align 4
4600+
// AMDGCN_CL_20-NEXT: [[TMP3:%.*]] = load i32, ptr [[ATOMIC_TEMP_ASCAST]], align 4
4601+
// AMDGCN_CL_20-NEXT: [[TMP4:%.*]] = load ptr, ptr [[B_ADDR_ASCAST]], align 8
4602+
// AMDGCN_CL_20-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4
4603+
// AMDGCN_CL_20-NEXT: [[TMP5:%.*]] = load ptr, ptr [[A_ADDR_ASCAST]], align 8
4604+
// AMDGCN_CL_20-NEXT: store i32 -1, ptr [[DOTATOMICTMP1_ASCAST]], align 4
4605+
// AMDGCN_CL_20-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTATOMICTMP1_ASCAST]], align 4
4606+
// AMDGCN_CL_20-NEXT: [[TMP7:%.*]] = atomicrmw udec_wrap ptr [[TMP5]], i32 [[TMP6]] syncscope("agent") monotonic, align 4, !amdgpu.no.fine.grained.memory [[META4]], !amdgpu.no.remote.memory [[META4]]
4607+
// AMDGCN_CL_20-NEXT: store i32 [[TMP7]], ptr [[ATOMIC_TEMP2_ASCAST]], align 4
4608+
// AMDGCN_CL_20-NEXT: [[TMP8:%.*]] = load i32, ptr [[ATOMIC_TEMP2_ASCAST]], align 4
4609+
// AMDGCN_CL_20-NEXT: [[TMP9:%.*]] = load ptr, ptr [[A_ADDR_ASCAST]], align 8
4610+
// AMDGCN_CL_20-NEXT: store i32 [[TMP8]], ptr [[TMP9]], align 4
4611+
// AMDGCN_CL_20-NEXT: ret void
4612+
//
4613+
// SPIRV-LABEL: define hidden spir_func void @fi8a(
4614+
// SPIRV-SAME: ptr noundef [[A:%.*]], ptr noundef [[B:%.*]]) #[[ATTR0]] {
4615+
// SPIRV-NEXT: [[ENTRY:.*:]]
4616+
// SPIRV-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
4617+
// SPIRV-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
4618+
// SPIRV-NEXT: [[DOTATOMICTMP:%.*]] = alloca i32, align 4
4619+
// SPIRV-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i32, align 4
4620+
// SPIRV-NEXT: [[DOTATOMICTMP1:%.*]] = alloca i32, align 4
4621+
// SPIRV-NEXT: [[ATOMIC_TEMP2:%.*]] = alloca i32, align 4
4622+
// SPIRV-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
4623+
// SPIRV-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
4624+
// SPIRV-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8
4625+
// SPIRV-NEXT: store i32 -1, ptr [[DOTATOMICTMP]], align 4
4626+
// SPIRV-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTATOMICTMP]], align 4
4627+
// SPIRV-NEXT: [[TMP2:%.*]] = atomicrmw uinc_wrap ptr [[TMP0]], i32 [[TMP1]] syncscope("device") monotonic, align 4
4628+
// SPIRV-NEXT: store i32 [[TMP2]], ptr [[ATOMIC_TEMP]], align 4
4629+
// SPIRV-NEXT: [[TMP3:%.*]] = load i32, ptr [[ATOMIC_TEMP]], align 4
4630+
// SPIRV-NEXT: [[TMP4:%.*]] = load ptr, ptr [[B_ADDR]], align 8
4631+
// SPIRV-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4
4632+
// SPIRV-NEXT: [[TMP5:%.*]] = load ptr, ptr [[A_ADDR]], align 8
4633+
// SPIRV-NEXT: store i32 -1, ptr [[DOTATOMICTMP1]], align 4
4634+
// SPIRV-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTATOMICTMP1]], align 4
4635+
// SPIRV-NEXT: [[TMP7:%.*]] = atomicrmw udec_wrap ptr [[TMP5]], i32 [[TMP6]] syncscope("device") monotonic, align 4
4636+
// SPIRV-NEXT: store i32 [[TMP7]], ptr [[ATOMIC_TEMP2]], align 4
4637+
// SPIRV-NEXT: [[TMP8:%.*]] = load i32, ptr [[ATOMIC_TEMP2]], align 4
4638+
// SPIRV-NEXT: [[TMP9:%.*]] = load ptr, ptr [[A_ADDR]], align 8
4639+
// SPIRV-NEXT: store i32 [[TMP8]], ptr [[TMP9]], align 4
4640+
// SPIRV-NEXT: ret void
4641+
//
4642+
void fi8a(unsigned int *a, unsigned int *b) {
4643+
*b = __scoped_atomic_uinc_wrap(b, ~0U, __ATOMIC_RELAXED, __MEMORY_SCOPE_DEVICE);
4644+
*a = __scoped_atomic_udec_wrap(a, ~0U, __ATOMIC_RELAXED, __MEMORY_SCOPE_DEVICE);
4645+
}
4646+
45424647
//.
45434648
// AMDGCN_CL_DEF: [[META3]] = !{}
45444649
//.

clang/test/Sema/scoped-atomic-ops.c

Lines changed: 21 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,7 @@ void fi2b(int *i) {
3131
__scoped_atomic_store_n(i, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM);
3232
}
3333

34-
void fi3a(int *a, int *b, int *c, int *d, int *e, int *f, int *g, int *h) {
34+
void fi3a(int *a, int *b, int *c, int *d, int *e, int *f, int *g, int *h, unsigned *i, unsigned *j) {
3535
*a = __scoped_atomic_fetch_add(a, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM);
3636
*b = __scoped_atomic_fetch_sub(b, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM);
3737
*c = __scoped_atomic_fetch_and(c, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM);
@@ -40,9 +40,11 @@ void fi3a(int *a, int *b, int *c, int *d, int *e, int *f, int *g, int *h) {
4040
*f = __scoped_atomic_fetch_nand(f, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM);
4141
*g = __scoped_atomic_fetch_min(g, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM);
4242
*h = __scoped_atomic_fetch_max(h, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM);
43+
*i = __scoped_atomic_uinc_wrap(i, 1u, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM);
44+
*j = __scoped_atomic_udec_wrap(j, 1u, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM);
4345
}
4446

45-
void fi3b(int *a, int *b, int *c, int *d, int *e, int *f, int *g, int *h) {
47+
void fi3b(int *a, int *b, int *c, int *d, int *e, int *f, int *g, int *h, unsigned *i, unsigned *j) {
4648
*a = __scoped_atomic_fetch_add(1, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM); // expected-error {{address argument to atomic builtin must be a pointer ('int' invalid)}}
4749
*b = __scoped_atomic_fetch_sub(1, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM); // expected-error {{address argument to atomic builtin must be a pointer ('int' invalid)}}
4850
*c = __scoped_atomic_fetch_and(1, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM); // expected-error {{address argument to atomic builtin must be a pointer ('int' invalid)}}
@@ -51,9 +53,11 @@ void fi3b(int *a, int *b, int *c, int *d, int *e, int *f, int *g, int *h) {
5153
*f = __scoped_atomic_fetch_nand(1, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM); // expected-error {{address argument to atomic builtin must be a pointer ('int' invalid)}}
5254
*g = __scoped_atomic_fetch_min(1, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM); // expected-error {{address argument to atomic builtin must be a pointer ('int' invalid)}}
5355
*h = __scoped_atomic_fetch_max(1, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM); // expected-error {{address argument to atomic builtin must be a pointer ('int' invalid)}}
56+
*i = __scoped_atomic_uinc_wrap(1, 1u, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM); // expected-error {{address argument to atomic builtin must be a pointer ('int' invalid)}}
57+
*j = __scoped_atomic_udec_wrap(1, 1u, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM); // expected-error {{address argument to atomic builtin must be a pointer ('int' invalid)}}
5458
}
5559

56-
void fi3c(int *a, int *b, int *c, int *d, int *e, int *f, int *g, int *h) {
60+
void fi3c(int *a, int *b, int *c, int *d, int *e, int *f, int *g, int *h, unsigned *i, unsigned *j) {
5761
*a = __scoped_atomic_fetch_add(a, 1, __ATOMIC_RELAXED); // expected-error {{too few arguments to function call, expected 4, have 3}}
5862
*b = __scoped_atomic_fetch_sub(b, 1, __ATOMIC_RELAXED); // expected-error {{too few arguments to function call, expected 4, have 3}}
5963
*c = __scoped_atomic_fetch_and(c, 1, __ATOMIC_RELAXED); // expected-error {{too few arguments to function call, expected 4, have 3}}
@@ -62,9 +66,11 @@ void fi3c(int *a, int *b, int *c, int *d, int *e, int *f, int *g, int *h) {
6266
*f = __scoped_atomic_fetch_nand(f, 1, __ATOMIC_RELAXED); // expected-error {{too few arguments to function call, expected 4, have 3}}
6367
*g = __scoped_atomic_fetch_min(g, 1, __ATOMIC_RELAXED); // expected-error {{too few arguments to function call, expected 4, have 3}}
6468
*h = __scoped_atomic_fetch_max(h, 1, __ATOMIC_RELAXED); // expected-error {{too few arguments to function call, expected 4, have 3}}
69+
*i = __scoped_atomic_uinc_wrap(i, 1u, __ATOMIC_RELAXED); // expected-error {{too few arguments to function call, expected 4, have 3}}
70+
*j = __scoped_atomic_udec_wrap(j, 1u, __ATOMIC_RELAXED); // expected-error {{too few arguments to function call, expected 4, have 3}}
6571
}
6672

67-
void fi3d(int *a, int *b, int *c, int *d, int *e, int *f, int *g, int *h) {
73+
void fi3d(int *a, int *b, int *c, int *d, int *e, int *f, int *g, int *h, unsigned *i, unsigned *j) {
6874
*a = __scoped_atomic_fetch_add(a, 1, __ATOMIC_RELAXED, 42); // expected-error {{synchronization scope argument to atomic operation is invalid}}
6975
*b = __scoped_atomic_fetch_sub(b, 1, __ATOMIC_RELAXED, 42); // expected-error {{synchronization scope argument to atomic operation is invalid}}
7076
*c = __scoped_atomic_fetch_and(c, 1, __ATOMIC_RELAXED, 42); // expected-error {{synchronization scope argument to atomic operation is invalid}}
@@ -73,6 +79,17 @@ void fi3d(int *a, int *b, int *c, int *d, int *e, int *f, int *g, int *h) {
7379
*f = __scoped_atomic_fetch_nand(f, 1, __ATOMIC_RELAXED, 42); // expected-error {{synchronization scope argument to atomic operation is invalid}}
7480
*g = __scoped_atomic_fetch_min(g, 1, __ATOMIC_RELAXED, 42); // expected-error {{synchronization scope argument to atomic operation is invalid}}
7581
*h = __scoped_atomic_fetch_max(h, 1, __ATOMIC_RELAXED, 42); // expected-error {{synchronization scope argument to atomic operation is invalid}}
82+
*i = __scoped_atomic_uinc_wrap(i, 1u, __ATOMIC_RELAXED, 42); // expected-error {{synchronization scope argument to atomic operation is invalid}}
83+
*j = __scoped_atomic_udec_wrap(j, 1u, __ATOMIC_RELAXED, 42); // expected-error {{synchronization scope argument to atomic operation is invalid}}
84+
}
85+
86+
void fi3e(float *a, float *b, float *c, float *d, float *e, float *f) {
87+
*a = __scoped_atomic_fetch_and(a, 1, __ATOMIC_RELAXED, 42); // expected-error {{address argument to atomic operation must be a pointer to integer ('float *' invalid)}}
88+
*b = __scoped_atomic_fetch_or(b, 1, __ATOMIC_RELAXED, 42); // expected-error {{address argument to atomic operation must be a pointer to integer ('float *' invalid)}}
89+
*c = __scoped_atomic_fetch_xor(c, 1, __ATOMIC_RELAXED, 42); // expected-error {{address argument to atomic operation must be a pointer to integer ('float *' invalid)}}
90+
*d = __scoped_atomic_fetch_nand(d, 1, __ATOMIC_RELAXED, 42); // expected-error {{address argument to atomic operation must be a pointer to integer ('float *' invalid)}}
91+
*f = __scoped_atomic_uinc_wrap(f, 1u, __ATOMIC_RELAXED, 42); // expected-error {{address argument to atomic operation must be a pointer to integer ('float *' invalid)}}
92+
*e = __scoped_atomic_udec_wrap(e, 1u, __ATOMIC_RELAXED, 42); // expected-error {{address argument to atomic operation must be a pointer to integer ('float *' invalid)}}
7693
}
7794

7895
int fi4a(int *i) {

0 commit comments

Comments
 (0)