|
10 | 10 |
|
11 | 11 | // TODO: Stop manually mangling this name. Need C++ namespaces to get the exact mangling. |
12 | 12 |
|
13 | | -#define IMPL(TYPE, TYPE_MANGLED, AS, AS_MANGLED, NAME, PREFIX, SUFFIX) \ |
| 13 | +#define IMPL(TYPE, TYPE_MANGLED, AS, AS_MANGLED, SUB, NAME, PREFIX, SUFFIX) \ |
14 | 14 | _CLC_DEF TYPE \ |
15 | | - _Z18##NAME##PU3##AS_MANGLED##TYPE_MANGLED##N5__spv5Scope4FlagENS1_19MemorySemanticsMask4FlagE##TYPE_MANGLED( \ |
| 15 | + _Z18##NAME##P##AS_MANGLED##TYPE_MANGLED##N5__spv5Scope4FlagENS##SUB##_19MemorySemanticsMask4FlagE##TYPE_MANGLED( \ |
16 | 16 | volatile AS TYPE *p, enum Scope scope, \ |
17 | 17 | enum MemorySemanticsMask semantics, TYPE val) { \ |
18 | 18 | return PREFIX##__sync_fetch_and_##SUFFIX(p, val); \ |
19 | 19 | } |
20 | 20 |
|
21 | | -IMPL(int, i, global, AS1, __spirv_AtomicSMax, , max) |
22 | | -IMPL(unsigned int, j, global, AS1, __spirv_AtomicUMax, , umax) |
23 | | -IMPL(int, i, local, AS3, __spirv_AtomicSMax, , max) |
24 | | -IMPL(unsigned int, j, local, AS3, __spirv_AtomicUMax, , umax) |
| 21 | +IMPL(int, i, global, U3AS1, 1, __spirv_AtomicSMax, , max) |
| 22 | +IMPL(unsigned int, j, global, U3AS1, 1, __spirv_AtomicUMax, , umax) |
| 23 | +IMPL(int, i, local, U3AS3, 1, __spirv_AtomicSMax, , max) |
| 24 | +IMPL(unsigned int, j, local, U3AS3, 1, __spirv_AtomicUMax, , umax) |
25 | 25 |
|
26 | 26 | #ifdef cl_khr_int64_extended_atomics |
27 | 27 | unsigned long __clc__sync_fetch_and_max_local_8(volatile local long *, long); |
28 | 28 | unsigned long __clc__sync_fetch_and_max_global_8(volatile global long *, long); |
29 | 29 | unsigned long __clc__sync_fetch_and_umax_local_8(volatile local unsigned long *, unsigned long); |
30 | 30 | unsigned long __clc__sync_fetch_and_umax_global_8(volatile global unsigned long *, unsigned long); |
31 | 31 |
|
32 | | -IMPL(long, l, global, AS1, __spirv_AtomicSMax, __clc, max_global_8) |
33 | | -IMPL(unsigned long, m, global, AS1, __spirv_AtomicUMax, __clc, umax_global_8) |
34 | | -IMPL(long, l, local, AS3, __spirv_AtomicSMax, __clc, max_local_8) |
35 | | -IMPL(unsigned long, m, local, AS3, __spirv_AtomicUMax, __clc, umax_local_8) |
| 32 | +IMPL(long, l, global, U3AS1, 1, __spirv_AtomicSMax, __clc, max_global_8) |
| 33 | +IMPL(unsigned long, m, global, U3AS1, 1, __spirv_AtomicUMax, __clc, umax_global_8) |
| 34 | +IMPL(long, l, local, U3AS3, 1, __spirv_AtomicSMax, __clc, max_local_8) |
| 35 | +IMPL(unsigned long, m, local, U3AS3, 1, __spirv_AtomicUMax, __clc, umax_local_8) |
36 | 36 | #endif |
| 37 | + |
| 38 | +#if _CLC_GENERIC_AS_SUPPORTED |
| 39 | + |
| 40 | + |
| 41 | +#define IMPL_GENERIC(TYPE, TYPE_MANGLED, NAME, PREFIX, SUFFIX) \ |
| 42 | + IMPL(TYPE, TYPE_MANGLED, , , 0, NAME, PREFIX, SUFFIX) |
| 43 | + |
| 44 | +IMPL_GENERIC(int, i, __spirv_AtomicSMax, , max) |
| 45 | +IMPL_GENERIC(unsigned int, j, __spirv_AtomicUMax, , umax) |
| 46 | + |
| 47 | +#ifdef cl_khr_int64_extended_atomics |
| 48 | + |
| 49 | +unsigned long __clc__sync_fetch_and_max_generic_8(volatile generic long *, long); |
| 50 | +unsigned long __clc__sync_fetch_and_umax_generic_8(volatile generic unsigned long *, unsigned long);
| 51 | + |
| 52 | +IMPL_GENERIC(long, l, __spirv_AtomicSMax, __clc, max_generic_8) |
| 53 | +IMPL_GENERIC(unsigned long, m, __spirv_AtomicUMax, __clc, umax_generic_8) |
| 54 | +#endif |
| 55 | + |
| 56 | + |
| 57 | +#endif //_CLC_GENERIC_AS_SUPPORTED |
37 | 58 | #undef IMPL |
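
For reference, a sketch of what the first updated invocation, IMPL(int, i, global, U3AS1, 1, __spirv_AtomicSMax, , max), expands to after preprocessing (all names come straight from the macro above; the builtin call is produced by PREFIX##__sync_fetch_and_##SUFFIX with an empty PREFIX):

// Mechanical expansion of IMPL(int, i, global, U3AS1, 1, __spirv_AtomicSMax, , max).
// "__spirv_AtomicSMax" is 18 characters, hence the _Z18 prefix; U3AS1 mangles the
// global (addrspace(1)) pointer qualifier.
_CLC_DEF int
_Z18__spirv_AtomicSMaxPU3AS1iN5__spv5Scope4FlagENS1_19MemorySemanticsMask4FlagEi(
    volatile global int *p, enum Scope scope,
    enum MemorySemanticsMask semantics, int val) {
  return __sync_fetch_and_max(p, val);
}

The new SUB argument picks the Itanium-mangling substitution index used for the __spv namespace: an address-space qualifier such as U3AS1 adds an extra substitution candidate for the qualified pointee, so the namespace is referenced as S1_ (SUB = 1), while the IMPL_GENERIC path passes empty AS/AS_MANGLED and SUB = 0, yielding a plain Pi pointer and NS0_ in the mangled name.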