13 | 13 | #error "This file is intended for NVPTX targets or offloading to NVPTX" |
14 | 14 | #endif |
15 | 15 |
| 16 | +#ifndef __CUDA_ARCH__ |
| 17 | +#define __CUDA_ARCH__ 0 |
| 18 | +#endif |
| 19 | + |
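Note for readers skimming the diff: the fallback definition above exists so that the `__CUDA_ARCH__ >= 700` checks later in this file still parse when the header is consumed by an offloading compilation that never defines the macro; `__nvvm_reflect` then answers the architecture question later in the pipeline. A minimal host-side sketch of that compile-time-or-runtime dispatch pattern, with `__nvvm_reflect` replaced by a stub (the stub, its return value, and `main` are assumptions for illustration only):

```c
/* Hypothetical host-side simulation of the header's dispatch pattern.
 * On a real NVPTX target, __nvvm_reflect("__CUDA_ARCH") is resolved by
 * the NVVMReflect pass; here we stub it so the sketch runs anywhere. */
#include <stdio.h>

#ifndef __CUDA_ARCH__
#define __CUDA_ARCH__ 0 /* same fallback the header installs */
#endif

static int nvvm_reflect_stub(const char *name) {
  (void)name;
  return 700; /* assumption: pretend we are targeting sm_70 */
}

int main(void) {
  /* Mirrors `if (__CUDA_ARCH__ >= 700 || __nvvm_reflect(...) >= 700)`:
   * either the macro or the reflect query can prove the target is new
   * enough to use the dedicated match instructions. */
  if (__CUDA_ARCH__ >= 700 || nvvm_reflect_stub("__CUDA_ARCH") >= 700)
    puts("would use __nvvm_match_any_sync_i32");
  else
    puts("would use the ballot-loop fallback");
  return 0;
}
```

On a real NVPTX compile, the reflect call is expected to fold to a constant, so one side of the branch should disappear entirely.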
16 | 20 | #include <stdint.h> |
17 | 21 |
18 | 22 | #if !defined(__cplusplus) |
@@ -168,6 +172,76 @@ __gpu_shuffle_idx_u64(uint64_t __lane_mask, uint32_t __idx, uint64_t __x, |
168 | 172 | ((uint64_t)__gpu_shuffle_idx_u32(__mask, __idx, __lo, __width)); |
169 | 173 | } |
170 | 174 |
| 175 | +// Returns a bitmask marking all lanes that have the same value of __x. |
| 176 | +_DEFAULT_FN_ATTRS static __inline__ uint64_t |
| 177 | +__gpu_match_any_u32(uint64_t __lane_mask, uint32_t __x) { |
| 178 | + // Newer targets can use the dedicated CUDA support. |
| 179 | + if (__CUDA_ARCH__ >= 700 || __nvvm_reflect("__CUDA_ARCH") >= 700) |
| 180 | + return __nvvm_match_any_sync_i32(__lane_mask, __x); |
| 181 | + |
| 182 | + uint32_t __match_mask = 0; |
| 183 | + bool __done = 0; |
| 184 | + while (__gpu_ballot(__lane_mask, !__done)) { |
| 185 | + if (!__done) { |
| 186 | + uint32_t __first = __gpu_read_first_lane_u32(__lane_mask, __x); |
| 187 | + if (__first == __x) { |
| 188 | + __match_mask = __gpu_lane_mask(); |
| 189 | + __done = 1; |
| 190 | + } |
| 191 | + } |
| 192 | + } |
| 193 | + return __match_mask; |
| 194 | +} |
| 195 | + |
| 196 | +// Returns a bitmask marking all lanes that have the same value of __x. |
| 197 | +_DEFAULT_FN_ATTRS static __inline__ uint64_t |
| 198 | +__gpu_match_any_u64(uint64_t __lane_mask, uint64_t __x) { |
| 199 | + // Newer targets can use the dedicated CUDA support. |
| 200 | + if (__CUDA_ARCH__ >= 700 || __nvvm_reflect("__CUDA_ARCH") >= 700) |
| 201 | + return __nvvm_match_any_sync_i64(__lane_mask, __x); |
| 202 | + |
| 203 | + uint64_t __match_mask = 0; |
| 204 | + |
| 205 | + bool __done = 0; |
| 206 | +  while (__gpu_ballot(__lane_mask, !__done)) { |
| 207 | + if (!__done) { |
| 208 | + uint64_t __first = __gpu_read_first_lane_u64(__lane_mask, __x); |
| 209 | + if (__first == __x) { |
| 210 | + __match_mask = __gpu_lane_mask(); |
| 211 | + __done = 1; |
| 212 | + } |
| 213 | + } |
| 214 | + } |
| 215 | + __gpu_sync_lane(__lane_mask); |
| 216 | + return __match_mask; |
| 217 | +} |
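Worth spelling out how the pre-sm_70 fallback works, since both `match_any` variants rely on it: each trip through the loop broadcasts the value held by the first still-searching lane, every lane holding that same value records the mask of lanes currently executing the branch and retires, and so the loop iterates once per distinct value in the warp. Below is a hedged, host-runnable simulation of that loop over 32 emulated lanes (names such as `NUM_LANES`, `done`, and `matching` are illustrative, not part of the header):

```c
/* Hypothetical 32-lane, lock-step simulation of the __gpu_match_any_u32
 * fallback loop; plain C, runs on the host. */
#include <stdint.h>
#include <stdio.h>

#define NUM_LANES 32

int main(void) {
  uint32_t x[NUM_LANES], match_mask[NUM_LANES] = {0};
  int done[NUM_LANES] = {0};

  for (int i = 0; i < NUM_LANES; ++i)
    x[i] = i % 3; /* three distinct values => three loop iterations */

  for (;;) {
    /* __gpu_ballot(__lane_mask, !__done): any lane still searching? */
    uint32_t active = 0;
    for (int i = 0; i < NUM_LANES; ++i)
      if (!done[i])
        active |= 1u << i;
    if (!active)
      break;

    /* __gpu_read_first_lane_u32: broadcast the lowest active lane's x. */
    int lead = 0;
    while (!((active >> lead) & 1u))
      ++lead;
    uint32_t first = x[lead];

    /* __gpu_lane_mask(): the lanes executing the innermost branch are
     * exactly the still-searching lanes whose value matches, and they
     * all record that mask and retire together. */
    uint32_t matching = 0;
    for (int i = 0; i < NUM_LANES; ++i)
      if (!done[i] && x[i] == first)
        matching |= 1u << i;
    for (int i = 0; i < NUM_LANES; ++i)
      if (!done[i] && x[i] == first) {
        match_mask[i] = matching;
        done[i] = 1;
      }
  }

  for (int i = 0; i < NUM_LANES; ++i)
    printf("lane %2d: x=%u match_mask=0x%08x\n", i, x[i], match_mask[i]);
  return 0;
}
```

With `x[i] = i % 3`, lanes 0, 3, 6, ... end up sharing one mask, lanes 1, 4, 7, ... another, and so on, after exactly three iterations.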
| 218 | + |
| 219 | +// Returns the current lane mask if every lane contains __x. |
| 220 | +_DEFAULT_FN_ATTRS static __inline__ uint64_t |
| 221 | +__gpu_match_all_u32(uint64_t __lane_mask, uint32_t __x) { |
| 222 | + // Newer targets can use the dedicated CUDA support. |
| 223 | +  int __predicate; |
| 224 | +  if (__CUDA_ARCH__ >= 700 || __nvvm_reflect("__CUDA_ARCH") >= 700) |
| 225 | +    return __nvvm_match_all_sync_i32p(__lane_mask, __x, &__predicate); |
| 226 | + |
| 227 | +  uint32_t __first = __gpu_read_first_lane_u32(__lane_mask, __x); |
| 228 | + uint64_t __ballot = __gpu_ballot(__lane_mask, __x == __first); |
| 229 | + return __ballot == __gpu_lane_mask() ? __gpu_lane_mask() : 0ull; |
| 230 | +} |
| 231 | + |
| 232 | +// Returns the current lane mask if every lane contains __x. |
| 233 | +_DEFAULT_FN_ATTRS static __inline__ uint64_t |
| 234 | +__gpu_match_all_u64(uint64_t __lane_mask, uint64_t __x) { |
| 235 | + // Newer targets can use the dedicated CUDA support. |
| 236 | +  int __predicate; |
| 237 | +  if (__CUDA_ARCH__ >= 700 || __nvvm_reflect("__CUDA_ARCH") >= 700) |
| 238 | +    return __nvvm_match_all_sync_i64p(__lane_mask, __x, &__predicate); |
| 239 | + |
| 240 | + uint64_t __first = __gpu_read_first_lane_u64(__lane_mask, __x); |
| 241 | + uint64_t __ballot = __gpu_ballot(__lane_mask, __x == __first); |
| 242 | + return __ballot == __gpu_lane_mask() ? __gpu_lane_mask() : 0ull; |
| 243 | +} |
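The `match_all` fallback needs no loop: broadcast the first lane's value, ballot on equality, and return the full lane mask only if every participating lane agreed. A small host-side check of those semantics, again simulating the lanes in plain C under the assumption that all 32 lanes are active (`match_all`, `NUM_LANES`, and the test vectors are illustrative, not the header's API):

```c
/* Hypothetical simulation of the __gpu_match_all_u32 fallback semantics:
 * every lane gets the full active-lane mask if all values agree, else 0. */
#include <stdint.h>
#include <stdio.h>

#define NUM_LANES 32

/* Assumes all lanes are active: lane_mask = 0xffffffff. */
static uint64_t match_all(const uint32_t x[NUM_LANES]) {
  uint64_t lane_mask = 0xffffffffu;
  uint32_t first = x[0]; /* __gpu_read_first_lane_u32 */
  uint64_t ballot = 0;   /* __gpu_ballot(lane_mask, x == first) */
  for (int i = 0; i < NUM_LANES; ++i)
    if (x[i] == first)
      ballot |= 1ull << i;
  return ballot == lane_mask ? lane_mask : 0ull;
}

int main(void) {
  uint32_t same[NUM_LANES], mixed[NUM_LANES];
  for (int i = 0; i < NUM_LANES; ++i) {
    same[i] = 42;     /* every lane agrees */
    mixed[i] = i & 1; /* lanes disagree */
  }
  printf("all equal: 0x%016llx\n", (unsigned long long)match_all(same));
  printf("mixed:     0x%016llx\n", (unsigned long long)match_all(mixed));
  return 0;
}
```

The first call prints the full 32-lane mask; the second prints zero.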
| 244 | + |
171 | 245 | // Returns true if the flat pointer points to CUDA 'shared' memory. |
172 | 246 | _DEFAULT_FN_ATTRS static __inline__ bool __gpu_is_ptr_local(void *ptr) { |
173 | 247 | return __nvvm_isspacep_shared(ptr); |