     get_scope,
 )
 from .spv_atomic_fn_declarations import (
+    _SUPPORT_CONVERGENT,
     get_or_insert_atomic_load_fn,
     get_or_insert_spv_atomic_compare_exchange_fn,
     get_or_insert_spv_atomic_exchange_fn,
@@ -114,19 +115,29 @@ def gen(context, builder, sig, args):
             mangled_fn_name,
         )
         func.calling_convention = CC_SPIR_FUNC
-        spirv_memory_semantics_mask = get_memory_semantics_mask(
-            atomic_ref_ty.memory_order
-        )
-        spirv_scope = get_scope(atomic_ref_ty.memory_scope)
+        if _SUPPORT_CONVERGENT:
+            func.attributes.add("convergent")
+        func.attributes.add("nounwind")
 
         fn_args = [
             builder.extract_value(args[0], data_attr_pos),
-            context.get_constant(types.int32, spirv_scope),
-            context.get_constant(types.int32, spirv_memory_semantics_mask),
+            context.get_constant(
+                types.int32, get_scope(atomic_ref_ty.memory_scope)
+            ),
+            context.get_constant(
+                types.int32,
+                get_memory_semantics_mask(atomic_ref_ty.memory_order),
+            ),
             args[1],
         ]
 
-        return builder.call(func, fn_args)
+        callinst = builder.call(func, fn_args)
+
+        if _SUPPORT_CONVERGENT:
+            callinst.attributes.add("convergent")
+        callinst.attributes.add("nounwind")
+
+        return callinst
 
     return sig, gen
 
@@ -271,6 +282,10 @@ def _intrinsic_load_gen(context, builder, sig, args):
             fn_args,
         )
 
+        if _SUPPORT_CONVERGENT:
+            ret_val.attributes.add("convergent")
+        ret_val.attributes.add("nounwind")
+
         if sig.args[0].dtype == types.float32:
             ret_val = builder.bitcast(ret_val, llvmir.FloatType())
         elif sig.args[0].dtype == types.float64:
@@ -326,13 +341,17 @@ def _intrinsic_store_gen(context, builder, sig, args):
             store_arg,
         ]
 
-        builder.call(
+        callinst = builder.call(
             get_or_insert_spv_atomic_store_fn(
                 context, builder.module, atomic_ref_ty
            ),
             atomic_store_fn_args,
         )
 
+        if _SUPPORT_CONVERGENT:
+            callinst.attributes.add("convergent")
+        callinst.attributes.add("nounwind")
+
     return sig, _intrinsic_store_gen
 
 
@@ -388,6 +407,10 @@ def _intrinsic_exchange_gen(context, builder, sig, args):
             atomic_exchange_fn_args,
         )
 
+        if _SUPPORT_CONVERGENT:
+            ret_val.attributes.add("convergent")
+        ret_val.attributes.add("nounwind")
+
         if sig.args[0].dtype == types.float32:
             ret_val = builder.bitcast(ret_val, llvmir.FloatType())
         elif sig.args[0].dtype == types.float64:
@@ -478,6 +501,10 @@ def _intrinsic_compare_exchange_gen(context, builder, sig, args):
             atomic_cmpexchg_fn_args,
        )
 
+        if _SUPPORT_CONVERGENT:
+            ret_val.attributes.add("convergent")
+        ret_val.attributes.add("nounwind")
+
         # compare_exchange returns the old value stored in AtomicRef object.
         # If the return value is same as expected, then compare_exchange
         # succeeded in replacing AtomicRef object with desired.
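The diff applies one pattern throughout: every SPIR-V atomic built-in declaration and every call to it is tagged with the "nounwind" attribute, and additionally with "convergent" when the installed llvmlite accepts that attribute (the _SUPPORT_CONVERGENT flag imported from spv_atomic_fn_declarations, whose definition is not part of this diff). The snippet below is a minimal, self-contained llvmlite sketch of that pattern, not the numba-dpex code itself; the version probe, the demo function names, and the constant call arguments are illustrative assumptions.

# Hedged sketch: conditionally attach "convergent"/"nounwind" to a function
# declaration and to its call instruction with llvmlite. The probe below is an
# assumption about how a _SUPPORT_CONVERGENT-style flag could be derived; the
# real flag lives in spv_atomic_fn_declarations.py (not shown in this diff).
from llvmlite import ir as llvmir

try:
    # Older llvmlite releases do not know "convergent" and raise ValueError.
    llvmir.FunctionAttributes("convergent")
    _SUPPORT_CONVERGENT = True
except ValueError:
    _SUPPORT_CONVERGENT = False

module = llvmir.Module(name="atomic_attr_demo")
i32 = llvmir.IntType(32)

# Stand-in for a mangled SPIR-V atomic built-in declaration (hypothetical name).
atomic_fn = llvmir.Function(
    module,
    llvmir.FunctionType(i32, [i32.as_pointer(), i32, i32, i32]),
    name="__demo_spirv_AtomicLoad",
)
atomic_fn.calling_convention = "spir_func"  # mirrors CC_SPIR_FUNC in the patch
if _SUPPORT_CONVERGENT:
    atomic_fn.attributes.add("convergent")
atomic_fn.attributes.add("nounwind")

# A trivial caller so there is a call instruction to annotate.
caller = llvmir.Function(
    module,
    llvmir.FunctionType(i32, [i32.as_pointer(), i32]),
    name="demo_caller",
)
builder = llvmir.IRBuilder(caller.append_basic_block("entry"))
ptr, val = caller.args
# i32(1) / i32(0x2) stand in for the scope and memory-semantics constants.
callinst = builder.call(atomic_fn, [ptr, i32(1), i32(0x2), val])
if _SUPPORT_CONVERGENT:
    callinst.attributes.add("convergent")
callinst.attributes.add("nounwind")
builder.ret(callinst)

print(module)  # the emitted IR shows the attributes on both declaration and call

Guarding only "convergent" behind the flag matches the diff: "nounwind" has long been accepted by llvmlite and is added unconditionally, while older llvmlite releases reject "convergent" and would otherwise raise an error during kernel codegen.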