@@ -244,8 +244,94 @@ void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
   }
 }
 
+static void emitAtomicCmpXchg(CIRGenFunction &cgf, AtomicExpr *e, bool isWeak,
+                              Address dest, Address ptr, Address val1,
+                              Address val2, uint64_t size,
+                              cir::MemOrder successOrder,
+                              cir::MemOrder failureOrder) {
+  mlir::Location loc = cgf.getLoc(e->getSourceRange());
+
+  CIRGenBuilderTy &builder = cgf.getBuilder();
+  mlir::Value expected = builder.createLoad(loc, val1);
+  mlir::Value desired = builder.createLoad(loc, val2);
+
+  auto cmpxchg = cir::AtomicCmpXchg::create(
+      builder, loc, expected.getType(), builder.getBoolTy(), ptr.getPointer(),
+      expected, desired,
+      cir::MemOrderAttr::get(&cgf.getMLIRContext(), successOrder),
+      cir::MemOrderAttr::get(&cgf.getMLIRContext(), failureOrder),
+      builder.getI64IntegerAttr(ptr.getAlignment().getAsAlign().value()));
+
+  cmpxchg.setIsVolatile(e->isVolatile());
+  cmpxchg.setWeak(isWeak);
+
+  mlir::Value failed = builder.createNot(cmpxchg.getSuccess());
+  cir::IfOp::create(builder, loc, failed, /*withElseRegion=*/false,
+                    [&](mlir::OpBuilder &, mlir::Location) {
+                      auto ptrTy = mlir::cast<cir::PointerType>(
+                          val1.getPointer().getType());
+                      if (val1.getElementType() != ptrTy.getPointee()) {
+                        val1 = val1.withPointer(builder.createPtrBitcast(
+                            val1.getPointer(), val1.getElementType()));
+                      }
+                      builder.createStore(loc, cmpxchg.getOld(), val1);
+                      builder.createYield(loc);
+                    });
+
+  // Update the memory at Dest with Success's value.
+  cgf.emitStoreOfScalar(cmpxchg.getSuccess(),
+                        cgf.makeAddrLValue(dest, e->getType()),
+                        /*isInit=*/false);
+}
+
+static void emitAtomicCmpXchgFailureSet(CIRGenFunction &cgf, AtomicExpr *e,
+                                        bool isWeak, Address dest, Address ptr,
+                                        Address val1, Address val2,
+                                        Expr *failureOrderExpr, uint64_t size,
+                                        cir::MemOrder successOrder) {
+  Expr::EvalResult failureOrderEval;
+  if (failureOrderExpr->EvaluateAsInt(failureOrderEval, cgf.getContext())) {
+    uint64_t failureOrderInt = failureOrderEval.Val.getInt().getZExtValue();
+
+    cir::MemOrder failureOrder;
+    if (!cir::isValidCIRAtomicOrderingCABI(failureOrderInt)) {
+      failureOrder = cir::MemOrder::Relaxed;
+    } else {
+      switch ((cir::MemOrder)failureOrderInt) {
+      case cir::MemOrder::Relaxed:
+        // 31.7.2.18: "The failure argument shall not be memory_order_release
+        // nor memory_order_acq_rel". Fallback to monotonic.
+      case cir::MemOrder::Release:
+      case cir::MemOrder::AcquireRelease:
+        failureOrder = cir::MemOrder::Relaxed;
+        break;
+      case cir::MemOrder::Consume:
+      case cir::MemOrder::Acquire:
+        failureOrder = cir::MemOrder::Acquire;
+        break;
+      case cir::MemOrder::SequentiallyConsistent:
+        failureOrder = cir::MemOrder::SequentiallyConsistent;
+        break;
+      }
+    }
+
+    // Prior to C++17, "the failure argument shall be no stronger than the
+    // success argument". This condition has been lifted and the only
+    // precondition is 31.7.2.18. Effectively treat this as a DR and skip
+    // language version checks.
+    emitAtomicCmpXchg(cgf, e, isWeak, dest, ptr, val1, val2, size,
+                      successOrder, failureOrder);
+    return;
+  }
+
+  assert(!cir::MissingFeatures::atomicExpr());
+  cgf.cgm.errorNYI(e->getSourceRange(),
+                   "emitAtomicCmpXchgFailureSet: non-constant failure order");
+}
+
 static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
-                         Address ptr, Address val1, uint64_t size,
+                         Address ptr, Address val1, Address val2,
+                         Expr *isWeakExpr, Expr *failureOrderExpr, int64_t size,
                          cir::MemOrder order) {
   std::unique_ptr<AtomicScopeModel> scopeModel = expr->getScopeModel();
   if (scopeModel) {
@@ -264,6 +350,30 @@ static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
   case AtomicExpr::AO__c11_atomic_init:
     llvm_unreachable("already handled!");
 
+  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
+    emitAtomicCmpXchgFailureSet(cgf, expr, /*isWeak=*/false, dest, ptr, val1,
+                                val2, failureOrderExpr, size, order);
+    return;
+
+  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
+    emitAtomicCmpXchgFailureSet(cgf, expr, /*isWeak=*/true, dest, ptr, val1,
+                                val2, failureOrderExpr, size, order);
+    return;
+
+  case AtomicExpr::AO__atomic_compare_exchange:
+  case AtomicExpr::AO__atomic_compare_exchange_n: {
+    bool isWeak = false;
+    if (isWeakExpr->EvaluateAsBooleanCondition(isWeak, cgf.getContext())) {
+      emitAtomicCmpXchgFailureSet(cgf, expr, isWeak, dest, ptr, val1, val2,
+                                  failureOrderExpr, size, order);
+    } else {
+      assert(!cir::MissingFeatures::atomicExpr());
+      cgf.cgm.errorNYI(expr->getSourceRange(),
+                       "emitAtomicOp: non-constant isWeak");
+    }
+    return;
+  }
+
   case AtomicExpr::AO__c11_atomic_load:
   case AtomicExpr::AO__atomic_load_n:
   case AtomicExpr::AO__atomic_load: {
@@ -292,16 +402,12 @@ static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
 
   case AtomicExpr::AO__opencl_atomic_init:
 
-  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
   case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
   case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
 
-  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
   case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
   case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
 
-  case AtomicExpr::AO__atomic_compare_exchange:
-  case AtomicExpr::AO__atomic_compare_exchange_n:
   case AtomicExpr::AO__scoped_atomic_compare_exchange:
   case AtomicExpr::AO__scoped_atomic_compare_exchange_n:
 
@@ -421,7 +527,11 @@ RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
   if (const auto *ty = atomicTy->getAs<AtomicType>())
     memTy = ty->getValueType();
 
+  Expr *isWeakExpr = nullptr;
+  Expr *orderFailExpr = nullptr;
+
   Address val1 = Address::invalid();
+  Address val2 = Address::invalid();
   Address dest = Address::invalid();
   Address ptr = emitPointerWithAlignment(e->getPtr());
 
@@ -462,6 +572,24 @@ RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
     val1 = emitPointerWithAlignment(e->getVal1());
     break;
 
+  case AtomicExpr::AO__atomic_compare_exchange:
+  case AtomicExpr::AO__atomic_compare_exchange_n:
+  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
+  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
+    val1 = emitPointerWithAlignment(e->getVal1());
+    if (e->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
+        e->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
+      val2 = emitPointerWithAlignment(e->getVal2());
+    else
+      val2 = emitValToTemp(*this, e->getVal2());
+    orderFailExpr = e->getOrderFail();
+    if (e->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
+        e->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
+        e->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange_n ||
+        e->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
+      isWeakExpr = e->getWeak();
+    break;
+
   case AtomicExpr::AO__atomic_store_n:
   case AtomicExpr::AO__c11_atomic_store:
     val1 = emitValToTemp(*this, e->getVal1());
@@ -484,6 +612,8 @@ RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
   if (dest.isValid()) {
     if (shouldCastToIntPtrTy)
       dest = atomics.castToAtomicIntPointer(dest);
+  } else if (e->isCmpXChg()) {
+    dest = createMemTemp(resultTy, getLoc(e->getSourceRange()), "cmpxchg.bool");
   } else if (!resultTy->isVoidType()) {
     dest = atomics.createTempAlloca();
     if (shouldCastToIntPtrTy)
@@ -530,8 +660,8 @@ RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
     // value, but it's hard to enforce that in general.
     uint64_t ord = orderConst.Val.getInt().getZExtValue();
     if (isMemOrderValid(ord, isStore, isLoad))
-      emitAtomicOp(*this, e, dest, ptr, val1, size,
-                   static_cast<cir::MemOrder>(ord));
+      emitAtomicOp(*this, e, dest, ptr, val1, val2, isWeakExpr, orderFailExpr,
+                   size, static_cast<cir::MemOrder>(ord));
   } else {
     assert(!cir::MissingFeatures::atomicExpr());
     cgm.errorNYI(e->getSourceRange(), "emitAtomicExpr: dynamic memory order");
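
For reference, below is a minimal C sketch (not part of this commit) of the builtins whose lowering the patch adds. The file name and the driver invocation in the comment are assumptions based on the usual ClangIR flags, not something stated in the change.

// cmpxchg_demo.c -- hypothetical standalone example, not from this commit.
// Assumed invocation for inspecting the generated ClangIR:
//   clang -fclangir -emit-cir cmpxchg_demo.c
#include <stdatomic.h>
#include <stdbool.h>

// Lowered via AO__c11_atomic_compare_exchange_strong. The constant failure
// order is checked by emitAtomicCmpXchgFailureSet and clamped to a legal
// order if needed before emitAtomicCmpXchg builds the cmpxchg op.
bool bump_if_equal(_Atomic int *counter, int expected_value) {
  int expected = expected_value;
  return atomic_compare_exchange_strong_explicit(
      counter, &expected, expected_value + 1, memory_order_acq_rel,
      memory_order_acquire);
}

// Lowered via AO__atomic_compare_exchange_n. The weak argument must fold to
// a constant; a non-constant value currently reaches the errorNYI path in
// emitAtomicOp.
bool set_if_zero(int *flag) {
  int expected = 0;
  return __atomic_compare_exchange_n(flag, &expected, 1, /*weak=*/true,
                                     __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

On failure, both builtins write the observed value back through the expected pointer; that is what the cir.if over the negated success flag implements in emitAtomicCmpXchg.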