@@ -374,11 +374,9 @@ bool AtomicInfo::emitMemSetZeroIfNecessary() const {
 }
 
 static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
-                              Address Dest, Address Ptr,
-                              Address Val1, Address Val2,
-                              Address ExpectedResult,
-                              uint64_t Size,
-                              llvm::AtomicOrdering SuccessOrder,
+                              Address Dest, Address Ptr, Address Val1,
+                              Address Val2, Address ExpectedResult,
+                              uint64_t Size, llvm::AtomicOrdering SuccessOrder,
                               llvm::AtomicOrdering FailureOrder,
                               llvm::SyncScope::ID Scope) {
   // Note that cmpxchg doesn't support weak cmpxchg, at least at the moment.
@@ -412,34 +410,31 @@ static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
 
   CGF.Builder.SetInsertPoint(StoreExpectedBB);
   // Update the memory at Expected with Old's value.
-llvm::Type *ExpectedType = ExpectedResult.getElementType();
-const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
-uint64_t ExpectedSizeInBytes = DL.getTypeStoreSize(ExpectedType);
-
-if (ExpectedSizeInBytes == Size) {
-  // Sizes match: store directly
-  auto *I = CGF.Builder.CreateStore(Old, ExpectedResult);
-  CGF.addInstToCurrentSourceAtom(I, Old);
-} else {
-  // store only the first ExpectedSizeInBytes bytes of Old
-  llvm::Type *OldType = Old->getType();
-
-  // Allocate temporary storage for Old value
-  Address OldTmp = CGF.CreateTempAlloca(OldType, Ptr.getAlignment(), "old.tmp");
-
-  // Store Old into this temporary
-  auto *I = CGF.Builder.CreateStore(Old, OldTmp);
-  CGF.addInstToCurrentSourceAtom(I, Old);
-
-  // Perform memcpy for first ExpectedSizeInBytes bytes
-  CGF.Builder.CreateMemCpy(
-      ExpectedResult,
-      OldTmp,
-      ExpectedSizeInBytes,
-      /*isVolatile=*/false);
-}
+  llvm::Type *ExpectedType = ExpectedResult.getElementType();
+  const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
+  uint64_t ExpectedSizeInBytes = DL.getTypeStoreSize(ExpectedType);
+
+  if (ExpectedSizeInBytes == Size) {
+    // Sizes match: store directly
+    auto *I = CGF.Builder.CreateStore(Old, ExpectedResult);
+    CGF.addInstToCurrentSourceAtom(I, Old);
+  } else {
+    // store only the first ExpectedSizeInBytes bytes of Old
+    llvm::Type *OldType = Old->getType();
+
+    // Allocate temporary storage for Old value
+    Address OldTmp =
+        CGF.CreateTempAlloca(OldType, Ptr.getAlignment(), "old.tmp");
+
+    // Store Old into this temporary
+    auto *I = CGF.Builder.CreateStore(Old, OldTmp);
+    CGF.addInstToCurrentSourceAtom(I, Old);
+
+    // Perform memcpy for first ExpectedSizeInBytes bytes
+    CGF.Builder.CreateMemCpy(ExpectedResult, OldTmp, ExpectedSizeInBytes,
+                             /*isVolatile=*/false);
+  }
 
-
   // Finally, branch to the exit point.
   CGF.Builder.CreateBr(ContinueBB);
 
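
What the block above implements: the value observed by the cmpxchg is copied back into the caller's expected object, but only up to that object's store size when the atomic payload is wider (for example, a padded or size-promoted atomic type). A rough source-level sketch of that rule follows; the helper name and void-pointer signature are illustrative and not part of the patch.

#include <cstddef>
#include <cstring>

// Copy the observed value back into Expected. If the atomic payload is wider
// than the expected object, only the leading ExpectedSizeInBytes bytes belong
// to it, so padding bytes are never written back into the user's object.
void storeExpected(void *Expected, const void *OldPayload,
                   std::size_t ExpectedSizeInBytes, std::size_t AtomicSize) {
  if (ExpectedSizeInBytes == AtomicSize)
    std::memcpy(Expected, OldPayload, AtomicSize);          // sizes match
  else
    std::memcpy(Expected, OldPayload, ExpectedSizeInBytes); // leading bytes only
}
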
@@ -451,14 +446,11 @@ if (ExpectedSizeInBytes == Size) {
 /// Given an ordering required on success, emit all possible cmpxchg
 /// instructions to cope with the provided (but possibly only dynamically known)
 /// FailureOrder.
-static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
-                                        bool IsWeak, Address Dest, Address Ptr,
-                                        Address Val1, Address Val2,
-                                        Address ExpectedResult,
-                                        llvm::Value *FailureOrderVal,
-                                        uint64_t Size,
-                                        llvm::AtomicOrdering SuccessOrder,
-                                        llvm::SyncScope::ID Scope) {
+static void emitAtomicCmpXchgFailureSet(
+    CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak, Address Dest, Address Ptr,
+    Address Val1, Address Val2, Address ExpectedResult,
+    llvm::Value *FailureOrderVal, uint64_t Size,
+    llvm::AtomicOrdering SuccessOrder, llvm::SyncScope::ID Scope) {
   llvm::AtomicOrdering FailureOrder;
   if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
     auto FOS = FO->getSExtValue();
@@ -485,8 +477,8 @@ static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
     // success argument". This condition has been lifted and the only
     // precondition is 31.7.2.18. Effectively treat this as a DR and skip
     // language version checks.
-    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, ExpectedResult, Size, SuccessOrder,
-                      FailureOrder, Scope);
+    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, ExpectedResult,
+                      Size, SuccessOrder, FailureOrder, Scope);
     return;
   }
 
@@ -510,18 +502,19 @@ static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
 
   // Emit all the different atomics
   CGF.Builder.SetInsertPoint(MonotonicBB);
-  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, ExpectedResult,
-                    Size, SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope);
+  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, ExpectedResult, Size,
+                    SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope);
   CGF.Builder.CreateBr(ContBB);
 
   CGF.Builder.SetInsertPoint(AcquireBB);
-  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, ExpectedResult, Size, SuccessOrder,
-                    llvm::AtomicOrdering::Acquire, Scope);
+  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, ExpectedResult, Size,
+                    SuccessOrder, llvm::AtomicOrdering::Acquire, Scope);
   CGF.Builder.CreateBr(ContBB);
 
   CGF.Builder.SetInsertPoint(SeqCstBB);
-  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, ExpectedResult, Size, SuccessOrder,
-                    llvm::AtomicOrdering::SequentiallyConsistent, Scope);
+  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, ExpectedResult, Size,
+                    SuccessOrder, llvm::AtomicOrdering::SequentiallyConsistent,
+                    Scope);
   CGF.Builder.CreateBr(ContBB);
 
   CGF.Builder.SetInsertPoint(ContBB);
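
The Monotonic, Acquire and SeqCst blocks above are emitted only when the failure ordering is not a compile-time constant. An illustrative caller that reaches this path, written against the standard __atomic_compare_exchange_n builtin and not taken from the patch:

// The failure ordering arrives as a runtime value here, so the front end
// cannot fold it and instead branches over the valid failure orderings.
bool cmpxchgRuntimeFailureOrder(int *Ptr, int *Expected, int Desired,
                                int FailureOrder) {
  return __atomic_compare_exchange_n(Ptr, Expected, Desired, /*weak=*/false,
                                     __ATOMIC_SEQ_CST, FailureOrder);
}
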
@@ -553,9 +546,9 @@ static llvm::Value *EmitPostAtomicMinMax(CGBuilderTy &Builder,
 
 static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
                          Address Ptr, Address Val1, Address Val2,
-                         Address ExpectedResult,
-                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
-                         uint64_t Size, llvm::AtomicOrdering Order,
+                         Address ExpectedResult, llvm::Value *IsWeak,
+                         llvm::Value *FailureOrder, uint64_t Size,
+                         llvm::AtomicOrdering Order,
                          llvm::SyncScope::ID Scope) {
   llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
   bool PostOpMinMax = false;
@@ -570,21 +563,24 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
   case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
   case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
     emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
-                                ExpectedResult, FailureOrder, Size, Order, Scope);
+                                ExpectedResult, FailureOrder, Size, Order,
+                                Scope);
     return;
   case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
   case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
   case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
     emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
-                                ExpectedResult, FailureOrder, Size, Order, Scope);
+                                ExpectedResult, FailureOrder, Size, Order,
+                                Scope);
     return;
   case AtomicExpr::AO__atomic_compare_exchange:
   case AtomicExpr::AO__atomic_compare_exchange_n:
   case AtomicExpr::AO__scoped_atomic_compare_exchange:
   case AtomicExpr::AO__scoped_atomic_compare_exchange_n: {
    if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
      emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
-                                 Val1, Val2, ExpectedResult, FailureOrder, Size, Order, Scope);
+                                 Val1, Val2, ExpectedResult, FailureOrder,
+                                 Size, Order, Scope);
    } else {
      // Create all the relevant BB's
      llvm::BasicBlock *StrongBB =
@@ -598,12 +594,14 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
 
       CGF.Builder.SetInsertPoint(StrongBB);
       emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
-                                  ExpectedResult, FailureOrder, Size, Order, Scope);
+                                  ExpectedResult, FailureOrder, Size, Order,
+                                  Scope);
       CGF.Builder.CreateBr(ContBB);
 
       CGF.Builder.SetInsertPoint(WeakBB);
       emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
-                                  ExpectedResult, FailureOrder, Size, Order, Scope);
+                                  ExpectedResult, FailureOrder, Size, Order,
+                                  Scope);
       CGF.Builder.CreateBr(ContBB);
 
       CGF.Builder.SetInsertPoint(ContBB);
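
The StrongBB and WeakBB blocks above cover the case where the weak flag is only known at run time, which the generic __atomic_compare_exchange forms allow. An illustrative caller, not taken from the patch:

// Weak is a runtime value, so CodeGen emits both a strong and a weak cmpxchg
// and selects between them with a branch.
bool cmpxchgRuntimeWeakFlag(int *Ptr, int *Expected, int Desired, bool Weak) {
  return __atomic_compare_exchange_n(Ptr, Expected, Desired, Weak,
                                     __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}
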
@@ -813,10 +811,9 @@ EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
 
 static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
                          Address Ptr, Address Val1, Address Val2,
-                         Address OriginalVal1,
-                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
-                         uint64_t Size, llvm::AtomicOrdering Order,
-                         llvm::Value *Scope) {
+                         Address OriginalVal1, llvm::Value *IsWeak,
+                         llvm::Value *FailureOrder, uint64_t Size,
+                         llvm::AtomicOrdering Order, llvm::Value *Scope) {
   auto ScopeModel = Expr->getScopeModel();
 
   // LLVM atomic instructions always have sync scope. If clang atomic
@@ -833,8 +830,8 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
                                                    Order, CGF.getLLVMContext());
     else
       SS = llvm::SyncScope::System;
-    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak, FailureOrder, Size,
-                 Order, SS);
+    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+                 FailureOrder, Size, Order, SS);
     return;
   }
 
@@ -843,8 +840,8 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
     auto SCID = CGF.getTargetHooks().getLLVMSyncScopeID(
         CGF.CGM.getLangOpts(), ScopeModel->map(SC->getZExtValue()),
         Order, CGF.CGM.getLLVMContext());
-    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak, FailureOrder, Size,
-                 Order, SCID);
+    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+                 FailureOrder, Size, Order, SCID);
     return;
   }
 
@@ -869,12 +866,11 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
     SI->addCase(Builder.getInt32(S), B);
 
     Builder.SetInsertPoint(B);
-    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak, FailureOrder, Size,
-                 Order,
-                 CGF.getTargetHooks().getLLVMSyncScopeID(CGF.CGM.getLangOpts(),
-                                                         ScopeModel->map(S),
-                                                         Order,
-                                                         CGF.getLLVMContext()));
+    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+                 FailureOrder, Size, Order,
+                 CGF.getTargetHooks().getLLVMSyncScopeID(
+                     CGF.CGM.getLangOpts(), ScopeModel->map(S), Order,
+                     CGF.getLLVMContext()));
     Builder.CreateBr(ContBB);
   }
 
@@ -1319,30 +1315,32 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
     if (llvm::isValidAtomicOrderingCABI(ord))
       switch ((llvm::AtomicOrderingCABI)ord) {
       case llvm::AtomicOrderingCABI::relaxed:
-        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak, OrderFail, Size,
-                     llvm::AtomicOrdering::Monotonic, Scope);
+        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+                     OrderFail, Size, llvm::AtomicOrdering::Monotonic, Scope);
         break;
       case llvm::AtomicOrderingCABI::consume:
      case llvm::AtomicOrderingCABI::acquire:
        if (IsStore)
          break; // Avoid crashing on code with undefined behavior
-        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak, OrderFail, Size,
-                     llvm::AtomicOrdering::Acquire, Scope);
+        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+                     OrderFail, Size, llvm::AtomicOrdering::Acquire, Scope);
        break;
      case llvm::AtomicOrderingCABI::release:
        if (IsLoad)
          break; // Avoid crashing on code with undefined behavior
-        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak, OrderFail, Size,
-                     llvm::AtomicOrdering::Release, Scope);
+        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+                     OrderFail, Size, llvm::AtomicOrdering::Release, Scope);
        break;
      case llvm::AtomicOrderingCABI::acq_rel:
        if (IsLoad || IsStore)
          break; // Avoid crashing on code with undefined behavior
-        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak, OrderFail, Size,
-                     llvm::AtomicOrdering::AcquireRelease, Scope);
+        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+                     OrderFail, Size, llvm::AtomicOrdering::AcquireRelease,
+                     Scope);
        break;
      case llvm::AtomicOrderingCABI::seq_cst:
-        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak, OrderFail, Size,
+        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+                     OrderFail, Size,
                      llvm::AtomicOrdering::SequentiallyConsistent, Scope);
        break;
      }
@@ -1378,13 +1376,13 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
 
   // Emit all the different atomics
   Builder.SetInsertPoint(MonotonicBB);
-  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak, OrderFail, Size,
-               llvm::AtomicOrdering::Monotonic, Scope);
+  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak, OrderFail,
+               Size, llvm::AtomicOrdering::Monotonic, Scope);
   Builder.CreateBr(ContBB);
   if (!IsStore) {
     Builder.SetInsertPoint(AcquireBB);
-    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak, OrderFail, Size,
-                 llvm::AtomicOrdering::Acquire, Scope);
+    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+                 OrderFail, Size, llvm::AtomicOrdering::Acquire, Scope);
     Builder.CreateBr(ContBB);
     SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
                 AcquireBB);
@@ -1393,23 +1391,23 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
   }
   if (!IsLoad) {
     Builder.SetInsertPoint(ReleaseBB);
-    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak, OrderFail, Size,
-                 llvm::AtomicOrdering::Release, Scope);
+    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+                 OrderFail, Size, llvm::AtomicOrdering::Release, Scope);
     Builder.CreateBr(ContBB);
     SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::release),
                 ReleaseBB);
   }
   if (!IsLoad && !IsStore) {
     Builder.SetInsertPoint(AcqRelBB);
-    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak, OrderFail, Size,
-                 llvm::AtomicOrdering::AcquireRelease, Scope);
+    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+                 OrderFail, Size, llvm::AtomicOrdering::AcquireRelease, Scope);
     Builder.CreateBr(ContBB);
     SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acq_rel),
                 AcqRelBB);
   }
   Builder.SetInsertPoint(SeqCstBB);
-  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak, OrderFail, Size,
-               llvm::AtomicOrdering::SequentiallyConsistent, Scope);
+  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak, OrderFail,
+               Size, llvm::AtomicOrdering::SequentiallyConsistent, Scope);
   Builder.CreateBr(ContBB);
   SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
               SeqCstBB);
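
The ordering switch above (MonotonicBB, AcquireBB, ReleaseBB, AcqRelBB, SeqCstBB) is reached only when the ordering operand is not a compile-time constant; constant orderings are folded by the switch over llvm::AtomicOrderingCABI in the earlier hunk. An illustrative caller that forces this runtime dispatch, not taken from the patch:

// Order is a runtime value, so EmitAtomicExpr emits one basic block per valid
// ordering and switches on the argument at run time.
int loadWithRuntimeOrder(int *Ptr, int Order) {
  return __atomic_load_n(Ptr, Order);
}
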