148 changes: 88 additions & 60 deletions clang/lib/CodeGen/CGAtomic.cpp
@@ -374,10 +374,9 @@ bool AtomicInfo::emitMemSetZeroIfNecessary() const {
}

static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
Address Dest, Address Ptr,
Address Val1, Address Val2,
uint64_t Size,
llvm::AtomicOrdering SuccessOrder,
Address Dest, Address Ptr, Address Val1,
Address Val2, Address ExpectedResult,
uint64_t Size, llvm::AtomicOrdering SuccessOrder,
llvm::AtomicOrdering FailureOrder,
llvm::SyncScope::ID Scope) {
// Note that cmpxchg doesn't support weak cmpxchg, at least at the moment.
@@ -411,8 +410,30 @@ static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,

CGF.Builder.SetInsertPoint(StoreExpectedBB);
// Update the memory at Expected with Old's value.
auto *I = CGF.Builder.CreateStore(Old, Val1);
CGF.addInstToCurrentSourceAtom(I, Old);
llvm::Type *ExpectedType = ExpectedResult.getElementType();
const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
uint64_t ExpectedSizeInBytes = DL.getTypeStoreSize(ExpectedType);

if (ExpectedSizeInBytes == Size) {
// Sizes match: store directly
auto *I = CGF.Builder.CreateStore(Old, ExpectedResult);
CGF.addInstToCurrentSourceAtom(I, Old);
} else {
    // Sizes differ: store only the first ExpectedSizeInBytes bytes of Old.
llvm::Type *OldType = Old->getType();

// Allocate temporary storage for Old value
Address OldTmp =
CGF.CreateTempAlloca(OldType, Ptr.getAlignment(), "old.tmp");

// Store Old into this temporary
auto *I = CGF.Builder.CreateStore(Old, OldTmp);
CGF.addInstToCurrentSourceAtom(I, Old);

    // Copy only the first ExpectedSizeInBytes bytes into ExpectedResult.
CGF.Builder.CreateMemCpy(ExpectedResult, OldTmp, ExpectedSizeInBytes,
/*isVolatile=*/false);
}

// Finally, branch to the exit point.
CGF.Builder.CreateBr(ContinueBB);
@@ -425,13 +446,11 @@ static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
/// Given an ordering required on success, emit all possible cmpxchg
/// instructions to cope with the provided (but possibly only dynamically known)
/// FailureOrder.
static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
bool IsWeak, Address Dest, Address Ptr,
Address Val1, Address Val2,
llvm::Value *FailureOrderVal,
uint64_t Size,
llvm::AtomicOrdering SuccessOrder,
llvm::SyncScope::ID Scope) {
static void emitAtomicCmpXchgFailureSet(
CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak, Address Dest, Address Ptr,
Address Val1, Address Val2, Address ExpectedResult,
llvm::Value *FailureOrderVal, uint64_t Size,
llvm::AtomicOrdering SuccessOrder, llvm::SyncScope::ID Scope) {
llvm::AtomicOrdering FailureOrder;
if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
auto FOS = FO->getSExtValue();
@@ -458,8 +477,8 @@ static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
// success argument". This condition has been lifted and the only
// precondition is 31.7.2.18. Effectively treat this as a DR and skip
// language version checks.
emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
FailureOrder, Scope);
emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, ExpectedResult,
Size, SuccessOrder, FailureOrder, Scope);
return;
}

@@ -483,18 +502,19 @@ static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,

// Emit all the different atomics
CGF.Builder.SetInsertPoint(MonotonicBB);
emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
Size, SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope);
emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, ExpectedResult, Size,
SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope);
CGF.Builder.CreateBr(ContBB);

CGF.Builder.SetInsertPoint(AcquireBB);
emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
llvm::AtomicOrdering::Acquire, Scope);
emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, ExpectedResult, Size,
SuccessOrder, llvm::AtomicOrdering::Acquire, Scope);
CGF.Builder.CreateBr(ContBB);

CGF.Builder.SetInsertPoint(SeqCstBB);
emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
llvm::AtomicOrdering::SequentiallyConsistent, Scope);
emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, ExpectedResult, Size,
SuccessOrder, llvm::AtomicOrdering::SequentiallyConsistent,
Scope);
CGF.Builder.CreateBr(ContBB);

CGF.Builder.SetInsertPoint(ContBB);
@@ -526,8 +546,9 @@ static llvm::Value *EmitPostAtomicMinMax(CGBuilderTy &Builder,

static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
Address Ptr, Address Val1, Address Val2,
llvm::Value *IsWeak, llvm::Value *FailureOrder,
uint64_t Size, llvm::AtomicOrdering Order,
Address ExpectedResult, llvm::Value *IsWeak,
llvm::Value *FailureOrder, uint64_t Size,
llvm::AtomicOrdering Order,
llvm::SyncScope::ID Scope) {
llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
bool PostOpMinMax = false;
@@ -542,21 +563,24 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
FailureOrder, Size, Order, Scope);
ExpectedResult, FailureOrder, Size, Order,
Scope);
return;
case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
FailureOrder, Size, Order, Scope);
ExpectedResult, FailureOrder, Size, Order,
Scope);
return;
case AtomicExpr::AO__atomic_compare_exchange:
case AtomicExpr::AO__atomic_compare_exchange_n:
case AtomicExpr::AO__scoped_atomic_compare_exchange:
case AtomicExpr::AO__scoped_atomic_compare_exchange_n: {
if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
Val1, Val2, FailureOrder, Size, Order, Scope);
Val1, Val2, ExpectedResult, FailureOrder,
Size, Order, Scope);
} else {
// Create all the relevant BB's
llvm::BasicBlock *StrongBB =
@@ -570,12 +594,14 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,

CGF.Builder.SetInsertPoint(StrongBB);
emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
FailureOrder, Size, Order, Scope);
ExpectedResult, FailureOrder, Size, Order,
Scope);
CGF.Builder.CreateBr(ContBB);

CGF.Builder.SetInsertPoint(WeakBB);
emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
FailureOrder, Size, Order, Scope);
ExpectedResult, FailureOrder, Size, Order,
Scope);
CGF.Builder.CreateBr(ContBB);

CGF.Builder.SetInsertPoint(ContBB);
@@ -785,9 +811,9 @@ EmitValToTemp(CodeGenFunction &CGF, Expr *E) {

static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
Address Ptr, Address Val1, Address Val2,
llvm::Value *IsWeak, llvm::Value *FailureOrder,
uint64_t Size, llvm::AtomicOrdering Order,
llvm::Value *Scope) {
Address OriginalVal1, llvm::Value *IsWeak,
llvm::Value *FailureOrder, uint64_t Size,
llvm::AtomicOrdering Order, llvm::Value *Scope) {
auto ScopeModel = Expr->getScopeModel();

// LLVM atomic instructions always have sync scope. If clang atomic
@@ -804,8 +830,8 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
Order, CGF.getLLVMContext());
else
SS = llvm::SyncScope::System;
EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
Order, SS);
EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
FailureOrder, Size, Order, SS);
return;
}

Expand All @@ -814,8 +840,8 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
auto SCID = CGF.getTargetHooks().getLLVMSyncScopeID(
CGF.CGM.getLangOpts(), ScopeModel->map(SC->getZExtValue()),
Order, CGF.CGM.getLLVMContext());
EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
Order, SCID);
EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
FailureOrder, Size, Order, SCID);
return;
}

Expand All @@ -840,12 +866,11 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
SI->addCase(Builder.getInt32(S), B);

Builder.SetInsertPoint(B);
EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
Order,
CGF.getTargetHooks().getLLVMSyncScopeID(CGF.CGM.getLangOpts(),
ScopeModel->map(S),
Order,
CGF.getLLVMContext()));
EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
FailureOrder, Size, Order,
CGF.getTargetHooks().getLLVMSyncScopeID(
CGF.CGM.getLangOpts(), ScopeModel->map(S), Order,
CGF.getLLVMContext()));
Builder.CreateBr(ContBB);
}

@@ -1046,6 +1071,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
LValue AtomicVal = MakeAddrLValue(Ptr, AtomicTy);
AtomicInfo Atomics(*this, AtomicVal);

Address OriginalVal1 = Val1;
if (ShouldCastToIntPtrTy) {
Ptr = Atomics.castToAtomicIntPointer(Ptr);
if (Val1.isValid())
@@ -1289,30 +1315,32 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
if (llvm::isValidAtomicOrderingCABI(ord))
switch ((llvm::AtomicOrderingCABI)ord) {
case llvm::AtomicOrderingCABI::relaxed:
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
llvm::AtomicOrdering::Monotonic, Scope);
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
OrderFail, Size, llvm::AtomicOrdering::Monotonic, Scope);
break;
case llvm::AtomicOrderingCABI::consume:
case llvm::AtomicOrderingCABI::acquire:
if (IsStore)
break; // Avoid crashing on code with undefined behavior
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
llvm::AtomicOrdering::Acquire, Scope);
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
OrderFail, Size, llvm::AtomicOrdering::Acquire, Scope);
break;
case llvm::AtomicOrderingCABI::release:
if (IsLoad)
break; // Avoid crashing on code with undefined behavior
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
llvm::AtomicOrdering::Release, Scope);
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
OrderFail, Size, llvm::AtomicOrdering::Release, Scope);
break;
case llvm::AtomicOrderingCABI::acq_rel:
if (IsLoad || IsStore)
break; // Avoid crashing on code with undefined behavior
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
llvm::AtomicOrdering::AcquireRelease, Scope);
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
OrderFail, Size, llvm::AtomicOrdering::AcquireRelease,
Scope);
break;
case llvm::AtomicOrderingCABI::seq_cst:
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
OrderFail, Size,
llvm::AtomicOrdering::SequentiallyConsistent, Scope);
break;
}
@@ -1348,13 +1376,13 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {

// Emit all the different atomics
Builder.SetInsertPoint(MonotonicBB);
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
llvm::AtomicOrdering::Monotonic, Scope);
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak, OrderFail,
Size, llvm::AtomicOrdering::Monotonic, Scope);
Builder.CreateBr(ContBB);
if (!IsStore) {
Builder.SetInsertPoint(AcquireBB);
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
llvm::AtomicOrdering::Acquire, Scope);
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
OrderFail, Size, llvm::AtomicOrdering::Acquire, Scope);
Builder.CreateBr(ContBB);
SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
AcquireBB);
@@ -1363,23 +1391,23 @@ }
}
if (!IsLoad) {
Builder.SetInsertPoint(ReleaseBB);
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
llvm::AtomicOrdering::Release, Scope);
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
OrderFail, Size, llvm::AtomicOrdering::Release, Scope);
Builder.CreateBr(ContBB);
SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::release),
ReleaseBB);
}
if (!IsLoad && !IsStore) {
Builder.SetInsertPoint(AcqRelBB);
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
llvm::AtomicOrdering::AcquireRelease, Scope);
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
OrderFail, Size, llvm::AtomicOrdering::AcquireRelease, Scope);
Builder.CreateBr(ContBB);
SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acq_rel),
AcqRelBB);
}
Builder.SetInsertPoint(SeqCstBB);
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
llvm::AtomicOrdering::SequentiallyConsistent, Scope);
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak, OrderFail,
Size, llvm::AtomicOrdering::SequentiallyConsistent, Scope);
Builder.CreateBr(ContBB);
SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
SeqCstBB);
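For context, the hunks above change where the old value is written back when a compare-exchange on a promoted atomic type fails. The helpers now receive the caller's original expected address (ExpectedResult / OriginalVal1, captured before the pointer is cast to the padded atomic integer type), and when the promoted atomic width is larger than the expected type's store size, the old value is spilled to a temporary and only the leading ExpectedSizeInBytes bytes are memcpy'd back, instead of storing the full promoted integer. A minimal user-level sketch of the case this affects is below; the struct layout and function name are illustrative assumptions, not taken from the PR.

typedef struct { short x, y, z; } PS;   /* 6 bytes of payload, assumed layout */

_Bool try_update(_Atomic(PS) *addr, PS *expected, PS desired) {
  /* _Atomic(PS) is promoted to an 8-byte representation (6 data bytes plus
   * 2 padding bytes), so clang lowers this to an i64 cmpxchg. On failure,
   * the old value must be written back into *expected, which is only 6
   * bytes wide; with this change the i64 result is stored to a stack
   * temporary and only its first 6 bytes are copied into *expected. */
  return __c11_atomic_compare_exchange_strong(addr, expected, desired,
                                              __ATOMIC_SEQ_CST,
                                              __ATOMIC_SEQ_CST);
}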
4 changes: 3 additions & 1 deletion clang/test/CodeGen/c11atomics-ios.c
@@ -235,6 +235,7 @@ _Bool test_promoted_cmpxchg(_Atomic(PS) *addr, PS *desired, PS *new) {
// CHECK: [[ATOMIC_DESIRED:%.*]] = alloca { %struct.PS, [2 x i8] }, align 8
// CHECK: [[ATOMIC_NEW:%.*]] = alloca { %struct.PS, [2 x i8] }, align 8
// CHECK: [[RES_ADDR:%.*]] = alloca i8, align 1
// CHECK: [[OLD_TMP:%.*]] = alloca i64, align 8
// CHECK: store ptr %addr, ptr [[ADDR_ARG]], align 4
// CHECK: store ptr %desired, ptr [[DESIRED_ARG]], align 4
// CHECK: store ptr %new, ptr [[NEW_ARG]], align 4
@@ -251,7 +252,8 @@ _Bool test_promoted_cmpxchg(_Atomic(PS) *addr, PS *desired, PS *new) {
// CHECK: [[RES_BOOL:%.*]] = extractvalue { i64, i1 } [[RES]], 1
// CHECK: br i1 [[RES_BOOL]], label {{%.*}}, label {{%.*}}

// CHECK: store i64 [[RES_VAL64]], ptr [[ATOMIC_DESIRED]], align 8
// CHECK: store i64 [[RES_VAL64]], ptr [[OLD_TMP]], align 8
// CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 2 [[DESIRED_ARG:%.*]], ptr align 8 [[OLD_TMP]], i64 6, i1 false)
// CHECK: br label {{%.*}}

// CHECK: [[RES_BOOL8:%.*]] = zext i1 [[RES_BOOL]] to i8
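The updated CHECK lines reflect the new failure path: the 64-bit cmpxchg result is now stored to a fresh i64 stack slot (old.tmp, 8-byte aligned), and only the first 6 bytes, i.e. the store size of PS, are memcpy'd into the caller's expected object (which has only the struct's 2-byte alignment), replacing the old 8-byte store into the padded ATOMIC_DESIRED temporary. The test body itself is not part of this hunk; a plausible reconstruction, inferred from the signature in the hunk header and the existing CHECK lines, is:

typedef struct { short x, y, z; } PS;   /* assumed definition: 6 bytes, align 2 */

_Bool test_promoted_cmpxchg(_Atomic(PS) *addr, PS *desired, PS *new) {
  /* 'desired' is used as the expected-value pointer here, matching the
   * parameter names shown in the hunk header above. */
  return __c11_atomic_compare_exchange_strong(addr, desired, *new,
                                              __ATOMIC_SEQ_CST,
                                              __ATOMIC_SEQ_CST);
}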