llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp: 41 changes (23 additions, 18 deletions)
@@ -1358,7 +1358,29 @@ bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(
     return Changed;
   }
 
+  bool IsAtomic = TheStore->isAtomic() || TheLoad->isAtomic();
   bool UseMemMove = IsMemCpy ? Verifier.IsSameObject : LoopAccessStore;
+
+  if (IsAtomic) {
+    // For now don't support unordered atomic memmove.
+    if (UseMemMove)
+      return Changed;
+
+    // We cannot allow unaligned ops for unordered load/store, so reject
+    // anything where the alignment isn't at least the element size.
+    assert((StoreAlign && LoadAlign) &&
+           "Expect unordered load/store to have align.");
+    if (*StoreAlign < StoreSize || *LoadAlign < StoreSize)
+      return Changed;
+
+    // If the element.atomic memcpy is not lowered into explicit
+    // loads/stores later, then it will be lowered into an element-size
+    // specific lib call. If the lib call doesn't exist for our store size, then
+    // we shouldn't generate the memcpy.
+    if (StoreSize > TTI->getAtomicMemIntrinsicMaxElementSize())
+      return Changed;
+  }
+
   if (UseMemMove)
     if (!Verifier.loadAndStoreMayFormMemmove(StoreSize, IsNegStride, *TheLoad,
                                              IsMemCpy))
Expand Down Expand Up @@ -1387,7 +1409,7 @@ bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(
// Check whether to generate an unordered atomic memcpy:
// If the load or store are atomic, then they must necessarily be unordered
// by previous checks.
if (!TheStore->isAtomic() && !TheLoad->isAtomic()) {
if (!IsAtomic) {
if (UseMemMove)
NewCall = Builder.CreateMemMove(
StoreBasePtr, StoreAlign, LoadBasePtr, LoadAlign, NumBytes,
@@ -1398,23 +1420,6 @@ bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(
           NumBytes, /*isVolatile=*/false, AATags.TBAA,
           AATags.TBAAStruct, AATags.Scope, AATags.NoAlias);
   } else {
-    // For now don't support unordered atomic memmove.
-    if (UseMemMove)
-      return Changed;
-    // We cannot allow unaligned ops for unordered load/store, so reject
-    // anything where the alignment isn't at least the element size.
-    assert((StoreAlign && LoadAlign) &&
-           "Expect unordered load/store to have align.");
-    if (*StoreAlign < StoreSize || *LoadAlign < StoreSize)
-      return Changed;
-
-    // If the element.atomic memcpy is not lowered into explicit
-    // loads/stores later, then it will be lowered into an element-size
-    // specific lib call. If the lib call doesn't exist for our store size, then
-    // we shouldn't generate the memcpy.
-    if (StoreSize > TTI->getAtomicMemIntrinsicMaxElementSize())
-      return Changed;
-
     // Create the call.
     // Note that unordered atomic loads/stores are *required* by the spec to
     // have an alignment but non-atomic loads/stores may not.
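
For orientation only (not part of this patch), a minimal, hypothetical C++ sketch of the store-of-a-load loop shape that processLoopStoreOfLoopLoad recognizes. In the non-atomic case the pass can replace such a loop with a single memcpy (or a memmove when both sides are the same underlying object), while unordered-atomic loads and stores take the element-wise atomic path guarded by the checks hoisted above.

// Hypothetical standalone example, not taken from the patch: a byte-wise
// copy loop of the kind LoopIdiomRecognize can turn into a single memcpy
// call when alias analysis proves the two ranges are disjoint; if both
// ranges belong to the same object, a memmove may be formed instead.
#include <cstddef>

void copy_bytes(unsigned char *dst, const unsigned char *src, std::size_t n) {
  for (std::size_t i = 0; i != n; ++i)
    dst[i] = src[i]; // store of a loop load: the idiom this function matches
}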