From e194af150d8ca9a2c4aa53e90c406c4aa52e4667 Mon Sep 17 00:00:00 2001
From: Ryotaro Kasuga
Date: Fri, 24 Jan 2025 10:29:20 +0000
Subject: [PATCH] [LoopIdiom] Move up atomic checks for memcpy/memmove (NFC)

This patch moves up the checks that verify if it is legal to replace the
atomic load/store with memcpy. Currently these checks are done after we
determine to convert the load/store to memcpy/memmove, which makes the
logic a bit confusing. This patch is a prelude to #50892
---
 .../Transforms/Scalar/LoopIdiomRecognize.cpp  | 41 +++++++++++--------
 1 file changed, 23 insertions(+), 18 deletions(-)

diff --git a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index 3c82eeda54838..e3c59d07b87fb 100644
--- a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -1358,7 +1358,29 @@ bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(
     return Changed;
   }
 
+  bool IsAtomic = TheStore->isAtomic() || TheLoad->isAtomic();
   bool UseMemMove = IsMemCpy ? Verifier.IsSameObject : LoopAccessStore;
+
+  if (IsAtomic) {
+    // For now don't support unordered atomic memmove.
+    if (UseMemMove)
+      return Changed;
+
+    // We cannot allow unaligned ops for unordered load/store, so reject
+    // anything where the alignment isn't at least the element size.
+    assert((StoreAlign && LoadAlign) &&
+           "Expect unordered load/store to have align.");
+    if (*StoreAlign < StoreSize || *LoadAlign < StoreSize)
+      return Changed;
+
+    // If the element.atomic memcpy is not lowered into explicit
+    // loads/stores later, then it will be lowered into an element-size
+    // specific lib call. If the lib call doesn't exist for our store size, then
+    // we shouldn't generate the memcpy.
+    if (StoreSize > TTI->getAtomicMemIntrinsicMaxElementSize())
+      return Changed;
+  }
+
   if (UseMemMove)
     if (!Verifier.loadAndStoreMayFormMemmove(StoreSize, IsNegStride, *TheLoad,
                                              IsMemCpy))
@@ -1387,7 +1409,7 @@ bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(
   // Check whether to generate an unordered atomic memcpy:
   // If the load or store are atomic, then they must necessarily be unordered
   // by previous checks.
-  if (!TheStore->isAtomic() && !TheLoad->isAtomic()) {
+  if (!IsAtomic) {
     if (UseMemMove)
       NewCall = Builder.CreateMemMove(
           StoreBasePtr, StoreAlign, LoadBasePtr, LoadAlign, NumBytes,
@@ -1398,23 +1420,6 @@ bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(
           NumBytes, /*isVolatile=*/false, AATags.TBAA, AATags.TBAAStruct,
           AATags.Scope, AATags.NoAlias);
   } else {
-    // For now don't support unordered atomic memmove.
-    if (UseMemMove)
-      return Changed;
-    // We cannot allow unaligned ops for unordered load/store, so reject
-    // anything where the alignment isn't at least the element size.
-    assert((StoreAlign && LoadAlign) &&
-           "Expect unordered load/store to have align.");
-    if (*StoreAlign < StoreSize || *LoadAlign < StoreSize)
-      return Changed;
-
-    // If the element.atomic memcpy is not lowered into explicit
-    // loads/stores later, then it will be lowered into an element-size
-    // specific lib call. If the lib call doesn't exist for our store size, then
-    // we shouldn't generate the memcpy.
-    if (StoreSize > TTI->getAtomicMemIntrinsicMaxElementSize())
-      return Changed;
-
     // Create the call.
     // Note that unordered atomic loads/stores are *required* by the spec to
     // have an alignment but non-atomic loads/stores may not.
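
A note for readers skimming the diff: the shape of the change is easier to see
stripped of the surrounding LLVM plumbing. The standalone C++ sketch below
mirrors only the reordered control flow; every name in it (Op, chooseTransform,
MaxAtomicElementSize, and so on) is a hypothetical stand-in rather than the
actual LoopIdiomRecognize API, and the assert from the real code is folded into
a bail-out for brevity.

// Minimal sketch of the control flow after the patch. Hypothetical names;
// the point is the ordering: the atomic legality checks now run *before*
// we commit to emitting a memcpy/memmove.
#include <cstdint>
#include <optional>

struct Op {
  bool StoreIsAtomic = false;
  bool LoadIsAtomic = false;
  bool WouldUseMemMove = false;        // overlapping accesses -> memmove
  std::optional<uint64_t> StoreAlign;  // alignment in bytes, if known
  std::optional<uint64_t> LoadAlign;
  uint64_t StoreSize = 0;              // element size in bytes
};

// Stand-in for TTI->getAtomicMemIntrinsicMaxElementSize().
constexpr uint64_t MaxAtomicElementSize = 16;

enum class Action { Bail, MemMove, MemCpy, AtomicMemCpy };

Action chooseTransform(const Op &O) {
  bool IsAtomic = O.StoreIsAtomic || O.LoadIsAtomic;

  // Hoisted legality checks: reject the atomic cases that cannot be
  // lowered before deciding which intrinsic to emit.
  if (IsAtomic) {
    if (O.WouldUseMemMove)                  // no unordered atomic memmove
      return Action::Bail;
    if (!O.StoreAlign || !O.LoadAlign ||    // the real code asserts these
        *O.StoreAlign < O.StoreSize || *O.LoadAlign < O.StoreSize)
      return Action::Bail;                  // unaligned atomic ops
    if (O.StoreSize > MaxAtomicElementSize) // no element-size libcall
      return Action::Bail;
  }

  // From here on, any atomic case is already known to be lowerable, so the
  // emission branches below need no further early exits.
  if (!IsAtomic)
    return O.WouldUseMemMove ? Action::MemMove : Action::MemCpy;
  return Action::AtomicMemCpy;
}

int main() {
  Op O;
  O.StoreIsAtomic = true;
  O.StoreAlign = 4;
  O.LoadAlign = 4;
  O.StoreSize = 4;
  return chooseTransform(O) == Action::AtomicMemCpy ? 0 : 1;
}

The practical effect of the hoisting is that once control reaches the emission
code, every remaining case is known to be lowerable, which should make it
easier to layer the follow-up work referenced in the message on top of the
emission path without re-scattering bail-outs through it.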