@@ -1358,7 +1358,29 @@ bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(
     return Changed;
   }

+  bool IsAtomic = TheStore->isAtomic() || TheLoad->isAtomic();
   bool UseMemMove = IsMemCpy ? Verifier.IsSameObject : LoopAccessStore;
+
+  if (IsAtomic) {
+    // For now don't support unordered atomic memmove.
+    if (UseMemMove)
+      return Changed;
+
+    // We cannot allow unaligned ops for unordered load/store, so reject
+    // anything where the alignment isn't at least the element size.
+    assert((StoreAlign && LoadAlign) &&
+           "Expect unordered load/store to have align.");
+    if (*StoreAlign < StoreSize || *LoadAlign < StoreSize)
+      return Changed;
+
+    // If the element.atomic memcpy is not lowered into explicit
+    // loads/stores later, then it will be lowered into an element-size
+    // specific lib call. If the lib call doesn't exist for our store size, then
+    // we shouldn't generate the memcpy.
+    if (StoreSize > TTI->getAtomicMemIntrinsicMaxElementSize())
+      return Changed;
+  }
+
   if (UseMemMove)
     if (!Verifier.loadAndStoreMayFormMemmove(StoreSize, IsNegStride, *TheLoad,
                                              IsMemCpy))
@@ -1387,7 +1409,7 @@ bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(
   // Check whether to generate an unordered atomic memcpy:
   //  If the load or store are atomic, then they must necessarily be unordered
   //  by previous checks.
-  if (!TheStore->isAtomic() && !TheLoad->isAtomic()) {
+  if (!IsAtomic) {
     if (UseMemMove)
       NewCall = Builder.CreateMemMove(
           StoreBasePtr, StoreAlign, LoadBasePtr, LoadAlign, NumBytes,
@@ -1398,23 +1420,6 @@ bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(
           NumBytes, /*isVolatile=*/false, AATags.TBAA,
           AATags.TBAAStruct, AATags.Scope, AATags.NoAlias);
   } else {
-    // For now don't support unordered atomic memmove.
-    if (UseMemMove)
-      return Changed;
-    // We cannot allow unaligned ops for unordered load/store, so reject
-    // anything where the alignment isn't at least the element size.
-    assert((StoreAlign && LoadAlign) &&
-           "Expect unordered load/store to have align.");
-    if (*StoreAlign < StoreSize || *LoadAlign < StoreSize)
-      return Changed;
-
-    // If the element.atomic memcpy is not lowered into explicit
-    // loads/stores later, then it will be lowered into an element-size
-    // specific lib call. If the lib call doesn't exist for our store size, then
-    // we shouldn't generate the memcpy.
-    if (StoreSize > TTI->getAtomicMemIntrinsicMaxElementSize())
-      return Changed;
-
     // Create the call.
     // Note that unordered atomic loads/stores are *required* by the spec to
     // have an alignment but non-atomic loads/stores may not.
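Net effect of the hunks above: the rejection checks for unordered-atomic copies are hoisted in front of the memmove feasibility analysis, so the pass bails out early instead of repeating those checks in the codegen branch, and by the time the `else` branch runs the copy is known to be unordered-atomic, aligned to at least the element size, and within the target's supported element size. Below is a minimal sketch of that gating plus the intrinsic emission, assuming the pass's usual operands (`StoreBasePtr`, `LoadBasePtr`, `NumBytes`, the two `MaybeAlign`s, and the element size `StoreSize`) are already computed; the helper name `tryEmitAtomicMemCpy` is hypothetical, while `CreateElementUnorderedAtomicMemCpy` and `getAtomicMemIntrinsicMaxElementSize` are real LLVM APIs. This is not the commit's verbatim code.

```cpp
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/Support/Alignment.h"

using namespace llvm;

// Hypothetical helper mirroring the hoisted guard: returns nullptr when the
// idiom must be rejected, otherwise emits the element-wise unordered atomic
// memcpy. StoreSize is the per-element size in bytes.
static CallInst *tryEmitAtomicMemCpy(IRBuilder<> &Builder,
                                     const TargetTransformInfo &TTI,
                                     Value *StoreBasePtr, MaybeAlign StoreAlign,
                                     Value *LoadBasePtr, MaybeAlign LoadAlign,
                                     Value *NumBytes, unsigned StoreSize,
                                     bool UseMemMove) {
  // Unordered atomic memmove is not supported; bail out as the patch does.
  if (UseMemMove)
    return nullptr;

  // Unordered atomic ops may not be unaligned: require at least element-size
  // alignment on both sides.
  if (!StoreAlign || !LoadAlign || *StoreAlign < StoreSize ||
      *LoadAlign < StoreSize)
    return nullptr;

  // If no element-size-specific lib call exists for this element size, the
  // intrinsic could not be lowered, so don't form it.
  if (StoreSize > TTI.getAtomicMemIntrinsicMaxElementSize())
    return nullptr;

  // Unordered atomic loads/stores are required by the spec to carry an
  // alignment, so dereferencing the MaybeAligns here is safe.
  return Builder.CreateElementUnorderedAtomicMemCpy(
      StoreBasePtr, *StoreAlign, LoadBasePtr, *LoadAlign, NumBytes, StoreSize);
}
```

Sharing one guard keeps the memmove and atomic-memcpy paths on the same early exits, which is what lets the codegen branch shed the seventeen removed lines and reduce to just building the call.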