@@ -4726,8 +4726,6 @@ SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
       // for the narrowed load.
       for (unsigned width = 8; width < origWidth; width *= 2) {
         EVT newVT = EVT::getIntegerVT(*DAG.getContext(), width);
-        if (!shouldReduceLoadWidth(Lod, ISD::NON_EXTLOAD, newVT))
-          continue;
         APInt newMask = APInt::getLowBitsSet(maskWidth, width);
         // Avoid accessing any padding here for now (we could use memWidth
         // instead of origWidth here otherwise).
@@ -4737,8 +4735,11 @@ SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
             unsigned ptrOffset =
                 Layout.isLittleEndian() ? offset : memWidth - width - offset;
             unsigned IsFast = 0;
+            assert((ptrOffset % 8) == 0 && "Non-Bytealigned pointer offset");
             Align NewAlign = commonAlignment(Lod->getAlign(), ptrOffset / 8);
-            if (allowsMemoryAccess(
+            if (shouldReduceLoadWidth(Lod, ISD::NON_EXTLOAD, newVT,
+                                      ptrOffset / 8) &&
+                allowsMemoryAccess(
                     *DAG.getContext(), Layout, newVT, Lod->getAddressSpace(),
                     NewAlign, Lod->getMemOperand()->getFlags(), &IsFast) &&
                 IsFast) {
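Taken with the previous hunk, the shouldReduceLoadWidth query moves from the top of the narrowing loop to the point where the byte offset of the narrowed access is known, and that offset is now handed to the hook. As a minimal sketch of the consumer side, assuming the hook's signature gained a trailing std::optional<unsigned> ByteOffset parameter (the header change is outside this excerpt), a hypothetical backend override might use it like this:

// Hypothetical override; "MyTargetLowering" is illustrative, not a real
// target, and the trailing ByteOffset parameter is assumed from the call
// sites in this diff rather than shown here.
bool MyTargetLowering::shouldReduceLoadWidth(
    SDNode *Load, ISD::LoadExtType ExtTy, EVT NewVT,
    std::optional<unsigned> ByteOffset) const {
  // Example policy: accept narrowing only when the offset is unknown or is
  // a multiple of the narrowed type's byte width, so the offset can be
  // folded into the target's addressing modes.
  if (ByteOffset) {
    unsigned NewBytes = NewVT.getFixedSizeInBits() / 8;
    if (NewBytes == 0 || *ByteOffset % NewBytes != 0)
      return false;
  }
  return true;
}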
@@ -12176,24 +12177,27 @@ SDValue TargetLowering::scalarizeExtractedVectorLoad(EVT ResultVT,
 
   ISD::LoadExtType ExtTy =
       ResultVT.bitsGT(VecEltVT) ? ISD::EXTLOAD : ISD::NON_EXTLOAD;
-  if (!isOperationLegalOrCustom(ISD::LOAD, VecEltVT) ||
-      !shouldReduceLoadWidth(OriginalLoad, ExtTy, VecEltVT))
+  if (!isOperationLegalOrCustom(ISD::LOAD, VecEltVT))
     return SDValue();
 
+  std::optional<unsigned> ByteOffset;
   Align Alignment = OriginalLoad->getAlign();
   MachinePointerInfo MPI;
   if (auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo)) {
     int Elt = ConstEltNo->getZExtValue();
-    unsigned PtrOff = VecEltVT.getSizeInBits() * Elt / 8;
-    MPI = OriginalLoad->getPointerInfo().getWithOffset(PtrOff);
-    Alignment = commonAlignment(Alignment, PtrOff);
+    ByteOffset = VecEltVT.getSizeInBits() * Elt / 8;
+    MPI = OriginalLoad->getPointerInfo().getWithOffset(*ByteOffset);
+    Alignment = commonAlignment(Alignment, *ByteOffset);
   } else {
     // Discard the pointer info except the address space because the memory
     // operand can't represent this new access since the offset is variable.
     MPI = MachinePointerInfo(OriginalLoad->getPointerInfo().getAddrSpace());
     Alignment = commonAlignment(Alignment, VecEltVT.getSizeInBits() / 8);
   }
 
+  if (!shouldReduceLoadWidth(OriginalLoad, ExtTy, VecEltVT, ByteOffset))
+    return SDValue();
+
   unsigned IsFast = 0;
   if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VecEltVT,
                           OriginalLoad->getAddressSpace(), Alignment,
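With the legality check split out above, shouldReduceLoadWidth is now queried after the pointer info and alignment are computed, so a constant extract index supplies a concrete ByteOffset while a variable index leaves it as std::nullopt. The offset arithmetic itself is the element index times the element width in bits, divided by 8; a standalone C++ sketch of that computation (illustrative, not LLVM code, and eltByteOffset is a made-up name):

#include <cstdio>
#include <optional>

// Byte offset of element EltIdx in a vector of EltBits-wide elements,
// mirroring the VecEltVT.getSizeInBits() * Elt / 8 computation above.
// A variable extract index yields no offset, matching the std::nullopt path.
static std::optional<unsigned> eltByteOffset(std::optional<unsigned> EltIdx,
                                             unsigned EltBits) {
  if (!EltIdx)
    return std::nullopt;
  return EltBits * *EltIdx / 8;
}

int main() {
  // Extracting element 3 of a <4 x i32> reads 12 bytes past the base pointer.
  if (auto Off = eltByteOffset(3, 32))
    std::printf("constant index -> byte offset %u\n", *Off);
  if (!eltByteOffset(std::nullopt, 32))
    std::printf("variable index -> byte offset unknown\n");
}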