@@ -431,29 +431,28 @@ bool RISCVRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
   }
 
   if (!IsRVVSpill) {
-    if (MI.getOpcode() == RISCV::ADDI && !isInt<12>(Offset.getFixed())) {
+    int64_t Val = Offset.getFixed();
+    int64_t Lo12 = SignExtend64<12>(Val);
+    unsigned Opc = MI.getOpcode();
+    if (Opc == RISCV::ADDI && !isInt<12>(Val)) {
       // We chose to emit the canonical immediate sequence rather than folding
       // the offset into the using add under the theory that doing so doesn't
       // save dynamic instruction count and some target may fuse the canonical
       // 32 bit immediate sequence. We still need to clear the portion of the
       // offset encoded in the immediate.
       MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
+    } else if ((Opc == RISCV::PREFETCH_I || Opc == RISCV::PREFETCH_R ||
+                Opc == RISCV::PREFETCH_W) &&
+               (Lo12 & 0b11111) != 0) {
+      // Prefetch instructions require the offset to be 32 byte aligned.
+      MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
     } else {
       // We can encode an add with 12 bit signed immediate in the immediate
       // operand of our user instruction. As a result, the remaining
       // offset can, by construction, be at worst a LUI and an ADD.
-      int64_t Val = Offset.getFixed();
-      int64_t Lo12 = SignExtend64<12>(Val);
-      if ((MI.getOpcode() == RISCV::PREFETCH_I ||
-           MI.getOpcode() == RISCV::PREFETCH_R ||
-           MI.getOpcode() == RISCV::PREFETCH_W) &&
-          (Lo12 & 0b11111) != 0)
-        MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
-      else {
-        MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Lo12);
-        Offset = StackOffset::get((uint64_t)Val - (uint64_t)Lo12,
-                                  Offset.getScalable());
-      }
+      MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Lo12);
+      Offset = StackOffset::get((uint64_t)Val - (uint64_t)Lo12,
+                                Offset.getScalable());
     }
   }
 
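For context on why the restructured branches are safe, here is a minimal standalone sketch, not part of the patch: `signExtend12` is a hand-rolled stand-in for `llvm::SignExtend64<12>`. It illustrates that any fixed offset `Val` always splits into a 4096-aligned high part plus the sign-extended low 12 bits (hence "at worst a LUI and an ADD"), and how the `(Lo12 & 0b11111)` test detects offsets that Zicbop prefetches, which only encode offset bits [11:5], cannot fold.

```cpp
#include <cassert>
#include <cstdint>
#include <cstdio>

// Hand-rolled stand-in for llvm::SignExtend64<12>(Val): interpret the
// low 12 bits of Val as a signed two's-complement value. Relies on
// arithmetic right shift of a negative int64_t, as is universal in
// practice.
static int64_t signExtend12(int64_t Val) {
  return static_cast<int64_t>(static_cast<uint64_t>(Val) << 52) >> 52;
}

int main() {
  for (int64_t Val : {2048, 4095, -2049, 123456}) {
    int64_t Lo12 = signExtend12(Val);
    int64_t Hi = Val - Lo12;
    // Lo12 and Val agree in their low 12 bits, so the remainder Hi is
    // always a multiple of 4096, i.e. materializable with LUI (+ ADD).
    assert(Hi % 4096 == 0 && Hi + Lo12 == Val);
    // Zicbop prefetches encode only offset[11:5]; the low five bits of
    // the immediate select the prefetch kind. A Lo12 with nonzero low
    // five bits therefore cannot be folded into the instruction, which
    // is why the patch falls back to ChangeToImmediate(0) in that case.
    bool Foldable = (Lo12 & 0b11111) == 0;
    std::printf("Val=%lld  Hi=%lld  Lo12=%lld  prefetch-foldable=%d\n",
                (long long)Val, (long long)Hi, (long long)Lo12, Foldable);
  }
}
```

Hoisting `Val`, `Lo12`, and `Opc` above the branch is purely a restructuring for readability; each branch consumes the same values the old nested code computed.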