diff --git a/llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp b/llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp index 1964ef43beda7..09a31fb2306de 100644 --- a/llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp +++ b/llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp @@ -731,6 +731,7 @@ bool RISCVOptWInstrs::stripWSuffixes(MachineFunction &MF, for (MachineBasicBlock &MBB : MF) { for (MachineInstr &MI : MBB) { unsigned Opc; + // clang-format off switch (MI.getOpcode()) { default: continue; @@ -738,7 +739,9 @@ bool RISCVOptWInstrs::stripWSuffixes(MachineFunction &MF, case RISCV::ADDIW: Opc = RISCV::ADDI; break; case RISCV::MULW: Opc = RISCV::MUL; break; case RISCV::SLLIW: Opc = RISCV::SLLI; break; + case RISCV::SUBW: Opc = RISCV::SUB; break; } + // clang-format on if (hasAllWUsers(MI, ST, MRI)) { LLVM_DEBUG(dbgs() << "Replacing " << MI); diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/div-by-constant.ll b/llvm/test/CodeGen/RISCV/GlobalISel/div-by-constant.ll index 4b999b892ed35..2a93585ea6876 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/div-by-constant.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/div-by-constant.ll @@ -66,7 +66,7 @@ define i32 @udiv_constant_add(i32 %a) nounwind { ; RV64IM-NEXT: srli a2, a2, 32 ; RV64IM-NEXT: mul a1, a2, a1 ; RV64IM-NEXT: srli a1, a1, 32 -; RV64IM-NEXT: subw a0, a0, a1 +; RV64IM-NEXT: sub a0, a0, a1 ; RV64IM-NEXT: srliw a0, a0, 1 ; RV64IM-NEXT: add a0, a0, a1 ; RV64IM-NEXT: srliw a0, a0, 2 @@ -79,7 +79,7 @@ define i32 @udiv_constant_add(i32 %a) nounwind { ; RV64IMZB-NEXT: zext.w a2, a0 ; RV64IMZB-NEXT: mul a1, a2, a1 ; RV64IMZB-NEXT: srli a1, a1, 32 -; RV64IMZB-NEXT: subw a0, a0, a1 +; RV64IMZB-NEXT: sub a0, a0, a1 ; RV64IMZB-NEXT: srliw a0, a0, 1 ; RV64IMZB-NEXT: add a0, a0, a1 ; RV64IMZB-NEXT: srliw a0, a0, 2 @@ -250,7 +250,7 @@ define i8 @udiv8_constant_add(i8 %a) nounwind { ; RV64-NEXT: zext.b a2, a0 ; RV64-NEXT: mul a1, a2, a1 ; RV64-NEXT: srli a1, a1, 8 -; RV64-NEXT: subw a0, a0, a1 +; RV64-NEXT: sub a0, a0, a1 ; RV64-NEXT: zext.b a0, a0 ; RV64-NEXT: srli a0, a0, 1 ; RV64-NEXT: add a0, a0, a1 @@ -816,7 +816,7 @@ define i8 @sdiv8_constant_sub_srai(i8 %a) nounwind { ; RV64IM-NEXT: mul a1, a2, a1 ; RV64IM-NEXT: slli a1, a1, 48 ; RV64IM-NEXT: srai a1, a1, 56 -; RV64IM-NEXT: subw a1, a1, a0 +; RV64IM-NEXT: sub a1, a1, a0 ; RV64IM-NEXT: slli a1, a1, 56 ; RV64IM-NEXT: srai a0, a1, 58 ; RV64IM-NEXT: zext.b a1, a0 @@ -1071,7 +1071,7 @@ define i16 @sdiv16_constant_sub_srai(i16 %a) nounwind { ; RV64IM-NEXT: srai a2, a2, 48 ; RV64IM-NEXT: mul a1, a2, a1 ; RV64IM-NEXT: sraiw a1, a1, 16 -; RV64IM-NEXT: subw a1, a1, a0 +; RV64IM-NEXT: sub a1, a1, a0 ; RV64IM-NEXT: slli a1, a1, 48 ; RV64IM-NEXT: srai a0, a1, 51 ; RV64IM-NEXT: slli a1, a0, 48 diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rotl-rotr.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rotl-rotr.ll index 8a786fc9993d2..46d1661983c6a 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/rotl-rotr.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rotl-rotr.ll @@ -29,7 +29,7 @@ define i32 @rotl_32(i32 %x, i32 %y) nounwind { ; ; RV64I-LABEL: rotl_32: ; RV64I: # %bb.0: -; RV64I-NEXT: negw a2, a1 +; RV64I-NEXT: neg a2, a1 ; RV64I-NEXT: sllw a1, a0, a1 ; RV64I-NEXT: srlw a0, a0, a2 ; RV64I-NEXT: or a0, a1, a0 @@ -55,7 +55,7 @@ define i32 @rotl_32(i32 %x, i32 %y) nounwind { ; ; RV64XTHEADBB-LABEL: rotl_32: ; RV64XTHEADBB: # %bb.0: -; RV64XTHEADBB-NEXT: negw a2, a1 +; RV64XTHEADBB-NEXT: neg a2, a1 ; RV64XTHEADBB-NEXT: sllw a1, a0, a1 ; RV64XTHEADBB-NEXT: srlw a0, a0, a2 ; RV64XTHEADBB-NEXT: or a0, a1, a0 @@ -78,7 +78,7 @@ define i32 @rotr_32(i32 %x, i32 %y) 
nounwind { ; ; RV64I-LABEL: rotr_32: ; RV64I: # %bb.0: -; RV64I-NEXT: negw a2, a1 +; RV64I-NEXT: neg a2, a1 ; RV64I-NEXT: srlw a1, a0, a1 ; RV64I-NEXT: sllw a0, a0, a2 ; RV64I-NEXT: or a0, a1, a0 @@ -104,7 +104,7 @@ define i32 @rotr_32(i32 %x, i32 %y) nounwind { ; ; RV64XTHEADBB-LABEL: rotr_32: ; RV64XTHEADBB: # %bb.0: -; RV64XTHEADBB-NEXT: negw a2, a1 +; RV64XTHEADBB-NEXT: neg a2, a1 ; RV64XTHEADBB-NEXT: srlw a1, a0, a1 ; RV64XTHEADBB-NEXT: sllw a0, a0, a2 ; RV64XTHEADBB-NEXT: or a0, a1, a0 @@ -167,7 +167,7 @@ define i64 @rotl_64(i64 %x, i64 %y) nounwind { ; ; RV64I-LABEL: rotl_64: ; RV64I: # %bb.0: -; RV64I-NEXT: negw a2, a1 +; RV64I-NEXT: neg a2, a1 ; RV64I-NEXT: sll a1, a0, a1 ; RV64I-NEXT: srl a0, a0, a2 ; RV64I-NEXT: or a0, a1, a0 @@ -276,7 +276,7 @@ define i64 @rotl_64(i64 %x, i64 %y) nounwind { ; ; RV64XTHEADBB-LABEL: rotl_64: ; RV64XTHEADBB: # %bb.0: -; RV64XTHEADBB-NEXT: negw a2, a1 +; RV64XTHEADBB-NEXT: neg a2, a1 ; RV64XTHEADBB-NEXT: sll a1, a0, a1 ; RV64XTHEADBB-NEXT: srl a0, a0, a2 ; RV64XTHEADBB-NEXT: or a0, a1, a0 @@ -340,7 +340,7 @@ define i64 @rotr_64(i64 %x, i64 %y) nounwind { ; ; RV64I-LABEL: rotr_64: ; RV64I: # %bb.0: -; RV64I-NEXT: negw a2, a1 +; RV64I-NEXT: neg a2, a1 ; RV64I-NEXT: srl a1, a0, a1 ; RV64I-NEXT: sll a0, a0, a2 ; RV64I-NEXT: or a0, a1, a0 @@ -451,7 +451,7 @@ define i64 @rotr_64(i64 %x, i64 %y) nounwind { ; ; RV64XTHEADBB-LABEL: rotr_64: ; RV64XTHEADBB: # %bb.0: -; RV64XTHEADBB-NEXT: negw a2, a1 +; RV64XTHEADBB-NEXT: neg a2, a1 ; RV64XTHEADBB-NEXT: srl a1, a0, a1 ; RV64XTHEADBB-NEXT: sll a0, a0, a2 ; RV64XTHEADBB-NEXT: or a0, a1, a0 @@ -474,7 +474,7 @@ define i32 @rotl_32_mask(i32 %x, i32 %y) nounwind { ; ; RV64I-LABEL: rotl_32_mask: ; RV64I: # %bb.0: -; RV64I-NEXT: negw a2, a1 +; RV64I-NEXT: neg a2, a1 ; RV64I-NEXT: sllw a1, a0, a1 ; RV64I-NEXT: srlw a0, a0, a2 ; RV64I-NEXT: or a0, a1, a0 @@ -490,7 +490,7 @@ define i32 @rotl_32_mask(i32 %x, i32 %y) nounwind { ; ; RV64ZBB-LABEL: rotl_32_mask: ; RV64ZBB: # %bb.0: -; RV64ZBB-NEXT: negw a2, a1 +; RV64ZBB-NEXT: neg a2, a1 ; RV64ZBB-NEXT: sllw a1, a0, a1 ; RV64ZBB-NEXT: srlw a0, a0, a2 ; RV64ZBB-NEXT: or a0, a1, a0 @@ -506,7 +506,7 @@ define i32 @rotl_32_mask(i32 %x, i32 %y) nounwind { ; ; RV64XTHEADBB-LABEL: rotl_32_mask: ; RV64XTHEADBB: # %bb.0: -; RV64XTHEADBB-NEXT: negw a2, a1 +; RV64XTHEADBB-NEXT: neg a2, a1 ; RV64XTHEADBB-NEXT: sllw a1, a0, a1 ; RV64XTHEADBB-NEXT: srlw a0, a0, a2 ; RV64XTHEADBB-NEXT: or a0, a1, a0 @@ -531,7 +531,7 @@ define i32 @rotl_32_mask_and_63_and_31(i32 %x, i32 %y) nounwind { ; RV64I-LABEL: rotl_32_mask_and_63_and_31: ; RV64I: # %bb.0: ; RV64I-NEXT: sllw a2, a0, a1 -; RV64I-NEXT: negw a1, a1 +; RV64I-NEXT: neg a1, a1 ; RV64I-NEXT: srlw a0, a0, a1 ; RV64I-NEXT: or a0, a2, a0 ; RV64I-NEXT: ret @@ -547,7 +547,7 @@ define i32 @rotl_32_mask_and_63_and_31(i32 %x, i32 %y) nounwind { ; RV64ZBB-LABEL: rotl_32_mask_and_63_and_31: ; RV64ZBB: # %bb.0: ; RV64ZBB-NEXT: sllw a2, a0, a1 -; RV64ZBB-NEXT: negw a1, a1 +; RV64ZBB-NEXT: neg a1, a1 ; RV64ZBB-NEXT: srlw a0, a0, a1 ; RV64ZBB-NEXT: or a0, a2, a0 ; RV64ZBB-NEXT: ret @@ -563,7 +563,7 @@ define i32 @rotl_32_mask_and_63_and_31(i32 %x, i32 %y) nounwind { ; RV64XTHEADBB-LABEL: rotl_32_mask_and_63_and_31: ; RV64XTHEADBB: # %bb.0: ; RV64XTHEADBB-NEXT: sllw a2, a0, a1 -; RV64XTHEADBB-NEXT: negw a1, a1 +; RV64XTHEADBB-NEXT: neg a1, a1 ; RV64XTHEADBB-NEXT: srlw a0, a0, a1 ; RV64XTHEADBB-NEXT: or a0, a2, a0 ; RV64XTHEADBB-NEXT: ret @@ -632,7 +632,7 @@ define i32 @rotr_32_mask(i32 %x, i32 %y) nounwind { ; ; RV64I-LABEL: rotr_32_mask: ; RV64I: # %bb.0: 
-; RV64I-NEXT: negw a2, a1 +; RV64I-NEXT: neg a2, a1 ; RV64I-NEXT: srlw a1, a0, a1 ; RV64I-NEXT: sllw a0, a0, a2 ; RV64I-NEXT: or a0, a1, a0 @@ -648,7 +648,7 @@ define i32 @rotr_32_mask(i32 %x, i32 %y) nounwind { ; ; RV64ZBB-LABEL: rotr_32_mask: ; RV64ZBB: # %bb.0: -; RV64ZBB-NEXT: negw a2, a1 +; RV64ZBB-NEXT: neg a2, a1 ; RV64ZBB-NEXT: srlw a1, a0, a1 ; RV64ZBB-NEXT: sllw a0, a0, a2 ; RV64ZBB-NEXT: or a0, a1, a0 @@ -664,7 +664,7 @@ define i32 @rotr_32_mask(i32 %x, i32 %y) nounwind { ; ; RV64XTHEADBB-LABEL: rotr_32_mask: ; RV64XTHEADBB: # %bb.0: -; RV64XTHEADBB-NEXT: negw a2, a1 +; RV64XTHEADBB-NEXT: neg a2, a1 ; RV64XTHEADBB-NEXT: srlw a1, a0, a1 ; RV64XTHEADBB-NEXT: sllw a0, a0, a2 ; RV64XTHEADBB-NEXT: or a0, a1, a0 @@ -689,7 +689,7 @@ define i32 @rotr_32_mask_and_63_and_31(i32 %x, i32 %y) nounwind { ; RV64I-LABEL: rotr_32_mask_and_63_and_31: ; RV64I: # %bb.0: ; RV64I-NEXT: srlw a2, a0, a1 -; RV64I-NEXT: negw a1, a1 +; RV64I-NEXT: neg a1, a1 ; RV64I-NEXT: sllw a0, a0, a1 ; RV64I-NEXT: or a0, a2, a0 ; RV64I-NEXT: ret @@ -705,7 +705,7 @@ define i32 @rotr_32_mask_and_63_and_31(i32 %x, i32 %y) nounwind { ; RV64ZBB-LABEL: rotr_32_mask_and_63_and_31: ; RV64ZBB: # %bb.0: ; RV64ZBB-NEXT: srlw a2, a0, a1 -; RV64ZBB-NEXT: negw a1, a1 +; RV64ZBB-NEXT: neg a1, a1 ; RV64ZBB-NEXT: sllw a0, a0, a1 ; RV64ZBB-NEXT: or a0, a2, a0 ; RV64ZBB-NEXT: ret @@ -721,7 +721,7 @@ define i32 @rotr_32_mask_and_63_and_31(i32 %x, i32 %y) nounwind { ; RV64XTHEADBB-LABEL: rotr_32_mask_and_63_and_31: ; RV64XTHEADBB: # %bb.0: ; RV64XTHEADBB-NEXT: srlw a2, a0, a1 -; RV64XTHEADBB-NEXT: negw a1, a1 +; RV64XTHEADBB-NEXT: neg a1, a1 ; RV64XTHEADBB-NEXT: sllw a0, a0, a1 ; RV64XTHEADBB-NEXT: or a0, a2, a0 ; RV64XTHEADBB-NEXT: ret @@ -829,7 +829,7 @@ define i64 @rotl_64_mask(i64 %x, i64 %y) nounwind { ; ; RV64I-LABEL: rotl_64_mask: ; RV64I: # %bb.0: -; RV64I-NEXT: negw a2, a1 +; RV64I-NEXT: neg a2, a1 ; RV64I-NEXT: sll a1, a0, a1 ; RV64I-NEXT: srl a0, a0, a2 ; RV64I-NEXT: or a0, a1, a0 @@ -884,7 +884,7 @@ define i64 @rotl_64_mask(i64 %x, i64 %y) nounwind { ; ; RV64ZBB-LABEL: rotl_64_mask: ; RV64ZBB: # %bb.0: -; RV64ZBB-NEXT: negw a2, a1 +; RV64ZBB-NEXT: neg a2, a1 ; RV64ZBB-NEXT: sll a1, a0, a1 ; RV64ZBB-NEXT: srl a0, a0, a2 ; RV64ZBB-NEXT: or a0, a1, a0 @@ -939,7 +939,7 @@ define i64 @rotl_64_mask(i64 %x, i64 %y) nounwind { ; ; RV64XTHEADBB-LABEL: rotl_64_mask: ; RV64XTHEADBB: # %bb.0: -; RV64XTHEADBB-NEXT: negw a2, a1 +; RV64XTHEADBB-NEXT: neg a2, a1 ; RV64XTHEADBB-NEXT: sll a1, a0, a1 ; RV64XTHEADBB-NEXT: srl a0, a0, a2 ; RV64XTHEADBB-NEXT: or a0, a1, a0 @@ -1005,7 +1005,7 @@ define i64 @rotl_64_mask_and_127_and_63(i64 %x, i64 %y) nounwind { ; RV64I-LABEL: rotl_64_mask_and_127_and_63: ; RV64I: # %bb.0: ; RV64I-NEXT: sll a2, a0, a1 -; RV64I-NEXT: negw a1, a1 +; RV64I-NEXT: neg a1, a1 ; RV64I-NEXT: srl a0, a0, a1 ; RV64I-NEXT: or a0, a2, a0 ; RV64I-NEXT: ret @@ -1062,7 +1062,7 @@ define i64 @rotl_64_mask_and_127_and_63(i64 %x, i64 %y) nounwind { ; RV64ZBB-LABEL: rotl_64_mask_and_127_and_63: ; RV64ZBB: # %bb.0: ; RV64ZBB-NEXT: sll a2, a0, a1 -; RV64ZBB-NEXT: negw a1, a1 +; RV64ZBB-NEXT: neg a1, a1 ; RV64ZBB-NEXT: srl a0, a0, a1 ; RV64ZBB-NEXT: or a0, a2, a0 ; RV64ZBB-NEXT: ret @@ -1119,7 +1119,7 @@ define i64 @rotl_64_mask_and_127_and_63(i64 %x, i64 %y) nounwind { ; RV64XTHEADBB-LABEL: rotl_64_mask_and_127_and_63: ; RV64XTHEADBB: # %bb.0: ; RV64XTHEADBB-NEXT: sll a2, a0, a1 -; RV64XTHEADBB-NEXT: negw a1, a1 +; RV64XTHEADBB-NEXT: neg a1, a1 ; RV64XTHEADBB-NEXT: srl a0, a0, a1 ; RV64XTHEADBB-NEXT: or a0, a2, a0 ; RV64XTHEADBB-NEXT: 
ret @@ -1277,7 +1277,7 @@ define i64 @rotr_64_mask(i64 %x, i64 %y) nounwind { ; ; RV64I-LABEL: rotr_64_mask: ; RV64I: # %bb.0: -; RV64I-NEXT: negw a2, a1 +; RV64I-NEXT: neg a2, a1 ; RV64I-NEXT: srl a1, a0, a1 ; RV64I-NEXT: sll a0, a0, a2 ; RV64I-NEXT: or a0, a1, a0 @@ -1331,7 +1331,7 @@ define i64 @rotr_64_mask(i64 %x, i64 %y) nounwind { ; ; RV64ZBB-LABEL: rotr_64_mask: ; RV64ZBB: # %bb.0: -; RV64ZBB-NEXT: negw a2, a1 +; RV64ZBB-NEXT: neg a2, a1 ; RV64ZBB-NEXT: srl a1, a0, a1 ; RV64ZBB-NEXT: sll a0, a0, a2 ; RV64ZBB-NEXT: or a0, a1, a0 @@ -1385,7 +1385,7 @@ define i64 @rotr_64_mask(i64 %x, i64 %y) nounwind { ; ; RV64XTHEADBB-LABEL: rotr_64_mask: ; RV64XTHEADBB: # %bb.0: -; RV64XTHEADBB-NEXT: negw a2, a1 +; RV64XTHEADBB-NEXT: neg a2, a1 ; RV64XTHEADBB-NEXT: srl a1, a0, a1 ; RV64XTHEADBB-NEXT: sll a0, a0, a2 ; RV64XTHEADBB-NEXT: or a0, a1, a0 @@ -1451,7 +1451,7 @@ define i64 @rotr_64_mask_and_127_and_63(i64 %x, i64 %y) nounwind { ; RV64I-LABEL: rotr_64_mask_and_127_and_63: ; RV64I: # %bb.0: ; RV64I-NEXT: srl a2, a0, a1 -; RV64I-NEXT: negw a1, a1 +; RV64I-NEXT: neg a1, a1 ; RV64I-NEXT: sll a0, a0, a1 ; RV64I-NEXT: or a0, a2, a0 ; RV64I-NEXT: ret @@ -1508,7 +1508,7 @@ define i64 @rotr_64_mask_and_127_and_63(i64 %x, i64 %y) nounwind { ; RV64ZBB-LABEL: rotr_64_mask_and_127_and_63: ; RV64ZBB: # %bb.0: ; RV64ZBB-NEXT: srl a2, a0, a1 -; RV64ZBB-NEXT: negw a1, a1 +; RV64ZBB-NEXT: neg a1, a1 ; RV64ZBB-NEXT: sll a0, a0, a1 ; RV64ZBB-NEXT: or a0, a2, a0 ; RV64ZBB-NEXT: ret @@ -1565,7 +1565,7 @@ define i64 @rotr_64_mask_and_127_and_63(i64 %x, i64 %y) nounwind { ; RV64XTHEADBB-LABEL: rotr_64_mask_and_127_and_63: ; RV64XTHEADBB: # %bb.0: ; RV64XTHEADBB-NEXT: srl a2, a0, a1 -; RV64XTHEADBB-NEXT: negw a1, a1 +; RV64XTHEADBB-NEXT: neg a1, a1 ; RV64XTHEADBB-NEXT: sll a0, a0, a1 ; RV64XTHEADBB-NEXT: or a0, a2, a0 ; RV64XTHEADBB-NEXT: ret @@ -1701,7 +1701,7 @@ define signext i32 @rotl_32_mask_shared(i32 signext %a, i32 signext %b, i32 sign ; RV64I: # %bb.0: ; RV64I-NEXT: andi a3, a2, 31 ; RV64I-NEXT: sllw a4, a0, a2 -; RV64I-NEXT: negw a3, a3 +; RV64I-NEXT: neg a3, a3 ; RV64I-NEXT: srlw a0, a0, a3 ; RV64I-NEXT: or a0, a4, a0 ; RV64I-NEXT: sllw a1, a1, a2 @@ -1737,7 +1737,7 @@ define signext i32 @rotl_32_mask_shared(i32 signext %a, i32 signext %b, i32 sign ; RV64XTHEADBB: # %bb.0: ; RV64XTHEADBB-NEXT: andi a3, a2, 31 ; RV64XTHEADBB-NEXT: sllw a4, a0, a2 -; RV64XTHEADBB-NEXT: negw a3, a3 +; RV64XTHEADBB-NEXT: neg a3, a3 ; RV64XTHEADBB-NEXT: srlw a0, a0, a3 ; RV64XTHEADBB-NEXT: or a0, a4, a0 ; RV64XTHEADBB-NEXT: sllw a1, a1, a2 @@ -1822,7 +1822,7 @@ define signext i64 @rotl_64_mask_shared(i64 signext %a, i64 signext %b, i64 sign ; RV64I: # %bb.0: ; RV64I-NEXT: andi a3, a2, 63 ; RV64I-NEXT: sll a4, a0, a2 -; RV64I-NEXT: negw a3, a3 +; RV64I-NEXT: neg a3, a3 ; RV64I-NEXT: srl a0, a0, a3 ; RV64I-NEXT: or a0, a4, a0 ; RV64I-NEXT: sll a1, a1, a2 @@ -1972,7 +1972,7 @@ define signext i64 @rotl_64_mask_shared(i64 signext %a, i64 signext %b, i64 sign ; RV64XTHEADBB: # %bb.0: ; RV64XTHEADBB-NEXT: andi a3, a2, 63 ; RV64XTHEADBB-NEXT: sll a4, a0, a2 -; RV64XTHEADBB-NEXT: negw a3, a3 +; RV64XTHEADBB-NEXT: neg a3, a3 ; RV64XTHEADBB-NEXT: srl a0, a0, a3 ; RV64XTHEADBB-NEXT: or a0, a4, a0 ; RV64XTHEADBB-NEXT: sll a1, a1, a2 @@ -2002,7 +2002,7 @@ define signext i32 @rotr_32_mask_shared(i32 signext %a, i32 signext %b, i32 sign ; RV64I: # %bb.0: ; RV64I-NEXT: andi a3, a2, 31 ; RV64I-NEXT: srlw a4, a0, a2 -; RV64I-NEXT: negw a3, a3 +; RV64I-NEXT: neg a3, a3 ; RV64I-NEXT: sllw a0, a0, a3 ; RV64I-NEXT: or a0, a4, a0 ; RV64I-NEXT: sllw 
a1, a1, a2 @@ -2038,7 +2038,7 @@ define signext i32 @rotr_32_mask_shared(i32 signext %a, i32 signext %b, i32 sign ; RV64XTHEADBB: # %bb.0: ; RV64XTHEADBB-NEXT: andi a3, a2, 31 ; RV64XTHEADBB-NEXT: srlw a4, a0, a2 -; RV64XTHEADBB-NEXT: negw a3, a3 +; RV64XTHEADBB-NEXT: neg a3, a3 ; RV64XTHEADBB-NEXT: sllw a0, a0, a3 ; RV64XTHEADBB-NEXT: or a0, a4, a0 ; RV64XTHEADBB-NEXT: sllw a1, a1, a2 @@ -2125,7 +2125,7 @@ define signext i64 @rotr_64_mask_shared(i64 signext %a, i64 signext %b, i64 sign ; RV64I: # %bb.0: ; RV64I-NEXT: andi a3, a2, 63 ; RV64I-NEXT: srl a4, a0, a2 -; RV64I-NEXT: negw a3, a3 +; RV64I-NEXT: neg a3, a3 ; RV64I-NEXT: sll a0, a0, a3 ; RV64I-NEXT: or a0, a4, a0 ; RV64I-NEXT: sll a1, a1, a2 @@ -2279,7 +2279,7 @@ define signext i64 @rotr_64_mask_shared(i64 signext %a, i64 signext %b, i64 sign ; RV64XTHEADBB: # %bb.0: ; RV64XTHEADBB-NEXT: andi a3, a2, 63 ; RV64XTHEADBB-NEXT: srl a4, a0, a2 -; RV64XTHEADBB-NEXT: negw a3, a3 +; RV64XTHEADBB-NEXT: neg a3, a3 ; RV64XTHEADBB-NEXT: sll a0, a0, a3 ; RV64XTHEADBB-NEXT: or a0, a4, a0 ; RV64XTHEADBB-NEXT: sll a1, a1, a2 @@ -2312,8 +2312,8 @@ define signext i32 @rotl_32_mask_multiple(i32 signext %a, i32 signext %b, i32 si ; RV64I-NEXT: andi a3, a2, 31 ; RV64I-NEXT: sllw a4, a0, a2 ; RV64I-NEXT: sllw a2, a1, a2 -; RV64I-NEXT: negw a5, a3 -; RV64I-NEXT: negw a3, a3 +; RV64I-NEXT: neg a5, a3 +; RV64I-NEXT: neg a3, a3 ; RV64I-NEXT: srlw a0, a0, a5 ; RV64I-NEXT: srlw a1, a1, a3 ; RV64I-NEXT: or a0, a4, a0 @@ -2353,8 +2353,8 @@ define signext i32 @rotl_32_mask_multiple(i32 signext %a, i32 signext %b, i32 si ; RV64XTHEADBB-NEXT: andi a3, a2, 31 ; RV64XTHEADBB-NEXT: sllw a4, a0, a2 ; RV64XTHEADBB-NEXT: sllw a2, a1, a2 -; RV64XTHEADBB-NEXT: negw a5, a3 -; RV64XTHEADBB-NEXT: negw a3, a3 +; RV64XTHEADBB-NEXT: neg a5, a3 +; RV64XTHEADBB-NEXT: neg a3, a3 ; RV64XTHEADBB-NEXT: srlw a0, a0, a5 ; RV64XTHEADBB-NEXT: srlw a1, a1, a3 ; RV64XTHEADBB-NEXT: or a0, a4, a0 @@ -2464,7 +2464,7 @@ define i64 @rotl_64_mask_multiple(i64 %a, i64 %b, i64 %amt) nounwind { ; RV64I-NEXT: andi a3, a2, 63 ; RV64I-NEXT: sll a4, a0, a2 ; RV64I-NEXT: sll a2, a1, a2 -; RV64I-NEXT: negw a3, a3 +; RV64I-NEXT: neg a3, a3 ; RV64I-NEXT: srl a0, a0, a3 ; RV64I-NEXT: srl a1, a1, a3 ; RV64I-NEXT: or a0, a4, a0 @@ -2664,7 +2664,7 @@ define i64 @rotl_64_mask_multiple(i64 %a, i64 %b, i64 %amt) nounwind { ; RV64XTHEADBB-NEXT: andi a3, a2, 63 ; RV64XTHEADBB-NEXT: sll a4, a0, a2 ; RV64XTHEADBB-NEXT: sll a2, a1, a2 -; RV64XTHEADBB-NEXT: negw a3, a3 +; RV64XTHEADBB-NEXT: neg a3, a3 ; RV64XTHEADBB-NEXT: srl a0, a0, a3 ; RV64XTHEADBB-NEXT: srl a1, a1, a3 ; RV64XTHEADBB-NEXT: or a0, a4, a0 @@ -2697,8 +2697,8 @@ define signext i32 @rotr_32_mask_multiple(i32 signext %a, i32 signext %b, i32 si ; RV64I-NEXT: andi a3, a2, 31 ; RV64I-NEXT: srlw a4, a0, a2 ; RV64I-NEXT: srlw a2, a1, a2 -; RV64I-NEXT: negw a5, a3 -; RV64I-NEXT: negw a3, a3 +; RV64I-NEXT: neg a5, a3 +; RV64I-NEXT: neg a3, a3 ; RV64I-NEXT: sllw a0, a0, a5 ; RV64I-NEXT: sllw a1, a1, a3 ; RV64I-NEXT: or a0, a4, a0 @@ -2738,8 +2738,8 @@ define signext i32 @rotr_32_mask_multiple(i32 signext %a, i32 signext %b, i32 si ; RV64XTHEADBB-NEXT: andi a3, a2, 31 ; RV64XTHEADBB-NEXT: srlw a4, a0, a2 ; RV64XTHEADBB-NEXT: srlw a2, a1, a2 -; RV64XTHEADBB-NEXT: negw a5, a3 -; RV64XTHEADBB-NEXT: negw a3, a3 +; RV64XTHEADBB-NEXT: neg a5, a3 +; RV64XTHEADBB-NEXT: neg a3, a3 ; RV64XTHEADBB-NEXT: sllw a0, a0, a5 ; RV64XTHEADBB-NEXT: sllw a1, a1, a3 ; RV64XTHEADBB-NEXT: or a0, a4, a0 @@ -2850,7 +2850,7 @@ define i64 @rotr_64_mask_multiple(i64 %a, i64 %b, i64 %amt) 
nounwind { ; RV64I-NEXT: andi a3, a2, 63 ; RV64I-NEXT: srl a4, a0, a2 ; RV64I-NEXT: srl a2, a1, a2 -; RV64I-NEXT: negw a3, a3 +; RV64I-NEXT: neg a3, a3 ; RV64I-NEXT: sll a0, a0, a3 ; RV64I-NEXT: sll a1, a1, a3 ; RV64I-NEXT: or a0, a4, a0 @@ -3052,7 +3052,7 @@ define i64 @rotr_64_mask_multiple(i64 %a, i64 %b, i64 %amt) nounwind { ; RV64XTHEADBB-NEXT: andi a3, a2, 63 ; RV64XTHEADBB-NEXT: srl a4, a0, a2 ; RV64XTHEADBB-NEXT: srl a2, a1, a2 -; RV64XTHEADBB-NEXT: negw a3, a3 +; RV64XTHEADBB-NEXT: neg a3, a3 ; RV64XTHEADBB-NEXT: sll a0, a0, a3 ; RV64XTHEADBB-NEXT: sll a1, a1, a3 ; RV64XTHEADBB-NEXT: or a0, a4, a0 @@ -3116,7 +3116,7 @@ define i64 @rotl_64_zext(i64 %x, i32 %y) nounwind { ; RV64I-LABEL: rotl_64_zext: ; RV64I: # %bb.0: ; RV64I-NEXT: li a2, 64 -; RV64I-NEXT: subw a2, a2, a1 +; RV64I-NEXT: sub a2, a2, a1 ; RV64I-NEXT: sll a1, a0, a1 ; RV64I-NEXT: srl a0, a0, a2 ; RV64I-NEXT: or a0, a1, a0 @@ -3171,7 +3171,7 @@ define i64 @rotl_64_zext(i64 %x, i32 %y) nounwind { ; RV64ZBB-LABEL: rotl_64_zext: ; RV64ZBB: # %bb.0: ; RV64ZBB-NEXT: li a2, 64 -; RV64ZBB-NEXT: subw a2, a2, a1 +; RV64ZBB-NEXT: sub a2, a2, a1 ; RV64ZBB-NEXT: sll a1, a0, a1 ; RV64ZBB-NEXT: srl a0, a0, a2 ; RV64ZBB-NEXT: or a0, a1, a0 @@ -3226,7 +3226,7 @@ define i64 @rotl_64_zext(i64 %x, i32 %y) nounwind { ; RV64XTHEADBB-LABEL: rotl_64_zext: ; RV64XTHEADBB: # %bb.0: ; RV64XTHEADBB-NEXT: li a2, 64 -; RV64XTHEADBB-NEXT: subw a2, a2, a1 +; RV64XTHEADBB-NEXT: sub a2, a2, a1 ; RV64XTHEADBB-NEXT: sll a1, a0, a1 ; RV64XTHEADBB-NEXT: srl a0, a0, a2 ; RV64XTHEADBB-NEXT: or a0, a1, a0 @@ -3289,7 +3289,7 @@ define i64 @rotr_64_zext(i64 %x, i32 %y) nounwind { ; RV64I-LABEL: rotr_64_zext: ; RV64I: # %bb.0: ; RV64I-NEXT: li a2, 64 -; RV64I-NEXT: subw a2, a2, a1 +; RV64I-NEXT: sub a2, a2, a1 ; RV64I-NEXT: srl a1, a0, a1 ; RV64I-NEXT: sll a0, a0, a2 ; RV64I-NEXT: or a0, a1, a0 @@ -3343,7 +3343,7 @@ define i64 @rotr_64_zext(i64 %x, i32 %y) nounwind { ; RV64ZBB-LABEL: rotr_64_zext: ; RV64ZBB: # %bb.0: ; RV64ZBB-NEXT: li a2, 64 -; RV64ZBB-NEXT: subw a2, a2, a1 +; RV64ZBB-NEXT: sub a2, a2, a1 ; RV64ZBB-NEXT: srl a1, a0, a1 ; RV64ZBB-NEXT: sll a0, a0, a2 ; RV64ZBB-NEXT: or a0, a1, a0 @@ -3397,7 +3397,7 @@ define i64 @rotr_64_zext(i64 %x, i32 %y) nounwind { ; RV64XTHEADBB-LABEL: rotr_64_zext: ; RV64XTHEADBB: # %bb.0: ; RV64XTHEADBB-NEXT: li a2, 64 -; RV64XTHEADBB-NEXT: subw a2, a2, a1 +; RV64XTHEADBB-NEXT: sub a2, a2, a1 ; RV64XTHEADBB-NEXT: srl a1, a0, a1 ; RV64XTHEADBB-NEXT: sll a0, a0, a2 ; RV64XTHEADBB-NEXT: or a0, a1, a0 diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb-zbkb.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb-zbkb.ll index 1eddb8fc2797e..b7f84ba696c26 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb-zbkb.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb-zbkb.ll @@ -107,7 +107,7 @@ declare i32 @llvm.fshl.i32(i32, i32, i32) define signext i32 @rol_i32(i32 signext %a, i32 signext %b) nounwind { ; RV64I-LABEL: rol_i32: ; RV64I: # %bb.0: -; RV64I-NEXT: negw a2, a1 +; RV64I-NEXT: neg a2, a1 ; RV64I-NEXT: sllw a1, a0, a1 ; RV64I-NEXT: srlw a0, a0, a2 ; RV64I-NEXT: or a0, a1, a0 @@ -125,7 +125,7 @@ define signext i32 @rol_i32(i32 signext %a, i32 signext %b) nounwind { define void @rol_i32_nosext(i32 signext %a, i32 signext %b, ptr %x) nounwind { ; RV64I-LABEL: rol_i32_nosext: ; RV64I: # %bb.0: -; RV64I-NEXT: negw a3, a1 +; RV64I-NEXT: neg a3, a1 ; RV64I-NEXT: sllw a1, a0, a1 ; RV64I-NEXT: srlw a0, a0, a3 ; RV64I-NEXT: or a0, a1, a0 @@ -146,7 +146,7 @@ define signext i32 @rol_i32_neg_constant_rhs(i32 signext %a) nounwind { ; 
RV64I-LABEL: rol_i32_neg_constant_rhs: ; RV64I: # %bb.0: ; RV64I-NEXT: li a1, -2 -; RV64I-NEXT: negw a2, a0 +; RV64I-NEXT: neg a2, a0 ; RV64I-NEXT: sllw a0, a1, a0 ; RV64I-NEXT: srlw a1, a1, a2 ; RV64I-NEXT: or a0, a0, a1 @@ -166,7 +166,7 @@ declare i64 @llvm.fshl.i64(i64, i64, i64) define i64 @rol_i64(i64 %a, i64 %b) nounwind { ; RV64I-LABEL: rol_i64: ; RV64I: # %bb.0: -; RV64I-NEXT: negw a2, a1 +; RV64I-NEXT: neg a2, a1 ; RV64I-NEXT: sll a1, a0, a1 ; RV64I-NEXT: srl a0, a0, a2 ; RV64I-NEXT: or a0, a1, a0 @@ -185,7 +185,7 @@ declare i32 @llvm.fshr.i32(i32, i32, i32) define signext i32 @ror_i32(i32 signext %a, i32 signext %b) nounwind { ; RV64I-LABEL: ror_i32: ; RV64I: # %bb.0: -; RV64I-NEXT: negw a2, a1 +; RV64I-NEXT: neg a2, a1 ; RV64I-NEXT: srlw a1, a0, a1 ; RV64I-NEXT: sllw a0, a0, a2 ; RV64I-NEXT: or a0, a1, a0 @@ -203,7 +203,7 @@ define signext i32 @ror_i32(i32 signext %a, i32 signext %b) nounwind { define void @ror_i32_nosext(i32 signext %a, i32 signext %b, ptr %x) nounwind { ; RV64I-LABEL: ror_i32_nosext: ; RV64I: # %bb.0: -; RV64I-NEXT: negw a3, a1 +; RV64I-NEXT: neg a3, a1 ; RV64I-NEXT: srlw a1, a0, a1 ; RV64I-NEXT: sllw a0, a0, a3 ; RV64I-NEXT: or a0, a1, a0 @@ -224,7 +224,7 @@ define signext i32 @ror_i32_neg_constant_rhs(i32 signext %a) nounwind { ; RV64I-LABEL: ror_i32_neg_constant_rhs: ; RV64I: # %bb.0: ; RV64I-NEXT: li a1, -2 -; RV64I-NEXT: negw a2, a0 +; RV64I-NEXT: neg a2, a0 ; RV64I-NEXT: srlw a0, a1, a0 ; RV64I-NEXT: sllw a1, a1, a2 ; RV64I-NEXT: or a0, a0, a1 @@ -244,7 +244,7 @@ declare i64 @llvm.fshr.i64(i64, i64, i64) define i64 @ror_i64(i64 %a, i64 %b) nounwind { ; RV64I-LABEL: ror_i64: ; RV64I: # %bb.0: -; RV64I-NEXT: negw a2, a1 +; RV64I-NEXT: neg a2, a1 ; RV64I-NEXT: srl a1, a0, a1 ; RV64I-NEXT: sll a0, a0, a2 ; RV64I-NEXT: or a0, a1, a0 diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb.ll index 9690302552090..162590fe8f4aa 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb.ll @@ -31,7 +31,7 @@ define signext i32 @ctlz_i32(i32 signext %a) nounwind { ; RV64I-NEXT: and a1, a2, a1 ; RV64I-NEXT: lui a2, 209715 ; RV64I-NEXT: addi a2, a2, 819 -; RV64I-NEXT: subw a0, a0, a1 +; RV64I-NEXT: sub a0, a0, a1 ; RV64I-NEXT: srliw a1, a0, 2 ; RV64I-NEXT: and a0, a0, a2 ; RV64I-NEXT: and a1, a1, a2 @@ -88,7 +88,7 @@ define signext i32 @log2_i32(i32 signext %a) nounwind { ; RV64I-NEXT: and a1, a2, a1 ; RV64I-NEXT: lui a2, 209715 ; RV64I-NEXT: addi a2, a2, 819 -; RV64I-NEXT: subw a0, a0, a1 +; RV64I-NEXT: sub a0, a0, a1 ; RV64I-NEXT: srliw a1, a0, 2 ; RV64I-NEXT: and a0, a0, a2 ; RV64I-NEXT: and a1, a1, a2 @@ -103,7 +103,7 @@ define signext i32 @log2_i32(i32 signext %a) nounwind { ; RV64I-NEXT: call __muldi3 ; RV64I-NEXT: srliw a0, a0, 24 ; RV64I-NEXT: li a1, 32 -; RV64I-NEXT: subw a0, a1, a0 +; RV64I-NEXT: sub a0, a1, a0 ; RV64I-NEXT: j .LBB1_3 ; RV64I-NEXT: .LBB1_2: ; RV64I-NEXT: li a0, 32 @@ -153,7 +153,7 @@ define signext i32 @log2_ceil_i32(i32 signext %a) nounwind { ; RV64I-NEXT: and a1, a2, a1 ; RV64I-NEXT: lui a2, 209715 ; RV64I-NEXT: addi a2, a2, 819 -; RV64I-NEXT: subw a0, a0, a1 +; RV64I-NEXT: sub a0, a0, a1 ; RV64I-NEXT: srliw a1, a0, 2 ; RV64I-NEXT: and a0, a0, a2 ; RV64I-NEXT: and a1, a1, a2 @@ -168,7 +168,7 @@ define signext i32 @log2_ceil_i32(i32 signext %a) nounwind { ; RV64I-NEXT: call __muldi3 ; RV64I-NEXT: srliw a0, a0, 24 ; RV64I-NEXT: li a1, 32 -; RV64I-NEXT: subw a1, a1, a0 +; RV64I-NEXT: sub a1, a1, a0 ; RV64I-NEXT: .LBB2_2: # %cond.end ; RV64I-NEXT: 
subw a0, s0, a1 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload @@ -212,7 +212,7 @@ define signext i32 @findLastSet_i32(i32 signext %a) nounwind { ; RV64I-NEXT: and a1, a2, a1 ; RV64I-NEXT: lui a2, 209715 ; RV64I-NEXT: addi a2, a2, 819 -; RV64I-NEXT: subw a0, a0, a1 +; RV64I-NEXT: sub a0, a0, a1 ; RV64I-NEXT: srliw a1, a0, 2 ; RV64I-NEXT: and a0, a0, a2 ; RV64I-NEXT: and a1, a1, a2 @@ -283,7 +283,7 @@ define i32 @ctlz_lshr_i32(i32 signext %a) { ; RV64I-NEXT: and a1, a2, a1 ; RV64I-NEXT: lui a2, 209715 ; RV64I-NEXT: addi a2, a2, 819 -; RV64I-NEXT: subw a0, a0, a1 +; RV64I-NEXT: sub a0, a0, a1 ; RV64I-NEXT: srliw a1, a0, 2 ; RV64I-NEXT: and a0, a0, a2 ; RV64I-NEXT: and a1, a1, a2 @@ -412,7 +412,7 @@ define signext i32 @cttz_i32(i32 signext %a) nounwind { ; RV64I-NEXT: and a1, a2, a1 ; RV64I-NEXT: lui a2, 209715 ; RV64I-NEXT: addi a2, a2, 819 -; RV64I-NEXT: subw a0, a0, a1 +; RV64I-NEXT: sub a0, a0, a1 ; RV64I-NEXT: srliw a1, a0, 2 ; RV64I-NEXT: and a0, a0, a2 ; RV64I-NEXT: and a1, a1, a2 @@ -455,7 +455,7 @@ define signext i32 @cttz_zero_undef_i32(i32 signext %a) nounwind { ; RV64I-NEXT: and a1, a2, a1 ; RV64I-NEXT: lui a2, 209715 ; RV64I-NEXT: addi a2, a2, 819 -; RV64I-NEXT: subw a0, a0, a1 +; RV64I-NEXT: sub a0, a0, a1 ; RV64I-NEXT: srliw a1, a0, 2 ; RV64I-NEXT: and a0, a0, a2 ; RV64I-NEXT: and a1, a1, a2 @@ -497,7 +497,7 @@ define signext i32 @findFirstSet_i32(i32 signext %a) nounwind { ; RV64I-NEXT: and a1, a2, a1 ; RV64I-NEXT: lui a2, 209715 ; RV64I-NEXT: addi a2, a2, 819 -; RV64I-NEXT: subw a0, a0, a1 +; RV64I-NEXT: sub a0, a0, a1 ; RV64I-NEXT: srliw a1, a0, 2 ; RV64I-NEXT: and a0, a0, a2 ; RV64I-NEXT: and a1, a1, a2 @@ -553,7 +553,7 @@ define signext i32 @ffs_i32(i32 signext %a) nounwind { ; RV64I-NEXT: and a1, a2, a1 ; RV64I-NEXT: lui a2, 209715 ; RV64I-NEXT: addi a2, a2, 819 -; RV64I-NEXT: subw a0, a0, a1 +; RV64I-NEXT: sub a0, a0, a1 ; RV64I-NEXT: srliw a1, a0, 2 ; RV64I-NEXT: and a0, a0, a2 ; RV64I-NEXT: and a1, a1, a2 @@ -672,7 +672,7 @@ define signext i32 @ctpop_i32(i32 signext %a) nounwind { ; RV64I-NEXT: and a1, a1, a2 ; RV64I-NEXT: lui a2, 209715 ; RV64I-NEXT: addi a2, a2, 819 -; RV64I-NEXT: subw a0, a0, a1 +; RV64I-NEXT: sub a0, a0, a1 ; RV64I-NEXT: srliw a1, a0, 2 ; RV64I-NEXT: and a0, a0, a2 ; RV64I-NEXT: and a1, a1, a2 @@ -728,7 +728,7 @@ define signext i32 @ctpop_i32_load(ptr %p) nounwind { ; RV64I-NEXT: and a1, a2, a1 ; RV64I-NEXT: lui a2, 209715 ; RV64I-NEXT: addi a2, a2, 819 -; RV64I-NEXT: subw a0, a0, a1 +; RV64I-NEXT: sub a0, a0, a1 ; RV64I-NEXT: srliw a1, a0, 2 ; RV64I-NEXT: and a0, a0, a2 ; RV64I-NEXT: and a1, a1, a2 diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/shifts.ll b/llvm/test/CodeGen/RISCV/GlobalISel/shifts.ll index 8b262db56ccd2..d634cc9f6395c 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/shifts.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/shifts.ll @@ -330,13 +330,13 @@ define i128 @lshr128(i128 %a, i128 %b) nounwind { ; RV64I-NEXT: li a3, 64 ; RV64I-NEXT: bltu a2, a3, .LBB6_2 ; RV64I-NEXT: # %bb.1: -; RV64I-NEXT: subw a4, a2, a3 +; RV64I-NEXT: sub a4, a2, a3 ; RV64I-NEXT: srl a4, a1, a4 ; RV64I-NEXT: bnez a2, .LBB6_3 ; RV64I-NEXT: j .LBB6_4 ; RV64I-NEXT: .LBB6_2: ; RV64I-NEXT: srl a4, a0, a2 -; RV64I-NEXT: negw a5, a2 +; RV64I-NEXT: neg a5, a2 ; RV64I-NEXT: sll a5, a1, a5 ; RV64I-NEXT: or a4, a4, a5 ; RV64I-NEXT: beqz a2, .LBB6_4 @@ -476,13 +476,13 @@ define i128 @ashr128(i128 %a, i128 %b) nounwind { ; RV64I-NEXT: li a3, 64 ; RV64I-NEXT: bltu a2, a3, .LBB7_2 ; RV64I-NEXT: # %bb.1: -; RV64I-NEXT: subw a4, a2, a3 +; RV64I-NEXT: sub a4, a2, a3 ; 
RV64I-NEXT: sra a4, a1, a4 ; RV64I-NEXT: bnez a2, .LBB7_3 ; RV64I-NEXT: j .LBB7_4 ; RV64I-NEXT: .LBB7_2: ; RV64I-NEXT: srl a4, a0, a2 -; RV64I-NEXT: negw a5, a2 +; RV64I-NEXT: neg a5, a2 ; RV64I-NEXT: sll a5, a1, a5 ; RV64I-NEXT: or a4, a4, a5 ; RV64I-NEXT: beqz a2, .LBB7_4 @@ -615,13 +615,13 @@ define i128 @shl128(i128 %a, i128 %b) nounwind { ; RV64I-NEXT: bltu a2, a4, .LBB8_2 ; RV64I-NEXT: # %bb.1: ; RV64I-NEXT: li a0, 0 -; RV64I-NEXT: subw a4, a2, a4 +; RV64I-NEXT: sub a4, a2, a4 ; RV64I-NEXT: sll a3, a3, a4 ; RV64I-NEXT: bnez a2, .LBB8_3 ; RV64I-NEXT: j .LBB8_4 ; RV64I-NEXT: .LBB8_2: ; RV64I-NEXT: sll a0, a3, a2 -; RV64I-NEXT: negw a4, a2 +; RV64I-NEXT: neg a4, a2 ; RV64I-NEXT: srl a3, a3, a4 ; RV64I-NEXT: sll a4, a1, a2 ; RV64I-NEXT: or a3, a3, a4 @@ -685,7 +685,7 @@ define i64 @fshr64_minsize(i64 %a, i64 %b) minsize nounwind { ; ; RV64I-LABEL: fshr64_minsize: ; RV64I: # %bb.0: -; RV64I-NEXT: negw a2, a1 +; RV64I-NEXT: neg a2, a1 ; RV64I-NEXT: srl a1, a0, a1 ; RV64I-NEXT: sll a0, a0, a2 ; RV64I-NEXT: or a0, a1, a0 @@ -914,12 +914,12 @@ define i128 @fshr128_minsize(i128 %a, i128 %b) minsize nounwind { ; RV64I-NEXT: li a4, 64 ; RV64I-NEXT: bltu a5, a4, .LBB10_2 ; RV64I-NEXT: # %bb.1: -; RV64I-NEXT: subw a3, a5, a4 +; RV64I-NEXT: sub a3, a5, a4 ; RV64I-NEXT: srl a6, a1, a3 ; RV64I-NEXT: j .LBB10_3 ; RV64I-NEXT: .LBB10_2: ; RV64I-NEXT: srl a3, a0, a2 -; RV64I-NEXT: negw a6, a5 +; RV64I-NEXT: neg a6, a5 ; RV64I-NEXT: sll a6, a1, a6 ; RV64I-NEXT: or a6, a3, a6 ; RV64I-NEXT: .LBB10_3: @@ -928,7 +928,7 @@ define i128 @fshr128_minsize(i128 %a, i128 %b) minsize nounwind { ; RV64I-NEXT: # %bb.4: ; RV64I-NEXT: mv a3, a6 ; RV64I-NEXT: .LBB10_5: -; RV64I-NEXT: negw a7, a2 +; RV64I-NEXT: neg a7, a2 ; RV64I-NEXT: bltu a5, a4, .LBB10_7 ; RV64I-NEXT: # %bb.6: ; RV64I-NEXT: li a2, 0 @@ -940,13 +940,13 @@ define i128 @fshr128_minsize(i128 %a, i128 %b) minsize nounwind { ; RV64I-NEXT: bltu a6, a4, .LBB10_10 ; RV64I-NEXT: # %bb.9: ; RV64I-NEXT: li a5, 0 -; RV64I-NEXT: subw a4, a6, a4 +; RV64I-NEXT: sub a4, a6, a4 ; RV64I-NEXT: sll a0, a0, a4 ; RV64I-NEXT: bnez a6, .LBB10_11 ; RV64I-NEXT: j .LBB10_12 ; RV64I-NEXT: .LBB10_10: ; RV64I-NEXT: sll a5, a0, a7 -; RV64I-NEXT: negw a4, a6 +; RV64I-NEXT: neg a4, a6 ; RV64I-NEXT: srl a0, a0, a4 ; RV64I-NEXT: sll a4, a1, a7 ; RV64I-NEXT: or a0, a0, a4 diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/wide-scalar-shift-by-byte-multiple-legalization.ll b/llvm/test/CodeGen/RISCV/GlobalISel/wide-scalar-shift-by-byte-multiple-legalization.ll index 69519c00f88ea..014b1c1b936ee 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/wide-scalar-shift-by-byte-multiple-legalization.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/wide-scalar-shift-by-byte-multiple-legalization.ll @@ -758,13 +758,13 @@ define void @lshr_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind { ; RV64I-NEXT: or a3, a6, a7 ; RV64I-NEXT: bltu a1, a4, .LBB6_2 ; RV64I-NEXT: # %bb.1: -; RV64I-NEXT: subw a5, a1, a4 +; RV64I-NEXT: sub a5, a1, a4 ; RV64I-NEXT: srl a5, a3, a5 ; RV64I-NEXT: bnez a1, .LBB6_3 ; RV64I-NEXT: j .LBB6_4 ; RV64I-NEXT: .LBB6_2: ; RV64I-NEXT: srl a5, a0, a1 -; RV64I-NEXT: negw a6, a1 +; RV64I-NEXT: neg a6, a1 ; RV64I-NEXT: sll a6, a3, a6 ; RV64I-NEXT: or a5, a5, a6 ; RV64I-NEXT: beqz a1, .LBB6_4 @@ -1091,13 +1091,13 @@ define void @lshr_16bytes_wordOff(ptr %src.ptr, ptr %wordOff.ptr, ptr %dst) noun ; RV64I-NEXT: or a3, a6, a7 ; RV64I-NEXT: bltu a1, a4, .LBB7_2 ; RV64I-NEXT: # %bb.1: -; RV64I-NEXT: subw a5, a1, a4 +; RV64I-NEXT: sub a5, a1, a4 ; RV64I-NEXT: srl a5, a3, a5 ; RV64I-NEXT: bnez a1, 
.LBB7_3 ; RV64I-NEXT: j .LBB7_4 ; RV64I-NEXT: .LBB7_2: ; RV64I-NEXT: srl a5, a0, a1 -; RV64I-NEXT: negw a6, a1 +; RV64I-NEXT: neg a6, a1 ; RV64I-NEXT: sll a6, a3, a6 ; RV64I-NEXT: or a5, a5, a6 ; RV64I-NEXT: beqz a1, .LBB7_4 @@ -1425,13 +1425,13 @@ define void @shl_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind { ; RV64I-NEXT: bltu a3, a5, .LBB8_2 ; RV64I-NEXT: # %bb.1: ; RV64I-NEXT: li a1, 0 -; RV64I-NEXT: subw a5, a3, a5 +; RV64I-NEXT: sub a5, a3, a5 ; RV64I-NEXT: sll a4, a4, a5 ; RV64I-NEXT: bnez a3, .LBB8_3 ; RV64I-NEXT: j .LBB8_4 ; RV64I-NEXT: .LBB8_2: ; RV64I-NEXT: sll a1, a4, a3 -; RV64I-NEXT: negw a5, a3 +; RV64I-NEXT: neg a5, a3 ; RV64I-NEXT: srl a4, a4, a5 ; RV64I-NEXT: sll a5, a0, a3 ; RV64I-NEXT: or a4, a4, a5 @@ -1754,13 +1754,13 @@ define void @shl_16bytes_wordOff(ptr %src.ptr, ptr %wordOff.ptr, ptr %dst) nounw ; RV64I-NEXT: bltu a3, a5, .LBB9_2 ; RV64I-NEXT: # %bb.1: ; RV64I-NEXT: li a1, 0 -; RV64I-NEXT: subw a5, a3, a5 +; RV64I-NEXT: sub a5, a3, a5 ; RV64I-NEXT: sll a4, a4, a5 ; RV64I-NEXT: bnez a3, .LBB9_3 ; RV64I-NEXT: j .LBB9_4 ; RV64I-NEXT: .LBB9_2: ; RV64I-NEXT: sll a1, a4, a3 -; RV64I-NEXT: negw a5, a3 +; RV64I-NEXT: neg a5, a3 ; RV64I-NEXT: srl a4, a4, a5 ; RV64I-NEXT: sll a5, a0, a3 ; RV64I-NEXT: or a4, a4, a5 @@ -2083,13 +2083,13 @@ define void @ashr_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind { ; RV64I-NEXT: or a3, a6, a7 ; RV64I-NEXT: bltu a1, a4, .LBB10_2 ; RV64I-NEXT: # %bb.1: -; RV64I-NEXT: subw a5, a1, a4 +; RV64I-NEXT: sub a5, a1, a4 ; RV64I-NEXT: sra a5, a3, a5 ; RV64I-NEXT: bnez a1, .LBB10_3 ; RV64I-NEXT: j .LBB10_4 ; RV64I-NEXT: .LBB10_2: ; RV64I-NEXT: srl a5, a0, a1 -; RV64I-NEXT: negw a6, a1 +; RV64I-NEXT: neg a6, a1 ; RV64I-NEXT: sll a6, a3, a6 ; RV64I-NEXT: or a5, a5, a6 ; RV64I-NEXT: beqz a1, .LBB10_4 @@ -2416,13 +2416,13 @@ define void @ashr_16bytes_wordOff(ptr %src.ptr, ptr %wordOff.ptr, ptr %dst) noun ; RV64I-NEXT: or a3, a6, a7 ; RV64I-NEXT: bltu a1, a4, .LBB11_2 ; RV64I-NEXT: # %bb.1: -; RV64I-NEXT: subw a5, a1, a4 +; RV64I-NEXT: sub a5, a1, a4 ; RV64I-NEXT: sra a5, a3, a5 ; RV64I-NEXT: bnez a1, .LBB11_3 ; RV64I-NEXT: j .LBB11_4 ; RV64I-NEXT: .LBB11_2: ; RV64I-NEXT: srl a5, a0, a1 -; RV64I-NEXT: negw a6, a1 +; RV64I-NEXT: neg a6, a1 ; RV64I-NEXT: sll a6, a3, a6 ; RV64I-NEXT: or a5, a5, a6 ; RV64I-NEXT: beqz a1, .LBB11_4 @@ -2796,8 +2796,8 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind { ; RV64I-NEXT: or t0, t5, t3 ; RV64I-NEXT: or a5, s0, t6 ; RV64I-NEXT: slli a5, a5, 3 -; RV64I-NEXT: subw t1, a5, a7 -; RV64I-NEXT: negw t5, a5 +; RV64I-NEXT: sub t1, a5, a7 +; RV64I-NEXT: neg t5, a5 ; RV64I-NEXT: sll t3, t0, t5 ; RV64I-NEXT: bltu a5, a7, .LBB12_2 ; RV64I-NEXT: # %bb.1: @@ -2842,7 +2842,7 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind { ; RV64I-NEXT: bgeu t6, a7, .LBB12_14 ; RV64I-NEXT: .LBB12_12: ; RV64I-NEXT: sll t5, a6, t5 -; RV64I-NEXT: negw s0, t6 +; RV64I-NEXT: neg s0, t6 ; RV64I-NEXT: srl s0, a6, s0 ; RV64I-NEXT: or s1, s0, t3 ; RV64I-NEXT: j .LBB12_15 @@ -2851,7 +2851,7 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind { ; RV64I-NEXT: bltu t6, a7, .LBB12_12 ; RV64I-NEXT: .LBB12_14: ; RV64I-NEXT: li t5, 0 -; RV64I-NEXT: subw t3, t6, a7 +; RV64I-NEXT: sub t3, t6, a7 ; RV64I-NEXT: sll s1, a6, t3 ; RV64I-NEXT: .LBB12_15: ; RV64I-NEXT: sub s0, a5, t1 @@ -2862,13 +2862,13 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind { ; RV64I-NEXT: .LBB12_17: ; RV64I-NEXT: bltu s0, a7, .LBB12_19 ; 
RV64I-NEXT: # %bb.18: -; RV64I-NEXT: subw t6, s0, a7 +; RV64I-NEXT: sub t6, s0, a7 ; RV64I-NEXT: srl t6, t0, t6 ; RV64I-NEXT: bnez s0, .LBB12_20 ; RV64I-NEXT: j .LBB12_21 ; RV64I-NEXT: .LBB12_19: ; RV64I-NEXT: srl t6, a6, s0 -; RV64I-NEXT: negw s1, s0 +; RV64I-NEXT: neg s1, s0 ; RV64I-NEXT: sll s1, t0, s1 ; RV64I-NEXT: or t6, t6, s1 ; RV64I-NEXT: beqz s0, .LBB12_21 @@ -3720,8 +3720,8 @@ define void @lshr_32bytes_wordOff(ptr %src.ptr, ptr %wordOff.ptr, ptr %dst) noun ; RV64I-NEXT: or t0, t5, t3 ; RV64I-NEXT: or a5, s0, t6 ; RV64I-NEXT: slli a5, a5, 5 -; RV64I-NEXT: subw t1, a5, a7 -; RV64I-NEXT: negw t5, a5 +; RV64I-NEXT: sub t1, a5, a7 +; RV64I-NEXT: neg t5, a5 ; RV64I-NEXT: sll t3, t0, t5 ; RV64I-NEXT: bltu a5, a7, .LBB13_2 ; RV64I-NEXT: # %bb.1: @@ -3766,7 +3766,7 @@ define void @lshr_32bytes_wordOff(ptr %src.ptr, ptr %wordOff.ptr, ptr %dst) noun ; RV64I-NEXT: bgeu t6, a7, .LBB13_14 ; RV64I-NEXT: .LBB13_12: ; RV64I-NEXT: sll t5, a6, t5 -; RV64I-NEXT: negw s0, t6 +; RV64I-NEXT: neg s0, t6 ; RV64I-NEXT: srl s0, a6, s0 ; RV64I-NEXT: or s1, s0, t3 ; RV64I-NEXT: j .LBB13_15 @@ -3775,7 +3775,7 @@ define void @lshr_32bytes_wordOff(ptr %src.ptr, ptr %wordOff.ptr, ptr %dst) noun ; RV64I-NEXT: bltu t6, a7, .LBB13_12 ; RV64I-NEXT: .LBB13_14: ; RV64I-NEXT: li t5, 0 -; RV64I-NEXT: subw t3, t6, a7 +; RV64I-NEXT: sub t3, t6, a7 ; RV64I-NEXT: sll s1, a6, t3 ; RV64I-NEXT: .LBB13_15: ; RV64I-NEXT: sub s0, a5, t1 @@ -3786,13 +3786,13 @@ define void @lshr_32bytes_wordOff(ptr %src.ptr, ptr %wordOff.ptr, ptr %dst) noun ; RV64I-NEXT: .LBB13_17: ; RV64I-NEXT: bltu s0, a7, .LBB13_19 ; RV64I-NEXT: # %bb.18: -; RV64I-NEXT: subw t6, s0, a7 +; RV64I-NEXT: sub t6, s0, a7 ; RV64I-NEXT: srl t6, t0, t6 ; RV64I-NEXT: bnez s0, .LBB13_20 ; RV64I-NEXT: j .LBB13_21 ; RV64I-NEXT: .LBB13_19: ; RV64I-NEXT: srl t6, a6, s0 -; RV64I-NEXT: negw s1, s0 +; RV64I-NEXT: neg s1, s0 ; RV64I-NEXT: sll s1, t0, s1 ; RV64I-NEXT: or t6, t6, s1 ; RV64I-NEXT: beqz s0, .LBB13_21 @@ -4644,8 +4644,8 @@ define void @lshr_32bytes_dwordOff(ptr %src.ptr, ptr %dwordOff.ptr, ptr %dst) no ; RV64I-NEXT: or t0, t5, t3 ; RV64I-NEXT: or a5, s0, t6 ; RV64I-NEXT: slli a5, a5, 6 -; RV64I-NEXT: subw t1, a5, a7 -; RV64I-NEXT: negw t5, a5 +; RV64I-NEXT: sub t1, a5, a7 +; RV64I-NEXT: neg t5, a5 ; RV64I-NEXT: sll t3, t0, t5 ; RV64I-NEXT: bltu a5, a7, .LBB14_2 ; RV64I-NEXT: # %bb.1: @@ -4690,7 +4690,7 @@ define void @lshr_32bytes_dwordOff(ptr %src.ptr, ptr %dwordOff.ptr, ptr %dst) no ; RV64I-NEXT: bgeu t6, a7, .LBB14_14 ; RV64I-NEXT: .LBB14_12: ; RV64I-NEXT: sll t5, a6, t5 -; RV64I-NEXT: negw s0, t6 +; RV64I-NEXT: neg s0, t6 ; RV64I-NEXT: srl s0, a6, s0 ; RV64I-NEXT: or s1, s0, t3 ; RV64I-NEXT: j .LBB14_15 @@ -4699,7 +4699,7 @@ define void @lshr_32bytes_dwordOff(ptr %src.ptr, ptr %dwordOff.ptr, ptr %dst) no ; RV64I-NEXT: bltu t6, a7, .LBB14_12 ; RV64I-NEXT: .LBB14_14: ; RV64I-NEXT: li t5, 0 -; RV64I-NEXT: subw t3, t6, a7 +; RV64I-NEXT: sub t3, t6, a7 ; RV64I-NEXT: sll s1, a6, t3 ; RV64I-NEXT: .LBB14_15: ; RV64I-NEXT: sub s0, a5, t1 @@ -4710,13 +4710,13 @@ define void @lshr_32bytes_dwordOff(ptr %src.ptr, ptr %dwordOff.ptr, ptr %dst) no ; RV64I-NEXT: .LBB14_17: ; RV64I-NEXT: bltu s0, a7, .LBB14_19 ; RV64I-NEXT: # %bb.18: -; RV64I-NEXT: subw t6, s0, a7 +; RV64I-NEXT: sub t6, s0, a7 ; RV64I-NEXT: srl t6, t0, t6 ; RV64I-NEXT: bnez s0, .LBB14_20 ; RV64I-NEXT: j .LBB14_21 ; RV64I-NEXT: .LBB14_19: ; RV64I-NEXT: srl t6, a6, s0 -; RV64I-NEXT: negw s1, s0 +; RV64I-NEXT: neg s1, s0 ; RV64I-NEXT: sll s1, t0, s1 ; RV64I-NEXT: or t6, t6, s1 ; RV64I-NEXT: beqz s0, .LBB14_21 @@ 
-5542,8 +5542,8 @@ define void @shl_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind { ; RV64I-NEXT: or a5, s0, a6 ; RV64I-NEXT: or a6, a1, s5 ; RV64I-NEXT: slli a6, a6, 3 -; RV64I-NEXT: subw t2, a6, t0 -; RV64I-NEXT: negw t3, a6 +; RV64I-NEXT: sub t2, a6, t0 +; RV64I-NEXT: neg t3, a6 ; RV64I-NEXT: srl s0, t1, t3 ; RV64I-NEXT: bltu a6, t0, .LBB15_2 ; RV64I-NEXT: # %bb.1: @@ -5585,11 +5585,11 @@ define void @shl_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind { ; RV64I-NEXT: slli s4, s9, 16 ; RV64I-NEXT: bltu a4, t0, .LBB15_7 ; RV64I-NEXT: # %bb.6: -; RV64I-NEXT: subw s0, a4, t0 +; RV64I-NEXT: sub s0, a4, t0 ; RV64I-NEXT: srl s0, a5, s0 ; RV64I-NEXT: j .LBB15_8 ; RV64I-NEXT: .LBB15_7: -; RV64I-NEXT: negw s6, a4 +; RV64I-NEXT: neg s6, a4 ; RV64I-NEXT: sll s6, a5, s6 ; RV64I-NEXT: or s0, s0, s6 ; RV64I-NEXT: .LBB15_8: @@ -5637,13 +5637,13 @@ define void @shl_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind { ; RV64I-NEXT: bltu s0, t0, .LBB15_20 ; RV64I-NEXT: # %bb.19: ; RV64I-NEXT: li t2, 0 -; RV64I-NEXT: subw t0, s0, t0 +; RV64I-NEXT: sub t0, s0, t0 ; RV64I-NEXT: sll t0, t1, t0 ; RV64I-NEXT: bnez s0, .LBB15_21 ; RV64I-NEXT: j .LBB15_22 ; RV64I-NEXT: .LBB15_20: ; RV64I-NEXT: sll t2, t1, s0 -; RV64I-NEXT: negw t0, s0 +; RV64I-NEXT: neg t0, s0 ; RV64I-NEXT: srl t0, t1, t0 ; RV64I-NEXT: sll t1, a5, s0 ; RV64I-NEXT: or t0, t0, t1 @@ -6456,8 +6456,8 @@ define void @shl_32bytes_wordOff(ptr %src.ptr, ptr %wordOff.ptr, ptr %dst) nounw ; RV64I-NEXT: or a5, s0, a6 ; RV64I-NEXT: or a6, a1, s5 ; RV64I-NEXT: slli a6, a6, 5 -; RV64I-NEXT: subw t2, a6, t0 -; RV64I-NEXT: negw t3, a6 +; RV64I-NEXT: sub t2, a6, t0 +; RV64I-NEXT: neg t3, a6 ; RV64I-NEXT: srl s0, t1, t3 ; RV64I-NEXT: bltu a6, t0, .LBB16_2 ; RV64I-NEXT: # %bb.1: @@ -6499,11 +6499,11 @@ define void @shl_32bytes_wordOff(ptr %src.ptr, ptr %wordOff.ptr, ptr %dst) nounw ; RV64I-NEXT: slli s4, s9, 16 ; RV64I-NEXT: bltu a4, t0, .LBB16_7 ; RV64I-NEXT: # %bb.6: -; RV64I-NEXT: subw s0, a4, t0 +; RV64I-NEXT: sub s0, a4, t0 ; RV64I-NEXT: srl s0, a5, s0 ; RV64I-NEXT: j .LBB16_8 ; RV64I-NEXT: .LBB16_7: -; RV64I-NEXT: negw s6, a4 +; RV64I-NEXT: neg s6, a4 ; RV64I-NEXT: sll s6, a5, s6 ; RV64I-NEXT: or s0, s0, s6 ; RV64I-NEXT: .LBB16_8: @@ -6551,13 +6551,13 @@ define void @shl_32bytes_wordOff(ptr %src.ptr, ptr %wordOff.ptr, ptr %dst) nounw ; RV64I-NEXT: bltu s0, t0, .LBB16_20 ; RV64I-NEXT: # %bb.19: ; RV64I-NEXT: li t2, 0 -; RV64I-NEXT: subw t0, s0, t0 +; RV64I-NEXT: sub t0, s0, t0 ; RV64I-NEXT: sll t0, t1, t0 ; RV64I-NEXT: bnez s0, .LBB16_21 ; RV64I-NEXT: j .LBB16_22 ; RV64I-NEXT: .LBB16_20: ; RV64I-NEXT: sll t2, t1, s0 -; RV64I-NEXT: negw t0, s0 +; RV64I-NEXT: neg t0, s0 ; RV64I-NEXT: srl t0, t1, t0 ; RV64I-NEXT: sll t1, a5, s0 ; RV64I-NEXT: or t0, t0, t1 @@ -7370,8 +7370,8 @@ define void @shl_32bytes_dwordOff(ptr %src.ptr, ptr %dwordOff.ptr, ptr %dst) nou ; RV64I-NEXT: or a5, s0, a6 ; RV64I-NEXT: or a6, a1, s5 ; RV64I-NEXT: slli a6, a6, 6 -; RV64I-NEXT: subw t2, a6, t0 -; RV64I-NEXT: negw t3, a6 +; RV64I-NEXT: sub t2, a6, t0 +; RV64I-NEXT: neg t3, a6 ; RV64I-NEXT: srl s0, t1, t3 ; RV64I-NEXT: bltu a6, t0, .LBB17_2 ; RV64I-NEXT: # %bb.1: @@ -7413,11 +7413,11 @@ define void @shl_32bytes_dwordOff(ptr %src.ptr, ptr %dwordOff.ptr, ptr %dst) nou ; RV64I-NEXT: slli s4, s9, 16 ; RV64I-NEXT: bltu a4, t0, .LBB17_7 ; RV64I-NEXT: # %bb.6: -; RV64I-NEXT: subw s0, a4, t0 +; RV64I-NEXT: sub s0, a4, t0 ; RV64I-NEXT: srl s0, a5, s0 ; RV64I-NEXT: j .LBB17_8 ; RV64I-NEXT: .LBB17_7: -; RV64I-NEXT: negw s6, a4 +; RV64I-NEXT: neg s6, a4 ; RV64I-NEXT: 
sll s6, a5, s6 ; RV64I-NEXT: or s0, s0, s6 ; RV64I-NEXT: .LBB17_8: @@ -7465,13 +7465,13 @@ define void @shl_32bytes_dwordOff(ptr %src.ptr, ptr %dwordOff.ptr, ptr %dst) nou ; RV64I-NEXT: bltu s0, t0, .LBB17_20 ; RV64I-NEXT: # %bb.19: ; RV64I-NEXT: li t2, 0 -; RV64I-NEXT: subw t0, s0, t0 +; RV64I-NEXT: sub t0, s0, t0 ; RV64I-NEXT: sll t0, t1, t0 ; RV64I-NEXT: bnez s0, .LBB17_21 ; RV64I-NEXT: j .LBB17_22 ; RV64I-NEXT: .LBB17_20: ; RV64I-NEXT: sll t2, t1, s0 -; RV64I-NEXT: negw t0, s0 +; RV64I-NEXT: neg t0, s0 ; RV64I-NEXT: srl t0, t1, t0 ; RV64I-NEXT: sll t1, a5, s0 ; RV64I-NEXT: or t0, t0, t1 @@ -8310,8 +8310,8 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind { ; RV64I-NEXT: or a5, t5, t4 ; RV64I-NEXT: or a6, s0, t6 ; RV64I-NEXT: slli a6, a6, 3 -; RV64I-NEXT: subw t1, a6, t0 -; RV64I-NEXT: negw t5, a6 +; RV64I-NEXT: sub t1, a6, t0 +; RV64I-NEXT: neg t5, a6 ; RV64I-NEXT: sll t4, a5, t5 ; RV64I-NEXT: bltu a6, t0, .LBB18_2 ; RV64I-NEXT: # %bb.1: @@ -8356,7 +8356,7 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind { ; RV64I-NEXT: bgeu t6, t0, .LBB18_14 ; RV64I-NEXT: .LBB18_12: ; RV64I-NEXT: sll t5, a7, t5 -; RV64I-NEXT: negw s0, t6 +; RV64I-NEXT: neg s0, t6 ; RV64I-NEXT: srl s0, a7, s0 ; RV64I-NEXT: or s1, s0, t4 ; RV64I-NEXT: j .LBB18_15 @@ -8365,7 +8365,7 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind { ; RV64I-NEXT: bltu t6, t0, .LBB18_12 ; RV64I-NEXT: .LBB18_14: ; RV64I-NEXT: li t5, 0 -; RV64I-NEXT: subw t4, t6, t0 +; RV64I-NEXT: sub t4, t6, t0 ; RV64I-NEXT: sll s1, a7, t4 ; RV64I-NEXT: .LBB18_15: ; RV64I-NEXT: sub s0, a6, t1 @@ -8376,13 +8376,13 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind { ; RV64I-NEXT: .LBB18_17: ; RV64I-NEXT: bltu s0, t0, .LBB18_19 ; RV64I-NEXT: # %bb.18: -; RV64I-NEXT: subw t6, s0, t0 +; RV64I-NEXT: sub t6, s0, t0 ; RV64I-NEXT: sra t6, a5, t6 ; RV64I-NEXT: bnez s0, .LBB18_20 ; RV64I-NEXT: j .LBB18_21 ; RV64I-NEXT: .LBB18_19: ; RV64I-NEXT: srl t6, a7, s0 -; RV64I-NEXT: negw s1, s0 +; RV64I-NEXT: neg s1, s0 ; RV64I-NEXT: sll s1, a5, s1 ; RV64I-NEXT: or t6, t6, s1 ; RV64I-NEXT: beqz s0, .LBB18_21 @@ -9241,8 +9241,8 @@ define void @ashr_32bytes_wordOff(ptr %src.ptr, ptr %wordOff.ptr, ptr %dst) noun ; RV64I-NEXT: or a5, t5, t4 ; RV64I-NEXT: or a6, s0, t6 ; RV64I-NEXT: slli a6, a6, 5 -; RV64I-NEXT: subw t1, a6, t0 -; RV64I-NEXT: negw t5, a6 +; RV64I-NEXT: sub t1, a6, t0 +; RV64I-NEXT: neg t5, a6 ; RV64I-NEXT: sll t4, a5, t5 ; RV64I-NEXT: bltu a6, t0, .LBB19_2 ; RV64I-NEXT: # %bb.1: @@ -9287,7 +9287,7 @@ define void @ashr_32bytes_wordOff(ptr %src.ptr, ptr %wordOff.ptr, ptr %dst) noun ; RV64I-NEXT: bgeu t6, t0, .LBB19_14 ; RV64I-NEXT: .LBB19_12: ; RV64I-NEXT: sll t5, a7, t5 -; RV64I-NEXT: negw s0, t6 +; RV64I-NEXT: neg s0, t6 ; RV64I-NEXT: srl s0, a7, s0 ; RV64I-NEXT: or s1, s0, t4 ; RV64I-NEXT: j .LBB19_15 @@ -9296,7 +9296,7 @@ define void @ashr_32bytes_wordOff(ptr %src.ptr, ptr %wordOff.ptr, ptr %dst) noun ; RV64I-NEXT: bltu t6, t0, .LBB19_12 ; RV64I-NEXT: .LBB19_14: ; RV64I-NEXT: li t5, 0 -; RV64I-NEXT: subw t4, t6, t0 +; RV64I-NEXT: sub t4, t6, t0 ; RV64I-NEXT: sll s1, a7, t4 ; RV64I-NEXT: .LBB19_15: ; RV64I-NEXT: sub s0, a6, t1 @@ -9307,13 +9307,13 @@ define void @ashr_32bytes_wordOff(ptr %src.ptr, ptr %wordOff.ptr, ptr %dst) noun ; RV64I-NEXT: .LBB19_17: ; RV64I-NEXT: bltu s0, t0, .LBB19_19 ; RV64I-NEXT: # %bb.18: -; RV64I-NEXT: subw t6, s0, t0 +; RV64I-NEXT: sub t6, s0, t0 ; RV64I-NEXT: sra t6, a5, t6 ; RV64I-NEXT: bnez s0, .LBB19_20 ; 
RV64I-NEXT: j .LBB19_21 ; RV64I-NEXT: .LBB19_19: ; RV64I-NEXT: srl t6, a7, s0 -; RV64I-NEXT: negw s1, s0 +; RV64I-NEXT: neg s1, s0 ; RV64I-NEXT: sll s1, a5, s1 ; RV64I-NEXT: or t6, t6, s1 ; RV64I-NEXT: beqz s0, .LBB19_21 @@ -10172,8 +10172,8 @@ define void @ashr_32bytes_dwordOff(ptr %src.ptr, ptr %dwordOff.ptr, ptr %dst) no ; RV64I-NEXT: or a5, t5, t4 ; RV64I-NEXT: or a6, s0, t6 ; RV64I-NEXT: slli a6, a6, 6 -; RV64I-NEXT: subw t1, a6, t0 -; RV64I-NEXT: negw t5, a6 +; RV64I-NEXT: sub t1, a6, t0 +; RV64I-NEXT: neg t5, a6 ; RV64I-NEXT: sll t4, a5, t5 ; RV64I-NEXT: bltu a6, t0, .LBB20_2 ; RV64I-NEXT: # %bb.1: @@ -10218,7 +10218,7 @@ define void @ashr_32bytes_dwordOff(ptr %src.ptr, ptr %dwordOff.ptr, ptr %dst) no ; RV64I-NEXT: bgeu t6, t0, .LBB20_14 ; RV64I-NEXT: .LBB20_12: ; RV64I-NEXT: sll t5, a7, t5 -; RV64I-NEXT: negw s0, t6 +; RV64I-NEXT: neg s0, t6 ; RV64I-NEXT: srl s0, a7, s0 ; RV64I-NEXT: or s1, s0, t4 ; RV64I-NEXT: j .LBB20_15 @@ -10227,7 +10227,7 @@ define void @ashr_32bytes_dwordOff(ptr %src.ptr, ptr %dwordOff.ptr, ptr %dst) no ; RV64I-NEXT: bltu t6, t0, .LBB20_12 ; RV64I-NEXT: .LBB20_14: ; RV64I-NEXT: li t5, 0 -; RV64I-NEXT: subw t4, t6, t0 +; RV64I-NEXT: sub t4, t6, t0 ; RV64I-NEXT: sll s1, a7, t4 ; RV64I-NEXT: .LBB20_15: ; RV64I-NEXT: sub s0, a6, t1 @@ -10238,13 +10238,13 @@ define void @ashr_32bytes_dwordOff(ptr %src.ptr, ptr %dwordOff.ptr, ptr %dst) no ; RV64I-NEXT: .LBB20_17: ; RV64I-NEXT: bltu s0, t0, .LBB20_19 ; RV64I-NEXT: # %bb.18: -; RV64I-NEXT: subw t6, s0, t0 +; RV64I-NEXT: sub t6, s0, t0 ; RV64I-NEXT: sra t6, a5, t6 ; RV64I-NEXT: bnez s0, .LBB20_20 ; RV64I-NEXT: j .LBB20_21 ; RV64I-NEXT: .LBB20_19: ; RV64I-NEXT: srl t6, a7, s0 -; RV64I-NEXT: negw s1, s0 +; RV64I-NEXT: neg s1, s0 ; RV64I-NEXT: sll s1, a5, s1 ; RV64I-NEXT: or t6, t6, s1 ; RV64I-NEXT: beqz s0, .LBB20_21 diff --git a/llvm/test/CodeGen/RISCV/abds-neg.ll b/llvm/test/CodeGen/RISCV/abds-neg.ll index 3fb0f2c53bdf0..41f73f51fe7b6 100644 --- a/llvm/test/CodeGen/RISCV/abds-neg.ll +++ b/llvm/test/CodeGen/RISCV/abds-neg.ll @@ -2221,7 +2221,7 @@ define i32 @abd_subnsw_i32(i32 %a, i32 %b) nounwind { ; ; RV64I-LABEL: abd_subnsw_i32: ; RV64I: # %bb.0: -; RV64I-NEXT: subw a0, a0, a1 +; RV64I-NEXT: sub a0, a0, a1 ; RV64I-NEXT: sraiw a1, a0, 31 ; RV64I-NEXT: xor a0, a0, a1 ; RV64I-NEXT: subw a0, a1, a0 @@ -2236,7 +2236,7 @@ define i32 @abd_subnsw_i32(i32 %a, i32 %b) nounwind { ; ; RV64ZBB-LABEL: abd_subnsw_i32: ; RV64ZBB: # %bb.0: -; RV64ZBB-NEXT: subw a0, a0, a1 +; RV64ZBB-NEXT: sub a0, a0, a1 ; RV64ZBB-NEXT: sraiw a1, a0, 31 ; RV64ZBB-NEXT: xor a0, a0, a1 ; RV64ZBB-NEXT: subw a0, a1, a0 @@ -2258,7 +2258,7 @@ define i32 @abd_subnsw_i32_undef(i32 %a, i32 %b) nounwind { ; ; RV64I-LABEL: abd_subnsw_i32_undef: ; RV64I: # %bb.0: -; RV64I-NEXT: subw a0, a0, a1 +; RV64I-NEXT: sub a0, a0, a1 ; RV64I-NEXT: sraiw a1, a0, 31 ; RV64I-NEXT: xor a0, a0, a1 ; RV64I-NEXT: subw a0, a1, a0 @@ -2273,7 +2273,7 @@ define i32 @abd_subnsw_i32_undef(i32 %a, i32 %b) nounwind { ; ; RV64ZBB-LABEL: abd_subnsw_i32_undef: ; RV64ZBB: # %bb.0: -; RV64ZBB-NEXT: subw a0, a0, a1 +; RV64ZBB-NEXT: sub a0, a0, a1 ; RV64ZBB-NEXT: sraiw a1, a0, 31 ; RV64ZBB-NEXT: xor a0, a0, a1 ; RV64ZBB-NEXT: subw a0, a1, a0 diff --git a/llvm/test/CodeGen/RISCV/abds.ll b/llvm/test/CodeGen/RISCV/abds.ll index efb4e1a6f15d6..28a95ef4f8de9 100644 --- a/llvm/test/CodeGen/RISCV/abds.ll +++ b/llvm/test/CodeGen/RISCV/abds.ll @@ -1733,21 +1733,13 @@ define i8 @abd_subnsw_i8(i8 %a, i8 %b) nounwind { ; RV64I-NEXT: sub a0, a0, a1 ; RV64I-NEXT: ret ; -; RV32ZBB-LABEL: abd_subnsw_i8: -; 
RV32ZBB: # %bb.0: -; RV32ZBB-NEXT: sub a0, a0, a1 -; RV32ZBB-NEXT: sext.b a0, a0 -; RV32ZBB-NEXT: neg a1, a0 -; RV32ZBB-NEXT: max a0, a0, a1 -; RV32ZBB-NEXT: ret -; -; RV64ZBB-LABEL: abd_subnsw_i8: -; RV64ZBB: # %bb.0: -; RV64ZBB-NEXT: subw a0, a0, a1 -; RV64ZBB-NEXT: sext.b a0, a0 -; RV64ZBB-NEXT: neg a1, a0 -; RV64ZBB-NEXT: max a0, a0, a1 -; RV64ZBB-NEXT: ret +; ZBB-LABEL: abd_subnsw_i8: +; ZBB: # %bb.0: +; ZBB-NEXT: sub a0, a0, a1 +; ZBB-NEXT: sext.b a0, a0 +; ZBB-NEXT: neg a1, a0 +; ZBB-NEXT: max a0, a0, a1 +; ZBB-NEXT: ret %sub = sub nsw i8 %a, %b %abs = call i8 @llvm.abs.i8(i8 %sub, i1 false) ret i8 %abs @@ -1772,21 +1764,13 @@ define i8 @abd_subnsw_i8_undef(i8 %a, i8 %b) nounwind { ; RV64I-NEXT: sub a0, a0, a1 ; RV64I-NEXT: ret ; -; RV32ZBB-LABEL: abd_subnsw_i8_undef: -; RV32ZBB: # %bb.0: -; RV32ZBB-NEXT: sub a0, a0, a1 -; RV32ZBB-NEXT: sext.b a0, a0 -; RV32ZBB-NEXT: neg a1, a0 -; RV32ZBB-NEXT: max a0, a0, a1 -; RV32ZBB-NEXT: ret -; -; RV64ZBB-LABEL: abd_subnsw_i8_undef: -; RV64ZBB: # %bb.0: -; RV64ZBB-NEXT: subw a0, a0, a1 -; RV64ZBB-NEXT: sext.b a0, a0 -; RV64ZBB-NEXT: neg a1, a0 -; RV64ZBB-NEXT: max a0, a0, a1 -; RV64ZBB-NEXT: ret +; ZBB-LABEL: abd_subnsw_i8_undef: +; ZBB: # %bb.0: +; ZBB-NEXT: sub a0, a0, a1 +; ZBB-NEXT: sext.b a0, a0 +; ZBB-NEXT: neg a1, a0 +; ZBB-NEXT: max a0, a0, a1 +; ZBB-NEXT: ret %sub = sub nsw i8 %a, %b %abs = call i8 @llvm.abs.i8(i8 %sub, i1 true) ret i8 %abs @@ -1811,21 +1795,13 @@ define i16 @abd_subnsw_i16(i16 %a, i16 %b) nounwind { ; RV64I-NEXT: sub a0, a0, a1 ; RV64I-NEXT: ret ; -; RV32ZBB-LABEL: abd_subnsw_i16: -; RV32ZBB: # %bb.0: -; RV32ZBB-NEXT: sub a0, a0, a1 -; RV32ZBB-NEXT: sext.h a0, a0 -; RV32ZBB-NEXT: neg a1, a0 -; RV32ZBB-NEXT: max a0, a0, a1 -; RV32ZBB-NEXT: ret -; -; RV64ZBB-LABEL: abd_subnsw_i16: -; RV64ZBB: # %bb.0: -; RV64ZBB-NEXT: subw a0, a0, a1 -; RV64ZBB-NEXT: sext.h a0, a0 -; RV64ZBB-NEXT: neg a1, a0 -; RV64ZBB-NEXT: max a0, a0, a1 -; RV64ZBB-NEXT: ret +; ZBB-LABEL: abd_subnsw_i16: +; ZBB: # %bb.0: +; ZBB-NEXT: sub a0, a0, a1 +; ZBB-NEXT: sext.h a0, a0 +; ZBB-NEXT: neg a1, a0 +; ZBB-NEXT: max a0, a0, a1 +; ZBB-NEXT: ret %sub = sub nsw i16 %a, %b %abs = call i16 @llvm.abs.i16(i16 %sub, i1 false) ret i16 %abs @@ -1850,21 +1826,13 @@ define i16 @abd_subnsw_i16_undef(i16 %a, i16 %b) nounwind { ; RV64I-NEXT: sub a0, a0, a1 ; RV64I-NEXT: ret ; -; RV32ZBB-LABEL: abd_subnsw_i16_undef: -; RV32ZBB: # %bb.0: -; RV32ZBB-NEXT: sub a0, a0, a1 -; RV32ZBB-NEXT: sext.h a0, a0 -; RV32ZBB-NEXT: neg a1, a0 -; RV32ZBB-NEXT: max a0, a0, a1 -; RV32ZBB-NEXT: ret -; -; RV64ZBB-LABEL: abd_subnsw_i16_undef: -; RV64ZBB: # %bb.0: -; RV64ZBB-NEXT: subw a0, a0, a1 -; RV64ZBB-NEXT: sext.h a0, a0 -; RV64ZBB-NEXT: neg a1, a0 -; RV64ZBB-NEXT: max a0, a0, a1 -; RV64ZBB-NEXT: ret +; ZBB-LABEL: abd_subnsw_i16_undef: +; ZBB: # %bb.0: +; ZBB-NEXT: sub a0, a0, a1 +; ZBB-NEXT: sext.h a0, a0 +; ZBB-NEXT: neg a1, a0 +; ZBB-NEXT: max a0, a0, a1 +; ZBB-NEXT: ret %sub = sub nsw i16 %a, %b %abs = call i16 @llvm.abs.i16(i16 %sub, i1 true) ret i16 %abs @@ -1881,7 +1849,7 @@ define i32 @abd_subnsw_i32(i32 %a, i32 %b) nounwind { ; ; RV64I-LABEL: abd_subnsw_i32: ; RV64I: # %bb.0: -; RV64I-NEXT: subw a0, a0, a1 +; RV64I-NEXT: sub a0, a0, a1 ; RV64I-NEXT: sraiw a1, a0, 31 ; RV64I-NEXT: xor a0, a0, a1 ; RV64I-NEXT: subw a0, a0, a1 @@ -1916,7 +1884,7 @@ define i32 @abd_subnsw_i32_undef(i32 %a, i32 %b) nounwind { ; ; RV64I-LABEL: abd_subnsw_i32_undef: ; RV64I: # %bb.0: -; RV64I-NEXT: subw a0, a0, a1 +; RV64I-NEXT: sub a0, a0, a1 ; RV64I-NEXT: sraiw a1, a0, 31 ; RV64I-NEXT: xor a0, a0, 
a1 ; RV64I-NEXT: subw a0, a0, a1 @@ -2317,7 +2285,7 @@ define i32 @abd_sub_i32(i32 %a, i32 %b) nounwind { ; ; RV64I-LABEL: abd_sub_i32: ; RV64I: # %bb.0: -; RV64I-NEXT: subw a0, a0, a1 +; RV64I-NEXT: sub a0, a0, a1 ; RV64I-NEXT: sraiw a1, a0, 31 ; RV64I-NEXT: xor a0, a0, a1 ; RV64I-NEXT: subw a0, a0, a1 diff --git a/llvm/test/CodeGen/RISCV/addimm-mulimm.ll b/llvm/test/CodeGen/RISCV/addimm-mulimm.ll index aac355e3f055b..3b2cab2b66303 100644 --- a/llvm/test/CodeGen/RISCV/addimm-mulimm.ll +++ b/llvm/test/CodeGen/RISCV/addimm-mulimm.ll @@ -20,7 +20,7 @@ define i32 @add_mul_combine_accept_a1(i32 %x) { ; RV64IMB: # %bb.0: ; RV64IMB-NEXT: sh1add a1, a0, a0 ; RV64IMB-NEXT: slli a0, a0, 5 -; RV64IMB-NEXT: subw a0, a0, a1 +; RV64IMB-NEXT: sub a0, a0, a1 ; RV64IMB-NEXT: addiw a0, a0, 1073 ; RV64IMB-NEXT: ret %tmp0 = add i32 %x, 37 @@ -41,7 +41,7 @@ define signext i32 @add_mul_combine_accept_a2(i32 signext %x) { ; RV64IMB: # %bb.0: ; RV64IMB-NEXT: sh1add a1, a0, a0 ; RV64IMB-NEXT: slli a0, a0, 5 -; RV64IMB-NEXT: subw a0, a0, a1 +; RV64IMB-NEXT: sub a0, a0, a1 ; RV64IMB-NEXT: addiw a0, a0, 1073 ; RV64IMB-NEXT: ret %tmp0 = add i32 %x, 37 @@ -93,7 +93,7 @@ define i32 @add_mul_combine_accept_b1(i32 %x) { ; RV64IMB: # %bb.0: ; RV64IMB-NEXT: sh3add a1, a0, a0 ; RV64IMB-NEXT: slli a0, a0, 5 -; RV64IMB-NEXT: subw a0, a0, a1 +; RV64IMB-NEXT: sub a0, a0, a1 ; RV64IMB-NEXT: lui a1, 50 ; RV64IMB-NEXT: addi a1, a1, 1119 ; RV64IMB-NEXT: addw a0, a0, a1 @@ -118,7 +118,7 @@ define signext i32 @add_mul_combine_accept_b2(i32 signext %x) { ; RV64IMB: # %bb.0: ; RV64IMB-NEXT: sh3add a1, a0, a0 ; RV64IMB-NEXT: slli a0, a0, 5 -; RV64IMB-NEXT: subw a0, a0, a1 +; RV64IMB-NEXT: sub a0, a0, a1 ; RV64IMB-NEXT: lui a1, 50 ; RV64IMB-NEXT: addi a1, a1, 1119 ; RV64IMB-NEXT: addw a0, a0, a1 @@ -456,7 +456,7 @@ define i32 @add_mul_combine_reject_f1(i32 %x) { ; RV64IMB-NEXT: addi a0, a0, 1972 ; RV64IMB-NEXT: sh1add a1, a0, a0 ; RV64IMB-NEXT: slli a0, a0, 5 -; RV64IMB-NEXT: subw a0, a0, a1 +; RV64IMB-NEXT: sub a0, a0, a1 ; RV64IMB-NEXT: addiw a0, a0, 11 ; RV64IMB-NEXT: ret %tmp0 = mul i32 %x, 29 @@ -479,7 +479,7 @@ define signext i32 @add_mul_combine_reject_f2(i32 signext %x) { ; RV64IMB-NEXT: addi a0, a0, 1972 ; RV64IMB-NEXT: sh1add a1, a0, a0 ; RV64IMB-NEXT: slli a0, a0, 5 -; RV64IMB-NEXT: subw a0, a0, a1 +; RV64IMB-NEXT: sub a0, a0, a1 ; RV64IMB-NEXT: addiw a0, a0, 11 ; RV64IMB-NEXT: ret %tmp0 = mul i32 %x, 29 diff --git a/llvm/test/CodeGen/RISCV/aext-to-sext.ll b/llvm/test/CodeGen/RISCV/aext-to-sext.ll index f3f71a923bdc2..34549a06dd298 100644 --- a/llvm/test/CodeGen/RISCV/aext-to-sext.ll +++ b/llvm/test/CodeGen/RISCV/aext-to-sext.ll @@ -16,7 +16,7 @@ define void @quux(i32 signext %arg, i32 signext %arg1) nounwind { ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill -; RV64I-NEXT: subw s0, a1, a0 +; RV64I-NEXT: sub s0, a1, a0 ; RV64I-NEXT: .LBB0_2: # %bb2 ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 ; RV64I-NEXT: call hoge diff --git a/llvm/test/CodeGen/RISCV/atomicrmw-cond-sub-clamp.ll b/llvm/test/CodeGen/RISCV/atomicrmw-cond-sub-clamp.ll index 27704d107f93d..ea9786d0b10b3 100644 --- a/llvm/test/CodeGen/RISCV/atomicrmw-cond-sub-clamp.ll +++ b/llvm/test/CodeGen/RISCV/atomicrmw-cond-sub-clamp.ll @@ -161,7 +161,7 @@ define i8 @atomicrmw_usub_cond_i8(ptr %ptr, i8 %val) { ; RV64IA-NEXT: sltu t0, t0, a5 ; RV64IA-NEXT: addi t0, t0, -1 ; RV64IA-NEXT: and t0, t0, a1 -; RV64IA-NEXT: subw a6, a6, t0 +; RV64IA-NEXT: sub a6, a6, t0 ; RV64IA-NEXT: zext.b a6, a6 ; 
RV64IA-NEXT: sllw a6, a6, a0 ; RV64IA-NEXT: and a3, a3, a4 @@ -345,7 +345,7 @@ define i16 @atomicrmw_usub_cond_i16(ptr %ptr, i16 %val) { ; RV64IA-NEXT: sltu t1, t1, a6 ; RV64IA-NEXT: addi t1, t1, -1 ; RV64IA-NEXT: and t1, t1, a1 -; RV64IA-NEXT: subw a7, a7, t1 +; RV64IA-NEXT: sub a7, a7, t1 ; RV64IA-NEXT: and a7, a7, a3 ; RV64IA-NEXT: sllw a7, a7, a0 ; RV64IA-NEXT: and a4, a4, a5 diff --git a/llvm/test/CodeGen/RISCV/atomicrmw-uinc-udec-wrap.ll b/llvm/test/CodeGen/RISCV/atomicrmw-uinc-udec-wrap.ll index ada1933d91d60..4e04f38a6301d 100644 --- a/llvm/test/CodeGen/RISCV/atomicrmw-uinc-udec-wrap.ll +++ b/llvm/test/CodeGen/RISCV/atomicrmw-uinc-udec-wrap.ll @@ -150,7 +150,7 @@ define i8 @atomicrmw_uinc_wrap_i8(ptr %ptr, i8 %val) { ; RV64IA-NEXT: zext.b a7, a5 ; RV64IA-NEXT: addi a5, a5, 1 ; RV64IA-NEXT: sltu a7, a7, a1 -; RV64IA-NEXT: negw a7, a7 +; RV64IA-NEXT: neg a7, a7 ; RV64IA-NEXT: and a5, a7, a5 ; RV64IA-NEXT: zext.b a5, a5 ; RV64IA-NEXT: sllw a5, a5, a0 @@ -325,7 +325,7 @@ define i16 @atomicrmw_uinc_wrap_i16(ptr %ptr, i16 %val) { ; RV64IA-NEXT: addi a6, a6, 1 ; RV64IA-NEXT: sltu t0, t0, a1 ; RV64IA-NEXT: and a6, a6, a3 -; RV64IA-NEXT: negw t0, t0 +; RV64IA-NEXT: neg t0, t0 ; RV64IA-NEXT: and a6, t0, a6 ; RV64IA-NEXT: sllw a6, a6, a0 ; RV64IA-NEXT: and a4, a4, a5 diff --git a/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll b/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll index 724891853630a..530980c13116c 100644 --- a/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll +++ b/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll @@ -63,7 +63,7 @@ define i8 @test_cttz_i8(i8 %a) nounwind { ; RV64NOZBB-NEXT: and a0, a0, a1 ; RV64NOZBB-NEXT: srli a1, a0, 1 ; RV64NOZBB-NEXT: andi a1, a1, 85 -; RV64NOZBB-NEXT: subw a0, a0, a1 +; RV64NOZBB-NEXT: sub a0, a0, a1 ; RV64NOZBB-NEXT: andi a1, a0, 51 ; RV64NOZBB-NEXT: srli a0, a0, 2 ; RV64NOZBB-NEXT: andi a0, a0, 51 @@ -262,7 +262,7 @@ define i32 @test_cttz_i32(i32 %a) nounwind { ; RV64I-NEXT: sext.w a1, a0 ; RV64I-NEXT: beqz a1, .LBB2_2 ; RV64I-NEXT: # %bb.1: # %cond.false -; RV64I-NEXT: negw a1, a0 +; RV64I-NEXT: neg a1, a0 ; RV64I-NEXT: and a0, a0, a1 ; RV64I-NEXT: slli a1, a0, 6 ; RV64I-NEXT: slli a2, a0, 8 @@ -270,16 +270,16 @@ define i32 @test_cttz_i32(i32 %a) nounwind { ; RV64I-NEXT: slli a4, a0, 12 ; RV64I-NEXT: add a1, a1, a2 ; RV64I-NEXT: slli a2, a0, 16 -; RV64I-NEXT: subw a3, a3, a4 +; RV64I-NEXT: sub a3, a3, a4 ; RV64I-NEXT: slli a4, a0, 18 -; RV64I-NEXT: subw a2, a2, a4 +; RV64I-NEXT: sub a2, a2, a4 ; RV64I-NEXT: slli a4, a0, 4 -; RV64I-NEXT: subw a4, a0, a4 +; RV64I-NEXT: sub a4, a0, a4 ; RV64I-NEXT: add a1, a4, a1 ; RV64I-NEXT: slli a4, a0, 14 -; RV64I-NEXT: subw a3, a3, a4 +; RV64I-NEXT: sub a3, a3, a4 ; RV64I-NEXT: slli a4, a0, 23 -; RV64I-NEXT: subw a2, a2, a4 +; RV64I-NEXT: sub a2, a2, a4 ; RV64I-NEXT: slli a0, a0, 27 ; RV64I-NEXT: add a1, a1, a3 ; RV64I-NEXT: add a0, a2, a0 @@ -318,7 +318,7 @@ define i32 @test_cttz_i32(i32 %a) nounwind { ; RV64M-NEXT: sext.w a1, a0 ; RV64M-NEXT: beqz a1, .LBB2_2 ; RV64M-NEXT: # %bb.1: # %cond.false -; RV64M-NEXT: negw a1, a0 +; RV64M-NEXT: neg a1, a0 ; RV64M-NEXT: and a0, a0, a1 ; RV64M-NEXT: lui a1, 30667 ; RV64M-NEXT: addi a1, a1, 1329 @@ -597,7 +597,7 @@ define i8 @test_cttz_i8_zero_undef(i8 %a) nounwind { ; RV64NOZBB-NEXT: and a0, a0, a1 ; RV64NOZBB-NEXT: srli a1, a0, 1 ; RV64NOZBB-NEXT: andi a1, a1, 85 -; RV64NOZBB-NEXT: subw a0, a0, a1 +; RV64NOZBB-NEXT: sub a0, a0, a1 ; RV64NOZBB-NEXT: andi a1, a0, 51 ; RV64NOZBB-NEXT: srli a0, a0, 2 ; RV64NOZBB-NEXT: andi a0, a0, 51 @@ -743,7 +743,7 @@ define i32 
@@ -743,7 +743,7 @@ define i32 @test_cttz_i32_zero_undef(i32 %a) nounwind {
 ;
 ; RV64I-LABEL: test_cttz_i32_zero_undef:
 ; RV64I: # %bb.0:
-; RV64I-NEXT: negw a1, a0
+; RV64I-NEXT: neg a1, a0
 ; RV64I-NEXT: and a0, a0, a1
 ; RV64I-NEXT: slli a1, a0, 6
 ; RV64I-NEXT: slli a2, a0, 8
@@ -751,16 +751,16 @@ define i32 @test_cttz_i32_zero_undef(i32 %a) nounwind {
 ; RV64I-NEXT: slli a4, a0, 12
 ; RV64I-NEXT: add a1, a1, a2
 ; RV64I-NEXT: slli a2, a0, 16
-; RV64I-NEXT: subw a3, a3, a4
+; RV64I-NEXT: sub a3, a3, a4
 ; RV64I-NEXT: slli a4, a0, 18
-; RV64I-NEXT: subw a2, a2, a4
+; RV64I-NEXT: sub a2, a2, a4
 ; RV64I-NEXT: slli a4, a0, 4
-; RV64I-NEXT: subw a4, a0, a4
+; RV64I-NEXT: sub a4, a0, a4
 ; RV64I-NEXT: add a1, a4, a1
 ; RV64I-NEXT: slli a4, a0, 14
-; RV64I-NEXT: subw a3, a3, a4
+; RV64I-NEXT: sub a3, a3, a4
 ; RV64I-NEXT: slli a4, a0, 23
-; RV64I-NEXT: subw a2, a2, a4
+; RV64I-NEXT: sub a2, a2, a4
 ; RV64I-NEXT: slli a0, a0, 27
 ; RV64I-NEXT: add a1, a1, a3
 ; RV64I-NEXT: add a0, a2, a0
@@ -788,7 +788,7 @@ define i32 @test_cttz_i32_zero_undef(i32 %a) nounwind {
 ;
 ; RV64M-LABEL: test_cttz_i32_zero_undef:
 ; RV64M: # %bb.0:
-; RV64M-NEXT: negw a1, a0
+; RV64M-NEXT: neg a1, a0
 ; RV64M-NEXT: and a0, a0, a1
 ; RV64M-NEXT: lui a1, 30667
 ; RV64M-NEXT: addi a1, a1, 1329
@@ -1039,7 +1039,7 @@ define i8 @test_ctlz_i8(i8 %a) nounwind {
 ; RV64NOZBB-NEXT: not a0, a0
 ; RV64NOZBB-NEXT: srli a1, a0, 1
 ; RV64NOZBB-NEXT: andi a1, a1, 85
-; RV64NOZBB-NEXT: subw a0, a0, a1
+; RV64NOZBB-NEXT: sub a0, a0, a1
 ; RV64NOZBB-NEXT: andi a1, a0, 51
 ; RV64NOZBB-NEXT: srli a0, a0, 2
 ; RV64NOZBB-NEXT: andi a0, a0, 51
@@ -1711,7 +1711,7 @@ define i8 @test_ctlz_i8_zero_undef(i8 %a) nounwind {
 ; RV64NOZBB-NEXT: not a0, a0
 ; RV64NOZBB-NEXT: srli a1, a0, 1
 ; RV64NOZBB-NEXT: andi a1, a1, 85
-; RV64NOZBB-NEXT: subw a0, a0, a1
+; RV64NOZBB-NEXT: sub a0, a0, a1
 ; RV64NOZBB-NEXT: andi a1, a0, 51
 ; RV64NOZBB-NEXT: srli a0, a0, 2
 ; RV64NOZBB-NEXT: andi a0, a0, 51
@@ -2296,7 +2296,7 @@ define i8 @test_ctpop_i8(i8 %a) nounwind {
 ; RV64NOZBB: # %bb.0:
 ; RV64NOZBB-NEXT: srli a1, a0, 1
 ; RV64NOZBB-NEXT: andi a1, a1, 85
-; RV64NOZBB-NEXT: subw a0, a0, a1
+; RV64NOZBB-NEXT: sub a0, a0, a1
 ; RV64NOZBB-NEXT: andi a1, a0, 51
 ; RV64NOZBB-NEXT: srli a0, a0, 2
 ; RV64NOZBB-NEXT: andi a0, a0, 51
@@ -2336,7 +2336,7 @@ define i8 @test_ctpop_i8(i8 %a) nounwind {
 ; RV64XTHEADBB: # %bb.0:
 ; RV64XTHEADBB-NEXT: srli a1, a0, 1
 ; RV64XTHEADBB-NEXT: andi a1, a1, 85
-; RV64XTHEADBB-NEXT: subw a0, a0, a1
+; RV64XTHEADBB-NEXT: sub a0, a0, a1
 ; RV64XTHEADBB-NEXT: andi a1, a0, 51
 ; RV64XTHEADBB-NEXT: srli a0, a0, 2
 ; RV64XTHEADBB-NEXT: andi a0, a0, 51
diff --git a/llvm/test/CodeGen/RISCV/ctz_zero_return_test.ll b/llvm/test/CodeGen/RISCV/ctz_zero_return_test.ll
index 637fb314e9536..a1061fbbbbf02 100644
--- a/llvm/test/CodeGen/RISCV/ctz_zero_return_test.ll
+++ b/llvm/test/CodeGen/RISCV/ctz_zero_return_test.ll
@@ -163,7 +163,7 @@ define i64 @ctz_dereferencing_pointer_zext(ptr %b) nounwind {
 ; RV64I-LABEL: ctz_dereferencing_pointer_zext:
 ; RV64I: # %bb.0: # %entry
 ; RV64I-NEXT: lw a0, 0(a0)
-; RV64I-NEXT: negw a1, a0
+; RV64I-NEXT: neg a1, a0
 ; RV64I-NEXT: and a1, a0, a1
 ; RV64I-NEXT: slli a2, a1, 6
 ; RV64I-NEXT: slli a3, a1, 8
@@ -171,16 +171,16 @@ define i64 @ctz_dereferencing_pointer_zext(ptr %b) nounwind {
 ; RV64I-NEXT: slli a5, a1, 12
 ; RV64I-NEXT: add a2, a2, a3
 ; RV64I-NEXT: slli a3, a1, 16
-; RV64I-NEXT: subw a4, a4, a5
+; RV64I-NEXT: sub a4, a4, a5
 ; RV64I-NEXT: slli a5, a1, 18
-; RV64I-NEXT: subw a3, a3, a5
+; RV64I-NEXT: sub a3, a3, a5
 ; RV64I-NEXT: slli a5, a1, 4
-; RV64I-NEXT: subw a5, a1, a5
+; RV64I-NEXT: sub a5, a1, a5
 ; RV64I-NEXT: add a2, a5, a2
 ; RV64I-NEXT: slli a5, a1, 14
-; RV64I-NEXT: subw a4, a4, a5
+; RV64I-NEXT: sub a4, a4, a5
 ; RV64I-NEXT: slli a5, a1, 23
-; RV64I-NEXT: subw a3, a3, a5
+; RV64I-NEXT: sub a3, a3, a5
 ; RV64I-NEXT: slli a1, a1, 27
 ; RV64I-NEXT: add a2, a2, a4
 ; RV64I-NEXT: add a1, a3, a1
@@ -248,7 +248,7 @@ define signext i32 @ctz1(i32 signext %x) nounwind {
 ;
 ; RV64I-LABEL: ctz1:
 ; RV64I: # %bb.0: # %entry
-; RV64I-NEXT: negw a1, a0
+; RV64I-NEXT: neg a1, a0
 ; RV64I-NEXT: and a1, a0, a1
 ; RV64I-NEXT: slli a2, a1, 6
 ; RV64I-NEXT: slli a3, a1, 8
@@ -256,16 +256,16 @@ define signext i32 @ctz1(i32 signext %x) nounwind {
 ; RV64I-NEXT: slli a5, a1, 12
 ; RV64I-NEXT: add a2, a2, a3
 ; RV64I-NEXT: slli a3, a1, 16
-; RV64I-NEXT: subw a4, a4, a5
+; RV64I-NEXT: sub a4, a4, a5
 ; RV64I-NEXT: slli a5, a1, 18
-; RV64I-NEXT: subw a3, a3, a5
+; RV64I-NEXT: sub a3, a3, a5
 ; RV64I-NEXT: slli a5, a1, 4
-; RV64I-NEXT: subw a5, a1, a5
+; RV64I-NEXT: sub a5, a1, a5
 ; RV64I-NEXT: add a2, a5, a2
 ; RV64I-NEXT: slli a5, a1, 14
-; RV64I-NEXT: subw a4, a4, a5
+; RV64I-NEXT: sub a4, a4, a5
 ; RV64I-NEXT: slli a5, a1, 23
-; RV64I-NEXT: subw a3, a3, a5
+; RV64I-NEXT: sub a3, a3, a5
 ; RV64I-NEXT: slli a1, a1, 27
 ; RV64I-NEXT: add a2, a2, a4
 ; RV64I-NEXT: add a1, a3, a1
@@ -331,7 +331,7 @@ define signext i32 @ctz1_flipped(i32 signext %x) nounwind {
 ;
 ; RV64I-LABEL: ctz1_flipped:
 ; RV64I: # %bb.0: # %entry
-; RV64I-NEXT: negw a1, a0
+; RV64I-NEXT: neg a1, a0
 ; RV64I-NEXT: and a1, a0, a1
 ; RV64I-NEXT: slli a2, a1, 6
 ; RV64I-NEXT: slli a3, a1, 8
@@ -339,16 +339,16 @@ define signext i32 @ctz1_flipped(i32 signext %x) nounwind {
 ; RV64I-NEXT: slli a5, a1, 12
 ; RV64I-NEXT: add a2, a2, a3
 ; RV64I-NEXT: slli a3, a1, 16
-; RV64I-NEXT: subw a4, a4, a5
+; RV64I-NEXT: sub a4, a4, a5
 ; RV64I-NEXT: slli a5, a1, 18
-; RV64I-NEXT: subw a3, a3, a5
+; RV64I-NEXT: sub a3, a3, a5
 ; RV64I-NEXT: slli a5, a1, 4
-; RV64I-NEXT: subw a5, a1, a5
+; RV64I-NEXT: sub a5, a1, a5
 ; RV64I-NEXT: add a2, a5, a2
 ; RV64I-NEXT: slli a5, a1, 14
-; RV64I-NEXT: subw a4, a4, a5
+; RV64I-NEXT: sub a4, a4, a5
 ; RV64I-NEXT: slli a5, a1, 23
-; RV64I-NEXT: subw a3, a3, a5
+; RV64I-NEXT: sub a3, a3, a5
 ; RV64I-NEXT: slli a1, a1, 27
 ; RV64I-NEXT: add a2, a2, a4
 ; RV64I-NEXT: add a1, a3, a1
@@ -412,7 +412,7 @@ define signext i32 @ctz2(i32 signext %x) nounwind {
 ; RV64I: # %bb.0: # %entry
 ; RV64I-NEXT: beqz a0, .LBB4_2
 ; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: negw a1, a0
+; RV64I-NEXT: neg a1, a0
 ; RV64I-NEXT: and a0, a0, a1
 ; RV64I-NEXT: slli a1, a0, 6
 ; RV64I-NEXT: slli a2, a0, 8
@@ -420,16 +420,16 @@ define signext i32 @ctz2(i32 signext %x) nounwind {
 ; RV64I-NEXT: slli a4, a0, 12
 ; RV64I-NEXT: add a1, a1, a2
 ; RV64I-NEXT: slli a2, a0, 16
-; RV64I-NEXT: subw a3, a3, a4
+; RV64I-NEXT: sub a3, a3, a4
 ; RV64I-NEXT: slli a4, a0, 18
-; RV64I-NEXT: subw a2, a2, a4
+; RV64I-NEXT: sub a2, a2, a4
 ; RV64I-NEXT: slli a4, a0, 4
-; RV64I-NEXT: subw a4, a0, a4
+; RV64I-NEXT: sub a4, a0, a4
 ; RV64I-NEXT: add a1, a4, a1
 ; RV64I-NEXT: slli a4, a0, 14
-; RV64I-NEXT: subw a3, a3, a4
+; RV64I-NEXT: sub a3, a3, a4
 ; RV64I-NEXT: slli a4, a0, 23
-; RV64I-NEXT: subw a2, a2, a4
+; RV64I-NEXT: sub a2, a2, a4
 ; RV64I-NEXT: slli a0, a0, 27
 ; RV64I-NEXT: add a1, a1, a3
 ; RV64I-NEXT: add a0, a2, a0
@@ -490,7 +490,7 @@ define signext i32 @ctz3(i32 signext %x) nounwind {
 ; RV64I: # %bb.0: # %entry
 ; RV64I-NEXT: beqz a0, .LBB5_2
 ; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: negw a1, a0
+; RV64I-NEXT: neg a1, a0
 ; RV64I-NEXT: and a0, a0, a1
 ; RV64I-NEXT: slli a1, a0, 6
 ; RV64I-NEXT: slli a2, a0, 8
@@ -498,16 +498,16 @@ define signext i32 @ctz3(i32 signext %x) nounwind {
 ; RV64I-NEXT: slli a4, a0, 12
 ; RV64I-NEXT: add a1, a1, a2
 ; RV64I-NEXT: slli a2, a0, 16
-; RV64I-NEXT: subw a3, a3, a4
+; RV64I-NEXT: sub a3, a3, a4
 ; RV64I-NEXT: slli a4, a0, 18
-; RV64I-NEXT: subw a2, a2, a4
+; RV64I-NEXT: sub a2, a2, a4
 ; RV64I-NEXT: slli a4, a0, 4
-; RV64I-NEXT: subw a4, a0, a4
+; RV64I-NEXT: sub a4, a0, a4
 ; RV64I-NEXT: add a1, a4, a1
 ; RV64I-NEXT: slli a4, a0, 14
-; RV64I-NEXT: subw a3, a3, a4
+; RV64I-NEXT: sub a3, a3, a4
 ; RV64I-NEXT: slli a4, a0, 23
-; RV64I-NEXT: subw a2, a2, a4
+; RV64I-NEXT: sub a2, a2, a4
 ; RV64I-NEXT: slli a0, a0, 27
 ; RV64I-NEXT: add a1, a1, a3
 ; RV64I-NEXT: add a0, a2, a0
@@ -824,7 +824,7 @@ define signext i32 @ctz5(i32 signext %x) nounwind {
 ;
 ; RV64I-LABEL: ctz5:
 ; RV64I: # %bb.0: # %entry
-; RV64I-NEXT: negw a1, a0
+; RV64I-NEXT: neg a1, a0
 ; RV64I-NEXT: and a1, a0, a1
 ; RV64I-NEXT: slli a2, a1, 6
 ; RV64I-NEXT: slli a3, a1, 8
@@ -832,16 +832,16 @@ define signext i32 @ctz5(i32 signext %x) nounwind {
 ; RV64I-NEXT: slli a5, a1, 12
 ; RV64I-NEXT: add a2, a2, a3
 ; RV64I-NEXT: slli a3, a1, 16
-; RV64I-NEXT: subw a4, a4, a5
+; RV64I-NEXT: sub a4, a4, a5
 ; RV64I-NEXT: slli a5, a1, 18
-; RV64I-NEXT: subw a3, a3, a5
+; RV64I-NEXT: sub a3, a3, a5
 ; RV64I-NEXT: slli a5, a1, 4
-; RV64I-NEXT: subw a5, a1, a5
+; RV64I-NEXT: sub a5, a1, a5
 ; RV64I-NEXT: add a2, a5, a2
 ; RV64I-NEXT: slli a5, a1, 14
-; RV64I-NEXT: subw a4, a4, a5
+; RV64I-NEXT: sub a4, a4, a5
 ; RV64I-NEXT: slli a5, a1, 23
-; RV64I-NEXT: subw a3, a3, a5
+; RV64I-NEXT: sub a3, a3, a5
 ; RV64I-NEXT: slli a1, a1, 27
 ; RV64I-NEXT: add a2, a2, a4
 ; RV64I-NEXT: add a1, a3, a1
@@ -907,7 +907,7 @@ define signext i32 @ctz6(i32 signext %x) nounwind {
 ;
 ; RV64I-LABEL: ctz6:
 ; RV64I: # %bb.0: # %entry
-; RV64I-NEXT: negw a1, a0
+; RV64I-NEXT: neg a1, a0
 ; RV64I-NEXT: and a1, a0, a1
 ; RV64I-NEXT: slli a2, a1, 6
 ; RV64I-NEXT: slli a3, a1, 8
@@ -915,16 +915,16 @@ define signext i32 @ctz6(i32 signext %x) nounwind {
 ; RV64I-NEXT: slli a5, a1, 12
 ; RV64I-NEXT: add a2, a2, a3
 ; RV64I-NEXT: slli a3, a1, 16
-; RV64I-NEXT: subw a4, a4, a5
+; RV64I-NEXT: sub a4, a4, a5
 ; RV64I-NEXT: slli a5, a1, 18
-; RV64I-NEXT: subw a3, a3, a5
+; RV64I-NEXT: sub a3, a3, a5
 ; RV64I-NEXT: slli a5, a1, 4
-; RV64I-NEXT: subw a5, a1, a5
+; RV64I-NEXT: sub a5, a1, a5
 ; RV64I-NEXT: add a2, a5, a2
 ; RV64I-NEXT: slli a5, a1, 14
-; RV64I-NEXT: subw a4, a4, a5
+; RV64I-NEXT: sub a4, a4, a5
 ; RV64I-NEXT: slli a5, a1, 23
-; RV64I-NEXT: subw a3, a3, a5
+; RV64I-NEXT: sub a3, a3, a5
 ; RV64I-NEXT: slli a1, a1, 27
 ; RV64I-NEXT: add a2, a2, a4
 ; RV64I-NEXT: add a1, a3, a1
@@ -997,7 +997,7 @@ define signext i32 @globalVar() nounwind {
 ; RV64I: # %bb.0: # %entry
 ; RV64I-NEXT: lui a0, %hi(global_x)
 ; RV64I-NEXT: lw a0, %lo(global_x)(a0)
-; RV64I-NEXT: negw a1, a0
+; RV64I-NEXT: neg a1, a0
 ; RV64I-NEXT: and a1, a0, a1
 ; RV64I-NEXT: slli a2, a1, 6
 ; RV64I-NEXT: slli a3, a1, 8
@@ -1005,16 +1005,16 @@ define signext i32 @globalVar() nounwind {
 ; RV64I-NEXT: slli a5, a1, 12
 ; RV64I-NEXT: add a2, a2, a3
 ; RV64I-NEXT: slli a3, a1, 16
-; RV64I-NEXT: subw a4, a4, a5
+; RV64I-NEXT: sub a4, a4, a5
 ; RV64I-NEXT: slli a5, a1, 18
-; RV64I-NEXT: subw a3, a3, a5
+; RV64I-NEXT: sub a3, a3, a5
 ; RV64I-NEXT: slli a5, a1, 4
-; RV64I-NEXT: subw a5, a1, a5
+; RV64I-NEXT: sub a5, a1, a5
 ; RV64I-NEXT: add a2, a5, a2
 ; RV64I-NEXT: slli a5, a1, 14
-; RV64I-NEXT: subw a4, a4, a5
+; RV64I-NEXT: sub a4, a4, a5
 ; RV64I-NEXT: slli a5, a1, 23
-; RV64I-NEXT: subw a3, a3, a5
+; RV64I-NEXT: sub a3, a3, a5
 ; RV64I-NEXT: slli a1, a1, 27
 ; RV64I-NEXT: add a2, a2, a4
 ; RV64I-NEXT: add a1, a3, a1
diff --git a/llvm/test/CodeGen/RISCV/div-by-constant.ll b/llvm/test/CodeGen/RISCV/div-by-constant.ll
index ea8b04d727acf..53c3f5841ba0f 100644
--- a/llvm/test/CodeGen/RISCV/div-by-constant.ll
+++ b/llvm/test/CodeGen/RISCV/div-by-constant.ll
@@ -54,7 +54,7 @@ define i32 @udiv_constant_add(i32 %a) nounwind {
 ; RV64IM-NEXT: slli a2, a2, 32
 ; RV64IM-NEXT: mulhu a1, a1, a2
 ; RV64IM-NEXT: srli a1, a1, 32
-; RV64IM-NEXT: subw a0, a0, a1
+; RV64IM-NEXT: sub a0, a0, a1
 ; RV64IM-NEXT: srliw a0, a0, 1
 ; RV64IM-NEXT: add a0, a0, a1
 ; RV64IM-NEXT: srli a0, a0, 2
@@ -67,7 +67,7 @@ define i32 @udiv_constant_add(i32 %a) nounwind {
 ; RV64IMZB-NEXT: addi a2, a2, -1755
 ; RV64IMZB-NEXT: mul a1, a1, a2
 ; RV64IMZB-NEXT: srli a1, a1, 32
-; RV64IMZB-NEXT: subw a0, a0, a1
+; RV64IMZB-NEXT: sub a0, a0, a1
 ; RV64IMZB-NEXT: srliw a0, a0, 1
 ; RV64IMZB-NEXT: add a0, a0, a1
 ; RV64IMZB-NEXT: srli a0, a0, 2
@@ -193,7 +193,7 @@ define i8 @udiv8_constant_add(i8 %a) nounwind {
 ; RV64IM-NEXT: li a2, 37
 ; RV64IM-NEXT: mul a1, a1, a2
 ; RV64IM-NEXT: srli a1, a1, 8
-; RV64IM-NEXT: subw a0, a0, a1
+; RV64IM-NEXT: sub a0, a0, a1
 ; RV64IM-NEXT: slli a0, a0, 56
 ; RV64IM-NEXT: srli a0, a0, 57
 ; RV64IM-NEXT: add a0, a0, a1
@@ -206,7 +206,7 @@ define i8 @udiv8_constant_add(i8 %a) nounwind {
 ; RV64IMZB-NEXT: sh3add a2, a1, a1
 ; RV64IMZB-NEXT: sh2add a1, a2, a1
 ; RV64IMZB-NEXT: srli a1, a1, 8
-; RV64IMZB-NEXT: subw a0, a0, a1
+; RV64IMZB-NEXT: sub a0, a0, a1
 ; RV64IMZB-NEXT: slli a0, a0, 56
 ; RV64IMZB-NEXT: srli a0, a0, 57
 ; RV64IMZB-NEXT: add a0, a0, a1
@@ -257,7 +257,7 @@ define i16 @udiv16_constant_add(i16 %a) nounwind {
 ; RV64-NEXT: lui a2, 149808
 ; RV64-NEXT: mulhu a1, a1, a2
 ; RV64-NEXT: srli a1, a1, 16
-; RV64-NEXT: subw a0, a0, a1
+; RV64-NEXT: sub a0, a0, a1
 ; RV64-NEXT: slli a0, a0, 48
 ; RV64-NEXT: srli a0, a0, 49
 ; RV64-NEXT: add a0, a0, a1
@@ -367,7 +367,7 @@ define i32 @sdiv_constant_sub_srai(i32 %a) nounwind {
 ; RV64-NEXT: addi a2, a2, -1171
 ; RV64-NEXT: mul a1, a1, a2
 ; RV64-NEXT: srli a1, a1, 32
-; RV64-NEXT: subw a1, a1, a0
+; RV64-NEXT: sub a1, a1, a0
 ; RV64-NEXT: srliw a0, a1, 31
 ; RV64-NEXT: sraiw a1, a1, 2
 ; RV64-NEXT: add a0, a1, a0
@@ -666,7 +666,7 @@ define i8 @sdiv8_constant_sub_srai(i8 %a) nounwind {
 ; RV64IM-NEXT: srai a1, a1, 56
 ; RV64IM-NEXT: mul a1, a1, a2
 ; RV64IM-NEXT: srli a1, a1, 8
-; RV64IM-NEXT: subw a1, a1, a0
+; RV64IM-NEXT: sub a1, a1, a0
 ; RV64IM-NEXT: slli a1, a1, 56
 ; RV64IM-NEXT: srli a0, a1, 63
 ; RV64IM-NEXT: srai a1, a1, 58
@@ -679,7 +679,7 @@ define i8 @sdiv8_constant_sub_srai(i8 %a) nounwind {
 ; RV64IMZB-NEXT: li a2, 109
 ; RV64IMZB-NEXT: mul a1, a1, a2
 ; RV64IMZB-NEXT: srli a1, a1, 8
-; RV64IMZB-NEXT: subw a1, a1, a0
+; RV64IMZB-NEXT: sub a1, a1, a0
 ; RV64IMZB-NEXT: slli a1, a1, 56
 ; RV64IMZB-NEXT: srli a0, a1, 63
 ; RV64IMZB-NEXT: srai a1, a1, 58
@@ -889,7 +889,7 @@ define i16 @sdiv16_constant_sub_srai(i16 %a) nounwind {
 ; RV64IM-NEXT: addi a2, a2, 1911
 ; RV64IM-NEXT: mul a1, a1, a2
 ; RV64IM-NEXT: srli a1, a1, 16
-; RV64IM-NEXT: subw a1, a1, a0
+; RV64IM-NEXT: sub a1, a1, a0
 ; RV64IM-NEXT: slli a1, a1, 48
 ; RV64IM-NEXT: srli a0, a1, 63
 ; RV64IM-NEXT: srai a1, a1, 51
@@ -903,7 +903,7 @@ define i16 @sdiv16_constant_sub_srai(i16 %a) nounwind {
 ; RV64IMZB-NEXT: addi a2, a2, 1911
 ; RV64IMZB-NEXT: mul a1, a1, a2
 ; RV64IMZB-NEXT: srli a1, a1, 16
-; RV64IMZB-NEXT: subw a1, a1, a0
+; RV64IMZB-NEXT: sub a1, a1, a0
 ; RV64IMZB-NEXT: slli a1, a1, 48
 ; RV64IMZB-NEXT: srli a0, a1, 63
 ; RV64IMZB-NEXT: srai a1, a1, 51
diff --git a/llvm/test/CodeGen/RISCV/iabs.ll b/llvm/test/CodeGen/RISCV/iabs.ll
index 66cde323ce507..774f1a1608821 100644
--- a/llvm/test/CodeGen/RISCV/iabs.ll
+++ b/llvm/test/CodeGen/RISCV/iabs.ll
@@ -651,7 +651,7 @@ define void @zext16_abs8(i8 %x, ptr %p) {
 ; RV64I-NEXT: srai a2, a0, 63
 ; RV64I-NEXT: srai a0, a0, 56
 ; RV64I-NEXT: xor a0, a0, a2
-; RV64I-NEXT: subw a0, a0, a2
+; RV64I-NEXT: sub a0, a0, a2
 ; RV64I-NEXT: sh a0, 0(a1)
 ; RV64I-NEXT: ret
 ;
diff --git a/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll b/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll
index b1a6d163664e5..87c8343a417cd 100644
--- a/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll
+++ b/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll
@@ -42,7 +42,7 @@ define i32 @ctz_nxv4i32( %a) #0 {
 ; RV64-NEXT: vmerge.vvm v8, v9, v8, v0
 ; RV64-NEXT: vredmaxu.vs v8, v8, v8
 ; RV64-NEXT: vmv.x.s a1, v8
-; RV64-NEXT: subw a0, a0, a1
+; RV64-NEXT: sub a0, a0, a1
 ; RV64-NEXT: slli a0, a0, 48
 ; RV64-NEXT: srli a0, a0, 48
 ; RV64-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts.ll b/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts.ll
index 20dd590d2ea98..1216d3000e8c8 100644
--- a/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts.ll
+++ b/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts.ll
@@ -35,7 +35,7 @@ define i16 @ctz_v4i32(<4 x i32> %a) {
 ; RV64-NEXT: vredmaxu.vs v8, v8, v8
 ; RV64-NEXT: vmv.x.s a0, v8
 ; RV64-NEXT: li a1, 4
-; RV64-NEXT: subw a1, a1, a0
+; RV64-NEXT: sub a1, a1, a0
 ; RV64-NEXT: zext.b a0, a1
 ; RV64-NEXT: ret
 %res = call i16 @llvm.experimental.cttz.elts.i16.v4i32(<4 x i32> %a, i1 0)
diff --git a/llvm/test/CodeGen/RISCV/machine-combiner.ll b/llvm/test/CodeGen/RISCV/machine-combiner.ll
index 1be599e4f8e1e..7a1c41c1839fa 100644
--- a/llvm/test/CodeGen/RISCV/machine-combiner.ll
+++ b/llvm/test/CodeGen/RISCV/machine-combiner.ll
@@ -454,7 +454,7 @@ define i32 @test_reassoc_add_sub_i32_1(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
 ; CHECK-LABEL: test_reassoc_add_sub_i32_1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: add a0, a0, a1
-; CHECK-NEXT: subw a2, a2, a3
+; CHECK-NEXT: sub a2, a2, a3
 ; CHECK-NEXT: subw a0, a0, a2
 ; CHECK-NEXT: ret
 %t0 = add i32 %a0, %a1
@@ -467,7 +467,7 @@ define i32 @test_reassoc_add_sub_i32_2(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
 ; CHECK-LABEL: test_reassoc_add_sub_i32_2:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: add a0, a0, a1
-; CHECK-NEXT: subw a2, a2, a3
+; CHECK-NEXT: sub a2, a2, a3
 ; CHECK-NEXT: addw a0, a0, a2
 ; CHECK-NEXT: ret
 %t0 = add i32 %a0, %a1
diff --git a/llvm/test/CodeGen/RISCV/mul.ll b/llvm/test/CodeGen/RISCV/mul.ll
index 27d5eaa032522..4c9a98cabb15f 100644
--- a/llvm/test/CodeGen/RISCV/mul.ll
+++ b/llvm/test/CodeGen/RISCV/mul.ll
@@ -1080,14 +1080,14 @@ define i32 @muli32_m65(i32 %a) nounwind {
 ; RV64I-LABEL: muli32_m65:
 ; RV64I: # %bb.0:
 ; RV64I-NEXT: slli a1, a0, 6
-; RV64I-NEXT: negw a0, a0
+; RV64I-NEXT: neg a0, a0
 ; RV64I-NEXT: subw a0, a0, a1
 ; RV64I-NEXT: ret
 ;
 ; RV64IM-LABEL: muli32_m65:
 ; RV64IM: # %bb.0:
 ; RV64IM-NEXT: slli a1, a0, 6
-; RV64IM-NEXT: negw a0, a0
+; RV64IM-NEXT: neg a0, a0
 ; RV64IM-NEXT: subw a0, a0, a1
 ; RV64IM-NEXT: ret
 %1 = mul i32 %a, -65
@@ -1980,14 +1980,14 @@ define i8 @muladd_demand(i8 %x, i8 %y) nounwind {
 ; RV64I-LABEL: muladd_demand:
 ; RV64I: # %bb.0:
 ; RV64I-NEXT: slli a0, a0, 1
-; RV64I-NEXT: subw a0, a1, a0
+; RV64I-NEXT: sub a0, a1, a0
 ; RV64I-NEXT: andi a0, a0, 15
 ; RV64I-NEXT: ret
 ;
 ; RV64IM-LABEL: muladd_demand:
 ; RV64IM: # %bb.0:
 ; RV64IM-NEXT: slli a0, a0, 1
-; RV64IM-NEXT: subw a0, a1, a0
+; RV64IM-NEXT: sub a0, a1, a0
 ; RV64IM-NEXT: andi a0, a0, 15
 ; RV64IM-NEXT: ret
 %m = mul i8 %x, 14
@@ -2048,14 +2048,14 @@ define i8 @muladd_demand_2(i8 %x, i8 %y) nounwind {
 ; RV64I-LABEL: muladd_demand_2:
 ; RV64I: # %bb.0:
 ; RV64I-NEXT: slli a0, a0, 1
-; RV64I-NEXT: subw a1, a1, a0
+; RV64I-NEXT: sub a1, a1, a0
 ; RV64I-NEXT: ori a0, a1, -16
 ; RV64I-NEXT: ret
 ;
 ; RV64IM-LABEL: muladd_demand_2:
 ; RV64IM: # %bb.0:
 ; RV64IM-NEXT: slli a0, a0, 1
-; RV64IM-NEXT: subw a1, a1, a0
+; RV64IM-NEXT: sub a1, a1, a0
 ; RV64IM-NEXT: ori a0, a1, -16
 ; RV64IM-NEXT: ret
 %m = mul i8 %x, 14
diff --git a/llvm/test/CodeGen/RISCV/neg-abs.ll b/llvm/test/CodeGen/RISCV/neg-abs.ll
index fe19a4fa8bbd8..da81fe5708814 100644
--- a/llvm/test/CodeGen/RISCV/neg-abs.ll
+++ b/llvm/test/CodeGen/RISCV/neg-abs.ll
@@ -179,7 +179,7 @@ define i32 @neg_abs32_multiuse(i32 %x, ptr %y) {
 ; RV64I: # %bb.0:
 ; RV64I-NEXT: sraiw a2, a0, 31
 ; RV64I-NEXT: xor a0, a0, a2
-; RV64I-NEXT: subw a2, a0, a2
+; RV64I-NEXT: sub a2, a0, a2
 ; RV64I-NEXT: negw a0, a2
 ; RV64I-NEXT: sw a2, 0(a1)
 ; RV64I-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/overflow-intrinsics.ll b/llvm/test/CodeGen/RISCV/overflow-intrinsics.ll
index 47b90a006a249..ba6769b2aa3e1 100644
--- a/llvm/test/CodeGen/RISCV/overflow-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/overflow-intrinsics.ll
@@ -833,7 +833,7 @@ define i1 @usubo_ugt_i32(i32 %x, i32 %y, ptr %p) {
 ; RV64-NEXT: sext.w a3, a1
 ; RV64-NEXT: sext.w a4, a0
 ; RV64-NEXT: sltu a3, a4, a3
-; RV64-NEXT: subw a0, a0, a1
+; RV64-NEXT: sub a0, a0, a1
 ; RV64-NEXT: sw a0, 0(a2)
 ; RV64-NEXT: mv a0, a3
 ; RV64-NEXT: ret
@@ -860,7 +860,7 @@ define i1 @usubo_ugt_constant_op0_i8(i8 %x, ptr %p) {
 ; RV64: # %bb.0:
 ; RV64-NEXT: zext.b a2, a0
 ; RV64-NEXT: li a3, 42
-; RV64-NEXT: subw a3, a3, a0
+; RV64-NEXT: sub a3, a3, a0
 ; RV64-NEXT: sltiu a0, a2, 43
 ; RV64-NEXT: xori a0, a0, 1
 ; RV64-NEXT: sb a3, 0(a1)
@@ -890,7 +890,7 @@ define i1 @usubo_ult_constant_op0_i16(i16 %x, ptr %p) {
 ; RV64-NEXT: slli a2, a0, 48
 ; RV64-NEXT: li a3, 43
 ; RV64-NEXT: srli a2, a2, 48
-; RV64-NEXT: subw a3, a3, a0
+; RV64-NEXT: sub a3, a3, a0
 ; RV64-NEXT: sltiu a0, a2, 44
 ; RV64-NEXT: xori a0, a0, 1
 ; RV64-NEXT: sh a3, 0(a1)
@@ -987,7 +987,7 @@ define i1 @usubo_ne_constant0_op1_i32(i32 %x, ptr %p) {
 ; RV64-LABEL: usubo_ne_constant0_op1_i32:
 ; RV64: # %bb.0:
 ; RV64-NEXT: sext.w a2, a0
-; RV64-NEXT: negw a3, a0
+; RV64-NEXT: neg a3, a0
 ; RV64-NEXT: snez a0, a2
 ; RV64-NEXT: sw a3, 0(a1)
 ; RV64-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/pr145360.ll b/llvm/test/CodeGen/RISCV/pr145360.ll
index 4251ac60c8bf6..1c77fadbd4b7d 100644
--- a/llvm/test/CodeGen/RISCV/pr145360.ll
+++ b/llvm/test/CodeGen/RISCV/pr145360.ll
@@ -8,7 +8,7 @@ define i32 @signed(i32 %0, ptr %1) {
 ; CHECK-NEXT: srliw a2, a2, 24
 ; CHECK-NEXT: add a2, a0, a2
 ; CHECK-NEXT: andi a2, a2, -256
-; CHECK-NEXT: subw a2, a0, a2
+; CHECK-NEXT: sub a2, a0, a2
 ; CHECK-NEXT: sraiw a0, a0, 8
 ; CHECK-NEXT: sw a2, 0(a1)
 ; CHECK-NEXT: ret
@@ -29,7 +29,7 @@ define i32 @unsigned(i32 %0, ptr %1) {
 ; CHECK-NEXT: srli a2, a2, 36
 ; CHECK-NEXT: slli a4, a2, 5
 ; CHECK-NEXT: slli a2, a2, 3
-; CHECK-NEXT: subw a2, a2, a4
+; CHECK-NEXT: sub a2, a2, a4
 ; CHECK-NEXT: srliw a4, a0, 3
 ; CHECK-NEXT: add a2, a0, a2
 ; CHECK-NEXT: mulw a0, a4, a3
@@ -49,7 +49,7 @@ define i32 @signed_div_first(i32 %0, ptr %1) {
 ; CHECK-NEXT: add a3, a0, a2
 ; CHECK-NEXT: sraiw a2, a3, 8
 ; CHECK-NEXT: andi a3, a3, -256
-; CHECK-NEXT: subw a0, a0, a3
+; CHECK-NEXT: sub a0, a0, a3
 ; CHECK-NEXT: sw a0, 0(a1)
 ; CHECK-NEXT: mv a0, a2
 ; CHECK-NEXT: ret
@@ -70,7 +70,7 @@ define i32 @unsigned_div_first(i32 %0, ptr %1) {
 ; CHECK-NEXT: srli a2, a2, 36
 ; CHECK-NEXT: slli a3, a2, 5
 ; CHECK-NEXT: slli a4, a2, 3
-; CHECK-NEXT: subw a4, a4, a3
+; CHECK-NEXT: sub a4, a4, a3
 ; CHECK-NEXT: add a0, a0, a4
 ; CHECK-NEXT: sw a0, 0(a1)
 ; CHECK-NEXT: mv a0, a2
diff --git a/llvm/test/CodeGen/RISCV/rotl-rotr.ll b/llvm/test/CodeGen/RISCV/rotl-rotr.ll
index 634cca5dcdb71..cf64650c964e8 100644
--- a/llvm/test/CodeGen/RISCV/rotl-rotr.ll
+++ b/llvm/test/CodeGen/RISCV/rotl-rotr.ll
@@ -29,7 +29,7 @@ define i32 @rotl_32(i32 %x, i32 %y) nounwind {
 ;
 ; RV64I-LABEL: rotl_32:
 ; RV64I: # %bb.0:
-; RV64I-NEXT: negw a2, a1
+; RV64I-NEXT: neg a2, a1
 ; RV64I-NEXT: sllw a1, a0, a1
 ; RV64I-NEXT: srlw a0, a0, a2
 ; RV64I-NEXT: or a0, a1, a0
@@ -56,7 +56,7 @@ define i32 @rotl_32(i32 %x, i32 %y) nounwind {
 ; RV64XTHEADBB-LABEL: rotl_32:
 ; RV64XTHEADBB: # %bb.0:
 ; RV64XTHEADBB-NEXT: sllw a2, a0, a1
-; RV64XTHEADBB-NEXT: negw a1, a1
+; RV64XTHEADBB-NEXT: neg a1, a1
 ; RV64XTHEADBB-NEXT: srlw a0, a0, a1
 ; RV64XTHEADBB-NEXT: or a0, a2, a0
 ; RV64XTHEADBB-NEXT: ret
@@ -78,7 +78,7 @@ define i32 @rotr_32(i32 %x, i32 %y) nounwind {
 ;
 ; RV64I-LABEL: rotr_32:
 ; RV64I: # %bb.0:
-; RV64I-NEXT: negw a2, a1
+; RV64I-NEXT: neg a2, a1
 ; RV64I-NEXT: srlw a1, a0, a1
 ; RV64I-NEXT: sllw a0, a0, a2
 ; RV64I-NEXT: or a0, a1, a0
@@ -105,7 +105,7 @@ define i32 @rotr_32(i32 %x, i32 %y) nounwind {
 ; RV64XTHEADBB-LABEL: rotr_32:
 ; RV64XTHEADBB: # %bb.0:
 ; RV64XTHEADBB-NEXT: srlw a2, a0, a1
-; RV64XTHEADBB-NEXT: negw a1, a1
+; RV64XTHEADBB-NEXT: neg a1, a1
 ; RV64XTHEADBB-NEXT: sllw a0, a0, a1
 ; RV64XTHEADBB-NEXT: or a0, a2, a0
 ; RV64XTHEADBB-NEXT: ret
@@ -159,7 +159,7 @@ define i64 @rotl_64(i64 %x, i64 %y) nounwind {
 ;
 ; RV64I-LABEL: rotl_64:
 ; RV64I: # %bb.0:
-; RV64I-NEXT: negw a2, a1
+; RV64I-NEXT: neg a2, a1
 ; RV64I-NEXT: sll a1, a0, a1
 ; RV64I-NEXT: srl a0, a0, a2
 ; RV64I-NEXT: or a0, a1, a0
@@ -253,7 +253,7 @@ define i64 @rotl_64(i64 %x, i64 %y) nounwind {
 ; RV64XTHEADBB-LABEL: rotl_64:
 ; RV64XTHEADBB: # %bb.0:
 ; RV64XTHEADBB-NEXT: sll a2, a0, a1
-; RV64XTHEADBB-NEXT: negw a1, a1
+; RV64XTHEADBB-NEXT: neg a1, a1
 ; RV64XTHEADBB-NEXT: srl a0, a0, a1
 ; RV64XTHEADBB-NEXT: or a0, a2, a0
 ; RV64XTHEADBB-NEXT: ret
@@ -307,7 +307,7 @@ define i64 @rotr_64(i64 %x, i64 %y) nounwind {
 ;
 ; RV64I-LABEL: rotr_64:
 ; RV64I: # %bb.0:
-; RV64I-NEXT: negw a2, a1
+; RV64I-NEXT: neg a2, a1
 ; RV64I-NEXT: srl a1, a0, a1
 ; RV64I-NEXT: sll a0, a0, a2
 ; RV64I-NEXT: or a0, a1, a0
@@ -401,7 +401,7 @@ define i64 @rotr_64(i64 %x, i64 %y) nounwind {
 ; RV64XTHEADBB-LABEL: rotr_64:
 ; RV64XTHEADBB: # %bb.0:
 ; RV64XTHEADBB-NEXT: srl a2, a0, a1
-; RV64XTHEADBB-NEXT: negw a1, a1
+; RV64XTHEADBB-NEXT: neg a1, a1
 ; RV64XTHEADBB-NEXT: sll a0, a0, a1
 ; RV64XTHEADBB-NEXT: or a0, a2, a0
 ; RV64XTHEADBB-NEXT: ret
@@ -423,7 +423,7 @@ define i32 @rotl_32_mask(i32 %x, i32 %y) nounwind {
 ;
 ; RV64I-LABEL: rotl_32_mask:
 ; RV64I: # %bb.0:
-; RV64I-NEXT: negw a2, a1
+; RV64I-NEXT: neg a2, a1
 ; RV64I-NEXT: sllw a1, a0, a1
 ; RV64I-NEXT: srlw a0, a0, a2
 ; RV64I-NEXT: or a0, a1, a0
@@ -450,7 +450,7 @@ define i32 @rotl_32_mask(i32 %x, i32 %y) nounwind {
 ; RV64XTHEADBB-LABEL: rotl_32_mask:
 ; RV64XTHEADBB: # %bb.0:
 ; RV64XTHEADBB-NEXT: sllw a2, a0, a1
-; RV64XTHEADBB-NEXT: negw a1, a1
+; RV64XTHEADBB-NEXT: neg a1, a1
 ; RV64XTHEADBB-NEXT: srlw a0, a0, a1
 ; RV64XTHEADBB-NEXT: or a0, a2, a0
 ; RV64XTHEADBB-NEXT: ret
@@ -474,7 +474,7 @@ define i32 @rotl_32_mask_and_63_and_31(i32 %x, i32 %y) nounwind {
 ; RV64I-LABEL: rotl_32_mask_and_63_and_31:
 ; RV64I: # %bb.0:
 ; RV64I-NEXT: sllw a2, a0, a1
-; RV64I-NEXT: negw a1, a1
+; RV64I-NEXT: neg a1, a1
 ; RV64I-NEXT: srlw a0, a0, a1
 ; RV64I-NEXT: or a0, a2, a0
 ; RV64I-NEXT: ret
@@ -500,7 +500,7 @@ define i32 @rotl_32_mask_and_63_and_31(i32 %x, i32 %y) nounwind {
 ; RV64XTHEADBB-LABEL: rotl_32_mask_and_63_and_31:
 ; RV64XTHEADBB: # %bb.0:
 ; RV64XTHEADBB-NEXT: sllw a2, a0, a1
-; RV64XTHEADBB-NEXT: negw a1, a1
+; RV64XTHEADBB-NEXT: neg a1, a1
 ; RV64XTHEADBB-NEXT: srlw a0, a0, a1
 ; RV64XTHEADBB-NEXT: or a0, a2, a0
 ; RV64XTHEADBB-NEXT: ret
@@ -545,7 +545,7 @@ define i32 @rotl_32_mask_or_64_or_32(i32 %x, i32 %y) nounwind {
 ; RV64XTHEADBB-LABEL: rotl_32_mask_or_64_or_32:
 ; RV64XTHEADBB: # %bb.0:
 ; RV64XTHEADBB-NEXT: sllw a2, a0, a1
-; RV64XTHEADBB-NEXT: negw a1, a1
+; RV64XTHEADBB-NEXT: neg a1, a1
 ; RV64XTHEADBB-NEXT: srlw a0, a0, a1
 ; RV64XTHEADBB-NEXT: or a0, a2, a0
 ; RV64XTHEADBB-NEXT: ret
@@ -569,7 +569,7 @@ define i32 @rotr_32_mask(i32 %x, i32 %y) nounwind {
 ;
 ; RV64I-LABEL: rotr_32_mask:
 ; RV64I: # %bb.0:
-; RV64I-NEXT: negw a2, a1
+; RV64I-NEXT: neg a2, a1
 ; RV64I-NEXT: srlw a1, a0, a1
 ; RV64I-NEXT: sllw a0, a0, a2
 ; RV64I-NEXT: or a0, a1, a0
@@ -596,7 +596,7 @@ define i32 @rotr_32_mask(i32 %x, i32 %y) nounwind {
 ; RV64XTHEADBB-LABEL: rotr_32_mask:
 ; RV64XTHEADBB: # %bb.0:
 ; RV64XTHEADBB-NEXT: srlw a2, a0, a1
-; RV64XTHEADBB-NEXT: negw a1, a1
+; RV64XTHEADBB-NEXT: neg a1, a1
 ; RV64XTHEADBB-NEXT: sllw a0, a0, a1
 ; RV64XTHEADBB-NEXT: or a0, a2, a0
 ; RV64XTHEADBB-NEXT: ret
@@ -620,7 +620,7 @@ define i32 @rotr_32_mask_and_63_and_31(i32 %x, i32 %y) nounwind {
 ; RV64I-LABEL: rotr_32_mask_and_63_and_31:
 ; RV64I: # %bb.0:
 ; RV64I-NEXT: srlw a2, a0, a1
-; RV64I-NEXT: negw a1, a1
+; RV64I-NEXT: neg a1, a1
 ; RV64I-NEXT: sllw a0, a0, a1
 ; RV64I-NEXT: or a0, a2, a0
 ; RV64I-NEXT: ret
@@ -646,7 +646,7 @@ define i32 @rotr_32_mask_and_63_and_31(i32 %x, i32 %y) nounwind {
 ; RV64XTHEADBB-LABEL: rotr_32_mask_and_63_and_31:
 ; RV64XTHEADBB: # %bb.0:
 ; RV64XTHEADBB-NEXT: srlw a2, a0, a1
-; RV64XTHEADBB-NEXT: negw a1, a1
+; RV64XTHEADBB-NEXT: neg a1, a1
 ; RV64XTHEADBB-NEXT: sllw a0, a0, a1
 ; RV64XTHEADBB-NEXT: or a0, a2, a0
 ; RV64XTHEADBB-NEXT: ret
@@ -691,7 +691,7 @@ define i32 @rotr_32_mask_or_64_or_32(i32 %x, i32 %y) nounwind {
 ; RV64XTHEADBB-LABEL: rotr_32_mask_or_64_or_32:
 ; RV64XTHEADBB: # %bb.0:
 ; RV64XTHEADBB-NEXT: srlw a2, a0, a1
-; RV64XTHEADBB-NEXT: negw a1, a1
+; RV64XTHEADBB-NEXT: neg a1, a1
 ; RV64XTHEADBB-NEXT: sllw a0, a0, a1
 ; RV64XTHEADBB-NEXT: or a0, a2, a0
 ; RV64XTHEADBB-NEXT: ret
@@ -745,7 +745,7 @@ define i64 @rotl_64_mask(i64 %x, i64 %y) nounwind {
 ;
 ; RV64I-LABEL: rotl_64_mask:
 ; RV64I: # %bb.0:
-; RV64I-NEXT: negw a2, a1
+; RV64I-NEXT: neg a2, a1
 ; RV64I-NEXT: sll a1, a0, a1
 ; RV64I-NEXT: srl a0, a0, a2
 ; RV64I-NEXT: or a0, a1, a0
@@ -835,7 +835,7 @@ define i64 @rotl_64_mask(i64 %x, i64 %y) nounwind {
 ; RV64XTHEADBB-LABEL: rotl_64_mask:
 ; RV64XTHEADBB: # %bb.0:
 ; RV64XTHEADBB-NEXT: sll a2, a0, a1
-; RV64XTHEADBB-NEXT: negw a1, a1
+; RV64XTHEADBB-NEXT: neg a1, a1
 ; RV64XTHEADBB-NEXT: srl a0, a0, a1
 ; RV64XTHEADBB-NEXT: or a0, a2, a0
 ; RV64XTHEADBB-NEXT: ret
@@ -890,7 +890,7 @@ define i64 @rotl_64_mask_and_127_and_63(i64 %x, i64 %y) nounwind {
 ; RV64I-LABEL: rotl_64_mask_and_127_and_63:
 ; RV64I: # %bb.0:
 ; RV64I-NEXT: sll a2, a0, a1
-; RV64I-NEXT: negw a1, a1
+; RV64I-NEXT: neg a1, a1
 ; RV64I-NEXT: srl a0, a0, a1
 ; RV64I-NEXT: or a0, a2, a0
 ; RV64I-NEXT: ret
@@ -981,7 +981,7 @@ define i64 @rotl_64_mask_and_127_and_63(i64 %x, i64 %y) nounwind {
 ; RV64XTHEADBB-LABEL: rotl_64_mask_and_127_and_63:
 ; RV64XTHEADBB: # %bb.0:
 ; RV64XTHEADBB-NEXT: sll a2, a0, a1
-; RV64XTHEADBB-NEXT: negw a1, a1
+; RV64XTHEADBB-NEXT: neg a1, a1
 ; RV64XTHEADBB-NEXT: srl a0, a0, a1
 ; RV64XTHEADBB-NEXT: or a0, a2, a0
 ; RV64XTHEADBB-NEXT: ret
@@ -1026,7 +1026,7 @@ define i64 @rotl_64_mask_or_128_or_64(i64 %x, i64 %y) nounwind {
 ; RV64XTHEADBB-LABEL: rotl_64_mask_or_128_or_64:
 ; RV64XTHEADBB: # %bb.0:
 ; RV64XTHEADBB-NEXT: sll a2, a0, a1
-; RV64XTHEADBB-NEXT: negw a1, a1
+; RV64XTHEADBB-NEXT: neg a1, a1
 ; RV64XTHEADBB-NEXT: srl a0, a0, a1
 ; RV64XTHEADBB-NEXT: or a0, a2, a0
 ; RV64XTHEADBB-NEXT: ret
@@ -1080,7 +1080,7 @@ define i64 @rotr_64_mask(i64 %x, i64 %y) nounwind {
 ;
 ; RV64I-LABEL: rotr_64_mask:
 ; RV64I: # %bb.0:
-; RV64I-NEXT: negw a2, a1
+; RV64I-NEXT: neg a2, a1
 ; RV64I-NEXT: srl a1, a0, a1
 ; RV64I-NEXT: sll a0, a0, a2
 ; RV64I-NEXT: or a0, a1, a0
@@ -1170,7 +1170,7 @@ define i64 @rotr_64_mask(i64 %x, i64 %y) nounwind {
 ; RV64XTHEADBB-LABEL: rotr_64_mask:
 ; RV64XTHEADBB: # %bb.0:
 ; RV64XTHEADBB-NEXT: srl a2, a0, a1
-; RV64XTHEADBB-NEXT: negw a1, a1
+; RV64XTHEADBB-NEXT: neg a1, a1
 ; RV64XTHEADBB-NEXT: sll a0, a0, a1
 ; RV64XTHEADBB-NEXT: or a0, a2, a0
 ; RV64XTHEADBB-NEXT: ret
@@ -1225,7 +1225,7 @@ define i64 @rotr_64_mask_and_127_and_63(i64 %x, i64 %y) nounwind {
 ; RV64I-LABEL: rotr_64_mask_and_127_and_63:
 ; RV64I: # %bb.0:
 ; RV64I-NEXT: srl a2, a0, a1
-; RV64I-NEXT: negw a1, a1
+; RV64I-NEXT: neg a1, a1
 ; RV64I-NEXT: sll a0, a0, a1
 ; RV64I-NEXT: or a0, a2, a0
 ; RV64I-NEXT: ret
@@ -1316,7 +1316,7 @@ define i64 @rotr_64_mask_and_127_and_63(i64 %x, i64 %y) nounwind {
 ; RV64XTHEADBB-LABEL: rotr_64_mask_and_127_and_63:
 ; RV64XTHEADBB: # %bb.0:
 ; RV64XTHEADBB-NEXT: srl a2, a0, a1
-; RV64XTHEADBB-NEXT: negw a1, a1
+; RV64XTHEADBB-NEXT: neg a1, a1
 ; RV64XTHEADBB-NEXT: sll a0, a0, a1
 ; RV64XTHEADBB-NEXT: or a0, a2, a0
 ; RV64XTHEADBB-NEXT: ret
@@ -1361,7 +1361,7 @@ define i64 @rotr_64_mask_or_128_or_64(i64 %x, i64 %y) nounwind {
 ; RV64XTHEADBB-LABEL: rotr_64_mask_or_128_or_64:
 ; RV64XTHEADBB: # %bb.0:
 ; RV64XTHEADBB-NEXT: srl a2, a0, a1
-; RV64XTHEADBB-NEXT: negw a1, a1
+; RV64XTHEADBB-NEXT: neg a1, a1
 ; RV64XTHEADBB-NEXT: sll a0, a0, a1
 ; RV64XTHEADBB-NEXT: or a0, a2, a0
 ; RV64XTHEADBB-NEXT: ret
@@ -1390,7 +1390,7 @@ define signext i32 @rotl_32_mask_shared(i32 signext %a, i32 signext %b, i32 sign
 ; RV64I-LABEL: rotl_32_mask_shared:
 ; RV64I: # %bb.0:
 ; RV64I-NEXT: sllw a3, a0, a2
-; RV64I-NEXT: negw a4, a2
+; RV64I-NEXT: neg a4, a2
 ; RV64I-NEXT: srlw a0, a0, a4
 ; RV64I-NEXT: or a0, a3, a0
 ; RV64I-NEXT: sllw a1, a1, a2
@@ -1424,7 +1424,7 @@ define signext i32 @rotl_32_mask_shared(i32 signext %a, i32 signext %b, i32 sign
 ; RV64XTHEADBB-LABEL: rotl_32_mask_shared:
 ; RV64XTHEADBB: # %bb.0:
 ; RV64XTHEADBB-NEXT: sllw a3, a0, a2
-; RV64XTHEADBB-NEXT: negw a4, a2
+; RV64XTHEADBB-NEXT: neg a4, a2
 ; RV64XTHEADBB-NEXT: srlw a0, a0, a4
 ; RV64XTHEADBB-NEXT: or a0, a3, a0
 ; RV64XTHEADBB-NEXT: sllw a1, a1, a2
@@ -1486,7 +1486,7 @@ define signext i64 @rotl_64_mask_shared(i64 signext %a, i64 signext %b, i64 sign
 ; RV64I-LABEL: rotl_64_mask_shared:
 ; RV64I: # %bb.0:
 ; RV64I-NEXT: sll a3, a0, a2
-; RV64I-NEXT: negw a4, a2
+; RV64I-NEXT: neg a4, a2
 ; RV64I-NEXT: srl a0, a0, a4
 ; RV64I-NEXT: or a0, a3, a0
 ; RV64I-NEXT: sll a1, a1, a2
@@ -1590,7 +1590,7 @@ define signext i64 @rotl_64_mask_shared(i64 signext %a, i64 signext %b, i64 sign
 ; RV64XTHEADBB-LABEL: rotl_64_mask_shared:
 ; RV64XTHEADBB: # %bb.0:
 ; RV64XTHEADBB-NEXT: sll a3, a0, a2
-; RV64XTHEADBB-NEXT: negw a4, a2
+; RV64XTHEADBB-NEXT: neg a4, a2
 ; RV64XTHEADBB-NEXT: srl a0, a0, a4
 ; RV64XTHEADBB-NEXT: or a0, a3, a0
 ; RV64XTHEADBB-NEXT: sll a1, a1, a2
@@ -1618,7 +1618,7 @@ define signext i32 @rotr_32_mask_shared(i32 signext %a, i32 signext %b, i32 sign
 ; RV64I-LABEL: rotr_32_mask_shared:
 ; RV64I: # %bb.0:
 ; RV64I-NEXT: srlw a3, a0, a2
-; RV64I-NEXT: negw a4, a2
+; RV64I-NEXT: neg a4, a2
 ; RV64I-NEXT: sllw a0, a0, a4
 ; RV64I-NEXT: or a0, a3, a0
 ; RV64I-NEXT: sllw a1, a1, a2
@@ -1652,7 +1652,7 @@ define signext i32 @rotr_32_mask_shared(i32 signext %a, i32 signext %b, i32 sign
 ; RV64XTHEADBB-LABEL: rotr_32_mask_shared:
 ; RV64XTHEADBB: # %bb.0:
 ; RV64XTHEADBB-NEXT: srlw a3, a0, a2
-; RV64XTHEADBB-NEXT: negw a4, a2
+; RV64XTHEADBB-NEXT: neg a4, a2
 ; RV64XTHEADBB-NEXT: sllw a0, a0, a4
 ; RV64XTHEADBB-NEXT: or a0, a3, a0
 ; RV64XTHEADBB-NEXT: sllw a1, a1, a2
@@ -1713,7 +1713,7 @@ define signext i64 @rotr_64_mask_shared(i64 signext %a, i64 signext %b, i64 sign
 ; RV64I-LABEL: rotr_64_mask_shared:
 ; RV64I: # %bb.0:
 ; RV64I-NEXT: srl a3, a0, a2
-; RV64I-NEXT: negw a4, a2
+; RV64I-NEXT: neg a4, a2
 ; RV64I-NEXT: sll a0, a0, a4
 ; RV64I-NEXT: or a0, a3, a0
 ; RV64I-NEXT: sll a1, a1, a2
@@ -1816,7 +1816,7 @@ define signext i64 @rotr_64_mask_shared(i64 signext %a, i64 signext %b, i64 sign
 ; RV64XTHEADBB-LABEL: rotr_64_mask_shared:
 ; RV64XTHEADBB: # %bb.0:
 ; RV64XTHEADBB-NEXT: srl a3, a0, a2
-; RV64XTHEADBB-NEXT: negw a4, a2
+; RV64XTHEADBB-NEXT: neg a4, a2
 ; RV64XTHEADBB-NEXT: sll a0, a0, a4
 ; RV64XTHEADBB-NEXT: or a0, a3, a0
 ; RV64XTHEADBB-NEXT: sll a1, a1, a2
@@ -1846,7 +1846,7 @@ define signext i32 @rotl_32_mask_multiple(i32 signext %a, i32 signext %b, i32 si
 ; RV64I-LABEL: rotl_32_mask_multiple:
 ; RV64I: # %bb.0:
 ; RV64I-NEXT: sllw a3, a0, a2
-; RV64I-NEXT: negw a4, a2
+; RV64I-NEXT: neg a4, a2
 ; RV64I-NEXT: sllw a2, a1, a2
 ; RV64I-NEXT: srlw a0, a0, a4
 ; RV64I-NEXT: srlw a1, a1, a4
@@ -1884,7 +1884,7 @@ define signext i32 @rotl_32_mask_multiple(i32 signext %a, i32 signext %b, i32 si
 ; RV64XTHEADBB-LABEL: rotl_32_mask_multiple:
 ; RV64XTHEADBB: # %bb.0:
 ; RV64XTHEADBB-NEXT: sllw a3, a0, a2
-; RV64XTHEADBB-NEXT: negw a4, a2
+; RV64XTHEADBB-NEXT: neg a4, a2
 ; RV64XTHEADBB-NEXT: sllw a2, a1, a2
 ; RV64XTHEADBB-NEXT: srlw a0, a0, a4
 ; RV64XTHEADBB-NEXT: srlw a1, a1, a4
@@ -1948,7 +1948,7 @@ define i64 @rotl_64_mask_multiple(i64 %a, i64 %b, i64 %amt) nounwind {
 ; RV64I-LABEL: rotl_64_mask_multiple:
 ; RV64I: # %bb.0:
 ; RV64I-NEXT: sll a3, a0, a2
-; RV64I-NEXT: negw a4, a2
+; RV64I-NEXT: neg a4, a2
 ; RV64I-NEXT: sll a2, a1, a2
 ; RV64I-NEXT: srl a0, a0, a4
 ; RV64I-NEXT: srl a1, a1, a4
@@ -2056,7 +2056,7 @@ define i64 @rotl_64_mask_multiple(i64 %a, i64 %b, i64 %amt) nounwind {
 ; RV64XTHEADBB-LABEL: rotl_64_mask_multiple:
 ; RV64XTHEADBB: # %bb.0:
 ; RV64XTHEADBB-NEXT: sll a3, a0, a2
-; RV64XTHEADBB-NEXT: negw a4, a2
+; RV64XTHEADBB-NEXT: neg a4, a2
 ; RV64XTHEADBB-NEXT: sll a2, a1, a2
 ; RV64XTHEADBB-NEXT: srl a0, a0, a4
 ; RV64XTHEADBB-NEXT: srl a1, a1, a4
@@ -2087,7 +2087,7 @@ define signext i32 @rotr_32_mask_multiple(i32 signext %a, i32 signext %b, i32 si
 ; RV64I-LABEL: rotr_32_mask_multiple:
 ; RV64I: # %bb.0:
 ; RV64I-NEXT: srlw a3, a0, a2
-; RV64I-NEXT: negw a4, a2
+; RV64I-NEXT: neg a4, a2
 ; RV64I-NEXT: srlw a2, a1, a2
 ; RV64I-NEXT: sllw a0, a0, a4
 ; RV64I-NEXT: sllw a1, a1, a4
@@ -2125,7 +2125,7 @@ define signext i32 @rotr_32_mask_multiple(i32 signext %a, i32 signext %b, i32 si
 ; RV64XTHEADBB-LABEL: rotr_32_mask_multiple:
 ; RV64XTHEADBB: # %bb.0:
 ; RV64XTHEADBB-NEXT: srlw a3, a0, a2
-; RV64XTHEADBB-NEXT: negw a4, a2
+; RV64XTHEADBB-NEXT: neg a4, a2
 ; RV64XTHEADBB-NEXT: srlw a2, a1, a2
 ; RV64XTHEADBB-NEXT: sllw a0, a0, a4
 ; RV64XTHEADBB-NEXT: sllw a1, a1, a4
@@ -2188,7 +2188,7 @@ define i64 @rotr_64_mask_multiple(i64 %a, i64 %b, i64 %amt) nounwind {
 ; RV64I-LABEL: rotr_64_mask_multiple:
 ; RV64I: # %bb.0:
 ; RV64I-NEXT: srl a3, a0, a2
-; RV64I-NEXT: negw a4, a2
+; RV64I-NEXT: neg a4, a2
 ; RV64I-NEXT: srl a2, a1, a2
 ; RV64I-NEXT: sll a0, a0, a4
 ; RV64I-NEXT: sll a1, a1, a4
@@ -2295,7 +2295,7 @@ define i64 @rotr_64_mask_multiple(i64 %a, i64 %b, i64 %amt) nounwind {
 ; RV64XTHEADBB-LABEL: rotr_64_mask_multiple:
 ; RV64XTHEADBB: # %bb.0:
 ; RV64XTHEADBB-NEXT: srl a3, a0, a2
-; RV64XTHEADBB-NEXT: negw a4, a2
+; RV64XTHEADBB-NEXT: neg a4, a2
 ; RV64XTHEADBB-NEXT: srl a2, a1, a2
 ; RV64XTHEADBB-NEXT: sll a0, a0, a4
 ; RV64XTHEADBB-NEXT: sll a1, a1, a4
@@ -2353,7 +2353,7 @@ define i64 @rotl_64_zext(i64 %x, i32 %y) nounwind {
 ;
 ; RV64I-LABEL: rotl_64_zext:
 ; RV64I: # %bb.0:
-; RV64I-NEXT: negw a2, a1
+; RV64I-NEXT: neg a2, a1
 ; RV64I-NEXT: sll a1, a0, a1
 ; RV64I-NEXT: srl a0, a0, a2
 ; RV64I-NEXT: or a0, a1, a0
@@ -2447,7 +2447,7 @@ define i64 @rotl_64_zext(i64 %x, i32 %y) nounwind {
 ; RV64XTHEADBB-LABEL: rotl_64_zext:
 ; RV64XTHEADBB: # %bb.0:
 ; RV64XTHEADBB-NEXT: sll a2, a0, a1
-; RV64XTHEADBB-NEXT: negw a1, a1
+; RV64XTHEADBB-NEXT: neg a1, a1
 ; RV64XTHEADBB-NEXT: srl a0, a0, a1
 ; RV64XTHEADBB-NEXT: or a0, a2, a0
 ; RV64XTHEADBB-NEXT: ret
@@ -2503,7 +2503,7 @@ define i64 @rotr_64_zext(i64 %x, i32 %y) nounwind {
 ;
 ; RV64I-LABEL: rotr_64_zext:
 ; RV64I: # %bb.0:
-; RV64I-NEXT: negw a2, a1
+; RV64I-NEXT: neg a2, a1
 ; RV64I-NEXT: srl a1, a0, a1
 ; RV64I-NEXT: sll a0, a0, a2
 ; RV64I-NEXT: or a0, a1, a0
@@ -2597,7 +2597,7 @@ define i64 @rotr_64_zext(i64 %x, i32 %y) nounwind {
 ; RV64XTHEADBB-LABEL: rotr_64_zext:
 ; RV64XTHEADBB: # %bb.0:
 ; RV64XTHEADBB-NEXT: srl a2, a0, a1
-; RV64XTHEADBB-NEXT: negw a1, a1
+; RV64XTHEADBB-NEXT: neg a1, a1
 ; RV64XTHEADBB-NEXT: sll a0, a0, a1
 ; RV64XTHEADBB-NEXT: or a0, a2, a0
 ; RV64XTHEADBB-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rv64i-demanded-bits.ll b/llvm/test/CodeGen/RISCV/rv64i-demanded-bits.ll
index b8c43289bdfed..721436deb1c0b 100644
--- a/llvm/test/CodeGen/RISCV/rv64i-demanded-bits.ll
+++ b/llvm/test/CodeGen/RISCV/rv64i-demanded-bits.ll
@@ -121,7 +121,7 @@ define signext i32 @andi_sub_cse(i32 signext %0, i32 signext %1, ptr %2) {
 define signext i32 @addi_sub_cse(i32 signext %0, i32 signext %1, ptr %2) {
 ; CHECK-LABEL: addi_sub_cse:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: subw a0, a0, a1
+; CHECK-NEXT: sub a0, a0, a1
 ; CHECK-NEXT: addiw a0, a0, -8
 ; CHECK-NEXT: sw a0, 0(a2)
 ; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rv64i-exhaustive-w-insts.ll b/llvm/test/CodeGen/RISCV/rv64i-exhaustive-w-insts.ll
index dad20b2d19464..6b4c2539c88f8 100644
--- a/llvm/test/CodeGen/RISCV/rv64i-exhaustive-w-insts.ll
+++ b/llvm/test/CodeGen/RISCV/rv64i-exhaustive-w-insts.ll
@@ -501,14 +501,14 @@ define signext i32 @sext_subw_zext_zext(i32 zeroext %a, i32 zeroext %b) nounwind
 define zeroext i32 @zext_subw_aext_aext(i32 %a, i32 %b) nounwind {
 ; RV64I-LABEL: zext_subw_aext_aext:
 ; RV64I: # %bb.0:
-; RV64I-NEXT: subw a0, a0, a1
+; RV64I-NEXT: sub a0, a0, a1
 ; RV64I-NEXT: slli a0, a0, 32
 ; RV64I-NEXT: srli a0, a0, 32
 ; RV64I-NEXT: ret
 ;
 ; RV64ZBA-LABEL: zext_subw_aext_aext:
 ; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: subw a0, a0, a1
+; RV64ZBA-NEXT: sub a0, a0, a1
 ; RV64ZBA-NEXT: zext.w a0, a0
 ; RV64ZBA-NEXT: ret
 %1 = sub i32 %a, %b
@@ -518,14 +518,14 @@ define zeroext i32 @zext_subw_aext_aext(i32 %a, i32 %b) nounwind {
 define zeroext i32 @zext_subw_aext_sext(i32 %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: zext_subw_aext_sext:
 ; RV64I: # %bb.0:
-; RV64I-NEXT: subw a0, a0, a1
+; RV64I-NEXT: sub a0, a0, a1
 ; RV64I-NEXT: slli a0, a0, 32
 ; RV64I-NEXT: srli a0, a0, 32
 ; RV64I-NEXT: ret
 ;
 ; RV64ZBA-LABEL: zext_subw_aext_sext:
 ; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: subw a0, a0, a1
+; RV64ZBA-NEXT: sub a0, a0, a1
 ; RV64ZBA-NEXT: zext.w a0, a0
 ; RV64ZBA-NEXT: ret
 %1 = sub i32 %a, %b
@@ -535,14 +535,14 @@ define zeroext i32 @zext_subw_aext_sext(i32 %a, i32 signext %b) nounwind {
 define zeroext i32 @zext_subw_aext_zext(i32 %a, i32 zeroext %b) nounwind {
 ; RV64I-LABEL: zext_subw_aext_zext:
 ; RV64I: # %bb.0:
-; RV64I-NEXT: subw a0, a0, a1
+; RV64I-NEXT: sub a0, a0, a1
 ; RV64I-NEXT: slli a0, a0, 32
 ; RV64I-NEXT: srli a0, a0, 32
 ; RV64I-NEXT: ret
 ;
 ; RV64ZBA-LABEL: zext_subw_aext_zext:
 ; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: subw a0, a0, a1
+; RV64ZBA-NEXT: sub a0, a0, a1
 ; RV64ZBA-NEXT: zext.w a0, a0
 ; RV64ZBA-NEXT: ret
 %1 = sub i32 %a, %b
@@ -552,14 +552,14 @@ define zeroext i32 @zext_subw_aext_zext(i32 %a, i32 zeroext %b) nounwind {
 define zeroext i32 @zext_subw_sext_aext(i32 signext %a, i32 %b) nounwind {
 ; RV64I-LABEL: zext_subw_sext_aext:
 ; RV64I: # %bb.0:
-; RV64I-NEXT: subw a0, a0, a1
+; RV64I-NEXT: sub a0, a0, a1
 ; RV64I-NEXT: slli a0, a0, 32
 ; RV64I-NEXT: srli a0, a0, 32
 ; RV64I-NEXT: ret
 ;
 ; RV64ZBA-LABEL: zext_subw_sext_aext:
 ; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: subw a0, a0, a1
+; RV64ZBA-NEXT: sub a0, a0, a1
 ; RV64ZBA-NEXT: zext.w a0, a0
 ; RV64ZBA-NEXT: ret
 %1 = sub i32 %a, %b
@@ -569,14 +569,14 @@ define zeroext i32 @zext_subw_sext_aext(i32 signext %a, i32 %b) nounwind {
 define zeroext i32 @zext_subw_sext_sext(i32 signext %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: zext_subw_sext_sext:
 ; RV64I: # %bb.0:
-; RV64I-NEXT: subw a0, a0, a1
+; RV64I-NEXT: sub a0, a0, a1
 ; RV64I-NEXT: slli a0, a0, 32
 ; RV64I-NEXT: srli a0, a0, 32
 ; RV64I-NEXT: ret
 ;
 ; RV64ZBA-LABEL: zext_subw_sext_sext:
 ; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: subw a0, a0, a1
+; RV64ZBA-NEXT: sub a0, a0, a1
 ; RV64ZBA-NEXT: zext.w a0, a0
 ; RV64ZBA-NEXT: ret
 %1 = sub i32 %a, %b
@@ -586,14 +586,14 @@ define zeroext i32 @zext_subw_sext_sext(i32 signext %a, i32 signext %b) nounwind
 define zeroext i32 @zext_subw_sext_zext(i32 signext %a, i32 zeroext %b) nounwind {
 ; RV64I-LABEL: zext_subw_sext_zext:
 ; RV64I: # %bb.0:
-; RV64I-NEXT: subw a0, a0, a1
+; RV64I-NEXT: sub a0, a0, a1
 ; RV64I-NEXT: slli a0, a0, 32
 ; RV64I-NEXT: srli a0, a0, 32
 ; RV64I-NEXT: ret
 ;
 ; RV64ZBA-LABEL: zext_subw_sext_zext:
 ; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: subw a0, a0, a1
+; RV64ZBA-NEXT: sub a0, a0, a1
 ; RV64ZBA-NEXT: zext.w a0, a0
 ; RV64ZBA-NEXT: ret
 %1 = sub i32 %a, %b
@@ -603,14 +603,14 @@ define zeroext i32 @zext_subw_sext_zext(i32 signext %a, i32 zeroext %b) nounwind
 define zeroext i32 @zext_subw_zext_aext(i32 zeroext %a, i32 %b) nounwind {
 ; RV64I-LABEL: zext_subw_zext_aext:
 ; RV64I: # %bb.0:
-; RV64I-NEXT: subw a0, a0, a1
+; RV64I-NEXT: sub a0, a0, a1
 ; RV64I-NEXT: slli a0, a0, 32
 ; RV64I-NEXT: srli a0, a0, 32
 ; RV64I-NEXT: ret
 ;
 ; RV64ZBA-LABEL: zext_subw_zext_aext:
 ; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: subw a0, a0, a1
+; RV64ZBA-NEXT: sub a0, a0, a1
 ; RV64ZBA-NEXT: zext.w a0, a0
 ; RV64ZBA-NEXT: ret
 %1 = sub i32 %a, %b
@@ -620,14 +620,14 @@ define zeroext i32 @zext_subw_zext_aext(i32 zeroext %a, i32 %b) nounwind {
 define zeroext i32 @zext_subw_zext_sext(i32 zeroext %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: zext_subw_zext_sext:
 ; RV64I: # %bb.0:
-; RV64I-NEXT: subw a0, a0, a1
+; RV64I-NEXT: sub a0, a0, a1
 ; RV64I-NEXT: slli a0, a0, 32
 ; RV64I-NEXT: srli a0, a0, 32
 ; RV64I-NEXT: ret
 ;
 ; RV64ZBA-LABEL: zext_subw_zext_sext:
 ; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: subw a0, a0, a1
+; RV64ZBA-NEXT: sub a0, a0, a1
 ; RV64ZBA-NEXT: zext.w a0, a0
 ; RV64ZBA-NEXT: ret
 %1 = sub i32 %a, %b
@@ -637,14 +637,14 @@ define zeroext i32 @zext_subw_zext_sext(i32 zeroext %a, i32 signext %b) nounwind
 define zeroext i32 @zext_subw_zext_zext(i32 zeroext %a, i32 zeroext %b) nounwind {
 ; RV64I-LABEL: zext_subw_zext_zext:
 ; RV64I: # %bb.0:
-; RV64I-NEXT: subw a0, a0, a1
+; RV64I-NEXT: sub a0, a0, a1
 ; RV64I-NEXT: slli a0, a0, 32
 ; RV64I-NEXT: srli a0, a0, 32
 ; RV64I-NEXT: ret
 ;
 ; RV64ZBA-LABEL: zext_subw_zext_zext:
 ; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: subw a0, a0, a1
+; RV64ZBA-NEXT: sub a0, a0, a1
 ; RV64ZBA-NEXT: zext.w a0, a0
 ; RV64ZBA-NEXT: ret
 %1 = sub i32 %a, %b
diff --git a/llvm/test/CodeGen/RISCV/rv64i-w-insts-legalization.ll b/llvm/test/CodeGen/RISCV/rv64i-w-insts-legalization.ll
index 0782018833de3..219a5aa6e5f20 100644
--- a/llvm/test/CodeGen/RISCV/rv64i-w-insts-legalization.ll
+++ b/llvm/test/CodeGen/RISCV/rv64i-w-insts-legalization.ll
@@ -9,7 +9,7 @@ define signext i32 @addw(i32 signext %s, i32 signext %n, i32 signext %k) nounwin
 ; CHECK-NEXT: not a2, a0
 ; CHECK-NEXT: addi a3, a0, 1
 ; CHECK-NEXT: add a2, a2, a1
-; CHECK-NEXT: subw a1, a1, a0
+; CHECK-NEXT: sub a1, a1, a0
 ; CHECK-NEXT: addi a1, a1, -2
 ; CHECK-NEXT: mul a3, a2, a3
 ; CHECK-NEXT: slli a1, a1, 32
@@ -53,7 +53,7 @@ define signext i32 @subw(i32 signext %s, i32 signext %n, i32 signext %k) nounwin
 ; CHECK-NEXT: bge a0, a1, .LBB1_2
 ; CHECK-NEXT: # %bb.1: # %for.body.preheader
 ; CHECK-NEXT: not a2, a0
-; CHECK-NEXT: subw a3, a1, a0
+; CHECK-NEXT: sub a3, a1, a0
 ; CHECK-NEXT: add a1, a2, a1
 ; CHECK-NEXT: addi a3, a3, -2
 ; CHECK-NEXT: mul a2, a1, a2
@@ -61,7 +61,7 @@ define signext i32 @subw(i32 signext %s, i32 signext %n, i32 signext %k) nounwin
 ; CHECK-NEXT: slli a1, a1, 32
 ; CHECK-NEXT: mulhu a1, a1, a3
 ; CHECK-NEXT: srli a1, a1, 1
-; CHECK-NEXT: subw a0, a2, a0
+; CHECK-NEXT: sub a0, a2, a0
 ; CHECK-NEXT: subw a0, a0, a1
 ; CHECK-NEXT: ret
 ; CHECK-NEXT: .LBB1_2:
diff --git a/llvm/test/CodeGen/RISCV/rv64xtheadbb.ll b/llvm/test/CodeGen/RISCV/rv64xtheadbb.ll
index 00f7b462f68db..81acb4f724136 100644
--- a/llvm/test/CodeGen/RISCV/rv64xtheadbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64xtheadbb.ll
@@ -357,7 +357,7 @@ define signext i32 @cttz_i32(i32 signext %a) nounwind {
 ; RV64I: # %bb.0:
 ; RV64I-NEXT: beqz a0, .LBB6_2
 ; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: negw a1, a0
+; RV64I-NEXT: neg a1, a0
 ; RV64I-NEXT: and a0, a0, a1
 ; RV64I-NEXT: slli a1, a0, 6
 ; RV64I-NEXT: slli a2, a0, 8
@@ -365,16 +365,16 @@ define signext i32 @cttz_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT: slli a4, a0, 12
 ; RV64I-NEXT: add a1, a1, a2
 ; RV64I-NEXT: slli a2, a0, 16
-; RV64I-NEXT: subw a3, a3, a4
+; RV64I-NEXT: sub a3, a3, a4
 ; RV64I-NEXT: slli a4, a0, 18
-; RV64I-NEXT: subw a2, a2, a4
+; RV64I-NEXT: sub a2, a2, a4
 ; RV64I-NEXT: slli a4, a0, 4
-; RV64I-NEXT: subw a4, a0, a4
+; RV64I-NEXT: sub a4, a0, a4
 ; RV64I-NEXT: add a1, a4, a1
 ; RV64I-NEXT: slli a4, a0, 14
-; RV64I-NEXT: subw a3, a3, a4
+; RV64I-NEXT: sub a3, a3, a4
 ; RV64I-NEXT: slli a4, a0, 23
-; RV64I-NEXT: subw a2, a2, a4
+; RV64I-NEXT: sub a2, a2, a4
 ; RV64I-NEXT: slli a0, a0, 27
 ; RV64I-NEXT: add a1, a1, a3
 ; RV64I-NEXT: add a0, a2, a0
@@ -410,7 +410,7 @@ define signext i32 @cttz_i32(i32 signext %a) nounwind {
 define signext i32 @cttz_zero_undef_i32(i32 signext %a) nounwind {
 ; RV64I-LABEL: cttz_zero_undef_i32:
 ; RV64I: # %bb.0:
-; RV64I-NEXT: negw a1, a0
+; RV64I-NEXT: neg a1, a0
 ; RV64I-NEXT: and a0, a0, a1
 ; RV64I-NEXT: slli a1, a0, 6
 ; RV64I-NEXT: slli a2, a0, 8
@@ -418,16 +418,16 @@ define signext i32 @cttz_zero_undef_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT: slli a4, a0, 12
 ; RV64I-NEXT: add a1, a1, a2
 ; RV64I-NEXT: slli a2, a0, 16
-; RV64I-NEXT: subw a3, a3, a4
+; RV64I-NEXT: sub a3, a3, a4
 ; RV64I-NEXT: slli a4, a0, 18
-; RV64I-NEXT: subw a2, a2, a4
+; RV64I-NEXT: sub a2, a2, a4
 ; RV64I-NEXT: slli a4, a0, 4
-; RV64I-NEXT: subw a4, a0, a4
+; RV64I-NEXT: sub a4, a0, a4
 ; RV64I-NEXT: add a1, a4, a1
 ; RV64I-NEXT: slli a4, a0, 14
-; RV64I-NEXT: subw a3, a3, a4
+; RV64I-NEXT: sub a3, a3, a4
 ; RV64I-NEXT: slli a4, a0, 23
-; RV64I-NEXT: subw a2, a2, a4
+; RV64I-NEXT: sub a2, a2, a4
 ; RV64I-NEXT: slli a0, a0, 27
 ; RV64I-NEXT: add a1, a1, a3
 ; RV64I-NEXT: add a0, a2, a0
@@ -455,7 +455,7 @@ define signext i32 @cttz_zero_undef_i32(i32 signext %a) nounwind {
 define signext i32 @findFirstSet_i32(i32 signext %a) nounwind {
 ; RV64I-LABEL: findFirstSet_i32:
 ; RV64I: # %bb.0:
-; RV64I-NEXT: negw a1, a0
+; RV64I-NEXT: neg a1, a0
 ; RV64I-NEXT: and a1, a0, a1
 ; RV64I-NEXT: slli a2, a1, 6
 ; RV64I-NEXT: slli a3, a1, 8
@@ -463,16 +463,16 @@ define signext i32 @findFirstSet_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT: slli a5, a1, 12
 ; RV64I-NEXT: add a2, a2, a3
 ; RV64I-NEXT: slli a3, a1, 16
-; RV64I-NEXT: subw a4, a4, a5
+; RV64I-NEXT: sub a4, a4, a5
 ; RV64I-NEXT: slli a5, a1, 18
-; RV64I-NEXT: subw a3, a3, a5
+; RV64I-NEXT: sub a3, a3, a5
 ; RV64I-NEXT: slli a5, a1, 4
-; RV64I-NEXT: subw a5, a1, a5
+; RV64I-NEXT: sub a5, a1, a5
 ; RV64I-NEXT: add a2, a5, a2
 ; RV64I-NEXT: slli a5, a1, 14
-; RV64I-NEXT: subw a4, a4, a5
+; RV64I-NEXT: sub a4, a4, a5
 ; RV64I-NEXT: slli a5, a1, 23
-; RV64I-NEXT: subw a3, a3, a5
+; RV64I-NEXT: sub a3, a3, a5
 ; RV64I-NEXT: slli a1, a1, 27
 ; RV64I-NEXT: add a2, a2, a4
 ; RV64I-NEXT: add a1, a3, a1
@@ -508,7 +508,7 @@ define signext i32 @findFirstSet_i32(i32 signext %a) nounwind {
 define signext i32 @ffs_i32(i32 signext %a) nounwind {
 ; RV64I-LABEL: ffs_i32:
 ; RV64I: # %bb.0:
-; RV64I-NEXT: negw a1, a0
+; RV64I-NEXT: neg a1, a0
 ; RV64I-NEXT: and a1, a0, a1
 ; RV64I-NEXT: slli a2, a1, 6
 ; RV64I-NEXT: slli a3, a1, 8
@@ -516,16 +516,16 @@ define signext i32 @ffs_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT: slli a5, a1, 12
 ; RV64I-NEXT: add a2, a2, a3
 ; RV64I-NEXT: slli a3, a1, 16
-; RV64I-NEXT: subw a4, a4, a5
+; RV64I-NEXT: sub a4, a4, a5
 ; RV64I-NEXT: slli a5, a1, 18
-; RV64I-NEXT: subw a3, a3, a5
+; RV64I-NEXT: sub a3, a3, a5
 ; RV64I-NEXT: slli a5, a1, 4
-; RV64I-NEXT: subw a5, a1, a5
+; RV64I-NEXT: sub a5, a1, a5
 ; RV64I-NEXT: add a2, a5, a2
 ; RV64I-NEXT: slli a5, a1, 14
-; RV64I-NEXT: subw a4, a4, a5
+; RV64I-NEXT: sub a4, a4, a5
 ; RV64I-NEXT: slli a5, a1, 23
-; RV64I-NEXT: subw a3, a3, a5
+; RV64I-NEXT: sub a3, a3, a5
 ; RV64I-NEXT: add a2, a2, a4
 ; RV64I-NEXT: lui a4, %hi(.LCPI9_0)
 ; RV64I-NEXT: addi a4, a4, %lo(.LCPI9_0)
diff --git a/llvm/test/CodeGen/RISCV/rv64zba.ll b/llvm/test/CodeGen/RISCV/rv64zba.ll
index fdff4a39932b9..b46f7cc440b7a 100644
--- a/llvm/test/CodeGen/RISCV/rv64zba.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zba.ll
@@ -3707,7 +3707,7 @@ define ptr @test_gep_gep_dont_crash(ptr %p, i64 %a1, i64 %a2) {
 define i64 @regression(i32 signext %x, i32 signext %y) {
 ; RV64I-LABEL: regression:
 ; RV64I: # %bb.0:
-; RV64I-NEXT: subw a0, a0, a1
+; RV64I-NEXT: sub a0, a0, a1
 ; RV64I-NEXT: slli a0, a0, 32
 ; RV64I-NEXT: srli a1, a0, 29
 ; RV64I-NEXT: srli a0, a0, 27
@@ -3716,14 +3716,14 @@ define i64 @regression(i32 signext %x, i32 signext %y) {
 ;
 ; RV64ZBA-LABEL: regression:
 ; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: subw a0, a0, a1
+; RV64ZBA-NEXT: sub a0, a0, a1
 ; RV64ZBA-NEXT: slli.uw a0, a0, 3
 ; RV64ZBA-NEXT: sh1add a0, a0, a0
 ; RV64ZBA-NEXT: ret
 ;
 ; RV64XANDESPERF-LABEL: regression:
 ; RV64XANDESPERF: # %bb.0:
-; RV64XANDESPERF-NEXT: subw a0, a0, a1
+; RV64XANDESPERF-NEXT: sub a0, a0, a1
 ; RV64XANDESPERF-NEXT: slli a0, a0, 32
 ; RV64XANDESPERF-NEXT: srli a0, a0, 29
 ; RV64XANDESPERF-NEXT: nds.lea.h a0, a0, a0
diff --git a/llvm/test/CodeGen/RISCV/rv64zbb-zbkb.ll b/llvm/test/CodeGen/RISCV/rv64zbb-zbkb.ll
index 12fc98c7edab8..f2c95f855e178 100644
--- a/llvm/test/CodeGen/RISCV/rv64zbb-zbkb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbb-zbkb.ll
@@ -225,7 +225,7 @@ define signext i32 @rol_i32(i32 signext %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: rol_i32:
 ; RV64I: # %bb.0:
 ; RV64I-NEXT: sllw a2, a0, a1
-; RV64I-NEXT: negw a1, a1
+; RV64I-NEXT: neg a1, a1
 ; RV64I-NEXT: srlw a0, a0, a1
 ; RV64I-NEXT: or a0, a2, a0
 ; RV64I-NEXT: ret
@@ -243,7 +243,7 @@ define void @rol_i32_nosext(i32 signext %a, i32 signext %b, ptr %x) nounwind {
 ; RV64I-LABEL: rol_i32_nosext:
 ; RV64I: # %bb.0:
 ; RV64I-NEXT: sllw a3, a0, a1
-; RV64I-NEXT: negw a1, a1
+; RV64I-NEXT: neg a1, a1
 ; RV64I-NEXT: srlw a0, a0, a1
 ; RV64I-NEXT: or a0, a3, a0
 ; RV64I-NEXT: sw a0, 0(a2)
@@ -263,7 +263,7 @@ define signext i32 @rol_i32_neg_constant_rhs(i32 signext %a) nounwind {
 ; RV64I-LABEL: rol_i32_neg_constant_rhs:
 ; RV64I: # %bb.0:
 ; RV64I-NEXT: li a1, -2
-; RV64I-NEXT: negw a2, a0
+; RV64I-NEXT: neg a2, a0
 ; RV64I-NEXT: sllw a0, a1, a0
 ; RV64I-NEXT: srlw a1, a1, a2
 ; RV64I-NEXT: or a0, a0, a1
@@ -284,7 +284,7 @@ define i64 @rol_i64(i64 %a, i64 %b) nounwind {
 ; RV64I-LABEL: rol_i64:
 ; RV64I: # %bb.0:
 ; RV64I-NEXT: sll a2, a0, a1
-; RV64I-NEXT: negw a1, a1
+; RV64I-NEXT: neg a1, a1
 ; RV64I-NEXT: srl a0, a0, a1
 ; RV64I-NEXT: or a0, a2, a0
 ; RV64I-NEXT: ret
@@ -303,7 +303,7 @@ define signext i32 @ror_i32(i32 signext %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: ror_i32:
 ; RV64I: # %bb.0:
 ; RV64I-NEXT: srlw a2, a0, a1
-; RV64I-NEXT: negw a1, a1
+; RV64I-NEXT: neg a1, a1
 ; RV64I-NEXT: sllw a0, a0, a1
 ; RV64I-NEXT: or a0, a2, a0
 ; RV64I-NEXT: ret
@@ -321,7 +321,7 @@ define void @ror_i32_nosext(i32 signext %a, i32 signext %b, ptr %x) nounwind {
 ; RV64I-LABEL: ror_i32_nosext:
 ; RV64I: # %bb.0:
 ; RV64I-NEXT: srlw a3, a0, a1
-; RV64I-NEXT: negw a1, a1
+; RV64I-NEXT: neg a1, a1
 ; RV64I-NEXT: sllw a0, a0, a1
 ; RV64I-NEXT: or a0, a3, a0
 ; RV64I-NEXT: sw a0, 0(a2)
@@ -341,7 +341,7 @@ define signext i32 @ror_i32_neg_constant_rhs(i32 signext %a) nounwind {
 ; RV64I-LABEL: ror_i32_neg_constant_rhs:
 ; RV64I: # %bb.0:
 ; RV64I-NEXT: li a1, -2
-; RV64I-NEXT: negw a2, a0
+; RV64I-NEXT: neg a2, a0
 ; RV64I-NEXT: srlw a0, a1, a0
 ; RV64I-NEXT: sllw a1, a1, a2
 ; RV64I-NEXT: or a0, a0, a1
@@ -362,7 +362,7 @@ define i64 @ror_i64(i64 %a, i64 %b) nounwind {
 ; RV64I-LABEL: ror_i64:
 ; RV64I: # %bb.0:
 ; RV64I-NEXT: srl a2, a0, a1
-; RV64I-NEXT: negw a1, a1
+; RV64I-NEXT: neg a1, a1
 ; RV64I-NEXT: sll a0, a0, a1
 ; RV64I-NEXT: or a0, a2, a0
 ; RV64I-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rv64zbb.ll b/llvm/test/CodeGen/RISCV/rv64zbb.ll
index e6407279870db..adeabd6cb7d76 100644
--- a/llvm/test/CodeGen/RISCV/rv64zbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbb.ll
@@ -347,7 +347,7 @@ define signext i32 @cttz_i32(i32 signext %a) nounwind {
 ; RV64I: # %bb.0:
 ; RV64I-NEXT: beqz a0, .LBB6_2
 ; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: negw a1, a0
+; RV64I-NEXT: neg a1, a0
 ; RV64I-NEXT: and a0, a0, a1
 ; RV64I-NEXT: slli a1, a0, 6
 ; RV64I-NEXT: slli a2, a0, 8
@@ -355,16 +355,16 @@ define signext i32 @cttz_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT: slli a4, a0, 12
 ; RV64I-NEXT: add a1, a1, a2
 ; RV64I-NEXT: slli a2, a0, 16
-; RV64I-NEXT: subw a3, a3, a4
+; RV64I-NEXT: sub a3, a3, a4
 ; RV64I-NEXT: slli a4, a0, 18
-; RV64I-NEXT: subw a2, a2, a4
+; RV64I-NEXT: sub a2, a2, a4
 ; RV64I-NEXT: slli a4, a0, 4
-; RV64I-NEXT: subw a4, a0, a4
+; RV64I-NEXT: sub a4, a0, a4
 ; RV64I-NEXT: add a1, a4, a1
 ; RV64I-NEXT: slli a4, a0, 14
-; RV64I-NEXT: subw a3, a3, a4
+; RV64I-NEXT: sub a3, a3, a4
 ; RV64I-NEXT: slli a4, a0, 23
-; RV64I-NEXT: subw a2, a2, a4
+; RV64I-NEXT: sub a2, a2, a4
 ; RV64I-NEXT: slli a0, a0, 27
 ; RV64I-NEXT: add a1, a1, a3
 ; RV64I-NEXT: add a0, a2, a0
@@ -390,7 +390,7 @@ define signext i32 @cttz_i32(i32 signext %a) nounwind {
 define signext i32 @cttz_zero_undef_i32(i32 signext %a) nounwind {
 ; RV64I-LABEL: cttz_zero_undef_i32:
 ; RV64I: # %bb.0:
-; RV64I-NEXT: negw a1, a0
+; RV64I-NEXT: neg a1, a0
 ; RV64I-NEXT: and a0, a0, a1
 ; RV64I-NEXT: slli a1, a0, 6
 ; RV64I-NEXT: slli a2, a0, 8
@@ -398,16 +398,16 @@ define signext i32 @cttz_zero_undef_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT: slli a4, a0, 12
 ; RV64I-NEXT: add a1, a1, a2
 ; RV64I-NEXT: slli a2, a0, 16
-; RV64I-NEXT: subw a3, a3, a4
+; RV64I-NEXT: sub a3, a3, a4
 ; RV64I-NEXT: slli a4, a0, 18
-; RV64I-NEXT: subw a2, a2, a4
+; RV64I-NEXT: sub a2, a2, a4
 ; RV64I-NEXT: slli a4, a0, 4
-; RV64I-NEXT: subw a4, a0, a4
+; RV64I-NEXT: sub a4, a0, a4
 ; RV64I-NEXT: add a1, a4, a1
 ; RV64I-NEXT: slli a4, a0, 14
-; RV64I-NEXT: subw a3, a3, a4
+; RV64I-NEXT: sub a3, a3, a4
 ; RV64I-NEXT: slli a4, a0, 23
-; RV64I-NEXT: subw a2, a2, a4
+; RV64I-NEXT: sub a2, a2, a4
 ; RV64I-NEXT: slli a0, a0, 27
 ; RV64I-NEXT: add a1, a1, a3
 ; RV64I-NEXT: add a0, a2, a0
@@ -430,7 +430,7 @@ define signext i32 @cttz_zero_undef_i32(i32 signext %a) nounwind {
 define signext i32 @findFirstSet_i32(i32 signext %a) nounwind {
 ; RV64I-LABEL: findFirstSet_i32:
 ; RV64I: # %bb.0:
-; RV64I-NEXT: negw a1, a0
+; RV64I-NEXT: neg a1, a0
 ; RV64I-NEXT: and a1, a0, a1
 ; RV64I-NEXT: slli a2, a1, 6
 ; RV64I-NEXT: slli a3, a1, 8
@@ -438,16 +438,16 @@ define signext i32 @findFirstSet_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT: slli a5, a1, 12
 ; RV64I-NEXT: add a2, a2, a3
 ; RV64I-NEXT: slli a3, a1, 16
-; RV64I-NEXT: subw a4, a4, a5
+; RV64I-NEXT: sub a4, a4, a5
 ; RV64I-NEXT: slli a5, a1, 18
-; RV64I-NEXT: subw a3, a3, a5
+; RV64I-NEXT: sub a3, a3, a5
 ; RV64I-NEXT: slli a5, a1, 4
-; RV64I-NEXT: subw a5, a1, a5
+; RV64I-NEXT: sub a5, a1, a5
 ; RV64I-NEXT: add a2, a5, a2
 ; RV64I-NEXT: slli a5, a1, 14
-; RV64I-NEXT: subw a4, a4, a5
+; RV64I-NEXT: sub a4, a4, a5
 ; RV64I-NEXT: slli a5, a1, 23
-; RV64I-NEXT: subw a3, a3, a5
+; RV64I-NEXT: sub a3, a3, a5
 ; RV64I-NEXT: slli a1, a1, 27
 ; RV64I-NEXT: add a2, a2, a4
 ; RV64I-NEXT: add a1, a3, a1
@@ -478,7 +478,7 @@ define signext i32 @findFirstSet_i32(i32 signext %a) nounwind {
 define signext i32 @ffs_i32(i32 signext %a) nounwind {
 ; RV64I-LABEL: ffs_i32:
 ; RV64I: # %bb.0:
-; RV64I-NEXT: negw a1, a0
+; RV64I-NEXT: neg a1, a0
 ; RV64I-NEXT: and a1, a0, a1
 ; RV64I-NEXT: slli a2, a1, 6
 ; RV64I-NEXT: slli a3, a1, 8
@@ -486,16 +486,16 @@ define signext i32 @ffs_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT: slli a5, a1, 12
 ; RV64I-NEXT: add a2, a2, a3
 ; RV64I-NEXT: slli a3, a1, 16
-; RV64I-NEXT: subw a4, a4, a5
+; RV64I-NEXT: sub a4, a4, a5
 ; RV64I-NEXT: slli a5, a1, 18
-; RV64I-NEXT: subw a3, a3, a5
+; RV64I-NEXT: sub a3, a3, a5
 ; RV64I-NEXT: slli a5, a1, 4
-; RV64I-NEXT: subw a5, a1, a5
+; RV64I-NEXT: sub a5, a1, a5
 ; RV64I-NEXT: add a2, a5, a2
 ; RV64I-NEXT: slli a5, a1, 14
-; RV64I-NEXT: subw a4, a4, a5
+; RV64I-NEXT: sub a4, a4, a5
 ; RV64I-NEXT: slli a5, a1, 23
-; RV64I-NEXT: subw a3, a3, a5
+; RV64I-NEXT: sub a3, a3, a5
 ; RV64I-NEXT: add a2, a2, a4
 ; RV64I-NEXT: lui a4, %hi(.LCPI9_0)
 ; RV64I-NEXT: addi a4, a4, %lo(.LCPI9_0)
@@ -1741,7 +1741,7 @@ define i8 @sub_if_uge_i8(i8 %x, i8 %y) {
 ; RV64ZBB-LABEL: sub_if_uge_i8:
 ; RV64ZBB: # %bb.0:
 ; RV64ZBB-NEXT: zext.b a2, a0
-; RV64ZBB-NEXT: subw a0, a0, a1
+; RV64ZBB-NEXT: sub a0, a0, a1
 ; RV64ZBB-NEXT: zext.b a0, a0
 ; RV64ZBB-NEXT: minu a0, a2, a0
 ; RV64ZBB-NEXT: ret
@@ -1767,7 +1767,7 @@ define i16 @sub_if_uge_i16(i16 %x, i16 %y) {
 ; RV64ZBB-LABEL: sub_if_uge_i16:
 ; RV64ZBB: # %bb.0:
 ; RV64ZBB-NEXT: zext.h a2, a0
-; RV64ZBB-NEXT: subw a0, a0, a1
+; RV64ZBB-NEXT: sub a0, a0, a1
 ; RV64ZBB-NEXT: zext.h a0, a0
 ; RV64ZBB-NEXT: minu a0, a2, a0
 ; RV64ZBB-NEXT: ret
@@ -1852,7 +1852,7 @@ define i32 @sub_if_uge_multiuse_select_i32(i32 %x, i32 %y) {
 ; CHECK-NEXT: sltu a2, a3, a2
 ; CHECK-NEXT: addi a2, a2, -1
 ; CHECK-NEXT: and a1, a2, a1
-; CHECK-NEXT: subw a0, a0, a1
+; CHECK-NEXT: sub a0, a0, a1
 ; CHECK-NEXT: sllw a0, a0, a1
 ; CHECK-NEXT: ret
 %cmp = icmp ult i32 %x, %y
@@ -1870,7 +1870,7 @@ define i32 @sub_if_uge_multiuse_cmp_i32(i32 %x, i32 %y) {
 ; RV64I-NEXT: sltu a4, a3, a2
 ; RV64I-NEXT: addi a4, a4, -1
 ; RV64I-NEXT: and a1, a4, a1
-; RV64I-NEXT: subw a0, a0, a1
+; RV64I-NEXT: sub a0, a0, a1
 ; RV64I-NEXT: bltu a3, a2, .LBB68_2
 ; RV64I-NEXT: # %bb.1:
 ; RV64I-NEXT: li a1, 4
@@ -1980,7 +1980,7 @@ define i32 @sub_if_uge_C_i32(i32 signext %x) {
 ; RV64I-NEXT: lui a2, 1048560
 ; RV64I-NEXT: addi a1, a1, -16
 ; RV64I-NEXT: sltu a1, a1, a0
-; RV64I-NEXT: negw a1, a1
+; RV64I-NEXT: neg a1, a1
 ; RV64I-NEXT: addi a2, a2, 15
 ; RV64I-NEXT: and a1, a1, a2
 ; RV64I-NEXT: addw a0, a0, a1
@@ -2036,7 +2036,7 @@ define i32 @sub_if_uge_C_multiuse_cmp_i32(i32 signext %x, ptr %z) {
 ; RV64I-NEXT: lui a3, 1048560
 ; RV64I-NEXT: addi a2, a2, -16
 ; RV64I-NEXT: sltu a2, a2, a0
-; RV64I-NEXT: negw a4, a2
+; RV64I-NEXT: neg a4, a2
 ; RV64I-NEXT: addi a3, a3, 15
 ; RV64I-NEXT: and a3, a4, a3
 ; RV64I-NEXT: addw a0, a0, a3
diff --git a/llvm/test/CodeGen/RISCV/rvv/expand-no-v.ll b/llvm/test/CodeGen/RISCV/rvv/expand-no-v.ll
index 5b82b27a51510..81b2b6594890e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/expand-no-v.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/expand-no-v.ll
@@ -63,10 +63,10 @@ define i32 @vpreduce_add_v4i32(i32 %s, <4 x i32> %v, <4 x i1> %m, i32 %evl) {
 ; RV64-NEXT: and a2, t4, a2
 ; RV64-NEXT: and t0, t3, t1
 ; RV64-NEXT: and a7, t2, a7
-; RV64-NEXT: negw a7, a7
-; RV64-NEXT: negw t0, t0
-; RV64-NEXT: negw a2, a2
-; RV64-NEXT: negw a3, a3
+; RV64-NEXT: neg a7, a7
+; RV64-NEXT: neg t0, t0
+; RV64-NEXT: neg a2, a2
+; RV64-NEXT: neg a3, a3
 ; RV64-NEXT: and a4, a7, a4
 ; RV64-NEXT: and a6, t0, a6
 ; RV64-NEXT: and a1, a2, a1
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-asm.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-asm.ll
index 07aa05f609c40..48845c54c5603 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-asm.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-asm.ll
@@ -930,7 +930,7 @@ define void @strided_load_startval_add_with_splat(ptr noalias nocapture %arg, pt
 ; CHECK-NEXT: add a2, a0, a4
 ; CHECK-NEXT: slli a5, a4, 2
 ; CHECK-NEXT: add a1, a1, a4
-; CHECK-NEXT: subw a3, a3, a4
+; CHECK-NEXT: sub a3, a3, a4
 ; CHECK-NEXT: add a1, a1, a5
slli a3, a3, 32 ; CHECK-NEXT: srli a3, a3, 32 diff --git a/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll b/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll index f9ac53b76ebaf..f481f9cff5de1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll @@ -274,10 +274,10 @@ define <4 x i32> @ustest_f32i32(<4 x float> %x) { ; CHECK-NOV-NEXT: sgtz a6, a2 ; CHECK-NOV-NEXT: sgtz a7, a3 ; CHECK-NOV-NEXT: sgtz t0, a5 -; CHECK-NOV-NEXT: negw t0, t0 -; CHECK-NOV-NEXT: negw a7, a7 -; CHECK-NOV-NEXT: negw a6, a6 -; CHECK-NOV-NEXT: negw a4, a4 +; CHECK-NOV-NEXT: neg t0, t0 +; CHECK-NOV-NEXT: neg a7, a7 +; CHECK-NOV-NEXT: neg a6, a6 +; CHECK-NOV-NEXT: neg a4, a4 ; CHECK-NOV-NEXT: and a5, t0, a5 ; CHECK-NOV-NEXT: and a3, a7, a3 ; CHECK-NOV-NEXT: and a2, a6, a2 @@ -755,10 +755,10 @@ define <4 x i32> @ustest_f16i32(<4 x half> %x) { ; CHECK-NOV-NEXT: sgtz a4, s1 ; CHECK-NOV-NEXT: sgtz a5, a1 ; CHECK-NOV-NEXT: sgtz a6, a3 -; CHECK-NOV-NEXT: negw a6, a6 -; CHECK-NOV-NEXT: negw a5, a5 -; CHECK-NOV-NEXT: negw a4, a4 -; CHECK-NOV-NEXT: negw a2, a2 +; CHECK-NOV-NEXT: neg a6, a6 +; CHECK-NOV-NEXT: neg a5, a5 +; CHECK-NOV-NEXT: neg a4, a4 +; CHECK-NOV-NEXT: neg a2, a2 ; CHECK-NOV-NEXT: and a3, a6, a3 ; CHECK-NOV-NEXT: and a1, a5, a1 ; CHECK-NOV-NEXT: and a4, a4, s1 @@ -1166,10 +1166,10 @@ define <4 x i16> @ustest_f32i16(<4 x float> %x) { ; CHECK-NOV-NEXT: sgtz a6, a2 ; CHECK-NOV-NEXT: sgtz a7, a3 ; CHECK-NOV-NEXT: sgtz t0, a5 -; CHECK-NOV-NEXT: negw t0, t0 -; CHECK-NOV-NEXT: negw a7, a7 -; CHECK-NOV-NEXT: negw a6, a6 -; CHECK-NOV-NEXT: negw a4, a4 +; CHECK-NOV-NEXT: neg t0, t0 +; CHECK-NOV-NEXT: neg a7, a7 +; CHECK-NOV-NEXT: neg a6, a6 +; CHECK-NOV-NEXT: neg a4, a4 ; CHECK-NOV-NEXT: and a5, t0, a5 ; CHECK-NOV-NEXT: and a3, a7, a3 ; CHECK-NOV-NEXT: and a2, a6, a2 @@ -2040,14 +2040,14 @@ define <8 x i16> @ustest_f16i16(<8 x half> %x) { ; CHECK-NOV-NEXT: sgtz t4, a5 ; CHECK-NOV-NEXT: sgtz t5, a6 ; CHECK-NOV-NEXT: sgtz t6, a7 -; CHECK-NOV-NEXT: negw t6, t6 -; CHECK-NOV-NEXT: negw t5, t5 -; CHECK-NOV-NEXT: negw t4, t4 -; CHECK-NOV-NEXT: negw t3, t3 -; CHECK-NOV-NEXT: negw t2, t2 -; CHECK-NOV-NEXT: negw t1, t1 -; CHECK-NOV-NEXT: negw t0, t0 -; CHECK-NOV-NEXT: negw a4, a4 +; CHECK-NOV-NEXT: neg t6, t6 +; CHECK-NOV-NEXT: neg t5, t5 +; CHECK-NOV-NEXT: neg t4, t4 +; CHECK-NOV-NEXT: neg t3, t3 +; CHECK-NOV-NEXT: neg t2, t2 +; CHECK-NOV-NEXT: neg t1, t1 +; CHECK-NOV-NEXT: neg t0, t0 +; CHECK-NOV-NEXT: neg a4, a4 ; CHECK-NOV-NEXT: and a7, t6, a7 ; CHECK-NOV-NEXT: and a6, t5, a6 ; CHECK-NOV-NEXT: and a5, t4, a5 @@ -3830,16 +3830,16 @@ define <4 x i32> @ustest_f32i32_mm(<4 x float> %x) { ; CHECK-NOV-NEXT: mv a5, a3 ; CHECK-NOV-NEXT: .LBB32_5: # %entry ; CHECK-NOV-NEXT: sgtz a3, a5 -; CHECK-NOV-NEXT: negw a3, a3 +; CHECK-NOV-NEXT: neg a3, a3 ; CHECK-NOV-NEXT: and a3, a3, a5 ; CHECK-NOV-NEXT: sgtz a5, a4 -; CHECK-NOV-NEXT: negw a5, a5 +; CHECK-NOV-NEXT: neg a5, a5 ; CHECK-NOV-NEXT: and a4, a5, a4 ; CHECK-NOV-NEXT: sgtz a5, a2 -; CHECK-NOV-NEXT: negw a5, a5 +; CHECK-NOV-NEXT: neg a5, a5 ; CHECK-NOV-NEXT: and a2, a5, a2 ; CHECK-NOV-NEXT: sgtz a5, a1 -; CHECK-NOV-NEXT: negw a5, a5 +; CHECK-NOV-NEXT: neg a5, a5 ; CHECK-NOV-NEXT: and a1, a5, a1 ; CHECK-NOV-NEXT: sw a3, 0(a0) ; CHECK-NOV-NEXT: sw a4, 4(a0) @@ -4306,16 +4306,16 @@ define <4 x i32> @ustest_f16i32_mm(<4 x half> %x) { ; CHECK-NOV-NEXT: mv a3, a2 ; CHECK-NOV-NEXT: .LBB35_5: # %entry ; CHECK-NOV-NEXT: sgtz a2, a3 -; CHECK-NOV-NEXT: negw a2, a2 +; CHECK-NOV-NEXT: neg a2, a2 ; CHECK-NOV-NEXT: and a2, a2, a3 ; 
CHECK-NOV-NEXT: sgtz a3, a1 -; CHECK-NOV-NEXT: negw a3, a3 +; CHECK-NOV-NEXT: neg a3, a3 ; CHECK-NOV-NEXT: and a1, a3, a1 ; CHECK-NOV-NEXT: sgtz a3, s1 -; CHECK-NOV-NEXT: negw a3, a3 +; CHECK-NOV-NEXT: neg a3, a3 ; CHECK-NOV-NEXT: and a3, a3, s1 ; CHECK-NOV-NEXT: sgtz a4, a0 -; CHECK-NOV-NEXT: negw a4, a4 +; CHECK-NOV-NEXT: neg a4, a4 ; CHECK-NOV-NEXT: and a0, a4, a0 ; CHECK-NOV-NEXT: sw a2, 0(s0) ; CHECK-NOV-NEXT: sw a1, 4(s0) @@ -4707,16 +4707,16 @@ define <4 x i16> @ustest_f32i16_mm(<4 x float> %x) { ; CHECK-NOV-NEXT: mv a5, a3 ; CHECK-NOV-NEXT: .LBB41_5: # %entry ; CHECK-NOV-NEXT: sgtz a3, a5 -; CHECK-NOV-NEXT: negw a3, a3 +; CHECK-NOV-NEXT: neg a3, a3 ; CHECK-NOV-NEXT: and a3, a3, a5 ; CHECK-NOV-NEXT: sgtz a5, a4 -; CHECK-NOV-NEXT: negw a5, a5 +; CHECK-NOV-NEXT: neg a5, a5 ; CHECK-NOV-NEXT: and a4, a5, a4 ; CHECK-NOV-NEXT: sgtz a5, a2 -; CHECK-NOV-NEXT: negw a5, a5 +; CHECK-NOV-NEXT: neg a5, a5 ; CHECK-NOV-NEXT: and a2, a5, a2 ; CHECK-NOV-NEXT: sgtz a5, a1 -; CHECK-NOV-NEXT: negw a5, a5 +; CHECK-NOV-NEXT: neg a5, a5 ; CHECK-NOV-NEXT: and a1, a5, a1 ; CHECK-NOV-NEXT: sh a3, 0(a0) ; CHECK-NOV-NEXT: sh a4, 2(a0) @@ -5572,28 +5572,28 @@ define <8 x i16> @ustest_f16i16_mm(<8 x half> %x) { ; CHECK-NOV-NEXT: mv a7, a3 ; CHECK-NOV-NEXT: .LBB44_9: # %entry ; CHECK-NOV-NEXT: sgtz a3, a7 -; CHECK-NOV-NEXT: negw a3, a3 +; CHECK-NOV-NEXT: neg a3, a3 ; CHECK-NOV-NEXT: and a3, a3, a7 ; CHECK-NOV-NEXT: sgtz a7, a6 -; CHECK-NOV-NEXT: negw a7, a7 +; CHECK-NOV-NEXT: neg a7, a7 ; CHECK-NOV-NEXT: and a6, a7, a6 ; CHECK-NOV-NEXT: sgtz a7, a5 -; CHECK-NOV-NEXT: negw a7, a7 +; CHECK-NOV-NEXT: neg a7, a7 ; CHECK-NOV-NEXT: and a5, a7, a5 ; CHECK-NOV-NEXT: sgtz a7, a4 -; CHECK-NOV-NEXT: negw a7, a7 +; CHECK-NOV-NEXT: neg a7, a7 ; CHECK-NOV-NEXT: and a4, a7, a4 ; CHECK-NOV-NEXT: sgtz a7, a2 -; CHECK-NOV-NEXT: negw a7, a7 +; CHECK-NOV-NEXT: neg a7, a7 ; CHECK-NOV-NEXT: and a2, a7, a2 ; CHECK-NOV-NEXT: sgtz a7, a1 -; CHECK-NOV-NEXT: negw a7, a7 +; CHECK-NOV-NEXT: neg a7, a7 ; CHECK-NOV-NEXT: and a1, a7, a1 ; CHECK-NOV-NEXT: sgtz a7, s1 -; CHECK-NOV-NEXT: negw a7, a7 +; CHECK-NOV-NEXT: neg a7, a7 ; CHECK-NOV-NEXT: and a7, a7, s1 ; CHECK-NOV-NEXT: sgtz t0, a0 -; CHECK-NOV-NEXT: negw t0, t0 +; CHECK-NOV-NEXT: neg t0, t0 ; CHECK-NOV-NEXT: and a0, t0, a0 ; CHECK-NOV-NEXT: sh a2, 8(s0) ; CHECK-NOV-NEXT: sh a1, 10(s0) diff --git a/llvm/test/CodeGen/RISCV/rvv/known-never-zero.ll b/llvm/test/CodeGen/RISCV/rvv/known-never-zero.ll index 4d9a6aeaad2ef..749b2041aa63d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/known-never-zero.ll +++ b/llvm/test/CodeGen/RISCV/rvv/known-never-zero.ll @@ -11,7 +11,7 @@ define i32 @vscale_known_nonzero() { ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 -; CHECK-NEXT: negw a1, a0 +; CHECK-NEXT: neg a1, a0 ; CHECK-NEXT: and a0, a0, a1 ; CHECK-NEXT: slli a1, a0, 6 ; CHECK-NEXT: slli a2, a0, 8 @@ -19,16 +19,16 @@ define i32 @vscale_known_nonzero() { ; CHECK-NEXT: slli a4, a0, 12 ; CHECK-NEXT: add a1, a1, a2 ; CHECK-NEXT: slli a2, a0, 16 -; CHECK-NEXT: subw a3, a3, a4 +; CHECK-NEXT: sub a3, a3, a4 ; CHECK-NEXT: slli a4, a0, 18 -; CHECK-NEXT: subw a2, a2, a4 +; CHECK-NEXT: sub a2, a2, a4 ; CHECK-NEXT: slli a4, a0, 4 -; CHECK-NEXT: subw a4, a0, a4 +; CHECK-NEXT: sub a4, a0, a4 ; CHECK-NEXT: add a1, a4, a1 ; CHECK-NEXT: slli a4, a0, 14 -; CHECK-NEXT: subw a3, a3, a4 +; CHECK-NEXT: sub a3, a3, a4 ; CHECK-NEXT: slli a4, a0, 23 -; CHECK-NEXT: subw a2, a2, a4 +; CHECK-NEXT: sub a2, a2, a4 ; CHECK-NEXT: slli a0, a0, 27 ; CHECK-NEXT: add a1, a1, a3 ; CHECK-NEXT: add a0, a2, a0 diff 
index c216fb65a6a5b..346e40ab0afe5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
@@ -549,7 +549,7 @@ define void @sink_splat_rsub_scalable(ptr nocapture %a, i32 signext %x) {
 ; CHECK-NEXT: .LBB10_6: # %for.body
 ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT: lw a3, 0(a2)
-; CHECK-NEXT: subw a3, a1, a3
+; CHECK-NEXT: sub a3, a1, a3
 ; CHECK-NEXT: sw a3, 0(a2)
 ; CHECK-NEXT: addi a2, a2, 4
 ; CHECK-NEXT: bne a2, a0, .LBB10_6
diff --git a/llvm/test/CodeGen/RISCV/rvv/vandn-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vandn-sdnode.ll
index 66e114c938c06..f295bd8d74df3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vandn-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vandn-sdnode.ll
@@ -2300,7 +2300,7 @@ define void @vand_vx_loop_hoisted_not(ptr %a, i32 noundef signext %mask) {
 ; CHECK-RV64-NEXT: j .LBB98_5
 ; CHECK-RV64-NEXT: .LBB98_2: # %vector.ph
 ; CHECK-RV64-NEXT: srli a3, a4, 1
-; CHECK-RV64-NEXT: negw a2, a3
+; CHECK-RV64-NEXT: neg a2, a3
 ; CHECK-RV64-NEXT: andi a2, a2, 256
 ; CHECK-RV64-NEXT: slli a4, a4, 1
 ; CHECK-RV64-NEXT: mv a5, a0
@@ -2393,7 +2393,7 @@ define void @vand_vx_loop_hoisted_not(ptr %a, i32 noundef signext %mask) {
 ; CHECK-ZVKB-NOZBB64-NEXT: j .LBB98_5
 ; CHECK-ZVKB-NOZBB64-NEXT: .LBB98_2: # %vector.ph
 ; CHECK-ZVKB-NOZBB64-NEXT: srli a3, a4, 1
-; CHECK-ZVKB-NOZBB64-NEXT: negw a2, a3
+; CHECK-ZVKB-NOZBB64-NEXT: neg a2, a3
 ; CHECK-ZVKB-NOZBB64-NEXT: andi a2, a2, 256
 ; CHECK-ZVKB-NOZBB64-NEXT: slli a4, a4, 1
 ; CHECK-ZVKB-NOZBB64-NEXT: mv a5, a0
@@ -2485,7 +2485,7 @@ define void @vand_vx_loop_hoisted_not(ptr %a, i32 noundef signext %mask) {
 ; CHECK-ZVKB-ZBB64-NEXT: j .LBB98_5
 ; CHECK-ZVKB-ZBB64-NEXT: .LBB98_2: # %vector.ph
 ; CHECK-ZVKB-ZBB64-NEXT: srli a3, a4, 1
-; CHECK-ZVKB-ZBB64-NEXT: negw a2, a3
+; CHECK-ZVKB-ZBB64-NEXT: neg a2, a3
 ; CHECK-ZVKB-ZBB64-NEXT: andi a2, a2, 256
 ; CHECK-ZVKB-ZBB64-NEXT: slli a4, a4, 1
 ; CHECK-ZVKB-ZBB64-NEXT: mv a5, a0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vec3-setcc-crash.ll b/llvm/test/CodeGen/RISCV/rvv/vec3-setcc-crash.ll
index 3740737ba2989..d0b184bd853ee 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vec3-setcc-crash.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vec3-setcc-crash.ll
@@ -50,9 +50,9 @@ define void @vec3_setcc_crash(ptr %in, ptr %out) {
 ; RV64-NEXT: sgtz a5, a5
 ; RV64-NEXT: sgtz a4, a4
 ; RV64-NEXT: sgtz a3, a3
-; RV64-NEXT: negw a3, a3
-; RV64-NEXT: negw a4, a4
-; RV64-NEXT: negw a5, a5
+; RV64-NEXT: neg a3, a3
+; RV64-NEXT: neg a4, a4
+; RV64-NEXT: neg a5, a5
 ; RV64-NEXT: and a3, a3, a6
 ; RV64-NEXT: and a0, a4, a0
 ; RV64-NEXT: and a2, a5, a2
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrol-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vrol-sdnode.ll
index 25a226e60e715..eb129da2697b6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrol-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrol-sdnode.ll
@@ -959,7 +959,7 @@ define <vscale x 1 x i64> @vrol_vx_nxv1i64(<vscale x 1 x i64> %a, i64 %b) {
 ; CHECK-RV64-LABEL: vrol_vx_nxv1i64:
 ; CHECK-RV64: # %bb.0:
 ; CHECK-RV64-NEXT: andi a1, a0, 63
-; CHECK-RV64-NEXT: negw a0, a0
+; CHECK-RV64-NEXT: neg a0, a0
 ; CHECK-RV64-NEXT: vsetvli a2, zero, e64, m1, ta, ma
 ; CHECK-RV64-NEXT: vsll.vx v9, v8, a1
 ; CHECK-RV64-NEXT: andi a0, a0, 63
@@ -1022,7 +1022,7 @@ define <vscale x 2 x i64> @vrol_vx_nxv2i64(<vscale x 2 x i64> %a, i64 %b) {
 ; CHECK-RV64-LABEL: vrol_vx_nxv2i64:
 ; CHECK-RV64: # %bb.0:
 ; CHECK-RV64-NEXT: andi a1, a0, 63
-; CHECK-RV64-NEXT: negw a0, a0
+; CHECK-RV64-NEXT: neg a0, a0
 ; CHECK-RV64-NEXT: vsetvli a2, zero, e64, m2, ta, ma
 ; CHECK-RV64-NEXT: vsll.vx v10, v8, a1
 ; CHECK-RV64-NEXT: andi a0, a0, 63
@@ -1085,7 +1085,7 @@ define <vscale x 4 x i64> @vrol_vx_nxv4i64(<vscale x 4 x i64> %a, i64 %b) {
 ; CHECK-RV64-LABEL: vrol_vx_nxv4i64:
 ; CHECK-RV64: # %bb.0:
 ; CHECK-RV64-NEXT: andi a1, a0, 63
-; CHECK-RV64-NEXT: negw a0, a0
+; CHECK-RV64-NEXT: neg a0, a0
 ; CHECK-RV64-NEXT: vsetvli a2, zero, e64, m4, ta, ma
 ; CHECK-RV64-NEXT: vsll.vx v12, v8, a1
 ; CHECK-RV64-NEXT: andi a0, a0, 63
@@ -1148,7 +1148,7 @@ define <vscale x 8 x i64> @vrol_vx_nxv8i64(<vscale x 8 x i64> %a, i64 %b) {
 ; CHECK-RV64-LABEL: vrol_vx_nxv8i64:
 ; CHECK-RV64: # %bb.0:
 ; CHECK-RV64-NEXT: andi a1, a0, 63
-; CHECK-RV64-NEXT: negw a0, a0
+; CHECK-RV64-NEXT: neg a0, a0
 ; CHECK-RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
 ; CHECK-RV64-NEXT: vsll.vx v16, v8, a1
 ; CHECK-RV64-NEXT: andi a0, a0, 63
diff --git a/llvm/test/CodeGen/RISCV/rvv/vror-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vror-sdnode.ll
index 9e63b613ab70b..97524ac61b96e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vror-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vror-sdnode.ll
@@ -1626,7 +1626,7 @@ define <vscale x 1 x i64> @vror_vx_nxv1i64(<vscale x 1 x i64> %a, i64 %b) {
 ; CHECK-RV64-LABEL: vror_vx_nxv1i64:
 ; CHECK-RV64: # %bb.0:
 ; CHECK-RV64-NEXT: andi a1, a0, 63
-; CHECK-RV64-NEXT: negw a0, a0
+; CHECK-RV64-NEXT: neg a0, a0
 ; CHECK-RV64-NEXT: vsetvli a2, zero, e64, m1, ta, ma
 ; CHECK-RV64-NEXT: vsrl.vx v9, v8, a1
 ; CHECK-RV64-NEXT: andi a0, a0, 63
@@ -1728,7 +1728,7 @@ define <vscale x 2 x i64> @vror_vx_nxv2i64(<vscale x 2 x i64> %a, i64 %b) {
 ; CHECK-RV64-LABEL: vror_vx_nxv2i64:
 ; CHECK-RV64: # %bb.0:
 ; CHECK-RV64-NEXT: andi a1, a0, 63
-; CHECK-RV64-NEXT: negw a0, a0
+; CHECK-RV64-NEXT: neg a0, a0
 ; CHECK-RV64-NEXT: vsetvli a2, zero, e64, m2, ta, ma
 ; CHECK-RV64-NEXT: vsrl.vx v10, v8, a1
 ; CHECK-RV64-NEXT: andi a0, a0, 63
@@ -1830,7 +1830,7 @@ define <vscale x 4 x i64> @vror_vx_nxv4i64(<vscale x 4 x i64> %a, i64 %b) {
 ; CHECK-RV64-LABEL: vror_vx_nxv4i64:
 ; CHECK-RV64: # %bb.0:
 ; CHECK-RV64-NEXT: andi a1, a0, 63
-; CHECK-RV64-NEXT: negw a0, a0
+; CHECK-RV64-NEXT: neg a0, a0
 ; CHECK-RV64-NEXT: vsetvli a2, zero, e64, m4, ta, ma
 ; CHECK-RV64-NEXT: vsrl.vx v12, v8, a1
 ; CHECK-RV64-NEXT: andi a0, a0, 63
@@ -1932,7 +1932,7 @@ define <vscale x 8 x i64> @vror_vx_nxv8i64(<vscale x 8 x i64> %a, i64 %b) {
 ; CHECK-RV64-LABEL: vror_vx_nxv8i64:
 ; CHECK-RV64: # %bb.0:
 ; CHECK-RV64-NEXT: andi a1, a0, 63
-; CHECK-RV64-NEXT: negw a0, a0
+; CHECK-RV64-NEXT: neg a0, a0
 ; CHECK-RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
 ; CHECK-RV64-NEXT: vsrl.vx v16, v8, a1
 ; CHECK-RV64-NEXT: andi a0, a0, 63
diff --git a/llvm/test/CodeGen/RISCV/rvv/vscale-power-of-two.ll b/llvm/test/CodeGen/RISCV/rvv/vscale-power-of-two.ll
index 8eef133d0e76c..4442f97b8fe76 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vscale-power-of-two.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vscale-power-of-two.ll
@@ -77,7 +77,7 @@ define i64 @con1024_minus_rem() {
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: srli a0, a0, 3
-; CHECK-NEXT: negw a0, a0
+; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: andi a0, a0, 1024
 ; CHECK-NEXT: ret
 %vscale = call i64 @llvm.vscale.i64()
diff --git a/llvm/test/CodeGen/RISCV/select.ll b/llvm/test/CodeGen/RISCV/select.ll
index 0ea80bf592999..2e1784d369680 100644
--- a/llvm/test/CodeGen/RISCV/select.ll
+++ b/llvm/test/CodeGen/RISCV/select.ll
@@ -647,7 +647,7 @@ define i32 @select_add_1(i1 zeroext %cond, i32 %a, i32 %b) {
 ;
 ; RV64IM-LABEL: select_add_1:
 ; RV64IM: # %bb.0: # %entry
-; RV64IM-NEXT: negw a0, a0
+; RV64IM-NEXT: neg a0, a0
 ; RV64IM-NEXT: and a0, a0, a1
 ; RV64IM-NEXT: addw a0, a2, a0
 ; RV64IM-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/sextw-removal.ll b/llvm/test/CodeGen/RISCV/sextw-removal.ll
index b128abb6b5bdd..b155feab9b4d9 100644
--- a/llvm/test/CodeGen/RISCV/sextw-removal.ll
+++ b/llvm/test/CodeGen/RISCV/sextw-removal.ll
@@ -1048,21 +1048,21 @@ define signext i32 @bug(i32 signext %x) {
 ; CHECK-NEXT: srliw a2, a0, 24
 ; CHECK-NEXT: seqz a2, a2
 ; CHECK-NEXT: slli a3, a2, 3
-; CHECK-NEXT: negw a2, a2
+; CHECK-NEXT: neg a2, a2
 ; CHECK-NEXT: sllw a0, a0, a3
 ; CHECK-NEXT: andi a2, a2, -8
 ; CHECK-NEXT: add a1, a1, a2
 ; CHECK-NEXT: srliw a2, a0, 28
 ; CHECK-NEXT: seqz a2, a2
 ; CHECK-NEXT: slli a3, a2, 2
-; CHECK-NEXT: negw a2, a2
+; CHECK-NEXT: neg a2, a2
 ; CHECK-NEXT: sllw a0, a0, a3
 ; CHECK-NEXT: andi a2, a2, -4
 ; CHECK-NEXT: add a1, a1, a2
 ; CHECK-NEXT: srliw a2, a0, 30
 ; CHECK-NEXT: seqz a2, a2
 ; CHECK-NEXT: slli a3, a2, 1
-; CHECK-NEXT: negw a2, a2
+; CHECK-NEXT: neg a2, a2
 ; CHECK-NEXT: sllw a0, a0, a3
 ; CHECK-NEXT: andi a2, a2, -2
 ; CHECK-NEXT: add a1, a1, a2
@@ -1090,21 +1090,21 @@ define signext i32 @bug(i32 signext %x) {
 ; NOREMOVAL-NEXT: srliw a2, a0, 24
 ; NOREMOVAL-NEXT: seqz a2, a2
 ; NOREMOVAL-NEXT: slli a3, a2, 3
-; NOREMOVAL-NEXT: negw a2, a2
+; NOREMOVAL-NEXT: neg a2, a2
 ; NOREMOVAL-NEXT: sllw a0, a0, a3
 ; NOREMOVAL-NEXT: andi a2, a2, -8
 ; NOREMOVAL-NEXT: add a1, a1, a2
 ; NOREMOVAL-NEXT: srliw a2, a0, 28
 ; NOREMOVAL-NEXT: seqz a2, a2
 ; NOREMOVAL-NEXT: slli a3, a2, 2
-; NOREMOVAL-NEXT: negw a2, a2
+; NOREMOVAL-NEXT: neg a2, a2
 ; NOREMOVAL-NEXT: sllw a0, a0, a3
 ; NOREMOVAL-NEXT: andi a2, a2, -4
 ; NOREMOVAL-NEXT: add a1, a1, a2
 ; NOREMOVAL-NEXT: srliw a2, a0, 30
 ; NOREMOVAL-NEXT: seqz a2, a2
 ; NOREMOVAL-NEXT: slli a3, a2, 1
-; NOREMOVAL-NEXT: negw a2, a2
+; NOREMOVAL-NEXT: neg a2, a2
 ; NOREMOVAL-NEXT: sllw a0, a0, a3
 ; NOREMOVAL-NEXT: andi a2, a2, -2
 ; NOREMOVAL-NEXT: add a1, a1, a2
diff --git a/llvm/test/CodeGen/RISCV/shifts.ll b/llvm/test/CodeGen/RISCV/shifts.ll
index 7ca1ee1cba2f8..1ca23d72b107b 100644
--- a/llvm/test/CodeGen/RISCV/shifts.ll
+++ b/llvm/test/CodeGen/RISCV/shifts.ll
@@ -383,7 +383,7 @@ define i64 @fshr64_minsize(i64 %a, i64 %b) minsize nounwind {
 ; RV64I-LABEL: fshr64_minsize:
 ; RV64I: # %bb.0:
 ; RV64I-NEXT: srl a2, a0, a1
-; RV64I-NEXT: negw a1, a1
+; RV64I-NEXT: neg a1, a1
 ; RV64I-NEXT: sll a0, a0, a1
 ; RV64I-NEXT: or a0, a2, a0
 ; RV64I-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/shl-cttz.ll b/llvm/test/CodeGen/RISCV/shl-cttz.ll
index 99dc4f816d669..e44d247b7d56b 100644
--- a/llvm/test/CodeGen/RISCV/shl-cttz.ll
+++ b/llvm/test/CodeGen/RISCV/shl-cttz.ll
@@ -40,7 +40,7 @@ define i8 @shl_cttz_i8(i8 %x, i8 %y) {
 ; RV64I-NEXT: and a1, a1, a2
 ; RV64I-NEXT: srli a2, a1, 1
 ; RV64I-NEXT: andi a2, a2, 85
-; RV64I-NEXT: subw a1, a1, a2
+; RV64I-NEXT: sub a1, a1, a2
 ; RV64I-NEXT: andi a2, a1, 51
 ; RV64I-NEXT: srli a1, a1, 2
 ; RV64I-NEXT: andi a1, a1, 51
@@ -96,7 +96,7 @@ define i8 @shl_cttz_constant_i8(i8 %y) {
 ; RV64I-NEXT: and a0, a0, a1
 ; RV64I-NEXT: srli a1, a0, 1
 ; RV64I-NEXT: andi a1, a1, 85
-; RV64I-NEXT: subw a0, a0, a1
+; RV64I-NEXT: sub a0, a0, a1
 ; RV64I-NEXT: andi a1, a0, 51
 ; RV64I-NEXT: srli a0, a0, 2
 ; RV64I-NEXT: andi a0, a0, 51
@@ -276,7 +276,7 @@ define i32 @shl_cttz_i32(i32 %x, i32 %y) {
 ;
 ; RV64I-LABEL: shl_cttz_i32:
 ; RV64I: # %bb.0: # %entry
-; RV64I-NEXT: negw a2, a1
+; RV64I-NEXT: neg a2, a1
 ; RV64I-NEXT: and a1, a1, a2
 ; RV64I-NEXT: lui a2, 30667
 ; RV64I-NEXT: addi a2, a2, 1329
@@ -333,7 +333,7 @@ define i32 @shl_cttz_i32_zero_is_defined(i32 %x, i32 %y) {
 ; RV64I-NEXT: sext.w a2, a1
 ; RV64I-NEXT: beqz a2, .LBB5_2
 ; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: negw a2, a1
+; RV64I-NEXT: neg a2, a1
 ; RV64I-NEXT: and a1, a1, a2
 ; RV64I-NEXT: lui a2, 30667
 ; RV64I-NEXT: addi a2, a2, 1329
@@ -378,7 +378,7 @@ define i32 @shl_cttz_constant_i32(i32 %y) {
 ;
 ; RV64I-LABEL: shl_cttz_constant_i32:
 ; RV64I: # %bb.0: # %entry
-; RV64I-NEXT: negw a1, a0
+; RV64I-NEXT: neg a1, a0
 ; RV64I-NEXT: and a0, a0, a1
 ; RV64I-NEXT: lui a1, 30667
 ; RV64I-NEXT: addi a1, a1, 1329
@@ -474,7 +474,7 @@ define i32 @shl_cttz_multiuse_i32(i32 %x, i32 %y) {
 ; RV64I-NEXT: .cfi_offset ra, -8
 ; RV64I-NEXT: .cfi_offset s0, -16
 ; RV64I-NEXT: .cfi_offset s1, -24
-; RV64I-NEXT: negw a2, a1
+; RV64I-NEXT: neg a2, a1
 ; RV64I-NEXT: and a1, a1, a2
 ; RV64I-NEXT: lui a2, 30667
 ; RV64I-NEXT: addi a2, a2, 1329
diff --git a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
index 93fb230f51ce1..bc23388315de7 100644
--- a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
@@ -50,7 +50,7 @@ define i1 @test_srem_odd(i29 %X) nounwind {
 ; RV64-NEXT: add a2, a2, a4
 ; RV64-NEXT: slli a4, a0, 2
 ; RV64-NEXT: add a4, a0, a4
-; RV64-NEXT: subw a1, a1, a4
+; RV64-NEXT: sub a1, a1, a4
 ; RV64-NEXT: slli a4, a0, 17
 ; RV64-NEXT: add a3, a3, a4
 ; RV64-NEXT: slli a0, a0, 23
@@ -59,8 +59,8 @@ define i1 @test_srem_odd(i29 %X) nounwind {
 ; RV64-NEXT: add a1, a1, a3
 ; RV64-NEXT: lui a3, 1324
 ; RV64-NEXT: addi a2, a2, -83
-; RV64-NEXT: subw a0, a0, a2
-; RV64-NEXT: subw a1, a1, a0
+; RV64-NEXT: sub a0, a0, a2
+; RV64-NEXT: sub a1, a1, a0
 ; RV64-NEXT: slli a1, a1, 35
 ; RV64-NEXT: srli a1, a1, 35
 ; RV64-NEXT: addi a0, a3, -165
@@ -189,7 +189,7 @@ define i1 @test_srem_even(i4 %X) nounwind {
 ; RV64M-NEXT: add a1, a1, a2
 ; RV64M-NEXT: slli a2, a1, 3
 ; RV64M-NEXT: slli a1, a1, 1
-; RV64M-NEXT: subw a1, a1, a2
+; RV64M-NEXT: sub a1, a1, a2
 ; RV64M-NEXT: add a0, a0, a1
 ; RV64M-NEXT: andi a0, a0, 15
 ; RV64M-NEXT: addi a0, a0, -1
@@ -225,7 +225,7 @@ define i1 @test_srem_even(i4 %X) nounwind {
 ; RV64MV-NEXT: add a1, a1, a2
 ; RV64MV-NEXT: slli a2, a1, 3
 ; RV64MV-NEXT: slli a1, a1, 1
-; RV64MV-NEXT: subw a1, a1, a2
+; RV64MV-NEXT: sub a1, a1, a2
 ; RV64MV-NEXT: add a0, a0, a1
 ; RV64MV-NEXT: andi a0, a0, 15
 ; RV64MV-NEXT: addi a0, a0, -1
@@ -256,7 +256,7 @@ define i1 @test_srem_pow2_setne(i6 %X) nounwind {
 ; RV64-NEXT: srli a1, a1, 62
 ; RV64-NEXT: add a1, a0, a1
 ; RV64-NEXT: andi a1, a1, 60
-; RV64-NEXT: subw a0, a0, a1
+; RV64-NEXT: sub a0, a0, a1
 ; RV64-NEXT: andi a0, a0, 63
 ; RV64-NEXT: snez a0, a0
 ; RV64-NEXT: ret
@@ -280,7 +280,7 @@ define i1 @test_srem_pow2_setne(i6 %X) nounwind {
 ; RV64M-NEXT: srli a1, a1, 62
 ; RV64M-NEXT: add a1, a0, a1
 ; RV64M-NEXT: andi a1, a1, 60
-; RV64M-NEXT: subw a0, a0, a1
+; RV64M-NEXT: sub a0, a0, a1
 ; RV64M-NEXT: andi a0, a0, 63
 ; RV64M-NEXT: snez a0, a0
 ; RV64M-NEXT: ret
@@ -304,7 +304,7 @@ define i1 @test_srem_pow2_setne(i6 %X) nounwind {
 ; RV64MV-NEXT: srli a1, a1, 62
 ; RV64MV-NEXT: add a1, a0, a1
 ; RV64MV-NEXT: andi a1, a1, 60
-; RV64MV-NEXT: subw a0, a0, a1
+; RV64MV-NEXT: sub a0, a0, a1
 ; RV64MV-NEXT: andi a0, a0, 63
 ; RV64MV-NEXT: snez a0, a0
 ; RV64MV-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll b/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll
index 30ffaf6c7ceca..5129cccdac06a 100644
--- a/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll
+++ b/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll
@@ -183,10 +183,10 @@ define <4 x i16> @fold_srem_vec_1(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT: mul a5, a5, t1
 ; RV64IM-NEXT: li t1, -124
 ; RV64IM-NEXT: mul a6, a6, t1
-; RV64IM-NEXT: subw a4, a4, a7
-; RV64IM-NEXT: subw a1, a1, t0
-; RV64IM-NEXT: subw a3, a3, a5
-; RV64IM-NEXT: subw a2, a2, a6
+; RV64IM-NEXT: sub a4, a4, a7
+; RV64IM-NEXT: sub a1, a1, t0
+; RV64IM-NEXT: sub a3, a3, a5
+; RV64IM-NEXT: sub a2, a2, a6
 ; RV64IM-NEXT: sh a3, 0(a0)
 ; RV64IM-NEXT: sh a2, 2(a0)
 ; RV64IM-NEXT: sh a4, 4(a0)
@@ -357,10 +357,10 @@ define <4 x i16> @fold_srem_vec_2(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT: mul a7, a7, t1
 ; RV64IM-NEXT: mul t0, t0, t1
 ; RV64IM-NEXT: mul a2, a2, t1
-; RV64IM-NEXT: subw a3, a3, a6
-; RV64IM-NEXT: subw a4, a4, a7
-; RV64IM-NEXT: subw a5, a5, t0
-; RV64IM-NEXT: subw a1, a1, a2
+; RV64IM-NEXT: sub a3, a3, a6
+; RV64IM-NEXT: sub a4, a4, a7
+; RV64IM-NEXT: sub a5, a5, t0
+; RV64IM-NEXT: sub a1, a1, a2
 ; RV64IM-NEXT: sh a3, 0(a0)
 ; RV64IM-NEXT: sh a4, 2(a0)
 ; RV64IM-NEXT: sh a5, 4(a0)
@@ -597,10 +597,10 @@ define <4 x i16> @combine_srem_sdiv(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT: add a1, a1, t1
 ; RV64IM-NEXT: add a3, a3, t0
 ; RV64IM-NEXT: add a4, a4, a7
-; RV64IM-NEXT: subw a2, a2, a6
-; RV64IM-NEXT: subw a1, a1, t4
-; RV64IM-NEXT: subw a3, a3, t3
-; RV64IM-NEXT: subw a4, a4, t2
+; RV64IM-NEXT: sub a2, a2, a6
+; RV64IM-NEXT: sub a1, a1, t4
+; RV64IM-NEXT: sub a3, a3, t3
+; RV64IM-NEXT: sub a4, a4, t2
 ; RV64IM-NEXT: sh a2, 0(a0)
 ; RV64IM-NEXT: sh a1, 2(a0)
 ; RV64IM-NEXT: sh a3, 4(a0)
@@ -703,15 +703,15 @@ define <4 x i16> @dont_fold_srem_power_of_two(<4 x i16> %x) nounwind {
 ; RV64I-NEXT: srli a1, a2, 58
 ; RV64I-NEXT: add a1, a2, a1
 ; RV64I-NEXT: andi a1, a1, -64
-; RV64I-NEXT: subw s1, a2, a1
+; RV64I-NEXT: sub s1, a2, a1
 ; RV64I-NEXT: srli a1, a3, 59
 ; RV64I-NEXT: add a1, a3, a1
 ; RV64I-NEXT: andi a1, a1, -32
-; RV64I-NEXT: subw s2, a3, a1
+; RV64I-NEXT: sub s2, a3, a1
 ; RV64I-NEXT: srli a1, a4, 61
 ; RV64I-NEXT: add a1, a4, a1
 ; RV64I-NEXT: andi a1, a1, -8
-; RV64I-NEXT: subw s3, a4, a1
+; RV64I-NEXT: sub s3, a4, a1
 ; RV64I-NEXT: li a1, 95
 ; RV64I-NEXT: call __moddi3
 ; RV64I-NEXT: sh s1, 0(s0)
@@ -737,23 +737,23 @@ define <4 x i16> @dont_fold_srem_power_of_two(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT: srli a6, a2, 58
 ; RV64IM-NEXT: add a6, a2, a6
 ; RV64IM-NEXT: andi a6, a6, -64
-; RV64IM-NEXT: subw a2, a2, a6
+; RV64IM-NEXT: sub a2, a2, a6
 ; RV64IM-NEXT: srli a6, a3, 59
 ; RV64IM-NEXT: add a6, a3, a6
 ; RV64IM-NEXT: andi a6, a6, -32
-; RV64IM-NEXT: subw a3, a3, a6
+; RV64IM-NEXT: sub a3, a3, a6
 ; RV64IM-NEXT: srli a6, a4, 61
 ; RV64IM-NEXT: mulh a5, a1, a5
 ; RV64IM-NEXT: add a6, a4, a6
 ; RV64IM-NEXT: add a5, a5, a1
 ; RV64IM-NEXT: andi a6, a6, -8
-; RV64IM-NEXT: subw a4, a4, a6
+; RV64IM-NEXT: sub a4, a4, a6
 ; RV64IM-NEXT: srli a6, a5, 63
 ; RV64IM-NEXT: srli a5, a5, 6
 ; RV64IM-NEXT: add a5, a5, a6
 ; RV64IM-NEXT: li a6, 95
 ; RV64IM-NEXT: mul a5, a5, a6
-; RV64IM-NEXT: subw a1, a1, a5
+; RV64IM-NEXT: sub a1, a1, a5
 ; RV64IM-NEXT: sh a2, 0(a0)
 ; RV64IM-NEXT: sh a3, 2(a0)
 ; RV64IM-NEXT: sh a4, 4(a0)
@@ -909,9 +909,9 @@ define <4 x i16> @dont_fold_srem_one(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT: mul a6, a6, a7
 ; RV64IM-NEXT: li a7, 23
 ; RV64IM-NEXT: mul a4, a4, a7
-; RV64IM-NEXT: subw a2, a2, a5
-; RV64IM-NEXT: subw a1, a1, a6
-; RV64IM-NEXT: subw a3, a3, a4
+; RV64IM-NEXT: sub a2, a2, a5
+; RV64IM-NEXT: sub a1, a1, a6
+; RV64IM-NEXT: sub a3, a3, a4
 ; RV64IM-NEXT: sh zero, 0(a0)
 ; RV64IM-NEXT: sh a2, 2(a0)
 ; RV64IM-NEXT: sh a3, 4(a0)
@@ -1011,7 +1011,7 @@ define <4 x i16> @dont_fold_urem_i16_smax(<4 x i16> %x) nounwind {
 ; RV64I-NEXT: add a1, a2, a1
 ; RV64I-NEXT: lui a3, 8
 ; RV64I-NEXT: and a1, a1, a3
-; RV64I-NEXT: subw s3, a2, a1
+; RV64I-NEXT: sub s3, a2, a1
 ; RV64I-NEXT: li a1, 23
 ; RV64I-NEXT: call __moddi3
 ; RV64I-NEXT: mv s2, a0
@@ -1050,7 +1050,7 @@ define <4 x i16> @dont_fold_urem_i16_smax(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT: add a5, a5, a7
 ; RV64IM-NEXT: mulh a4, a3, a4
 ; RV64IM-NEXT: add a4, a4, a3
-; RV64IM-NEXT: subw a2, a2, a6
+; RV64IM-NEXT: sub a2, a2, a6
 ; RV64IM-NEXT: srli a6, a4, 63
 ; RV64IM-NEXT: srli a4, a4, 4
 ; RV64IM-NEXT: add a4, a4, a6
@@ -1059,8 +1059,8 @@ define <4 x i16> @dont_fold_urem_i16_smax(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT: mul a5, a5, a6
 ; RV64IM-NEXT: li a6, 23
 ; RV64IM-NEXT: mul a4, a4, a6
-; RV64IM-NEXT: subw a1, a1, a5
-; RV64IM-NEXT: subw a3, a3, a4
+; RV64IM-NEXT: sub a1, a1, a5
+; RV64IM-NEXT: sub a3, a3, a4
 ; RV64IM-NEXT: sh zero, 0(a0)
 ; RV64IM-NEXT: sh a2, 2(a0)
 ; RV64IM-NEXT: sh a3, 4(a0)
diff --git a/llvm/test/CodeGen/RISCV/typepromotion-overflow.ll b/llvm/test/CodeGen/RISCV/typepromotion-overflow.ll
index 3007c3574cf78..0c13a1d8a46f3 100644
--- a/llvm/test/CodeGen/RISCV/typepromotion-overflow.ll
+++ b/llvm/test/CodeGen/RISCV/typepromotion-overflow.ll
@@ -26,7 +26,7 @@ define zeroext i16 @overflow_add(i16 zeroext %a, i16 zeroext %b) {
 define zeroext i16 @overflow_sub(i16 zeroext %a, i16 zeroext %b) {
 ; CHECK-LABEL: overflow_sub:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: subw a0, a0, a1
+; CHECK-NEXT: sub a0, a0, a1
 ; CHECK-NEXT: ori a0, a0, 1
 ; CHECK-NEXT: slli a0, a0, 48
 ; CHECK-NEXT: srli a0, a0, 48
diff --git a/llvm/test/CodeGen/RISCV/urem-lkk.ll b/llvm/test/CodeGen/RISCV/urem-lkk.ll
index af5121dfe180d..ee496123ba7b4 100644
--- a/llvm/test/CodeGen/RISCV/urem-lkk.ll
+++ b/llvm/test/CodeGen/RISCV/urem-lkk.ll
@@ -48,7 +48,7 @@ define i32 @fold_urem_positive_odd(i32 %x) nounwind {
 ; RV64IM-NEXT: slli a2, a2, 32
 ; RV64IM-NEXT: mulhu a1, a1, a2
 ; RV64IM-NEXT: srli a1, a1, 32
-; RV64IM-NEXT: subw a2, a0, a1
+; RV64IM-NEXT: sub a2, a0, a1
 ; RV64IM-NEXT: srliw a2, a2, 1
 ; RV64IM-NEXT: add a1, a2, a1
 ; RV64IM-NEXT: srli a1, a1, 6
@@ -174,7 +174,7 @@ define i32 @combine_urem_udiv(i32 %x) nounwind {
 ; RV64IM-NEXT: slli a2, a2, 32
 ; RV64IM-NEXT: mulhu a1, a1, a2
 ; RV64IM-NEXT: srli a1, a1, 32
-; RV64IM-NEXT: subw a2, a0, a1
+; RV64IM-NEXT: sub a2, a0, a1
 ; RV64IM-NEXT: srliw a2, a2, 1
 ; RV64IM-NEXT: add a1, a2, a1
 ; RV64IM-NEXT: li a2, 95
diff --git a/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
index d33c6662ceb5c..636fdfae68438 100644
--- a/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
@@ -31,11 +31,11 @@ define i1 @test_urem_odd(i13 %X) nounwind {
 ; RV64-NEXT: slli a1, a0, 4
 ; RV64-NEXT: slli a2, a0, 6
 ; RV64-NEXT: slli a3, a0, 8
-; RV64-NEXT: subw a1, a1, a2
+; RV64-NEXT: sub a1, a1, a2
 ; RV64-NEXT: slli a2, a0, 10
-; RV64-NEXT: subw a3, a3, a2
+; RV64-NEXT: sub a3, a3, a2
 ; RV64-NEXT: slli a2, a0, 2
-; RV64-NEXT: subw a2, a0, a2
+; RV64-NEXT: sub a2, a0, a2
 ; RV64-NEXT: slli a0, a0, 12
 ; RV64-NEXT: add a1, a2, a1
 ; RV64-NEXT: add a0, a3, a0
@@ -138,10 +138,10 @@ define i1 @test_urem_even(i27 %X) nounwind {
 ; RV64-NEXT: slli a4, a0, 18
 ; RV64-NEXT: add a3, a3, a4
 ; RV64-NEXT: slli a0, a0, 27
-; RV64-NEXT: subw a0, a0, a2
+; RV64-NEXT: sub a0, a0, a2
 ; RV64-NEXT: lui a2, 2341
 ; RV64-NEXT: add a1, a1, a3
-; RV64-NEXT: subw a0, a0, a1
+; RV64-NEXT: sub a0, a0, a1
 ; RV64-NEXT: slli a1, a0, 26
 ; RV64-NEXT: slli a0, a0, 37
 ; RV64-NEXT: srli a0, a0, 38
@@ -234,8 +234,8 @@ define i1 @test_urem_odd_setne(i4 %X) nounwind {
 ; RV64-LABEL: test_urem_odd_setne:
 ; RV64: # %bb.0:
 ; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: negw a0, a0
-; RV64-NEXT: subw a0, a0, a1
+; RV64-NEXT: neg a0, a0
+; RV64-NEXT: sub a0, a0, a1
 ; RV64-NEXT: andi a0, a0, 15
 ; RV64-NEXT: sltiu a0, a0, 4
 ; RV64-NEXT: xori a0, a0, 1
@@ -254,8 +254,8 @@ define i1 @test_urem_odd_setne(i4 %X) nounwind {
 ; RV64M-LABEL: test_urem_odd_setne:
 ; RV64M: # %bb.0:
 ; RV64M-NEXT: slli a1, a0, 1
-; RV64M-NEXT: negw a0, a0
-; RV64M-NEXT: subw a0, a0, a1
+; RV64M-NEXT: neg a0, a0
+; RV64M-NEXT: sub a0, a0, a1
 ; RV64M-NEXT: andi a0, a0, 15
 ; RV64M-NEXT: sltiu a0, a0, 4
 ; RV64M-NEXT: xori a0, a0, 1
@@ -274,8 +274,8 @@ define i1 @test_urem_odd_setne(i4 %X) nounwind {
 ; RV64MV-LABEL: test_urem_odd_setne:
 ; RV64MV: # %bb.0:
 ; RV64MV-NEXT: slli a1, a0, 1
-; RV64MV-NEXT: negw a0, a0
-; RV64MV-NEXT: subw a0, a0, a1
+; RV64MV-NEXT: neg a0, a0
+; RV64MV-NEXT: sub a0, a0, a1
 ; RV64MV-NEXT: andi a0, a0, 15
 ; RV64MV-NEXT: sltiu a0, a0, 4
 ; RV64MV-NEXT: xori a0, a0, 1
@@ -306,9 +306,9 @@ define i1 @test_urem_negative_odd(i9 %X) nounwind {
 ; RV64-NEXT: slli a1, a0, 2
 ; RV64-NEXT: slli a2, a0, 4
 ; RV64-NEXT: slli a3, a0, 6
-; RV64-NEXT: subw a1, a1, a0
-; RV64-NEXT: subw a2, a2, a3
-; RV64-NEXT: subw a1, a1, a2
+; RV64-NEXT: sub a1, a1, a0
+; RV64-NEXT: sub a2, a2, a3
+; RV64-NEXT: sub a1, a1, a2
 ; RV64-NEXT: slli a0, a0, 8
 ; RV64-NEXT: add a0, a1, a0
 ; RV64-NEXT: andi a0, a0, 511
@@ -437,7 +437,7 @@ define void @test_urem_vec(ptr %X) nounwind {
 ; RV64-NEXT: addi a2, a2, -2
 ; RV64-NEXT: add a1, a1, a4
 ; RV64-NEXT: add a5, a5, a6
-; RV64-NEXT: subw a4, t0, a7
+; RV64-NEXT: sub a4, t0, a7
 ; RV64-NEXT: slli a6, a3, 3
 ; RV64-NEXT: slli a7, a3, 6
 ; RV64-NEXT: slli t0, a3, 9
@@ -447,18 +447,18 @@ define void @test_urem_vec(ptr %X) nounwind {
 ; RV64-NEXT: slli a6, a2, 4
 ; RV64-NEXT: add a7, a7, t0
 ; RV64-NEXT: slli t0, a2, 6
-; RV64-NEXT: subw a6, a6, t0
+; RV64-NEXT: sub a6, a6, t0
 ; RV64-NEXT: slli t0, a2, 8
-; RV64-NEXT: subw a5, a5, a2
+; RV64-NEXT: sub a5, a5, a2
 ; RV64-NEXT: slli a2, a2, 10
-; RV64-NEXT: subw a2, t0, a2
-; RV64-NEXT: subw a4, a4, a1
+; RV64-NEXT: sub a2, t0, a2
+; RV64-NEXT: sub a4, a4, a1
 ; RV64-NEXT: add a3, a3, a7
-; RV64-NEXT: subw a1, a5, a6
+; RV64-NEXT: sub a1, a5, a6
 ; RV64-NEXT: slli a5, a4, 10
 ; RV64-NEXT: slli a4, a4, 53
-; RV64-NEXT: negw a3, a3
-; RV64-NEXT: subw a1, a1, a2
+; RV64-NEXT: neg a3, a3
+; RV64-NEXT: sub a1, a1, a2
 ; RV64-NEXT: srli a4, a4, 54
 ; RV64-NEXT: andi a2, a3, 2047
 ; RV64-NEXT: andi a1, a1, 2047
diff --git a/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll b/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll
index 3ef9f3f945108..5a3dfd118307d 100644
--- a/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll
+++ b/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll
@@ -157,10 +157,10 @@ define <4 x i16> @fold_urem_vec_1(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT: mul a7, a7, t1
 ; RV64IM-NEXT: slli t1, a5, 7
 ; RV64IM-NEXT: slli a5, a5, 2
-; RV64IM-NEXT: subw a5, a5, t1
-; RV64IM-NEXT: subw a2, a2, a6
-; RV64IM-NEXT: subw a4, a4, t0
-; RV64IM-NEXT: subw a1, a1, a7
+; RV64IM-NEXT: sub a5, a5, t1
+; RV64IM-NEXT: sub a2, a2, a6
+; RV64IM-NEXT: sub a4, a4, t0
+; RV64IM-NEXT: sub a1, a1, a7
 ; RV64IM-NEXT: add a3, a3, a5
 ; RV64IM-NEXT: sh a2, 0(a0)
 ; RV64IM-NEXT: sh a3, 2(a0)
@@ -300,10 +300,10 @@ define <4 x i16> @fold_urem_vec_2(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT: mul t0, t0, a6
 ; RV64IM-NEXT: mul t1, t1, a6
 ; RV64IM-NEXT: mul a2, a2, a6
-; RV64IM-NEXT: subw a3, a3, a7
-; RV64IM-NEXT: subw a4, a4, t0
-; RV64IM-NEXT: subw a5, a5, t1
-; RV64IM-NEXT: subw a1, a1, a2
+; RV64IM-NEXT: sub a3, a3, a7
+; RV64IM-NEXT: sub a4, a4, t0
+; RV64IM-NEXT: sub a5, a5, t1
+; RV64IM-NEXT: sub a1, a1, a2
 ; RV64IM-NEXT: sh a3, 0(a0)
 ; RV64IM-NEXT: sh a4, 2(a0)
 ; RV64IM-NEXT: sh a5, 4(a0)
@@ -508,10 +508,10 @@ define <4 x i16> @combine_urem_udiv(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT: add a1, a1, t1
 ; RV64IM-NEXT: add a3, a3, t0
 ; RV64IM-NEXT: add a4, a4, a7
-; RV64IM-NEXT: subw a2, a2, a6
-; RV64IM-NEXT: subw a1, a1, t4
-; RV64IM-NEXT: subw a3, a3, t3
-; RV64IM-NEXT: subw a4, a4, t2
+; RV64IM-NEXT: sub a2, a2, a6
+; RV64IM-NEXT: sub a1, a1, t4
+; RV64IM-NEXT: sub a3, a3, t3
+; RV64IM-NEXT: sub a4, a4, t2
 ; RV64IM-NEXT: sh a2, 0(a0)
 ; RV64IM-NEXT: sh a1, 2(a0)
 ; RV64IM-NEXT: sh a3, 4(a0)
@@ -622,7 +622,7 @@ define <4 x i16> @dont_fold_urem_power_of_two(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT: andi a4, a4, 7
 ; RV64IM-NEXT: mulhu a5, a1, a5
 ; RV64IM-NEXT: mul a5, a5, a6
-; RV64IM-NEXT: subw a1, a1, a5
+; RV64IM-NEXT: sub a1, a1, a5
 ; RV64IM-NEXT: sh a2, 0(a0)
 ; RV64IM-NEXT: sh a3, 2(a0)
 ; RV64IM-NEXT: sh a4, 4(a0)
@@ -757,9 +757,9 @@ define <4 x i16> @dont_fold_urem_one(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT: addi a7, a7, 1327
 ; RV64IM-NEXT: mulhu a5, a1, a5
 ; RV64IM-NEXT: mul a5, a5, a7
-; RV64IM-NEXT: subw a2, a2, a4
-; RV64IM-NEXT: subw a3, a3, a6
-; RV64IM-NEXT: subw a1, a1, a5
+; RV64IM-NEXT: sub a2, a2, a4
+; RV64IM-NEXT: sub a3, a3, a6
+; RV64IM-NEXT: sub a1, a1, a5
 ; RV64IM-NEXT: sh zero, 0(a0)
 ; RV64IM-NEXT: sh a2, 2(a0)
 ; RV64IM-NEXT: sh a3, 4(a0)
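
Note (illustrative aside, not part of the patch): every subw/sub and negw/neg change above is pure W-suffix stripping. On RV64, subw and negw compute the same low 32 bits as sub and neg; they differ only in sign-extending bit 31 into bits 63:32. So the two spellings are interchangeable whenever every user of the result reads only bits [31:0], as the zext.b, zext.h, andi, srliw, sh/sw, and slli+srli masking users in these tests do. A minimal standalone C++ check of that arithmetic fact (values chosen arbitrarily):

  #include <cassert>
  #include <cstdint>

  // The low 32 bits of a 64-bit subtraction equal the 32-bit subtraction of
  // the operands' low 32 bits; negation is the special case 0 - x. This is
  // what makes SUB a drop-in for SUBW (and NEG for NEGW) when all users of
  // the result consume only bits [31:0].
  int main() {
    const uint64_t x = 0x123456789abcdef0ULL;
    const uint64_t y = 0x0fedcba987654321ULL;

    // SUB, then keep only the low 32 bits.
    const uint32_t sub_low = static_cast<uint32_t>(x - y);
    // SUBW semantics as seen by a low-32-bit user (the sign extension into
    // bits 63:32 is never observed by such a user).
    const uint32_t subw_low = static_cast<uint32_t>(x) - static_cast<uint32_t>(y);
    assert(sub_low == subw_low);

    // Same equivalence for NEG vs. NEGW.
    const uint32_t neg_low = static_cast<uint32_t>(0 - x);
    const uint32_t negw_low = 0u - static_cast<uint32_t>(x);
    assert(neg_low == negw_low);
    return 0;
  }

The shift-amount cases rest on the same reasoning: sllw/srlw consume only the low 5 bits of the amount, and the vector rotates mask the negated amount with andi ..., 63 before vsll.vx/vsrl.vx, so neg and negw are indistinguishable there as well.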