; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefixes=CHECK,RV32I %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefixes=CHECK,RV64I %s

; (x urem 25) urem 5 folds to a single x urem 5, since 5 divides 25.
; RV32I lowers it to one tail call to __umodsi3; RV64I zero-extends the
; i32 argument (slli/srli by 32) and calls the 64-bit __umoddi3.
define i32 @fold_urem_constants(i32 %v0) nounwind {
; RV32I-LABEL: fold_urem_constants:
; RV32I:       # %bb.0:
; RV32I-NEXT:    li a1, 5
; RV32I-NEXT:    tail __umodsi3
;
; RV64I-LABEL: fold_urem_constants:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 32
; RV64I-NEXT:    li a1, 5
; RV64I-NEXT:    call __umoddi3
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %v1 = urem i32 %v0, 25
  %v2 = urem i32 %v1, 5
  ret i32 %v2
}
; (x urem 25) urem 3 must NOT fold to a single urem (3 does not divide 25),
; so both targets emit two libcalls. RV32I turns the second into a tail call
; after restoring ra and the stack.
define i32 @dont_fold_urem_constants(i32 %v0) nounwind {
; RV32I-LABEL: dont_fold_urem_constants:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a1, 25
; RV32I-NEXT:    call __umodsi3
; RV32I-NEXT:    li a1, 3
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    tail __umodsi3
;
; RV64I-LABEL: dont_fold_urem_constants:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 32
; RV64I-NEXT:    li a1, 25
; RV64I-NEXT:    call __umoddi3
; RV64I-NEXT:    li a1, 3
; RV64I-NEXT:    call __umoddi3
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %v1 = urem i32 %v0, 25
  %v2 = urem i32 %v1, 3
  ret i32 %v2
}
; Mixed urem-then-srem: the two operations don't fold into one, but since
; (x urem 25) is provably non-negative, the srem by 3 is lowered as an
; unsigned remainder (__umodsi3/__umoddi3) rather than __modsi3/__moddi3.
define i32 @dont_fold_urem_srem_mixed_constants(i32 %v0) nounwind {
; RV32I-LABEL: dont_fold_urem_srem_mixed_constants:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a1, 25
; RV32I-NEXT:    call __umodsi3
; RV32I-NEXT:    li a1, 3
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    tail __umodsi3
;
; RV64I-LABEL: dont_fold_urem_srem_mixed_constants:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 32
; RV64I-NEXT:    li a1, 25
; RV64I-NEXT:    call __umoddi3
; RV64I-NEXT:    li a1, 3
; RV64I-NEXT:    call __umoddi3
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %v1 = urem i32 %v0, 25
  %v2 = srem i32 %v1, 3
  ret i32 %v2
}
; Mixed srem-then-urem: the first remainder stays signed (__modsi3/__moddi3,
; with a sign-extend of the i32 argument on RV64I), while the urem by 3 is
; unsigned. On RV64I the possibly-negative srem result must be re-zero-extended
; (slli/srli by 32) before the unsigned __umoddi3 call.
define i32 @dont_fold_srem_urem_mixed_constants(i32 %v0) nounwind {
; RV32I-LABEL: dont_fold_srem_urem_mixed_constants:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a1, 25
; RV32I-NEXT:    call __modsi3
; RV32I-NEXT:    li a1, 3
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    tail __umodsi3
;
; RV64I-LABEL: dont_fold_srem_urem_mixed_constants:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    li a1, 25
; RV64I-NEXT:    call __moddi3
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 32
; RV64I-NEXT:    li a1, 3
; RV64I-NEXT:    call __umoddi3
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %v1 = srem i32 %v0, 25
  %v2 = urem i32 %v1, 3
  ret i32 %v2
}
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; CHECK: {{.*}}