| 1 | +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| 2 | +; RUN: llc -mtriple=riscv32 -global-isel -verify-machineinstrs < %s \ |
| 3 | +; RUN: | FileCheck %s -check-prefixes=CHECK,RV32I |
| 4 | +; RUN: llc -mtriple=riscv32 -global-isel -mattr=+zbb -verify-machineinstrs < %s \ |
| 5 | +; RUN: | FileCheck %s -check-prefixes=CHECK,RV32ZBB-ZBKB |
| 6 | +; RUN: llc -mtriple=riscv32 -global-isel -mattr=+zbkb -verify-machineinstrs < %s \ |
| 7 | +; RUN: | FileCheck %s -check-prefixes=CHECK,RV32ZBB-ZBKB |
| 8 | + |
| 9 | +define i32 @andn_i32(i32 %a, i32 %b) nounwind { |
| 10 | +; RV32I-LABEL: andn_i32: |
| 11 | +; RV32I: # %bb.0: |
| 12 | +; RV32I-NEXT: not a1, a1 |
| 13 | +; RV32I-NEXT: and a0, a1, a0 |
| 14 | +; RV32I-NEXT: ret |
| 15 | +; |
| 16 | +; RV32ZBB-ZBKB-LABEL: andn_i32: |
| 17 | +; RV32ZBB-ZBKB: # %bb.0: |
| 18 | +; RV32ZBB-ZBKB-NEXT: andn a0, a0, a1 |
| 19 | +; RV32ZBB-ZBKB-NEXT: ret |
| 20 | + %neg = xor i32 %b, -1 |
| 21 | + %and = and i32 %neg, %a |
| 22 | + ret i32 %and |
| 23 | +} |
| 24 | + |
| 25 | +define i64 @andn_i64(i64 %a, i64 %b) nounwind { |
| 26 | +; RV32I-LABEL: andn_i64: |
| 27 | +; RV32I: # %bb.0: |
| 28 | +; RV32I-NEXT: not a2, a2 |
| 29 | +; RV32I-NEXT: not a3, a3 |
| 30 | +; RV32I-NEXT: and a0, a2, a0 |
| 31 | +; RV32I-NEXT: and a1, a3, a1 |
| 32 | +; RV32I-NEXT: ret |
| 33 | +; |
| 34 | +; RV32ZBB-ZBKB-LABEL: andn_i64: |
| 35 | +; RV32ZBB-ZBKB: # %bb.0: |
| 36 | +; RV32ZBB-ZBKB-NEXT: andn a0, a0, a2 |
| 37 | +; RV32ZBB-ZBKB-NEXT: andn a1, a1, a3 |
| 38 | +; RV32ZBB-ZBKB-NEXT: ret |
| 39 | + %neg = xor i64 %b, -1 |
| 40 | + %and = and i64 %neg, %a |
| 41 | + ret i64 %and |
| 42 | +} |
| 43 | + |
| 44 | +define i32 @orn_i32(i32 %a, i32 %b) nounwind { |
| 45 | +; RV32I-LABEL: orn_i32: |
| 46 | +; RV32I: # %bb.0: |
| 47 | +; RV32I-NEXT: not a1, a1 |
| 48 | +; RV32I-NEXT: or a0, a1, a0 |
| 49 | +; RV32I-NEXT: ret |
| 50 | +; |
| 51 | +; RV32ZBB-ZBKB-LABEL: orn_i32: |
| 52 | +; RV32ZBB-ZBKB: # %bb.0: |
| 53 | +; RV32ZBB-ZBKB-NEXT: orn a0, a0, a1 |
| 54 | +; RV32ZBB-ZBKB-NEXT: ret |
| 55 | + %neg = xor i32 %b, -1 |
| 56 | + %or = or i32 %neg, %a |
| 57 | + ret i32 %or |
| 58 | +} |
| 59 | + |
| 60 | +define i64 @orn_i64(i64 %a, i64 %b) nounwind { |
| 61 | +; RV32I-LABEL: orn_i64: |
| 62 | +; RV32I: # %bb.0: |
| 63 | +; RV32I-NEXT: not a2, a2 |
| 64 | +; RV32I-NEXT: not a3, a3 |
| 65 | +; RV32I-NEXT: or a0, a2, a0 |
| 66 | +; RV32I-NEXT: or a1, a3, a1 |
| 67 | +; RV32I-NEXT: ret |
| 68 | +; |
| 69 | +; RV32ZBB-ZBKB-LABEL: orn_i64: |
| 70 | +; RV32ZBB-ZBKB: # %bb.0: |
| 71 | +; RV32ZBB-ZBKB-NEXT: orn a0, a0, a2 |
| 72 | +; RV32ZBB-ZBKB-NEXT: orn a1, a1, a3 |
| 73 | +; RV32ZBB-ZBKB-NEXT: ret |
| 74 | + %neg = xor i64 %b, -1 |
| 75 | + %or = or i64 %neg, %a |
| 76 | + ret i64 %or |
| 77 | +} |
| 78 | + |
| 79 | +define i32 @xnor_i32(i32 %a, i32 %b) nounwind { |
| 80 | +; RV32I-LABEL: xnor_i32: |
| 81 | +; RV32I: # %bb.0: |
| 82 | +; RV32I-NEXT: not a0, a0 |
| 83 | +; RV32I-NEXT: xor a0, a0, a1 |
| 84 | +; RV32I-NEXT: ret |
| 85 | +; |
| 86 | +; RV32ZBB-ZBKB-LABEL: xnor_i32: |
| 87 | +; RV32ZBB-ZBKB: # %bb.0: |
| 88 | +; RV32ZBB-ZBKB-NEXT: xnor a0, a0, a1 |
| 89 | +; RV32ZBB-ZBKB-NEXT: ret |
| 90 | + %neg = xor i32 %a, -1 |
| 91 | + %xor = xor i32 %neg, %b |
| 92 | + ret i32 %xor |
| 93 | +} |
| 94 | + |
| 95 | +define i64 @xnor_i64(i64 %a, i64 %b) nounwind { |
| 96 | +; RV32I-LABEL: xnor_i64: |
| 97 | +; RV32I: # %bb.0: |
| 98 | +; RV32I-NEXT: not a0, a0 |
| 99 | +; RV32I-NEXT: not a1, a1 |
| 100 | +; RV32I-NEXT: xor a0, a0, a2 |
| 101 | +; RV32I-NEXT: xor a1, a1, a3 |
| 102 | +; RV32I-NEXT: ret |
| 103 | +; |
| 104 | +; RV32ZBB-ZBKB-LABEL: xnor_i64: |
| 105 | +; RV32ZBB-ZBKB: # %bb.0: |
| 106 | +; RV32ZBB-ZBKB-NEXT: xnor a0, a0, a2 |
| 107 | +; RV32ZBB-ZBKB-NEXT: xnor a1, a1, a3 |
| 108 | +; RV32ZBB-ZBKB-NEXT: ret |
| 109 | + %neg = xor i64 %a, -1 |
| 110 | + %xor = xor i64 %neg, %b |
| 111 | + ret i64 %xor |
| 112 | +} |
| 113 | + |
| 114 | +declare i32 @llvm.fshl.i32(i32, i32, i32) |
| 115 | + |
| 116 | +define i32 @rol_i32(i32 %a, i32 %b) nounwind { |
| 117 | +; RV32I-LABEL: rol_i32: |
| 118 | +; RV32I: # %bb.0: |
| 119 | +; RV32I-NEXT: neg a2, a1 |
| 120 | +; RV32I-NEXT: sll a1, a0, a1 |
| 121 | +; RV32I-NEXT: srl a0, a0, a2 |
| 122 | +; RV32I-NEXT: or a0, a1, a0 |
| 123 | +; RV32I-NEXT: ret |
| 124 | +; |
| 125 | +; RV32ZBB-ZBKB-LABEL: rol_i32: |
| 126 | +; RV32ZBB-ZBKB: # %bb.0: |
| 127 | +; RV32ZBB-ZBKB-NEXT: rol a0, a0, a1 |
| 128 | +; RV32ZBB-ZBKB-NEXT: ret |
| 129 | + %or = tail call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 %b) |
| 130 | + ret i32 %or |
| 131 | +} |
| 132 | + |
| 133 | +; This test is presented here in case future expansions of the Bitmanip |
| 134 | +; extensions introduce instructions suitable for this pattern. |
| 135 | + |
| 136 | +declare i64 @llvm.fshl.i64(i64, i64, i64) |
| 137 | + |
| 138 | +define i64 @rol_i64(i64 %a, i64 %b) nounwind { |
| 139 | +; CHECK-LABEL: rol_i64: |
| 140 | +; CHECK: # %bb.0: |
| 141 | +; CHECK-NEXT: andi a6, a2, 63 |
| 142 | +; CHECK-NEXT: li a4, 32 |
| 143 | +; CHECK-NEXT: bltu a6, a4, .LBB7_2 |
| 144 | +; CHECK-NEXT: # %bb.1: |
| 145 | +; CHECK-NEXT: li a3, 0 |
| 146 | +; CHECK-NEXT: sub a5, a6, a4 |
| 147 | +; CHECK-NEXT: sll a7, a0, a5 |
| 148 | +; CHECK-NEXT: j .LBB7_3 |
| 149 | +; CHECK-NEXT: .LBB7_2: |
| 150 | +; CHECK-NEXT: sll a3, a0, a2 |
| 151 | +; CHECK-NEXT: neg a5, a6 |
| 152 | +; CHECK-NEXT: srl a5, a0, a5 |
| 153 | +; CHECK-NEXT: sll a7, a1, a2 |
| 154 | +; CHECK-NEXT: or a7, a5, a7 |
| 155 | +; CHECK-NEXT: .LBB7_3: |
| 156 | +; CHECK-NEXT: neg a5, a2 |
| 157 | +; CHECK-NEXT: mv a2, a1 |
| 158 | +; CHECK-NEXT: beqz a6, .LBB7_5 |
| 159 | +; CHECK-NEXT: # %bb.4: |
| 160 | +; CHECK-NEXT: mv a2, a7 |
| 161 | +; CHECK-NEXT: .LBB7_5: |
| 162 | +; CHECK-NEXT: andi a6, a5, 63 |
| 163 | +; CHECK-NEXT: bltu a6, a4, .LBB7_7 |
| 164 | +; CHECK-NEXT: # %bb.6: |
| 165 | +; CHECK-NEXT: sub a7, a6, a4 |
| 166 | +; CHECK-NEXT: srl a7, a1, a7 |
| 167 | +; CHECK-NEXT: bnez a6, .LBB7_8 |
| 168 | +; CHECK-NEXT: j .LBB7_9 |
| 169 | +; CHECK-NEXT: .LBB7_7: |
| 170 | +; CHECK-NEXT: srl a7, a0, a5 |
| 171 | +; CHECK-NEXT: neg t0, a6 |
| 172 | +; CHECK-NEXT: sll t0, a1, t0 |
| 173 | +; CHECK-NEXT: or a7, a7, t0 |
| 174 | +; CHECK-NEXT: beqz a6, .LBB7_9 |
| 175 | +; CHECK-NEXT: .LBB7_8: |
| 176 | +; CHECK-NEXT: mv a0, a7 |
| 177 | +; CHECK-NEXT: .LBB7_9: |
| 178 | +; CHECK-NEXT: bltu a6, a4, .LBB7_11 |
| 179 | +; CHECK-NEXT: # %bb.10: |
| 180 | +; CHECK-NEXT: li a1, 0 |
| 181 | +; CHECK-NEXT: j .LBB7_12 |
| 182 | +; CHECK-NEXT: .LBB7_11: |
| 183 | +; CHECK-NEXT: srl a1, a1, a5 |
| 184 | +; CHECK-NEXT: .LBB7_12: |
| 185 | +; CHECK-NEXT: or a0, a3, a0 |
| 186 | +; CHECK-NEXT: or a1, a2, a1 |
| 187 | +; CHECK-NEXT: ret |
| 188 | + %or = tail call i64 @llvm.fshl.i64(i64 %a, i64 %a, i64 %b) |
| 189 | + ret i64 %or |
| 190 | +} |
| 191 | + |
| 192 | +declare i32 @llvm.fshr.i32(i32, i32, i32) |
| 193 | + |
| 194 | +define i32 @ror_i32(i32 %a, i32 %b) nounwind { |
| 195 | +; RV32I-LABEL: ror_i32: |
| 196 | +; RV32I: # %bb.0: |
| 197 | +; RV32I-NEXT: neg a2, a1 |
| 198 | +; RV32I-NEXT: srl a1, a0, a1 |
| 199 | +; RV32I-NEXT: sll a0, a0, a2 |
| 200 | +; RV32I-NEXT: or a0, a1, a0 |
| 201 | +; RV32I-NEXT: ret |
| 202 | +; |
| 203 | +; RV32ZBB-ZBKB-LABEL: ror_i32: |
| 204 | +; RV32ZBB-ZBKB: # %bb.0: |
| 205 | +; RV32ZBB-ZBKB-NEXT: ror a0, a0, a1 |
| 206 | +; RV32ZBB-ZBKB-NEXT: ret |
| 207 | + %or = tail call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 %b) |
| 208 | + ret i32 %or |
| 209 | +} |
| 210 | + |
| 211 | +; This test is presented here in case future expansions of the Bitmanip |
| 212 | +; extensions introduce instructions suitable for this pattern. |
| 213 | + |
| 214 | +declare i64 @llvm.fshr.i64(i64, i64, i64) |
| 215 | + |
| 216 | +define i64 @ror_i64(i64 %a, i64 %b) nounwind { |
| 217 | +; CHECK-LABEL: ror_i64: |
| 218 | +; CHECK: # %bb.0: |
| 219 | +; CHECK-NEXT: andi a5, a2, 63 |
| 220 | +; CHECK-NEXT: li a4, 32 |
| 221 | +; CHECK-NEXT: bltu a5, a4, .LBB9_2 |
| 222 | +; CHECK-NEXT: # %bb.1: |
| 223 | +; CHECK-NEXT: sub a3, a5, a4 |
| 224 | +; CHECK-NEXT: srl a6, a1, a3 |
| 225 | +; CHECK-NEXT: mv a3, a0 |
| 226 | +; CHECK-NEXT: bnez a5, .LBB9_3 |
| 227 | +; CHECK-NEXT: j .LBB9_4 |
| 228 | +; CHECK-NEXT: .LBB9_2: |
| 229 | +; CHECK-NEXT: srl a3, a0, a2 |
| 230 | +; CHECK-NEXT: neg a6, a5 |
| 231 | +; CHECK-NEXT: sll a6, a1, a6 |
| 232 | +; CHECK-NEXT: or a6, a3, a6 |
| 233 | +; CHECK-NEXT: mv a3, a0 |
| 234 | +; CHECK-NEXT: beqz a5, .LBB9_4 |
| 235 | +; CHECK-NEXT: .LBB9_3: |
| 236 | +; CHECK-NEXT: mv a3, a6 |
| 237 | +; CHECK-NEXT: .LBB9_4: |
| 238 | +; CHECK-NEXT: neg a7, a2 |
| 239 | +; CHECK-NEXT: bltu a5, a4, .LBB9_7 |
| 240 | +; CHECK-NEXT: # %bb.5: |
| 241 | +; CHECK-NEXT: li a2, 0 |
| 242 | +; CHECK-NEXT: andi a5, a7, 63 |
| 243 | +; CHECK-NEXT: bgeu a5, a4, .LBB9_8 |
| 244 | +; CHECK-NEXT: .LBB9_6: |
| 245 | +; CHECK-NEXT: sll a6, a0, a7 |
| 246 | +; CHECK-NEXT: neg a4, a5 |
| 247 | +; CHECK-NEXT: srl a0, a0, a4 |
| 248 | +; CHECK-NEXT: sll a4, a1, a7 |
| 249 | +; CHECK-NEXT: or a0, a0, a4 |
| 250 | +; CHECK-NEXT: bnez a5, .LBB9_9 |
| 251 | +; CHECK-NEXT: j .LBB9_10 |
| 252 | +; CHECK-NEXT: .LBB9_7: |
| 253 | +; CHECK-NEXT: srl a2, a1, a2 |
| 254 | +; CHECK-NEXT: andi a5, a7, 63 |
| 255 | +; CHECK-NEXT: bltu a5, a4, .LBB9_6 |
| 256 | +; CHECK-NEXT: .LBB9_8: |
| 257 | +; CHECK-NEXT: li a6, 0 |
| 258 | +; CHECK-NEXT: sub a4, a5, a4 |
| 259 | +; CHECK-NEXT: sll a0, a0, a4 |
| 260 | +; CHECK-NEXT: beqz a5, .LBB9_10 |
| 261 | +; CHECK-NEXT: .LBB9_9: |
| 262 | +; CHECK-NEXT: mv a1, a0 |
| 263 | +; CHECK-NEXT: .LBB9_10: |
| 264 | +; CHECK-NEXT: or a0, a3, a6 |
| 265 | +; CHECK-NEXT: or a1, a2, a1 |
| 266 | +; CHECK-NEXT: ret |
| 267 | + %or = tail call i64 @llvm.fshr.i64(i64 %a, i64 %a, i64 %b) |
| 268 | + ret i64 %or |
| 269 | +} |
| 270 | + |
| 271 | +define i32 @rori_i32_fshl(i32 %a) nounwind { |
| 272 | +; RV32I-LABEL: rori_i32_fshl: |
| 273 | +; RV32I: # %bb.0: |
| 274 | +; RV32I-NEXT: slli a1, a0, 31 |
| 275 | +; RV32I-NEXT: srli a0, a0, 1 |
| 276 | +; RV32I-NEXT: or a0, a1, a0 |
| 277 | +; RV32I-NEXT: ret |
| 278 | +; |
| 279 | +; RV32ZBB-ZBKB-LABEL: rori_i32_fshl: |
| 280 | +; RV32ZBB-ZBKB: # %bb.0: |
| 281 | +; RV32ZBB-ZBKB-NEXT: rori a0, a0, 1 |
| 282 | +; RV32ZBB-ZBKB-NEXT: ret |
| 283 | + %1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 31) |
| 284 | + ret i32 %1 |
| 285 | +} |
| 286 | + |
| 287 | +define i32 @rori_i32_fshr(i32 %a) nounwind { |
| 288 | +; RV32I-LABEL: rori_i32_fshr: |
| 289 | +; RV32I: # %bb.0: |
| 290 | +; RV32I-NEXT: srli a1, a0, 31 |
| 291 | +; RV32I-NEXT: slli a0, a0, 1 |
| 292 | +; RV32I-NEXT: or a0, a1, a0 |
| 293 | +; RV32I-NEXT: ret |
| 294 | +; |
| 295 | +; RV32ZBB-ZBKB-LABEL: rori_i32_fshr: |
| 296 | +; RV32ZBB-ZBKB: # %bb.0: |
| 297 | +; RV32ZBB-ZBKB-NEXT: rori a0, a0, 31 |
| 298 | +; RV32ZBB-ZBKB-NEXT: ret |
| 299 | + %1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 31) |
| 300 | + ret i32 %1 |
| 301 | +} |
| 302 | + |
| 303 | +define i64 @rori_i64(i64 %a) nounwind { |
| 304 | +; CHECK-LABEL: rori_i64: |
| 305 | +; CHECK: # %bb.0: |
| 306 | +; CHECK-NEXT: slli a2, a0, 31 |
| 307 | +; CHECK-NEXT: srli a0, a0, 1 |
| 308 | +; CHECK-NEXT: slli a3, a1, 31 |
| 309 | +; CHECK-NEXT: or a0, a0, a3 |
| 310 | +; CHECK-NEXT: srli a1, a1, 1 |
| 311 | +; CHECK-NEXT: or a0, zero, a0 |
| 312 | +; CHECK-NEXT: or a1, a2, a1 |
| 313 | +; CHECK-NEXT: ret |
| 314 | + %1 = tail call i64 @llvm.fshl.i64(i64 %a, i64 %a, i64 63) |
| 315 | + ret i64 %1 |
| 316 | +} |
| 317 | + |
| 318 | +define i64 @rori_i64_fshr(i64 %a) nounwind { |
| 319 | +; CHECK-LABEL: rori_i64_fshr: |
| 320 | +; CHECK: # %bb.0: |
| 321 | +; CHECK-NEXT: srli a2, a1, 31 |
| 322 | +; CHECK-NEXT: slli a3, a0, 1 |
| 323 | +; CHECK-NEXT: slli a1, a1, 1 |
| 324 | +; CHECK-NEXT: srli a0, a0, 31 |
| 325 | +; CHECK-NEXT: or a1, a1, a0 |
| 326 | +; CHECK-NEXT: or a0, a2, a3 |
| 327 | +; CHECK-NEXT: or a1, zero, a1 |
| 328 | +; CHECK-NEXT: ret |
| 329 | + %1 = tail call i64 @llvm.fshr.i64(i64 %a, i64 %a, i64 63) |
| 330 | + ret i64 %1 |
| 331 | +} |
| 332 | + |
| 333 | +define i8 @srli_i8(i8 %a) nounwind { |
| 334 | +; CHECK-LABEL: srli_i8: |
| 335 | +; CHECK: # %bb.0: |
| 336 | +; CHECK-NEXT: andi a0, a0, 255 |
| 337 | +; CHECK-NEXT: srli a0, a0, 6 |
| 338 | +; CHECK-NEXT: ret |
| 339 | + %1 = lshr i8 %a, 6 |
| 340 | + ret i8 %1 |
| 341 | +} |
| 342 | + |
| 343 | +; We could use sext.b+srai, but slli+srai offers more opportunities for |
| 344 | +; compressed instructions.
| 345 | +define i8 @srai_i8(i8 %a) nounwind { |
| 346 | +; RV32I-LABEL: srai_i8: |
| 347 | +; RV32I: # %bb.0: |
| 348 | +; RV32I-NEXT: slli a0, a0, 24 |
| 349 | +; RV32I-NEXT: srai a0, a0, 24 |
| 350 | +; RV32I-NEXT: srai a0, a0, 5 |
| 351 | +; RV32I-NEXT: ret |
| 352 | + %1 = ashr i8 %a, 5 |
| 353 | + ret i8 %1 |
| 354 | +} |
| 355 | + |
| 356 | +; We could use zext.h+srli, but slli+srli offers more opportunities for |
| 357 | +; compressed instructions.
| 358 | +define i16 @srli_i16(i16 %a) nounwind { |
| 359 | +; RV32I-LABEL: srli_i16: |
| 360 | +; RV32I: # %bb.0: |
| 361 | +; RV32I-NEXT: lui a1, 16 |
| 362 | +; RV32I-NEXT: addi a1, a1, -1 |
| 363 | +; RV32I-NEXT: and a0, a0, a1 |
| 364 | +; RV32I-NEXT: srli a0, a0, 6 |
| 365 | +; RV32I-NEXT: ret |
| 366 | +; |
| 367 | +; RV32ZBB-ZBKB-LABEL: srli_i16: |
| 368 | +; RV32ZBB-ZBKB: # %bb.0: |
| 369 | +; RV32ZBB-ZBKB-NEXT: zext.h a0, a0 |
| 370 | +; RV32ZBB-ZBKB-NEXT: srli a0, a0, 6 |
| 371 | +; RV32ZBB-ZBKB-NEXT: ret |
| 372 | + %1 = lshr i16 %a, 6 |
| 373 | + ret i16 %1 |
| 374 | +} |
| 375 | + |
| 376 | +; We could use sext.h+srai, but slli+srai offers more opportunities for |
| 377 | +; compressed instructions.
| 378 | +define i16 @srai_i16(i16 %a) nounwind { |
| 379 | +; RV32I-LABEL: srai_i16: |
| 380 | +; RV32I: # %bb.0: |
| 381 | +; RV32I-NEXT: slli a0, a0, 16 |
| 382 | +; RV32I-NEXT: srai a0, a0, 16 |
| 383 | +; RV32I-NEXT: srai a0, a0, 9 |
| 384 | +; RV32I-NEXT: ret |
| 385 | + %1 = ashr i16 %a, 9 |
| 386 | + ret i16 %1 |
| 387 | +} |