1 | | -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
2 | 1 | ; RUN: llc -mtriple=riscv32 -mattr=+xmipscbop -mattr=+m -verify-machineinstrs < %s \ |
3 | 2 | ; RUN: | FileCheck %s -check-prefix=RV32XMIPSPREFETCH |
4 | 3 | ; RUN: llc -mtriple=riscv64 -mattr=+xmipscbop -mattr=+m -verify-machineinstrs < %s \ |
5 | 4 | ; RUN: | FileCheck %s -check-prefix=RV64XMIPSPREFETCH |
6 | 5 |
7 | | -define dso_local void @prefetch_read(ptr noundef %a) { |
| 6 | +define void @prefetch_read(ptr noundef %ptr) nounwind { |
8 | 7 | ; RV32XMIPSPREFETCH-LABEL: prefetch_read: |
9 | | -; RV32XMIPSPREFETCH: mips.perf 8, 511(a0) |
| 8 | +; RV32XMIPSPREFETCH: mips.pref 8, 1(a0) |
10 | 9 | ; |
11 | 10 | ; RV64XMIPSPREFETCH-LABEL: prefetch_read: |
12 | | -; RV64XMIPSPREFETCH: mips.perf 8, 511(a0) |
| 11 | +; RV64XMIPSPREFETCH: mips.pref 8, 1(a0) |
13 | 12 | entry: |
14 | | - %a.addr = alloca ptr, align 8 |
15 | | - store ptr %a, ptr %a.addr, align 8 |
16 | | - %0 = load ptr, ptr %a.addr, align 8 |
17 | | - %arrayidx = getelementptr inbounds i8, ptr %0, i64 511 |
18 | | - call void @llvm.prefetch.p0(ptr %arrayidx, i32 0, i32 0, i32 1) |
| 13 | + %arrayidx = getelementptr inbounds nuw i8, ptr %ptr, i64 1 |
| 14 | + tail call void @llvm.prefetch.p0(ptr nonnull %arrayidx, i32 0, i32 0, i32 1) |
19 | 15 | ret void |
20 | 16 | } |
21 | | - |
22 | | -declare void @llvm.prefetch.p0(ptr readonly captures(none), i32 immarg, i32 immarg, i32 immarg) |
23 | | - |
24 | | -define dso_local void @prefetch_write(ptr noundef %a) { |
25 | | -entry: |
| 17 | + |
| 18 | +define void @prefetch_write(ptr noundef %ptr) nounwind { |
26 | 19 | ; RV32XMIPSPREFETCH-LABEL: prefetch_write: |
27 | | -; RV32XMIPSPREFETCH: addi a1, a0, 512 |
28 | | -; RV32XMIPSPREFETCH-NEXT: mips.perf 9, 0(a1) |
| 20 | +; RV32XMIPSPREFETCH: addi a0, a0, 512 |
| 21 | +; RV32XMIPSPREFETCH-NEXT: mips.pref 9, 0(a0) |
29 | 22 | ; |
30 | 23 | ; RV64XMIPSPREFETCH-LABEL: prefetch_write: |
31 | | -; RV64XMIPSPREFETCH: addi a1, a0, 512 |
32 | | -; RV64XMIPSPREFETCH-NEXT: mips.perf 9, 0(a1) |
33 | | - %a.addr = alloca ptr, align 8 |
34 | | - store ptr %a, ptr %a.addr, align 8 |
35 | | - %0 = load ptr, ptr %a.addr, align 8 |
36 | | - %arrayidx = getelementptr inbounds i8, ptr %0, i64 512 |
37 | | - call void @llvm.prefetch.p0(ptr %arrayidx, i32 1, i32 0, i32 1) |
| 24 | +; RV64XMIPSPREFETCH: addi a0, a0, 512 |
| 25 | +; RV64XMIPSPREFETCH-NEXT: mips.pref 9, 0(a0) |
| 26 | +entry: |
| 27 | + %arrayidx = getelementptr inbounds nuw i8, ptr %ptr, i64 512 |
| 28 | + tail call void @llvm.prefetch.p0(ptr nonnull %arrayidx, i32 1, i32 0, i32 1) |
38 | 29 | ret void |
39 | 30 | } |
40 | 31 |
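
For reference, a minimal sketch of the boundary case implied by these tests (the function name is hypothetical and the exact offset limit is an assumption inferred from the 511/512 offsets used here, not something the diff states): the removed prefetch_read checks folded a 511-byte offset directly into the prefetch instruction, while prefetch_write's 512-byte offset needs the address materialized with an addi first. A read prefetch at offset 511 should therefore still lower to a single mips.pref with no separate addi; per the updated checks, rw=0 data prefetches use hint 8 and rw=1 uses hint 9.

define void @prefetch_read_max_offset(ptr noundef %ptr) nounwind {
entry:
  ; Hypothetical boundary case: 511 is the largest offset the original
  ; checks showed being encoded directly in the prefetch instruction.
  %arrayidx = getelementptr inbounds nuw i8, ptr %ptr, i64 511
  ; Arguments: rw=0 (read), locality=0, cache type=1 (data), as in prefetch_read.
  tail call void @llvm.prefetch.p0(ptr nonnull %arrayidx, i32 0, i32 0, i32 1)
  ret void
}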