; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
;; Check that we don't expand memcpy/memmove for CHERI targets (yet): https://github.com/CTSRD-CHERI/llvm-project/issues/753
; RUN: opt -S -mtriple=riscv64 -pre-isel-intrinsic-lowering -mem-intrinsic-expand-size=0 --print-before-all < %s | FileCheck %s --check-prefixes=CHECK,RISCV
; RUN: opt -S -mtriple=riscv64 -data-layout="pf200:128:128:128:64" -mem-intrinsic-expand-size=0 -pre-isel-intrinsic-lowering < %s | FileCheck %s --check-prefixes=CHECK,HYBRID
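;; Note: -mem-intrinsic-expand-size=0 should make even these small fixed-size
;; calls candidates for expansion. The RISCV prefix (plain riscv64) expects
;; every intrinsic below to be lowered to a byte-wise IR loop; the HYBRID
;; prefix (CHERI hybrid data layout) expects memcpy/memmove to be left as
;; calls, while memset is still expanded (hence the shared CHECK lines).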

declare void @llvm.memmove.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1)
declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1)
declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1)

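;; memset of a constant 40 bytes is expanded to a byte store loop for both run
;; lines (shared CHECK prefix); the zero-length guard folds to a
;; constant-false branch.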
define void @call_memset(ptr align 16 %dst) #0 {
; CHECK-LABEL: @call_memset(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 false, label [[SPLIT:%.*]], label [[LOADSTORELOOP:%.*]]
; CHECK:       loadstoreloop:
; CHECK-NEXT:    [[TMP0:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[TMP2:%.*]], [[LOADSTORELOOP]] ]
; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[DST:%.*]], i64 [[TMP0]]
; CHECK-NEXT:    store i8 0, ptr [[TMP1]], align 1
; CHECK-NEXT:    [[TMP2]] = add i64 [[TMP0]], 1
; CHECK-NEXT:    [[TMP3:%.*]] = icmp ult i64 [[TMP2]], 40
; CHECK-NEXT:    br i1 [[TMP3]], label [[LOADSTORELOOP]], label [[SPLIT]]
; CHECK:       split:
; CHECK-NEXT:    ret void
;
entry:
  call void @llvm.memset.p0.i64(ptr align 16 %dst, i8 0, i64 40, i1 false)
  ret void
}

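;; Variable-length memset is also expanded for both run lines, this time with
;; a real runtime check that skips the loop when %len is zero.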
define void @call_memset_variable(ptr align 16 %dst, i64 %len) #0 {
; CHECK-LABEL: @call_memset_variable(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = icmp eq i64 0, [[LEN:%.*]]
; CHECK-NEXT:    br i1 [[TMP0]], label [[SPLIT:%.*]], label [[LOADSTORELOOP:%.*]]
; CHECK:       loadstoreloop:
; CHECK-NEXT:    [[TMP1:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[TMP3:%.*]], [[LOADSTORELOOP]] ]
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[DST:%.*]], i64 [[TMP1]]
; CHECK-NEXT:    store i8 0, ptr [[TMP2]], align 1
; CHECK-NEXT:    [[TMP3]] = add i64 [[TMP1]], 1
; CHECK-NEXT:    [[TMP4:%.*]] = icmp ult i64 [[TMP3]], [[LEN]]
; CHECK-NEXT:    br i1 [[TMP4]], label [[LOADSTORELOOP]], label [[SPLIT]]
; CHECK:       split:
; CHECK-NEXT:    ret void
;
entry:
  call void @llvm.memset.p0.i64(ptr align 16 %dst, i8 0, i64 %len, i1 false)
  ret void
}

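;; Fixed-size memcpy is expanded into a load/store loop for plain riscv64 but
;; kept as an intrinsic call in hybrid CHERI mode (no expansion yet, see the
;; issue linked above).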
define void @call_memcpy(ptr align 16 %dst, ptr align 16 %src) #0 {
; RISCV-LABEL: @call_memcpy(
; RISCV-NEXT:  entry:
; RISCV-NEXT:    br label [[LOAD_STORE_LOOP:%.*]]
; RISCV:       load-store-loop:
; RISCV-NEXT:    [[LOOP_INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[TMP3:%.*]], [[LOAD_STORE_LOOP]] ]
; RISCV-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[SRC:%.*]], i64 [[LOOP_INDEX]]
; RISCV-NEXT:    [[TMP1:%.*]] = load i8, ptr [[TMP0]], align 1
; RISCV-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[DST:%.*]], i64 [[LOOP_INDEX]]
; RISCV-NEXT:    store i8 [[TMP1]], ptr [[TMP2]], align 1
; RISCV-NEXT:    [[TMP3]] = add i64 [[LOOP_INDEX]], 1
; RISCV-NEXT:    [[TMP4:%.*]] = icmp ult i64 [[TMP3]], 40
; RISCV-NEXT:    br i1 [[TMP4]], label [[LOAD_STORE_LOOP]], label [[MEMCPY_SPLIT:%.*]]
; RISCV:       memcpy-split:
; RISCV-NEXT:    ret void
;
; HYBRID-LABEL: @call_memcpy(
; HYBRID-NEXT:  entry:
; HYBRID-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[DST:%.*]], ptr align 16 [[SRC:%.*]], i64 40, i1 false)
; HYBRID-NEXT:    ret void
;
entry:
  call void @llvm.memcpy.p0.p0.i64(ptr align 16 %dst, ptr align 16 %src, i64 40, i1 false)
  ret void
}

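;; Same split for variable-length memcpy: riscv64 gets an expansion loop
;; guarded by a len != 0 check, hybrid keeps the call.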
define void @call_memcpy_variable(ptr align 16 %dst, ptr align 16 %src, i64 %len) #0 {
; RISCV-LABEL: @call_memcpy_variable(
; RISCV-NEXT:  entry:
; RISCV-NEXT:    [[TMP0:%.*]] = icmp ne i64 [[LEN:%.*]], 0
; RISCV-NEXT:    br i1 [[TMP0]], label [[LOOP_MEMCPY_EXPANSION:%.*]], label [[POST_LOOP_MEMCPY_EXPANSION:%.*]]
; RISCV:       loop-memcpy-expansion:
; RISCV-NEXT:    [[LOOP_INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[TMP4:%.*]], [[LOOP_MEMCPY_EXPANSION]] ]
; RISCV-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[SRC:%.*]], i64 [[LOOP_INDEX]]
; RISCV-NEXT:    [[TMP2:%.*]] = load i8, ptr [[TMP1]], align 1
; RISCV-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[DST:%.*]], i64 [[LOOP_INDEX]]
; RISCV-NEXT:    store i8 [[TMP2]], ptr [[TMP3]], align 1
; RISCV-NEXT:    [[TMP4]] = add i64 [[LOOP_INDEX]], 1
; RISCV-NEXT:    [[TMP5:%.*]] = icmp ult i64 [[TMP4]], [[LEN]]
; RISCV-NEXT:    br i1 [[TMP5]], label [[LOOP_MEMCPY_EXPANSION]], label [[POST_LOOP_MEMCPY_EXPANSION]]
; RISCV:       post-loop-memcpy-expansion:
; RISCV-NEXT:    ret void
;
; HYBRID-LABEL: @call_memcpy_variable(
; HYBRID-NEXT:  entry:
; HYBRID-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[DST:%.*]], ptr align 16 [[SRC:%.*]], i64 [[LEN:%.*]], i1 false)
; HYBRID-NEXT:    ret void
;
entry:
  call void @llvm.memcpy.p0.p0.i64(ptr align 16 %dst, ptr align 16 %src, i64 %len, i1 false)
  ret void
}

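;; memmove must handle overlapping buffers, so the riscv64 expansion compares
;; src and dst and copies backwards when src < dst, forwards otherwise;
;; hybrid keeps the call.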
define void @call_memmove(ptr align 16 %dst, ptr align 16 %src) #0 {
; RISCV-LABEL: @call_memmove(
; RISCV-NEXT:  entry:
; RISCV-NEXT:    [[COMPARE_SRC_DST:%.*]] = icmp ult ptr [[SRC:%.*]], [[DST:%.*]]
; RISCV-NEXT:    [[COMPARE_N_TO_0:%.*]] = icmp eq i64 40, 0
; RISCV-NEXT:    br i1 [[COMPARE_SRC_DST]], label [[COPY_BACKWARDS:%.*]], label [[COPY_FORWARD:%.*]]
; RISCV:       copy_backwards:
; RISCV-NEXT:    br i1 [[COMPARE_N_TO_0]], label [[MEMMOVE_DONE:%.*]], label [[COPY_BACKWARDS_LOOP:%.*]]
; RISCV:       copy_backwards_loop:
; RISCV-NEXT:    [[TMP0:%.*]] = phi i64 [ [[INDEX_PTR:%.*]], [[COPY_BACKWARDS_LOOP]] ], [ 40, [[COPY_BACKWARDS]] ]
; RISCV-NEXT:    [[INDEX_PTR]] = sub i64 [[TMP0]], 1
; RISCV-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[INDEX_PTR]]
; RISCV-NEXT:    [[ELEMENT:%.*]] = load i8, ptr [[TMP1]], align 1
; RISCV-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[INDEX_PTR]]
; RISCV-NEXT:    store i8 [[ELEMENT]], ptr [[TMP2]], align 1
; RISCV-NEXT:    [[TMP3:%.*]] = icmp eq i64 [[INDEX_PTR]], 0
; RISCV-NEXT:    br i1 [[TMP3]], label [[MEMMOVE_DONE]], label [[COPY_BACKWARDS_LOOP]]
; RISCV:       copy_forward:
; RISCV-NEXT:    br i1 [[COMPARE_N_TO_0]], label [[MEMMOVE_DONE]], label [[COPY_FORWARD_LOOP:%.*]]
; RISCV:       copy_forward_loop:
; RISCV-NEXT:    [[INDEX_PTR1:%.*]] = phi i64 [ [[INDEX_INCREMENT:%.*]], [[COPY_FORWARD_LOOP]] ], [ 0, [[COPY_FORWARD]] ]
; RISCV-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[INDEX_PTR1]]
; RISCV-NEXT:    [[ELEMENT2:%.*]] = load i8, ptr [[TMP4]], align 1
; RISCV-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[INDEX_PTR1]]
; RISCV-NEXT:    store i8 [[ELEMENT2]], ptr [[TMP5]], align 1
; RISCV-NEXT:    [[INDEX_INCREMENT]] = add i64 [[INDEX_PTR1]], 1
; RISCV-NEXT:    [[TMP6:%.*]] = icmp eq i64 [[INDEX_INCREMENT]], 40
; RISCV-NEXT:    br i1 [[TMP6]], label [[MEMMOVE_DONE]], label [[COPY_FORWARD_LOOP]]
; RISCV:       memmove_done:
; RISCV-NEXT:    ret void
;
; HYBRID-LABEL: @call_memmove(
; HYBRID-NEXT:  entry:
; HYBRID-NEXT:    call void @llvm.memmove.p0.p0.i64(ptr align 16 [[DST:%.*]], ptr align 16 [[SRC:%.*]], i64 40, i1 false)
; HYBRID-NEXT:    ret void
;
entry:
  call void @llvm.memmove.p0.p0.i64(ptr align 16 %dst, ptr align 16 %src, i64 40, i1 false)
  ret void
}

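;; Variable-length memmove follows the same direction-checked pattern, with
;; the n == 0 comparison guarding both loop entries; hybrid keeps the call.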
define void @call_memmove_variable(ptr align 16 %dst, ptr align 16 %src, i64 %len) #0 {
; RISCV-LABEL: @call_memmove_variable(
; RISCV-NEXT:  entry:
; RISCV-NEXT:    [[COMPARE_SRC_DST:%.*]] = icmp ult ptr [[SRC:%.*]], [[DST:%.*]]
; RISCV-NEXT:    [[COMPARE_N_TO_0:%.*]] = icmp eq i64 [[LEN:%.*]], 0
; RISCV-NEXT:    br i1 [[COMPARE_SRC_DST]], label [[COPY_BACKWARDS:%.*]], label [[COPY_FORWARD:%.*]]
; RISCV:       copy_backwards:
; RISCV-NEXT:    br i1 [[COMPARE_N_TO_0]], label [[MEMMOVE_DONE:%.*]], label [[COPY_BACKWARDS_LOOP:%.*]]
; RISCV:       copy_backwards_loop:
; RISCV-NEXT:    [[TMP0:%.*]] = phi i64 [ [[INDEX_PTR:%.*]], [[COPY_BACKWARDS_LOOP]] ], [ [[LEN]], [[COPY_BACKWARDS]] ]
; RISCV-NEXT:    [[INDEX_PTR]] = sub i64 [[TMP0]], 1
; RISCV-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[INDEX_PTR]]
; RISCV-NEXT:    [[ELEMENT:%.*]] = load i8, ptr [[TMP1]], align 1
; RISCV-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[INDEX_PTR]]
; RISCV-NEXT:    store i8 [[ELEMENT]], ptr [[TMP2]], align 1
; RISCV-NEXT:    [[TMP3:%.*]] = icmp eq i64 [[INDEX_PTR]], 0
; RISCV-NEXT:    br i1 [[TMP3]], label [[MEMMOVE_DONE]], label [[COPY_BACKWARDS_LOOP]]
; RISCV:       copy_forward:
; RISCV-NEXT:    br i1 [[COMPARE_N_TO_0]], label [[MEMMOVE_DONE]], label [[COPY_FORWARD_LOOP:%.*]]
; RISCV:       copy_forward_loop:
; RISCV-NEXT:    [[INDEX_PTR1:%.*]] = phi i64 [ [[INDEX_INCREMENT:%.*]], [[COPY_FORWARD_LOOP]] ], [ 0, [[COPY_FORWARD]] ]
; RISCV-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[INDEX_PTR1]]
; RISCV-NEXT:    [[ELEMENT2:%.*]] = load i8, ptr [[TMP4]], align 1
; RISCV-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[INDEX_PTR1]]
; RISCV-NEXT:    store i8 [[ELEMENT2]], ptr [[TMP5]], align 1
; RISCV-NEXT:    [[INDEX_INCREMENT]] = add i64 [[INDEX_PTR1]], 1
; RISCV-NEXT:    [[TMP6:%.*]] = icmp eq i64 [[INDEX_INCREMENT]], [[LEN]]
; RISCV-NEXT:    br i1 [[TMP6]], label [[MEMMOVE_DONE]], label [[COPY_FORWARD_LOOP]]
; RISCV:       memmove_done:
; RISCV-NEXT:    ret void
;
; HYBRID-LABEL: @call_memmove_variable(
; HYBRID-NEXT:  entry:
; HYBRID-NEXT:    call void @llvm.memmove.p0.p0.i64(ptr align 16 [[DST:%.*]], ptr align 16 [[SRC:%.*]], i64 [[LEN:%.*]], i1 false)
; HYBRID-NEXT:    ret void
;
entry:
  call void @llvm.memmove.p0.p0.i64(ptr align 16 %dst, ptr align 16 %src, i64 %len, i1 false)
  ret void
}

;; We need the "no-builtins" attribute here, otherwise the IR lowering pass
;; would be skipped since it assumes the library function can be called.
attributes #0 = { nounwind "no-builtins" }