; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -S -passes=instcombine < %s | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"

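; Fixed-width vectors: the {<4 x i32>, <4 x i32>} store is split into per-element stores and the
; {<16 x i8>, <16 x i8>} load is forwarded from the stored values via bitcasts, so %p is never re-read.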
define {<16 x i8>, <16 x i8>} @check_v16i8_v4i32({<4 x i32>, <4 x i32>} %x, ptr %p) nounwind {
; CHECK-LABEL: define { <16 x i8>, <16 x i8> } @check_v16i8_v4i32(
; CHECK-SAME: { <4 x i32>, <4 x i32> } [[X:%.*]], ptr [[P:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[X_ELT:%.*]] = extractvalue { <4 x i32>, <4 x i32> } [[X]], 0
; CHECK-NEXT: store <4 x i32> [[X_ELT]], ptr [[P]], align 16
; CHECK-NEXT: [[P_REPACK1:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 16
; CHECK-NEXT: [[X_ELT2:%.*]] = extractvalue { <4 x i32>, <4 x i32> } [[X]], 1
; CHECK-NEXT: store <4 x i32> [[X_ELT2]], ptr [[P_REPACK1]], align 16
; CHECK-NEXT: [[R_UNPACK_CAST:%.*]] = bitcast <4 x i32> [[X_ELT]] to <16 x i8>
; CHECK-NEXT: [[TMP0:%.*]] = insertvalue { <16 x i8>, <16 x i8> } poison, <16 x i8> [[R_UNPACK_CAST]], 0
; CHECK-NEXT: [[R_UNPACK4_CAST:%.*]] = bitcast <4 x i32> [[X_ELT2]] to <16 x i8>
; CHECK-NEXT: [[R5:%.*]] = insertvalue { <16 x i8>, <16 x i8> } [[TMP0]], <16 x i8> [[R_UNPACK4_CAST]], 1
; CHECK-NEXT: ret { <16 x i8>, <16 x i8> } [[R5]]
;
entry:
  store {<4 x i32>, <4 x i32>} %x, ptr %p
  %r = load {<16 x i8>, <16 x i8>}, ptr %p
  ret {<16 x i8>, <16 x i8>} %r
}

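; Scalable vectors: the aggregate store and the differently-typed load are left as written rather
; than being split and forwarded.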
define {<vscale x 16 x i8>, <vscale x 16 x i8>} @check_nxv16i8_nxv4i32({<vscale x 4 x i32>, <vscale x 4 x i32>} %x, ptr %p) nounwind {
; CHECK-LABEL: define { <vscale x 16 x i8>, <vscale x 16 x i8> } @check_nxv16i8_nxv4i32(
; CHECK-SAME: { <vscale x 4 x i32>, <vscale x 4 x i32> } [[X:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: store { <vscale x 4 x i32>, <vscale x 4 x i32> } [[X]], ptr [[P]], align 16
; CHECK-NEXT: [[R:%.*]] = load { <vscale x 16 x i8>, <vscale x 16 x i8> }, ptr [[P]], align 16
; CHECK-NEXT: ret { <vscale x 16 x i8>, <vscale x 16 x i8> } [[R]]
;
entry:
  store {<vscale x 4 x i32>, <vscale x 4 x i32>} %x, ptr %p
  %r = load {<vscale x 16 x i8>, <vscale x 16 x i8>}, ptr %p
  ret {<vscale x 16 x i8>, <vscale x 16 x i8>} %r
}

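; Same as above, but through a local alloca: the alloca, store, and load all remain.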
define {<vscale x 16 x i8>, <vscale x 16 x i8>} @alloca_nxv16i8_nxv4i32({<vscale x 4 x i32>, <vscale x 4 x i32>} %x) nounwind {
; CHECK-LABEL: define { <vscale x 16 x i8>, <vscale x 16 x i8> } @alloca_nxv16i8_nxv4i32(
; CHECK-SAME: { <vscale x 4 x i32>, <vscale x 4 x i32> } [[X:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[P:%.*]] = alloca { <vscale x 4 x i32>, <vscale x 4 x i32> }, align 16
; CHECK-NEXT: store { <vscale x 4 x i32>, <vscale x 4 x i32> } [[X]], ptr [[P]], align 16
; CHECK-NEXT: [[R:%.*]] = load { <vscale x 16 x i8>, <vscale x 16 x i8> }, ptr [[P]], align 16
; CHECK-NEXT: ret { <vscale x 16 x i8>, <vscale x 16 x i8> } [[R]]
;
entry:
  %p = alloca {<vscale x 4 x i32>, <vscale x 4 x i32>}
  store {<vscale x 4 x i32>, <vscale x 4 x i32>} %x, ptr %p
  %r = load {<vscale x 16 x i8>, <vscale x 16 x i8>}, ptr %p
  ret {<vscale x 16 x i8>, <vscale x 16 x i8>} %r
}

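; Structs whose two elements have different sizes ({ <4 x i32>, <8 x i32> } loaded back as
; { <16 x i8>, <32 x i8> }): the aggregate store and load are kept intact between the lifetime markers.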
define { <16 x i8>, <32 x i8> } @differenttypes({ <4 x i32>, <8 x i32> } %a, ptr %p) {
; CHECK-LABEL: define { <16 x i8>, <32 x i8> } @differenttypes(
; CHECK-SAME: { <4 x i32>, <8 x i32> } [[A:%.*]], ptr [[P:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr nonnull [[P]])
; CHECK-NEXT: store { <4 x i32>, <8 x i32> } [[A]], ptr [[P]], align 16
; CHECK-NEXT: [[TMP0:%.*]] = load { <16 x i8>, <32 x i8> }, ptr [[P]], align 16
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr nonnull [[P]])
; CHECK-NEXT: ret { <16 x i8>, <32 x i8> } [[TMP0]]
;
entry:
  call void @llvm.lifetime.start.p0(i64 -1, ptr nonnull %p)
  store { <4 x i32>, <8 x i32> } %a, ptr %p, align 16
  %2 = load { <16 x i8>, <32 x i8> }, ptr %p, align 16
  call void @llvm.lifetime.end.p0(i64 -1, ptr nonnull %p)
  ret { <16 x i8>, <32 x i8> } %2
}