; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -S -passes=instcombine < %s | FileCheck %s

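; Each test below stores a <vscale x 4 x float> to an alloca and immediately
; loads a fixed-width vector back from it via llvm.vector.insert. Judging by
; the function names and the vscale_range attributes, the tests appear to probe
; whether the scalable store may be forwarded to the fixed-width load; in the
; autogenerated checks the store/load/insert sequence is kept as-is in all
; three cases.
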
%struct.svfloat32_wrapped_t = type { <16 x float> }

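; vscale_range(4,4) pins vscale to 4, so the stored <vscale x 4 x float> spans
; exactly 16 floats -- the same 64 bytes covered by the <16 x float> load.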
define <vscale x 4 x float> @store_to_vector_load_different_type(<vscale x 4 x float> %.coerce) #0 {
; CHECK-LABEL: define <vscale x 4 x float> @store_to_vector_load_different_type(
; CHECK-SAME: <vscale x 4 x float> [[DOTCOERCE:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT:  [[ENTRY:.*:]]
; CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_SVFLOAT32_WRAPPED_T:%.*]], align 64
; CHECK-NEXT:    [[TMP0:%.*]] = fadd <vscale x 4 x float> [[DOTCOERCE]], [[DOTCOERCE]]
; CHECK-NEXT:    store <vscale x 4 x float> [[TMP0]], ptr [[RETVAL]], align 16
; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x float>, ptr [[RETVAL]], align 64
; CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = tail call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v16f32(<vscale x 4 x float> poison, <16 x float> [[TMP1]], i64 0)
; CHECK-NEXT:    ret <vscale x 4 x float> [[CAST_SCALABLE]]
;
entry:
  %retval = alloca %struct.svfloat32_wrapped_t
  %0 = fadd <vscale x 4 x float> %.coerce, %.coerce
  store <vscale x 4 x float> %0, ptr %retval
  %1 = load <16 x float>, ptr %retval
  %cast.scalable = tail call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v16f32(<vscale x 4 x float> poison, <16 x float> %1, i64 0)
  ret <vscale x 4 x float> %cast.scalable
}

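; Here vscale_range(1,16) does not pin vscale, so the size of the scalable
; store is not a compile-time constant and cannot be proven to match the
; 64-byte <16 x float> load.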
define <vscale x 4 x float> @vscale_not_fixed(<vscale x 4 x float> %.coerce) #1 {
; CHECK-LABEL: define <vscale x 4 x float> @vscale_not_fixed(
; CHECK-SAME: <vscale x 4 x float> [[DOTCOERCE:%.*]]) #[[ATTR1:[0-9]+]] {
; CHECK-NEXT:  [[ENTRY:.*:]]
; CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_SVFLOAT32_WRAPPED_T:%.*]], align 64
; CHECK-NEXT:    [[TMP0:%.*]] = fadd <vscale x 4 x float> [[DOTCOERCE]], [[DOTCOERCE]]
; CHECK-NEXT:    store <vscale x 4 x float> [[TMP0]], ptr [[RETVAL]], align 16
; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x float>, ptr [[RETVAL]], align 64
; CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = tail call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v16f32(<vscale x 4 x float> poison, <16 x float> [[TMP1]], i64 0)
; CHECK-NEXT:    ret <vscale x 4 x float> [[CAST_SCALABLE]]
;
entry:
  %retval = alloca %struct.svfloat32_wrapped_t
  %0 = fadd <vscale x 4 x float> %.coerce, %.coerce
  store <vscale x 4 x float> %0, ptr %retval
  %1 = load <16 x float>, ptr %retval
  %cast.scalable = tail call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v16f32(<vscale x 4 x float> poison, <16 x float> %1, i64 0)
  ret <vscale x 4 x float> %cast.scalable
}

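; vscale is again fixed at 4, so the store writes 16 floats while the load
; reads only 8 -- the accessed sizes do not match.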
define <vscale x 4 x float> @sizes_do_not_match(<vscale x 4 x float> %.coerce) #0 {
; CHECK-LABEL: define <vscale x 4 x float> @sizes_do_not_match(
; CHECK-SAME: <vscale x 4 x float> [[DOTCOERCE:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:  [[ENTRY:.*:]]
; CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_SVFLOAT32_WRAPPED_T:%.*]], align 64
; CHECK-NEXT:    [[TMP0:%.*]] = fadd <vscale x 4 x float> [[DOTCOERCE]], [[DOTCOERCE]]
; CHECK-NEXT:    store <vscale x 4 x float> [[TMP0]], ptr [[RETVAL]], align 16
; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x float>, ptr [[RETVAL]], align 32
; CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = tail call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v8f32(<vscale x 4 x float> poison, <8 x float> [[TMP1]], i64 0)
; CHECK-NEXT:    ret <vscale x 4 x float> [[CAST_SCALABLE]]
;
entry:
  %retval = alloca %struct.svfloat32_wrapped_t
  %0 = fadd <vscale x 4 x float> %.coerce, %.coerce
  store <vscale x 4 x float> %0, ptr %retval
  %1 = load <8 x float>, ptr %retval
  %cast.scalable = tail call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v8f32(<vscale x 4 x float> poison, <8 x float> %1, i64 0)
  ret <vscale x 4 x float> %cast.scalable
}

declare <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v16f32(<vscale x 4 x float>, <16 x float>, i64 immarg)
declare <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v8f32(<vscale x 4 x float>, <8 x float>, i64 immarg)

attributes #0 = { vscale_range(4,4) }
attributes #1 = { vscale_range(1,16) }