 #define SVE_ACLE_FUNC(A1,A2,A3) A1##A2##A3
 #endif

+// CHECK-LABEL: @test_cvt_f16_x2(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: tail call void @llvm.aarch64.set.fpmr(i64 [[FPMR:%.*]])
+// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.fp8.cvt.x2.nxv8f16(<vscale x 8 x half> [[ZN_COERCE0:%.*]], <vscale x 8 x half> [[ZN_COERCE1:%.*]])
+// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CPP-CHECK-LABEL: @_Z15test_cvt_f16_x213svfloat16x2_tm(
+// CPP-CHECK-NEXT: entry:
+// CPP-CHECK-NEXT: tail call void @llvm.aarch64.set.fpmr(i64 [[FPMR:%.*]])
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.fp8.cvt.x2.nxv8f16(<vscale x 8 x half> [[ZN_COERCE0:%.*]], <vscale x 8 x half> [[ZN_COERCE1:%.*]])
+// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+svmfloat8_t test_cvt_f16_x2(svfloat16x2_t zn, fpm_t fpmr) __arm_streaming {
+  return SVE_ACLE_FUNC(svcvt_mf8,_f16_x2,_fpm)(zn, fpmr);
+}
+
+// CHECK-LABEL: @test_cvt_f32_x4(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: tail call void @llvm.aarch64.set.fpmr(i64 [[FPMR:%.*]])
+// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.fp8.cvt.x4(<vscale x 4 x float> [[ZN_COERCE0:%.*]], <vscale x 4 x float> [[ZN_COERCE1:%.*]], <vscale x 4 x float> [[ZN_COERCE2:%.*]], <vscale x 4 x float> [[ZN_COERCE3:%.*]])
+// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CPP-CHECK-LABEL: @_Z15test_cvt_f32_x413svfloat32x4_tm(
+// CPP-CHECK-NEXT: entry:
+// CPP-CHECK-NEXT: tail call void @llvm.aarch64.set.fpmr(i64 [[FPMR:%.*]])
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.fp8.cvt.x4(<vscale x 4 x float> [[ZN_COERCE0:%.*]], <vscale x 4 x float> [[ZN_COERCE1:%.*]], <vscale x 4 x float> [[ZN_COERCE2:%.*]], <vscale x 4 x float> [[ZN_COERCE3:%.*]])
+// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+svmfloat8_t test_cvt_f32_x4(svfloat32x4_t zn, fpm_t fpmr) __arm_streaming {
+  return SVE_ACLE_FUNC(svcvt_mf8,_f32_x4,_fpm)(zn, fpmr);
+}
+
+// CHECK-LABEL: @test_cvtn_f32_x4(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: tail call void @llvm.aarch64.set.fpmr(i64 [[FPMR:%.*]])
+// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.fp8.cvtn.x4(<vscale x 4 x float> [[ZN_COERCE0:%.*]], <vscale x 4 x float> [[ZN_COERCE1:%.*]], <vscale x 4 x float> [[ZN_COERCE2:%.*]], <vscale x 4 x float> [[ZN_COERCE3:%.*]])
+// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CPP-CHECK-LABEL: @_Z16test_cvtn_f32_x413svfloat32x4_tm(
+// CPP-CHECK-NEXT: entry:
+// CPP-CHECK-NEXT: tail call void @llvm.aarch64.set.fpmr(i64 [[FPMR:%.*]])
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.fp8.cvtn.x4(<vscale x 4 x float> [[ZN_COERCE0:%.*]], <vscale x 4 x float> [[ZN_COERCE1:%.*]], <vscale x 4 x float> [[ZN_COERCE2:%.*]], <vscale x 4 x float> [[ZN_COERCE3:%.*]])
+// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+svmfloat8_t test_cvtn_f32_x4(svfloat32x4_t zn, fpm_t fpmr) __arm_streaming {
+  return SVE_ACLE_FUNC(svcvtn_mf8,_f32_x4,_fpm)(zn, fpmr);
+}
+
+// CHECK-LABEL: @test_cvt_bf16_x2(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: tail call void @llvm.aarch64.set.fpmr(i64 [[FPMR:%.*]])
+// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.fp8.cvt.x2.nxv8bf16(<vscale x 8 x bfloat> [[ZN_COERCE0:%.*]], <vscale x 8 x bfloat> [[ZN_COERCE1:%.*]])
+// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CPP-CHECK-LABEL: @_Z16test_cvt_bf16_x214svbfloat16x2_tm(
+// CPP-CHECK-NEXT: entry:
+// CPP-CHECK-NEXT: tail call void @llvm.aarch64.set.fpmr(i64 [[FPMR:%.*]])
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.fp8.cvt.x2.nxv8bf16(<vscale x 8 x bfloat> [[ZN_COERCE0:%.*]], <vscale x 8 x bfloat> [[ZN_COERCE1:%.*]])
+// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+svmfloat8_t test_cvt_bf16_x2(svbfloat16x2_t zn, fpm_t fpmr) __arm_streaming {
+  return SVE_ACLE_FUNC(svcvt_mf8,_bf16_x2,_fpm)(zn, fpmr);
+}
+
 // CHECK-LABEL: @test_cvt1_f16_x2(
 // CHECK-NEXT: entry:
 // CHECK-NEXT: tail call void @llvm.aarch64.set.fpmr(i64 [[FPMR:%.*]])
|
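For reference, a minimal sketch of the non-overloaded call these tests expand to. The SVE_ACLE_FUNC macro above pastes its three arguments into one token, so SVE_ACLE_FUNC(svcvt_mf8,_f16_x2,_fpm)(zn, fpmr) becomes svcvt_mf8_f16_x2_fpm(zn, fpmr). The wrapper name narrow_f16_pair and the <arm_sve.h> include are illustrative assumptions, not part of this commit:

#include <arm_sve.h>

// Hypothetical standalone caller mirroring test_cvt_f16_x2 above: the fpm_t
// operand is written to the FPMR register first (the @llvm.aarch64.set.fpmr
// call in the CHECK lines), then the two f16 input vectors are narrowed into
// a single FP8 (svmfloat8_t) result vector.
svmfloat8_t narrow_f16_pair(svfloat16x2_t zn, fpm_t fpmr) __arm_streaming {
  return svcvt_mf8_f16_x2_fpm(zn, fpmr);
}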