@@ -392,35 +392,33 @@ llvm.func @arm_sve_psel(%pn: vector<[16]xi1>, %p1: vector<[2]xi1>, %p2: vector<[
 }
 
 // CHECK-LABEL: @arm_sve_dupq_lane
-// CHECK-SAME: <vscale x 16 x i8> %0
-// CHECK-SAME: <vscale x 8 x i16> %1
-// CHECK-SAME: <vscale x 8 x half> %2
-// CHECK-SAME: <vscale x 8 x bfloat> %3
-// CHECK-SAME: <vscale x 4 x i32> %4
-// CHECK-SAME: <vscale x 4 x float> %5
-// CHECK-SAME: <vscale x 2 x i64> %6
-// CHECK-SAME: <vscale x 2 x double> %7
-
-
-llvm.func @arm_sve_dupq_lane(%arg0: vector<[16]xi8>, %arg1: vector<[8]xi16>,
-                             %arg2: vector<[8]xf16>, %arg3: vector<[8]xbf16>,
-                             %arg4: vector<[4]xi32>,%arg5: vector<[4]xf32>,
-                             %arg6: vector<[2]xi64>, %arg7: vector<[2]xf64>) {
-  // CHECK: call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> %0, i64 0)
-  %0 = "arm_sve.intr.dupq_lane"(%arg0) <{lane = 0 : i64}> : (vector<[16]xi8>) -> vector<[16]xi8>
-  // CHECK: call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> %1, i64 1)
-  %1 = "arm_sve.intr.dupq_lane"(%arg1) <{lane = 1 : i64}> : (vector<[8]xi16>) -> vector<[8]xi16>
-  // CHECK: call <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> %2, i64 2)
-  %2 = "arm_sve.intr.dupq_lane"(%arg2) <{lane = 2 : i64}> : (vector<[8]xf16>) -> vector<[8]xf16>
-  // CHECK: call <vscale x 8 x bfloat> @llvm.aarch64.sve.dupq.lane.nxv8bf16(<vscale x 8 x bfloat> %3, i64 3)
-  %3 = "arm_sve.intr.dupq_lane"(%arg3) <{lane = 3 : i64}> : (vector<[8]xbf16>) -> vector<[8]xbf16>
-  // CHECK: call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> %4, i64 4)
-  %4 = "arm_sve.intr.dupq_lane"(%arg4) <{lane = 4 : i64}> : (vector<[4]xi32>) -> vector<[4]xi32>
-  // CHECK: call <vscale x 4 x float> @llvm.aarch64.sve.dupq.lane.nxv4f32(<vscale x 4 x float> %5, i64 5)
-  %5 = "arm_sve.intr.dupq_lane"(%arg5) <{lane = 5 : i64}> : (vector<[4]xf32>) -> vector<[4]xf32>
-  // CHECK: call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> %6, i64 6)
-  %6 = "arm_sve.intr.dupq_lane"(%arg6) <{lane = 6 : i64}> : (vector<[2]xi64>) -> vector<[2]xi64>
-  // CHECK: call <vscale x 2 x double> @llvm.aarch64.sve.dupq.lane.nxv2f64(<vscale x 2 x double> %7, i64 7)
-  %7 = "arm_sve.intr.dupq_lane"(%arg7) <{lane = 7 : i64}> : (vector<[2]xf64>) -> vector<[2]xf64>
+// CHECK-SAME: <vscale x 16 x i8> %[[V0:[0-9]+]]
+// CHECK-SAME: <vscale x 8 x i16> %[[V1:[0-9]+]]
+// CHECK-SAME: <vscale x 8 x half> %[[V2:[0-9]+]]
+// CHECK-SAME: <vscale x 8 x bfloat> %[[V3:[0-9]+]]
+// CHECK-SAME: <vscale x 4 x i32> %[[V4:[0-9]+]]
+// CHECK-SAME: <vscale x 4 x float> %[[V5:[0-9]+]]
+// CHECK-SAME: <vscale x 2 x i64> %[[V6:[0-9]+]]
+// CHECK-SAME: <vscale x 2 x double> %[[V7:[0-9]+]]
+llvm.func @arm_sve_dupq_lane(%nxv16i8: vector<[16]xi8>, %nxv8i16: vector<[8]xi16>,
+                             %nxv8f16: vector<[8]xf16>, %nxv8bf16: vector<[8]xbf16>,
+                             %nxv4i32: vector<[4]xi32>, %nxv4f32: vector<[4]xf32>,
+                             %nxv2i64: vector<[2]xi64>, %nxv2f64: vector<[2]xf64>) {
+  // CHECK: call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> %[[V0]], i64 0)
+  %0 = "arm_sve.intr.dupq_lane"(%nxv16i8) <{lane = 0 : i64}> : (vector<[16]xi8>) -> vector<[16]xi8>
+  // CHECK: call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> %[[V1]], i64 1)
+  %1 = "arm_sve.intr.dupq_lane"(%nxv8i16) <{lane = 1 : i64}> : (vector<[8]xi16>) -> vector<[8]xi16>
+  // CHECK: call <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> %[[V2]], i64 2)
+  %2 = "arm_sve.intr.dupq_lane"(%nxv8f16) <{lane = 2 : i64}> : (vector<[8]xf16>) -> vector<[8]xf16>
+  // CHECK: call <vscale x 8 x bfloat> @llvm.aarch64.sve.dupq.lane.nxv8bf16(<vscale x 8 x bfloat> %[[V3]], i64 3)
+  %3 = "arm_sve.intr.dupq_lane"(%nxv8bf16) <{lane = 3 : i64}> : (vector<[8]xbf16>) -> vector<[8]xbf16>
+  // CHECK: call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> %[[V4]], i64 4)
+  %4 = "arm_sve.intr.dupq_lane"(%nxv4i32) <{lane = 4 : i64}> : (vector<[4]xi32>) -> vector<[4]xi32>
+  // CHECK: call <vscale x 4 x float> @llvm.aarch64.sve.dupq.lane.nxv4f32(<vscale x 4 x float> %[[V5]], i64 5)
+  %5 = "arm_sve.intr.dupq_lane"(%nxv4f32) <{lane = 5 : i64}> : (vector<[4]xf32>) -> vector<[4]xf32>
+  // CHECK: call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> %[[V6]], i64 6)
+  %6 = "arm_sve.intr.dupq_lane"(%nxv2i64) <{lane = 6 : i64}> : (vector<[2]xi64>) -> vector<[2]xi64>
+  // CHECK: call <vscale x 2 x double> @llvm.aarch64.sve.dupq.lane.nxv2f64(<vscale x 2 x double> %[[V7]], i64 7)
+  %7 = "arm_sve.intr.dupq_lane"(%nxv2f64) <{lane = 7 : i64}> : (vector<[2]xf64>) -> vector<[2]xf64>
   llvm.return
 }