
// CHECK-LABEL: @test_svrev_s8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.rev.nxv16i8(<vscale x 16 x i8> [[OP:%.*]])
+// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.reverse.nxv16i8(<vscale x 16 x i8> [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z13test_svrev_s8u10__SVInt8_t(
// CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.rev.nxv16i8(<vscale x 16 x i8> [[OP:%.*]])
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.reverse.nxv16i8(<vscale x 16 x i8> [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svint8_t test_svrev_s8(svint8_t op) MODE_ATTR
@@ -39,12 +39,12 @@ svint8_t test_svrev_s8(svint8_t op) MODE_ATTR

// CHECK-LABEL: @test_svrev_s16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.rev.nxv8i16(<vscale x 8 x i16> [[OP:%.*]])
+// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.reverse.nxv8i16(<vscale x 8 x i16> [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z14test_svrev_s16u11__SVInt16_t(
// CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.rev.nxv8i16(<vscale x 8 x i16> [[OP:%.*]])
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.reverse.nxv8i16(<vscale x 8 x i16> [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
svint16_t test_svrev_s16(svint16_t op) MODE_ATTR
@@ -54,12 +54,12 @@ svint16_t test_svrev_s16(svint16_t op) MODE_ATTR

// CHECK-LABEL: @test_svrev_s32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.rev.nxv4i32(<vscale x 4 x i32> [[OP:%.*]])
+// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z14test_svrev_s32u11__SVInt32_t(
// CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.rev.nxv4i32(<vscale x 4 x i32> [[OP:%.*]])
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
svint32_t test_svrev_s32(svint32_t op) MODE_ATTR
@@ -69,12 +69,12 @@ svint32_t test_svrev_s32(svint32_t op) MODE_ATTR

// CHECK-LABEL: @test_svrev_s64(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.rev.nxv2i64(<vscale x 2 x i64> [[OP:%.*]])
+// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.reverse.nxv2i64(<vscale x 2 x i64> [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z14test_svrev_s64u11__SVInt64_t(
// CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.rev.nxv2i64(<vscale x 2 x i64> [[OP:%.*]])
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.reverse.nxv2i64(<vscale x 2 x i64> [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
svint64_t test_svrev_s64(svint64_t op) MODE_ATTR
@@ -84,12 +84,12 @@ svint64_t test_svrev_s64(svint64_t op) MODE_ATTR

// CHECK-LABEL: @test_svrev_u8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.rev.nxv16i8(<vscale x 16 x i8> [[OP:%.*]])
+// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.reverse.nxv16i8(<vscale x 16 x i8> [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z13test_svrev_u8u11__SVUint8_t(
// CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.rev.nxv16i8(<vscale x 16 x i8> [[OP:%.*]])
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.reverse.nxv16i8(<vscale x 16 x i8> [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svuint8_t test_svrev_u8(svuint8_t op) MODE_ATTR
@@ -99,12 +99,12 @@ svuint8_t test_svrev_u8(svuint8_t op) MODE_ATTR

// CHECK-LABEL: @test_svrev_u16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.rev.nxv8i16(<vscale x 8 x i16> [[OP:%.*]])
+// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.reverse.nxv8i16(<vscale x 8 x i16> [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z14test_svrev_u16u12__SVUint16_t(
// CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.rev.nxv8i16(<vscale x 8 x i16> [[OP:%.*]])
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.reverse.nxv8i16(<vscale x 8 x i16> [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
svuint16_t test_svrev_u16(svuint16_t op) MODE_ATTR
@@ -114,12 +114,12 @@ svuint16_t test_svrev_u16(svuint16_t op) MODE_ATTR

// CHECK-LABEL: @test_svrev_u32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.rev.nxv4i32(<vscale x 4 x i32> [[OP:%.*]])
+// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z14test_svrev_u32u12__SVUint32_t(
// CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.rev.nxv4i32(<vscale x 4 x i32> [[OP:%.*]])
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
svuint32_t test_svrev_u32(svuint32_t op) MODE_ATTR
@@ -129,12 +129,12 @@ svuint32_t test_svrev_u32(svuint32_t op) MODE_ATTR

// CHECK-LABEL: @test_svrev_u64(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.rev.nxv2i64(<vscale x 2 x i64> [[OP:%.*]])
+// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.reverse.nxv2i64(<vscale x 2 x i64> [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z14test_svrev_u64u12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.rev.nxv2i64(<vscale x 2 x i64> [[OP:%.*]])
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.reverse.nxv2i64(<vscale x 2 x i64> [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
svuint64_t test_svrev_u64(svuint64_t op) MODE_ATTR
@@ -144,12 +144,12 @@ svuint64_t test_svrev_u64(svuint64_t op) MODE_ATTR

// CHECK-LABEL: @test_svrev_f16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x half> @llvm.aarch64.sve.rev.nxv8f16(<vscale x 8 x half> [[OP:%.*]])
+// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x half> @llvm.vector.reverse.nxv8f16(<vscale x 8 x half> [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z14test_svrev_f16u13__SVFloat16_t(
// CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x half> @llvm.aarch64.sve.rev.nxv8f16(<vscale x 8 x half> [[OP:%.*]])
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x half> @llvm.vector.reverse.nxv8f16(<vscale x 8 x half> [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
svfloat16_t test_svrev_f16(svfloat16_t op) MODE_ATTR
@@ -159,12 +159,12 @@ svfloat16_t test_svrev_f16(svfloat16_t op) MODE_ATTR

// CHECK-LABEL: @test_svrev_f32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x float> @llvm.aarch64.sve.rev.nxv4f32(<vscale x 4 x float> [[OP:%.*]])
+// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z14test_svrev_f32u13__SVFloat32_t(
// CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x float> @llvm.aarch64.sve.rev.nxv4f32(<vscale x 4 x float> [[OP:%.*]])
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
svfloat32_t test_svrev_f32(svfloat32_t op) MODE_ATTR
@@ -174,12 +174,12 @@ svfloat32_t test_svrev_f32(svfloat32_t op) MODE_ATTR

// CHECK-LABEL: @test_svrev_f64(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x double> @llvm.aarch64.sve.rev.nxv2f64(<vscale x 2 x double> [[OP:%.*]])
+// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x double> @llvm.vector.reverse.nxv2f64(<vscale x 2 x double> [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z14test_svrev_f64u13__SVFloat64_t(
// CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x double> @llvm.aarch64.sve.rev.nxv2f64(<vscale x 2 x double> [[OP:%.*]])
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x double> @llvm.vector.reverse.nxv2f64(<vscale x 2 x double> [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
svfloat64_t test_svrev_f64(svfloat64_t op) MODE_ATTR
@@ -189,12 +189,12 @@ svfloat64_t test_svrev_f64(svfloat64_t op) MODE_ATTR

// CHECK-LABEL: @test_svrev_b8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i1> @llvm.aarch64.sve.rev.nxv16i1(<vscale x 16 x i1> [[OP:%.*]])
+// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i1> @llvm.vector.reverse.nxv16i1(<vscale x 16 x i1> [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z13test_svrev_b8u10__SVBool_t(
// CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i1> @llvm.aarch64.sve.rev.nxv16i1(<vscale x 16 x i1> [[OP:%.*]])
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i1> @llvm.vector.reverse.nxv16i1(<vscale x 16 x i1> [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
svbool_t test_svrev_b8(svbool_t op) MODE_ATTR
@@ -249,12 +249,12 @@ svbool_t test_svrev_b64(svbool_t op) MODE_ATTR

// CHECK-LABEL: @test_svrev_bf16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x bfloat> @llvm.aarch64.sve.rev.nxv8bf16(<vscale x 8 x bfloat> [[OP:%.*]])
+// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x bfloat> @llvm.vector.reverse.nxv8bf16(<vscale x 8 x bfloat> [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z15test_svrev_bf16u14__SVBfloat16_t(
// CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x bfloat> @llvm.aarch64.sve.rev.nxv8bf16(<vscale x 8 x bfloat> [[OP:%.*]])
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x bfloat> @llvm.vector.reverse.nxv8bf16(<vscale x 8 x bfloat> [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
//
svbfloat16_t test_svrev_bf16(svbfloat16_t op) MODE_ATTR