@@ -48,18 +48,11 @@ define <vscale x 1 x i32> @vmulhu_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
 }

 define <vscale x 1 x i32> @vmulhu_vi_nxv1i32_1(<vscale x 1 x i32> %va) {
-; RV32-LABEL: vmulhu_vi_nxv1i32_1:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
-; RV32-NEXT:    vsrl.vi v8, v8, 28
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: vmulhu_vi_nxv1i32_1:
-; RV64:       # %bb.0:
-; RV64-NEXT:    li a0, 16
-; RV64-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
-; RV64-NEXT:    vmulhu.vx v8, v8, a0
-; RV64-NEXT:    ret
+; CHECK-LABEL: vmulhu_vi_nxv1i32_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vsrl.vi v8, v8, 28
+; CHECK-NEXT:    ret
   %vb = zext <vscale x 1 x i32> splat (i32 16) to <vscale x 1 x i64>
   %vc = zext <vscale x 1 x i32> %va to <vscale x 1 x i64>
   %vd = mul <vscale x 1 x i64> %vb, %vc
@@ -114,18 +107,11 @@ define <vscale x 2 x i32> @vmulhu_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
 }

 define <vscale x 2 x i32> @vmulhu_vi_nxv2i32_1(<vscale x 2 x i32> %va) {
-; RV32-LABEL: vmulhu_vi_nxv2i32_1:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
-; RV32-NEXT:    vsrl.vi v8, v8, 28
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: vmulhu_vi_nxv2i32_1:
-; RV64:       # %bb.0:
-; RV64-NEXT:    li a0, 16
-; RV64-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
-; RV64-NEXT:    vmulhu.vx v8, v8, a0
-; RV64-NEXT:    ret
+; CHECK-LABEL: vmulhu_vi_nxv2i32_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vsrl.vi v8, v8, 28
+; CHECK-NEXT:    ret
   %vb = zext <vscale x 2 x i32> splat (i32 16) to <vscale x 2 x i64>
   %vc = zext <vscale x 2 x i32> %va to <vscale x 2 x i64>
   %vd = mul <vscale x 2 x i64> %vb, %vc
@@ -180,18 +166,11 @@ define <vscale x 4 x i32> @vmulhu_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
 }

 define <vscale x 4 x i32> @vmulhu_vi_nxv4i32_1(<vscale x 4 x i32> %va) {
-; RV32-LABEL: vmulhu_vi_nxv4i32_1:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
-; RV32-NEXT:    vsrl.vi v8, v8, 28
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: vmulhu_vi_nxv4i32_1:
-; RV64:       # %bb.0:
-; RV64-NEXT:    li a0, 16
-; RV64-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
-; RV64-NEXT:    vmulhu.vx v8, v8, a0
-; RV64-NEXT:    ret
+; CHECK-LABEL: vmulhu_vi_nxv4i32_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vsrl.vi v8, v8, 28
+; CHECK-NEXT:    ret
   %vb = zext <vscale x 4 x i32> splat (i32 16) to <vscale x 4 x i64>
   %vc = zext <vscale x 4 x i32> %va to <vscale x 4 x i64>
   %vd = mul <vscale x 4 x i64> %vb, %vc
@@ -246,22 +225,18 @@ define <vscale x 8 x i32> @vmulhu_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
 }

 define <vscale x 8 x i32> @vmulhu_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
-; RV32-LABEL: vmulhu_vi_nxv8i32_1:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
-; RV32-NEXT:    vsrl.vi v8, v8, 28
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: vmulhu_vi_nxv8i32_1:
-; RV64:       # %bb.0:
-; RV64-NEXT:    li a0, 16
-; RV64-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
-; RV64-NEXT:    vmulhu.vx v8, v8, a0
-; RV64-NEXT:    ret
+; CHECK-LABEL: vmulhu_vi_nxv8i32_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vsrl.vi v8, v8, 28
+; CHECK-NEXT:    ret
   %vb = zext <vscale x 8 x i32> splat (i32 16) to <vscale x 8 x i64>
   %vc = zext <vscale x 8 x i32> %va to <vscale x 8 x i64>
   %vd = mul <vscale x 8 x i64> %vb, %vc
   %ve = lshr <vscale x 8 x i64> %vd, splat (i64 32)
   %vf = trunc <vscale x 8 x i64> %ve to <vscale x 8 x i32>
   ret <vscale x 8 x i32> %vf
 }
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; RV32: {{.*}}
+; RV64: {{.*}}
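Why the RV32 and RV64 check lines collapse into a single CHECK prefix (an editorial sketch, not part of the committed diff): for a 32-bit lane value x, zext(x) * 16 occupies at most 36 bits, so the high-word extraction (zext(x) * 16) >> 32 reduces to x >> 28 independent of XLEN. Both targets can therefore lower the multiply-high-by-16 pattern to one vsrl.vi by 28. A minimal standalone reproducer of the pattern, assuming the same zext/mul/lshr/trunc shape as the tests above (the function name @mulhu16_sketch is hypothetical):

define <vscale x 1 x i32> @mulhu16_sketch(<vscale x 1 x i32> %va) {
  %vb = zext <vscale x 1 x i32> splat (i32 16) to <vscale x 1 x i64>
  %vc = zext <vscale x 1 x i32> %va to <vscale x 1 x i64>
  ; %vd is at most a 36-bit value: a 32-bit lane shifted left by 4.
  %vd = mul <vscale x 1 x i64> %vb, %vc
  ; Dropping the low 32 bits leaves the top 4 bits of %va, i.e. %va >> 28,
  ; which is why a single "vsrl.vi v8, v8, 28" is the expected lowering.
  %ve = lshr <vscale x 1 x i64> %vd, splat (i64 32)
  %vf = trunc <vscale x 1 x i64> %ve to <vscale x 1 x i32>
  ret <vscale x 1 x i32> %vf
}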