@@ -198,3 +198,27 @@ define void @fadd_fcmp_select_copy(<vscale x 4 x float> %v, <vscale x 4 x i1> %c
198
198
call void @llvm.riscv.vsm (<vscale x 4 x i1 > %select , ptr %p , iXLen %vl )
199
199
ret void
200
200
}
201
+
202
; Mask produced by an icmp feeding vcompress.vm. The CHECK lines pin the
; expected RISC-V assembly: a single vsetvli (e32, m4, ta, ma) covers both
; the vmseq.vv that materializes the mask and the vcompress.vm that consumes
; it — presumably verifying VL/VTYPE sharing between the two; confirm against
; the RUN lines at the top of this file (outside this view).
; NOTE(review): CHECK lines are FileCheck directives, normally regenerated by
; update_llc_test_checks.py — do not hand-edit them.
define <vscale x 8 x i32> @vcompress_cmp(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, <vscale x 8 x i32> %c, iXLen %vl) {
; CHECK-LABEL: vcompress_cmp:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vmseq.vv v20, v8, v12
; CHECK-NEXT:    vcompress.vm v8, v16, v20
; CHECK-NEXT:    ret
  ; mask = (%a == %b), elementwise over the scalable vector
  %cmp = icmp eq <vscale x 8 x i32> %a, %b
  ; compress %c under the mask; poison passthru, VL from %vl
  %compress = call <vscale x 8 x i32> @llvm.riscv.vcompress.nxv8i32(<vscale x 8 x i32> poison, <vscale x 8 x i32> %c, <vscale x 8 x i1> %cmp, iXLen %vl)
  ret <vscale x 8 x i32> %compress
}
213
+
214
; Data operand produced by an add feeding vcompress.vm, with the mask passed
; in directly (arriving in v0 per the RISC-V vector calling convention shown
; by the CHECK lines). A single vsetvli covers both vadd.vv and vcompress.vm
; — presumably the same VL/VTYPE-sharing property as vcompress_cmp above,
; exercised on the source operand instead of the mask; confirm against the
; RUN lines at the top of this file (outside this view).
; NOTE(review): CHECK lines are FileCheck directives, normally regenerated by
; update_llc_test_checks.py — do not hand-edit them.
define <vscale x 8 x i32> @vcompress_add(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, <vscale x 8 x i1> %c, iXLen %vl) {
; CHECK-LABEL: vcompress_add:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vadd.vv v12, v8, v12
; CHECK-NEXT:    vcompress.vm v8, v12, v0
; CHECK-NEXT:    ret
  ; source vector = %a + %b
  %add = add <vscale x 8 x i32> %a, %b
  ; compress the sum under caller-supplied mask %c; poison passthru, VL from %vl
  %compress = call <vscale x 8 x i32> @llvm.riscv.vcompress.nxv8i32(<vscale x 8 x i32> poison, <vscale x 8 x i32> %add, <vscale x 8 x i1> %c, iXLen %vl)
  ret <vscale x 8 x i32> %compress
}
0 commit comments