Skip to content

Commit 9c02d66

Browse files
authored
[LegalizeTypes][VP] Teach isVPBinaryOp to recognize vp.sadd/saddu/ssub/ssubu.sat (#154047)
These VP intrinsics are also VP binary operations, similar to https://reviews.llvm.org/D135753.
1 parent 4a3b699 commit 9c02d66

File tree

5 files changed

+4
-48
lines changed

5 files changed

+4
-48
lines changed

llvm/include/llvm/IR/VPIntrinsics.def

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -278,24 +278,28 @@ END_REGISTER_VP(vp_fshr, VP_FSHR)
278278

279279
// llvm.vp.sadd.sat(x,y,mask,vlen)
280280
BEGIN_REGISTER_VP(vp_sadd_sat, 2, 3, VP_SADDSAT, -1)
281+
VP_PROPERTY_BINARYOP
281282
VP_PROPERTY_FUNCTIONAL_INTRINSIC(sadd_sat)
282283
VP_PROPERTY_FUNCTIONAL_SDOPC(SADDSAT)
283284
END_REGISTER_VP(vp_sadd_sat, VP_SADDSAT)
284285

285286
// llvm.vp.uadd.sat(x,y,mask,vlen)
286287
BEGIN_REGISTER_VP(vp_uadd_sat, 2, 3, VP_UADDSAT, -1)
288+
VP_PROPERTY_BINARYOP
287289
VP_PROPERTY_FUNCTIONAL_INTRINSIC(uadd_sat)
288290
VP_PROPERTY_FUNCTIONAL_SDOPC(UADDSAT)
289291
END_REGISTER_VP(vp_uadd_sat, VP_UADDSAT)
290292

291293
// llvm.vp.ssub.sat(x,y,mask,vlen)
292294
BEGIN_REGISTER_VP(vp_ssub_sat, 2, 3, VP_SSUBSAT, -1)
295+
VP_PROPERTY_BINARYOP
293296
VP_PROPERTY_FUNCTIONAL_INTRINSIC(ssub_sat)
294297
VP_PROPERTY_FUNCTIONAL_SDOPC(SSUBSAT)
295298
END_REGISTER_VP(vp_ssub_sat, VP_SSUBSAT)
296299

297300
// llvm.vp.usub.sat(x,y,mask,vlen)
298301
BEGIN_REGISTER_VP(vp_usub_sat, 2, 3, VP_USUBSAT, -1)
302+
VP_PROPERTY_BINARYOP
299303
VP_PROPERTY_FUNCTIONAL_INTRINSIC(usub_sat)
300304
VP_PROPERTY_FUNCTIONAL_SDOPC(USUBSAT)
301305
END_REGISTER_VP(vp_usub_sat, VP_USUBSAT)

llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll

Lines changed: 0 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -434,19 +434,12 @@ define <256 x i8> @vsadd_vi_v258i8_evl129(<256 x i8> %va, <256 x i1> %m) {
434434
ret <256 x i8> %v
435435
}
436436

437-
; FIXME: The upper half is doing nothing.
438-
439437
define <256 x i8> @vsadd_vi_v258i8_evl128(<256 x i8> %va, <256 x i1> %m) {
440438
; CHECK-LABEL: vsadd_vi_v258i8_evl128:
441439
; CHECK: # %bb.0:
442-
; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, ma
443-
; CHECK-NEXT: vlm.v v24, (a0)
444440
; CHECK-NEXT: li a0, 128
445441
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
446442
; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
447-
; CHECK-NEXT: vmv1r.v v0, v24
448-
; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, ma
449-
; CHECK-NEXT: vsadd.vi v16, v16, -1, v0.t
450443
; CHECK-NEXT: ret
451444
%v = call <256 x i8> @llvm.vp.sadd.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 128)
452445
ret <256 x i8> %v
@@ -1418,13 +1411,8 @@ define <32 x i64> @vsadd_vi_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
14181411
define <32 x i64> @vsadd_vx_v32i64_evl12(<32 x i64> %va, <32 x i1> %m) {
14191412
; CHECK-LABEL: vsadd_vx_v32i64_evl12:
14201413
; CHECK: # %bb.0:
1421-
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
1422-
; CHECK-NEXT: vslidedown.vi v24, v0, 2
14231414
; CHECK-NEXT: vsetivli zero, 12, e64, m8, ta, ma
14241415
; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
1425-
; CHECK-NEXT: vmv1r.v v0, v24
1426-
; CHECK-NEXT: vsetivli zero, 0, e64, m8, ta, ma
1427-
; CHECK-NEXT: vsadd.vi v16, v16, -1, v0.t
14281416
; CHECK-NEXT: ret
14291417
%v = call <32 x i64> @llvm.vp.sadd.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 12)
14301418
ret <32 x i64> %v

llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll

Lines changed: 0 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -430,19 +430,12 @@ define <256 x i8> @vsaddu_vi_v258i8_evl129(<256 x i8> %va, <256 x i1> %m) {
430430
ret <256 x i8> %v
431431
}
432432

433-
; FIXME: The upper half is doing nothing.
434-
435433
define <256 x i8> @vsaddu_vi_v258i8_evl128(<256 x i8> %va, <256 x i1> %m) {
436434
; CHECK-LABEL: vsaddu_vi_v258i8_evl128:
437435
; CHECK: # %bb.0:
438-
; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, ma
439-
; CHECK-NEXT: vlm.v v24, (a0)
440436
; CHECK-NEXT: li a0, 128
441437
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
442438
; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
443-
; CHECK-NEXT: vmv1r.v v0, v24
444-
; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, ma
445-
; CHECK-NEXT: vsaddu.vi v16, v16, -1, v0.t
446439
; CHECK-NEXT: ret
447440
%v = call <256 x i8> @llvm.vp.uadd.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 128)
448441
ret <256 x i8> %v
@@ -1414,13 +1407,8 @@ define <32 x i64> @vsaddu_vi_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
14141407
define <32 x i64> @vsaddu_vx_v32i64_evl12(<32 x i64> %va, <32 x i1> %m) {
14151408
; CHECK-LABEL: vsaddu_vx_v32i64_evl12:
14161409
; CHECK: # %bb.0:
1417-
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
1418-
; CHECK-NEXT: vslidedown.vi v24, v0, 2
14191410
; CHECK-NEXT: vsetivli zero, 12, e64, m8, ta, ma
14201411
; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
1421-
; CHECK-NEXT: vmv1r.v v0, v24
1422-
; CHECK-NEXT: vsetivli zero, 0, e64, m8, ta, ma
1423-
; CHECK-NEXT: vsaddu.vi v16, v16, -1, v0.t
14241412
; CHECK-NEXT: ret
14251413
%v = call <32 x i64> @llvm.vp.uadd.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 12)
14261414
ret <32 x i64> %v

llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll

Lines changed: 0 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -449,20 +449,13 @@ define <256 x i8> @vssub_vi_v258i8_evl129(<256 x i8> %va, <256 x i1> %m) {
449449
ret <256 x i8> %v
450450
}
451451

452-
; FIXME: The upper half is doing nothing.
453-
454452
define <256 x i8> @vssub_vi_v258i8_evl128(<256 x i8> %va, <256 x i1> %m) {
455453
; CHECK-LABEL: vssub_vi_v258i8_evl128:
456454
; CHECK: # %bb.0:
457-
; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, ma
458-
; CHECK-NEXT: vlm.v v24, (a0)
459455
; CHECK-NEXT: li a0, 128
460456
; CHECK-NEXT: li a1, -1
461457
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
462458
; CHECK-NEXT: vssub.vx v8, v8, a1, v0.t
463-
; CHECK-NEXT: vmv1r.v v0, v24
464-
; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, ma
465-
; CHECK-NEXT: vssub.vx v16, v16, a1, v0.t
466459
; CHECK-NEXT: ret
467460
%v = call <256 x i8> @llvm.vp.ssub.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 128)
468461
ret <256 x i8> %v
@@ -1460,14 +1453,9 @@ define <32 x i64> @vssub_vi_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
14601453
define <32 x i64> @vssub_vx_v32i64_evl12(<32 x i64> %va, <32 x i1> %m) {
14611454
; CHECK-LABEL: vssub_vx_v32i64_evl12:
14621455
; CHECK: # %bb.0:
1463-
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
1464-
; CHECK-NEXT: vslidedown.vi v24, v0, 2
14651456
; CHECK-NEXT: li a0, -1
14661457
; CHECK-NEXT: vsetivli zero, 12, e64, m8, ta, ma
14671458
; CHECK-NEXT: vssub.vx v8, v8, a0, v0.t
1468-
; CHECK-NEXT: vmv1r.v v0, v24
1469-
; CHECK-NEXT: vsetivli zero, 0, e64, m8, ta, ma
1470-
; CHECK-NEXT: vssub.vx v16, v16, a0, v0.t
14711459
; CHECK-NEXT: ret
14721460
%v = call <32 x i64> @llvm.vp.ssub.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 12)
14731461
ret <32 x i64> %v

llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll

Lines changed: 0 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -444,20 +444,13 @@ define <256 x i8> @vssubu_vi_v258i8_evl129(<256 x i8> %va, <256 x i1> %m) {
444444
ret <256 x i8> %v
445445
}
446446

447-
; FIXME: The upper half is doing nothing.
448-
449447
define <256 x i8> @vssubu_vi_v258i8_evl128(<256 x i8> %va, <256 x i1> %m) {
450448
; CHECK-LABEL: vssubu_vi_v258i8_evl128:
451449
; CHECK: # %bb.0:
452-
; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, ma
453-
; CHECK-NEXT: vlm.v v24, (a0)
454450
; CHECK-NEXT: li a0, 128
455451
; CHECK-NEXT: li a1, -1
456452
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
457453
; CHECK-NEXT: vssubu.vx v8, v8, a1, v0.t
458-
; CHECK-NEXT: vmv1r.v v0, v24
459-
; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, ma
460-
; CHECK-NEXT: vssubu.vx v16, v16, a1, v0.t
461454
; CHECK-NEXT: ret
462455
%v = call <256 x i8> @llvm.vp.usub.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 128)
463456
ret <256 x i8> %v
@@ -1455,14 +1448,9 @@ define <32 x i64> @vssubu_vi_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
14551448
define <32 x i64> @vssubu_vx_v32i64_evl12(<32 x i64> %va, <32 x i1> %m) {
14561449
; CHECK-LABEL: vssubu_vx_v32i64_evl12:
14571450
; CHECK: # %bb.0:
1458-
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
1459-
; CHECK-NEXT: vslidedown.vi v24, v0, 2
14601451
; CHECK-NEXT: li a0, -1
14611452
; CHECK-NEXT: vsetivli zero, 12, e64, m8, ta, ma
14621453
; CHECK-NEXT: vssubu.vx v8, v8, a0, v0.t
1463-
; CHECK-NEXT: vmv1r.v v0, v24
1464-
; CHECK-NEXT: vsetivli zero, 0, e64, m8, ta, ma
1465-
; CHECK-NEXT: vssubu.vx v16, v16, a0, v0.t
14661454
; CHECK-NEXT: ret
14671455
%v = call <32 x i64> @llvm.vp.usub.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 12)
14681456
ret <32 x i64> %v

0 commit comments

Comments (0)