
Commit 8c5cab3

Update tests and add a oneuse check
1 parent db9d4ea · commit 8c5cab3

File tree

3 files changed: 142 additions, 137 deletions

llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp

Lines changed: 4 additions & 13 deletions
@@ -10826,20 +10826,11 @@ static SDValue combineShiftToMULH(SDNode *N, const SDLoc &DL, SelectionDAG &DAG,
 
   SDValue MulhRightOp;
   if (LeftOp.getOpcode() != RightOp.getOpcode()) {
-    if (ConstantSDNode *Constant = isConstOrConstSplat(RightOp)) {
-      unsigned ActiveBits = IsSignExt
-                                ? Constant->getAPIntValue().getSignificantBits()
-                                : Constant->getAPIntValue().getActiveBits();
-      if (ActiveBits > NarrowVTSize)
-        return SDValue();
-      MulhRightOp = DAG.getConstant(
-          Constant->getAPIntValue().trunc(NarrowVT.getScalarSizeInBits()), DL,
-          NarrowVT);
-    } else if (IsZeroExt &&
-               DAG.computeKnownBits(RightOp).countMinLeadingZeros() >=
-                   NarrowVTSize) {
+    if (IsZeroExt && ShiftOperand.hasOneUse() &&
+        DAG.computeKnownBits(RightOp).countMinLeadingZeros() >= NarrowVTSize) {
       MulhRightOp = DAG.getNode(ISD::TRUNCATE, DL, NarrowVT, RightOp);
-    } else if (IsSignExt && DAG.ComputeNumSignBits(RightOp) > NarrowVTSize) {
+    } else if (IsSignExt && ShiftOperand.hasOneUse() &&
+               DAG.ComputeNumSignBits(RightOp) > NarrowVTSize) {
       MulhRightOp = DAG.getNode(ISD::TRUNCATE, DL, NarrowVT, RightOp);
     } else {
       return SDValue();
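For context beyond what the hunk shows: combineShiftToMULH matches a multiply whose result is shifted right by the narrow bit width, with one operand an extend from the narrow type, and rewrites the chain as a MULHU/MULHS on the narrow type. The sketch below is scalar LLVM IR, not taken from the commit; the function names are hypothetical, and it assumes ShiftOperand is the widened multiply feeding the shift. It shows the single-use shape the combine targets and the multi-use shape the new hasOneUse() guard declines: a multiply with other users must be kept at full width anyway, so forming a MULH beside it would add a second multiply rather than replace one.

; Single-use shape: the wide mul's only user is the shift, so the whole
; chain can become a narrow high-half multiply. RightOp (%wb) is not an
; extend, but computeKnownBits proves its top 32 bits are zero.
define i32 @hi_half(i32 %a, i64 %b) {
  %wa = zext i32 %a to i64
  %wb = and i64 %b, 4294967295
  %mul = mul i64 %wa, %wb
  %hi = lshr i64 %mul, 32
  %r = trunc i64 %hi to i32
  ret i32 %r
}

; Multi-use shape: %mul is also consumed at full width, so the wide
; multiply survives regardless; ShiftOperand.hasOneUse() now bails out
; instead of emitting a redundant MULH next to it.
define i64 @hi_half_and_full(i32 %a, i64 %b) {
  %wa = zext i32 %a to i64
  %wb = and i64 %b, 4294967295
  %mul = mul i64 %wa, %wb
  %hi = lshr i64 %mul, 32
  %sum = add i64 %mul, %hi        ; second use of %mul
  ret i64 %sum
}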

llvm/test/CodeGen/RISCV/rvv/vmulhu-sdnode.ll

Lines changed: 23 additions & 48 deletions
@@ -48,18 +48,11 @@ define <vscale x 1 x i32> @vmulhu_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
 }
 
 define <vscale x 1 x i32> @vmulhu_vi_nxv1i32_1(<vscale x 1 x i32> %va) {
-; RV32-LABEL: vmulhu_vi_nxv1i32_1:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
-; RV32-NEXT:    vsrl.vi v8, v8, 28
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: vmulhu_vi_nxv1i32_1:
-; RV64:       # %bb.0:
-; RV64-NEXT:    li a0, 16
-; RV64-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
-; RV64-NEXT:    vmulhu.vx v8, v8, a0
-; RV64-NEXT:    ret
+; CHECK-LABEL: vmulhu_vi_nxv1i32_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vsrl.vi v8, v8, 28
+; CHECK-NEXT:    ret
   %vb = zext <vscale x 1 x i32> splat (i32 16) to <vscale x 1 x i64>
   %vc = zext <vscale x 1 x i32> %va to <vscale x 1 x i64>
   %vd = mul <vscale x 1 x i64> %vb, %vc
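Why both targets now expect vsrl.vi 28 rather than a high-half multiply: for a 32-bit element x, (zext(x) * 16) >> 32 = (x << 4) >> 32 = x >> 28, so an unsigned high-half multiply by splat(16) folds to a plain logical shift right by 28. As the deleted checks show, RV32 already reached that fold while RV64 materialized 16 and emitted vmulhu.vx; with the output now identical on both targets, the per-target prefixes collapse into a common CHECK. The same reasoning applies to each hunk below.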
@@ -114,18 +107,11 @@ define <vscale x 2 x i32> @vmulhu_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
 }
 
 define <vscale x 2 x i32> @vmulhu_vi_nxv2i32_1(<vscale x 2 x i32> %va) {
-; RV32-LABEL: vmulhu_vi_nxv2i32_1:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
-; RV32-NEXT:    vsrl.vi v8, v8, 28
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: vmulhu_vi_nxv2i32_1:
-; RV64:       # %bb.0:
-; RV64-NEXT:    li a0, 16
-; RV64-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
-; RV64-NEXT:    vmulhu.vx v8, v8, a0
-; RV64-NEXT:    ret
+; CHECK-LABEL: vmulhu_vi_nxv2i32_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vsrl.vi v8, v8, 28
+; CHECK-NEXT:    ret
   %vb = zext <vscale x 2 x i32> splat (i32 16) to <vscale x 2 x i64>
   %vc = zext <vscale x 2 x i32> %va to <vscale x 2 x i64>
   %vd = mul <vscale x 2 x i64> %vb, %vc
@@ -180,18 +166,11 @@ define <vscale x 4 x i32> @vmulhu_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
 }
 
 define <vscale x 4 x i32> @vmulhu_vi_nxv4i32_1(<vscale x 4 x i32> %va) {
-; RV32-LABEL: vmulhu_vi_nxv4i32_1:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
-; RV32-NEXT:    vsrl.vi v8, v8, 28
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: vmulhu_vi_nxv4i32_1:
-; RV64:       # %bb.0:
-; RV64-NEXT:    li a0, 16
-; RV64-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
-; RV64-NEXT:    vmulhu.vx v8, v8, a0
-; RV64-NEXT:    ret
+; CHECK-LABEL: vmulhu_vi_nxv4i32_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vsrl.vi v8, v8, 28
+; CHECK-NEXT:    ret
   %vb = zext <vscale x 4 x i32> splat (i32 16) to <vscale x 4 x i64>
   %vc = zext <vscale x 4 x i32> %va to <vscale x 4 x i64>
   %vd = mul <vscale x 4 x i64> %vb, %vc
@@ -246,22 +225,18 @@ define <vscale x 8 x i32> @vmulhu_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
 }
 
 define <vscale x 8 x i32> @vmulhu_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
-; RV32-LABEL: vmulhu_vi_nxv8i32_1:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
-; RV32-NEXT:    vsrl.vi v8, v8, 28
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: vmulhu_vi_nxv8i32_1:
-; RV64:       # %bb.0:
-; RV64-NEXT:    li a0, 16
-; RV64-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
-; RV64-NEXT:    vmulhu.vx v8, v8, a0
-; RV64-NEXT:    ret
+; CHECK-LABEL: vmulhu_vi_nxv8i32_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vsrl.vi v8, v8, 28
+; CHECK-NEXT:    ret
   %vb = zext <vscale x 8 x i32> splat (i32 16) to <vscale x 8 x i64>
   %vc = zext <vscale x 8 x i32> %va to <vscale x 8 x i64>
   %vd = mul <vscale x 8 x i64> %vb, %vc
   %ve = lshr <vscale x 8 x i64> %vd, splat (i64 32)
   %vf = trunc <vscale x 8 x i64> %ve to <vscale x 8 x i32>
   ret <vscale x 8 x i32> %vf
 }
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; RV32: {{.*}}
+; RV64: {{.*}}
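A note on the trailing NOTE block: utils/update_llc_test_checks.py appends it when RUN lines still name prefixes (here RV32 and RV64) that no longer match any generated assertions; the {{.*}} placeholders keep FileCheck from erroring on the now-unused prefixes. A hypothetical RUN header for a test of this shape is sketched below; the exact flags in the real file may differ:

; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64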
