18 changes: 15 additions & 3 deletions llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -26193,9 +26193,21 @@ static SDValue performFlagSettingCombine(SDNode *N,
   }
 
   // Combine identical generic nodes into this node, re-using the result.
-  if (SDNode *Generic = DCI.DAG.getNodeIfExists(
-          GenericOpcode, DCI.DAG.getVTList(VT), {LHS, RHS}))
-    DCI.CombineTo(Generic, SDValue(N, 0));
+  auto CombineWithExistingGeneric = [&](SDValue Op0, SDValue Op1) {
+    if (SDNode *Generic = DCI.DAG.getNodeIfExists(
+            GenericOpcode, DCI.DAG.getVTList(VT), {Op0, Op1})) {
+      DCI.CombineTo(Generic, SDValue(N, 0));
+      return true;
+    }
+    return false;
+  };
+
+  if (CombineWithExistingGeneric(LHS, RHS))
+    return SDValue();
+
+  if (DCI.DAG.getTargetLoweringInfo().isCommutativeBinOp(GenericOpcode) &&
+      CombineWithExistingGeneric(RHS, LHS))

Collaborator (paulwalker-arm):
Would it be better to extend getNodeIfExists to support commuting its operands? If you're worried that some uses may expect the exact operand order, you could add an AllowCommute flag or a dedicated function with that ability. Doing this would make it trivial to migrate other call sites where commuting operands has value.

Collaborator Author:
That's a good idea. Would it be OK if I look at that as a follow-up?

paulwalker-arm (Collaborator), Oct 9, 2025:
As discussed offline, it's not a strongly held view, but it seems wrong to push this PR and then immediately revert it to implement my suggestion.

+    return SDValue();
 
   return SDValue();
 }
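
The suggestion in the thread above, making the lookup try commuted operands, could look roughly like the sketch below. This is only a sketch: getBinaryNodeIfExists and its AllowCommute parameter are hypothetical names invented here for illustration, not existing LLVM API, and an upstream change would more likely extend SelectionDAG::getNodeIfExists itself.

// Hypothetical sketch of a commute-aware lookup helper; it is not part of
// this patch or of the current LLVM API.
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"

using namespace llvm;

static SDNode *getBinaryNodeIfExists(SelectionDAG &DAG, unsigned Opcode,
                                     SDVTList VTs, SDValue Op0, SDValue Op1,
                                     bool AllowCommute) {
  // Try the exact operand order first, matching what getNodeIfExists does
  // today.
  if (SDNode *Existing = DAG.getNodeIfExists(Opcode, VTs, {Op0, Op1}))
    return Existing;
  // When the caller allows it and the opcode is commutative, retry with the
  // operands swapped so an (RHS, LHS) user finds the same node as (LHS, RHS).
  if (AllowCommute && DAG.getTargetLoweringInfo().isCommutativeBinOp(Opcode))
    return DAG.getNodeIfExists(Opcode, VTs, {Op1, Op0});
  return nullptr;
}

With a helper along these lines, whether as a free function or folded into getNodeIfExists itself, the lambda added in this patch would reduce to a single lookup, and other combines that currently miss commuted operands could opt in wherever the exact operand order does not matter.
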
6 changes: 2 additions & 4 deletions llvm/test/CodeGen/AArch64/adds_cmn.ll
@@ -22,10 +22,8 @@ entry:
 define { i32, i32 } @adds_cmn_c(i32 noundef %x, i32 noundef %y) {
 ; CHECK-LABEL: adds_cmn_c:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    cmn w0, w1
-; CHECK-NEXT:    add w1, w1, w0
-; CHECK-NEXT:    cset w8, lo
-; CHECK-NEXT:    mov w0, w8
+; CHECK-NEXT:    adds w1, w0, w1
+; CHECK-NEXT:    cset w0, lo
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)

6 changes: 2 additions & 4 deletions llvm/test/CodeGen/AArch64/sat-add.ll
@@ -290,8 +290,7 @@ define i32 @unsigned_sat_variable_i32_using_cmp_sum(i32 %x, i32 %y) {
 define i32 @unsigned_sat_variable_i32_using_cmp_notval(i32 %x, i32 %y) {
 ; CHECK-LABEL: unsigned_sat_variable_i32_using_cmp_notval:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    add w8, w0, w1
-; CHECK-NEXT:    cmn w1, w0
+; CHECK-NEXT:    adds w8, w1, w0
 ; CHECK-NEXT:    csinv w0, w8, wzr, lo
 ; CHECK-NEXT:    ret
   %noty = xor i32 %y, -1

@@ -331,8 +330,7 @@ define i64 @unsigned_sat_variable_i64_using_cmp_sum(i64 %x, i64 %y) {
 define i64 @unsigned_sat_variable_i64_using_cmp_notval(i64 %x, i64 %y) {
 ; CHECK-LABEL: unsigned_sat_variable_i64_using_cmp_notval:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    add x8, x0, x1
-; CHECK-NEXT:    cmn x1, x0
+; CHECK-NEXT:    adds x8, x1, x0
 ; CHECK-NEXT:    csinv x0, x8, xzr, lo
 ; CHECK-NEXT:    ret
   %noty = xor i64 %y, -1