Skip to content
Merged
Show file tree
Hide file tree
Changes from 7 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
77 changes: 77 additions & 0 deletions llvm/lib/Target/AArch64/AArch64InstrInfo.td
Original file line number Diff line number Diff line change
Expand Up @@ -6805,6 +6805,83 @@ defm : FPToIntegerPats<fp_to_uint, fp_to_uint_sat, fp_to_uint_sat_gi, ftrunc, "F
defm : FPToIntegerPats<fp_to_sint, fp_to_sint_sat, fp_to_sint_sat_gi, fround, "FCVTAS">;
defm : FPToIntegerPats<fp_to_uint, fp_to_uint_sat, fp_to_uint_sat_gi, fround, "FCVTAU">;

// For global-isel we can use register classes to determine
// which FCVT instruction to use.
let Predicates = [HasFPRCVT] in {
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I am not sure if it will help, but maybe we could split the PR into patterns only for GlobalISel and another for the standard lowering/SelectionDAG?
I imagine that the ones from lines:
6810 till 6823
and
6848 till 6861
are only for GlobalISel while the other ones are for SelectionDAG

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes, all the patterns without a bitconvert node are for GlobalISel. May I ask what the value would be in splitting the patch? As there are only a couple of GlobalISel patterns added and the tests can be shared anyway, it makes sense to me to do it in one PR, but I can do that if you insist. The previous patches were also not split and that was fine.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The main reason was to solve the issue raised by @arsenm about the complexity to review of the patch.

def : Pat<(i64 (any_lround f16:$Rn)),
(FCVTASDHr f16:$Rn)>;
def : Pat<(i64 (any_llround f16:$Rn)),
(FCVTASDHr f16:$Rn)>;
def : Pat<(i64 (any_lround f32:$Rn)),
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Should we not add patterns for GlobalISel too? It seems to me that all the tests are always falling back to SelectionDAG.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I am not exactly sure, I understand your comment. If you are asking whether we should add patterns here for other type of lround, like for example f16 to i64 type, I don't think that is necessary as GlobalISel doesn't support those types anyway yet for these nodes. That's why you see it fallback to SDAG.

(FCVTASDSr f32:$Rn)>;
def : Pat<(i64 (any_llround f32:$Rn)),
(FCVTASDSr f32:$Rn)>;
}
// f64 -> i64 lround/llround on the FPR side: FCVTAS (convert to signed,
// round to nearest with ties away) implements lround semantics directly,
// so no FPRCVT feature is needed for the same-width f64 -> i64 case.
def : Pat<(i64 (any_lround f64:$Rn)),
(FCVTASv1i64 f64:$Rn)>;
def : Pat<(i64 (any_llround f64:$Rn)),
(FCVTASv1i64 f64:$Rn)>;

// SelectionDAG forms: the result is wrapped in a bitconvert to an FP type,
// which marks values that stay in FP registers. With FEAT_FPRCVT the
// cross-size FPR-to-FPR FCVTAS variants (e.g. h -> d) can be selected and
// the GPR round-trip is avoided.
let Predicates = [HasFPRCVT] in {
def : Pat<(f32 (bitconvert (i32 (any_lround f16:$Rn)))),
(FCVTASSHr f16:$Rn)>;
def : Pat<(f64 (bitconvert (i64 (any_lround f16:$Rn)))),
(FCVTASDHr f16:$Rn)>;
def : Pat<(f64 (bitconvert (i64 (any_llround f16:$Rn)))),
(FCVTASDHr f16:$Rn)>;
def : Pat<(f64 (bitconvert (i64 (any_lround f32:$Rn)))),
(FCVTASDSr f32:$Rn)>;
def : Pat<(f32 (bitconvert (i32 (any_lround f64:$Rn)))),
(FCVTASSDr f64:$Rn)>;
def : Pat<(f64 (bitconvert (i64 (any_llround f32:$Rn)))),
(FCVTASDSr f32:$Rn)>;
}
// Same-width conversions (f32 -> i32, f64 -> i64) use the plain SIMD-scalar
// FCVTAS forms and need no FPRCVT predicate.
def : Pat<(f32 (bitconvert (i32 (any_lround f32:$Rn)))),
(FCVTASv1i32 f32:$Rn)>;
def : Pat<(f64 (bitconvert (i64 (any_lround f64:$Rn)))),
(FCVTASv1i64 f64:$Rn)>;
def : Pat<(f64 (bitconvert (i64 (any_llround f64:$Rn)))),
(FCVTASv1i64 f64:$Rn)>;

// For GlobalISel the register banks chosen in RegBankSelect let us pick the
// FCVT instruction from register classes alone (no bitconvert wrapper).
// lrint/llrint = round to integral using the current rounding mode (FRINTX,
// which also signals FE_INEXACT), then an exact convert-toward-zero (FCVTZS).
let Predicates = [HasFPRCVT] in {
def : Pat<(i64 (any_lrint f16:$Rn)),
(FCVTZSDHr (FRINTXHr f16:$Rn))>;
def : Pat<(i64 (any_llrint f16:$Rn)),
(FCVTZSDHr (FRINTXHr f16:$Rn))>;
def : Pat<(i64 (any_lrint f32:$Rn)),
(FCVTZSDSr (FRINTXSr f32:$Rn))>;
def : Pat<(i64 (any_llrint f32:$Rn)),
(FCVTZSDSr (FRINTXSr f32:$Rn))>;
}
// Same-width f64 -> i64 case needs no FPRCVT feature.
def : Pat<(i64 (any_lrint f64:$Rn)),
(FCVTZSv1i64 (FRINTXDr f64:$Rn))>;
def : Pat<(i64 (any_llrint f64:$Rn)),
(FCVTZSv1i64 (FRINTXDr f64:$Rn))>;

// SelectionDAG forms: bitconvert marks results that stay in FP registers so
// the cross-size FPR-to-FPR FCVTZS variants (FEAT_FPRCVT) can be selected.
let Predicates = [HasFPRCVT] in {
def : Pat<(f32 (bitconvert (i32 (any_lrint f16:$Rn)))),
(FCVTZSSHr (FRINTXHr f16:$Rn))>;
def : Pat<(f64 (bitconvert (i64 (any_lrint f16:$Rn)))),
(FCVTZSDHr (FRINTXHr f16:$Rn))>;
def : Pat<(f64 (bitconvert (i64 (any_llrint f16:$Rn)))),
(FCVTZSDHr (FRINTXHr f16:$Rn))>;
def : Pat<(f64 (bitconvert (i64 (any_lrint f32:$Rn)))),
(FCVTZSDSr (FRINTXSr f32:$Rn))>;
def : Pat<(f32 (bitconvert (i32 (any_lrint f64:$Rn)))),
(FCVTZSSDr (FRINTXDr f64:$Rn))>;
def : Pat<(f64 (bitconvert (i64 (any_llrint f32:$Rn)))),
(FCVTZSDSr (FRINTXSr f32:$Rn))>;
}
// Same-width conversions use the plain SIMD-scalar FCVTZS forms.
def : Pat<(f32 (bitconvert (i32 (any_lrint f32:$Rn)))),
(FCVTZSv1i32 (FRINTXSr f32:$Rn))>;
def : Pat<(f64 (bitconvert (i64 (any_lrint f64:$Rn)))),
(FCVTZSv1i64 (FRINTXDr f64:$Rn))>;
def : Pat<(f64 (bitconvert (i64 (any_llrint f64:$Rn)))),
(FCVTZSv1i64 (FRINTXDr f64:$Rn))>;


// f16 -> s16 conversions
let Predicates = [HasFullFP16] in {
def : Pat<(i16(fp_to_sint_sat_gi f16:$Rn)), (FCVTZSv1f16 f16:$Rn)>;
Expand Down
18 changes: 5 additions & 13 deletions llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -859,7 +859,11 @@ AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
case TargetOpcode::G_FPTOSI_SAT:
case TargetOpcode::G_FPTOUI_SAT:
case TargetOpcode::G_FPTOSI:
case TargetOpcode::G_FPTOUI: {
case TargetOpcode::G_FPTOUI:
case TargetOpcode::G_INTRINSIC_LRINT:
case TargetOpcode::G_INTRINSIC_LLRINT:
case TargetOpcode::G_LROUND:
case TargetOpcode::G_LLROUND: {
LLT DstType = MRI.getType(MI.getOperand(0).getReg());
if (DstType.isVector())
break;
Expand All @@ -880,12 +884,6 @@ AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
OpRegBankIdx = {PMI_FirstGPR, PMI_FirstFPR};
break;
}
case TargetOpcode::G_INTRINSIC_LRINT:
case TargetOpcode::G_INTRINSIC_LLRINT:
if (MRI.getType(MI.getOperand(0).getReg()).isVector())
break;
OpRegBankIdx = {PMI_FirstGPR, PMI_FirstFPR};
break;
case TargetOpcode::G_FCMP: {
// If the result is a vector, it must use a FPR.
AArch64GenRegisterBankInfo::PartialMappingIdx Idx0 =
Expand Down Expand Up @@ -1225,12 +1223,6 @@ AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
}
break;
}
case TargetOpcode::G_LROUND:
case TargetOpcode::G_LLROUND: {
// Source is always floating point and destination is always integer.
OpRegBankIdx = {PMI_FirstGPR, PMI_FirstFPR};
break;
}
}

// Finally construct the computed mapping.
Expand Down
4 changes: 2 additions & 2 deletions llvm/test/CodeGen/AArch64/GlobalISel/regbank-llround.mir
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ body: |
; CHECK: liveins: $d0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: %fpr:fpr(s64) = COPY $d0
; CHECK-NEXT: %llround:gpr(s64) = G_LLROUND %fpr(s64)
; CHECK-NEXT: %llround:fpr(s64) = G_LLROUND %fpr(s64)
; CHECK-NEXT: $d0 = COPY %llround(s64)
; CHECK-NEXT: RET_ReallyLR implicit $s0
%fpr:_(s64) = COPY $d0
Expand All @@ -35,7 +35,7 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: %gpr:gpr(s64) = COPY $x0
; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(s64) = COPY %gpr(s64)
; CHECK-NEXT: %llround:gpr(s64) = G_LLROUND [[COPY]](s64)
; CHECK-NEXT: %llround:fpr(s64) = G_LLROUND [[COPY]](s64)
; CHECK-NEXT: $d0 = COPY %llround(s64)
; CHECK-NEXT: RET_ReallyLR implicit $s0
%gpr:_(s64) = COPY $x0
Expand Down
4 changes: 2 additions & 2 deletions llvm/test/CodeGen/AArch64/GlobalISel/regbank-lround.mir
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ body: |
; CHECK: liveins: $d0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: %fpr:fpr(s64) = COPY $d0
; CHECK-NEXT: %lround:gpr(s64) = G_LROUND %fpr(s64)
; CHECK-NEXT: %lround:fpr(s64) = G_LROUND %fpr(s64)
; CHECK-NEXT: $d0 = COPY %lround(s64)
; CHECK-NEXT: RET_ReallyLR implicit $s0
%fpr:_(s64) = COPY $d0
Expand All @@ -35,7 +35,7 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: %gpr:gpr(s64) = COPY $x0
; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(s64) = COPY %gpr(s64)
; CHECK-NEXT: %lround:gpr(s64) = G_LROUND [[COPY]](s64)
; CHECK-NEXT: %lround:fpr(s64) = G_LROUND [[COPY]](s64)
; CHECK-NEXT: $d0 = COPY %lround(s64)
; CHECK-NEXT: RET_ReallyLR implicit $s0
%gpr:_(s64) = COPY $x0
Expand Down
199 changes: 199 additions & 0 deletions llvm/test/CodeGen/AArch64/arm64-cvt-simd-round-rint-strictfp.ll
Original file line number Diff line number Diff line change
@@ -0,0 +1,199 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc < %s -mtriple aarch64-unknown-unknown -mattr=+fprcvt,+fullfp16 | FileCheck %s --check-prefixes=CHECK

;
; (L/LL)Round constrained (strictfp)
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

experimental -> constrained fp or strictfp.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Done

;

; lround half -> i32; the bitcast to float keeps the result in an FPR,
; so a single cross-size fcvtas (FEAT_FPRCVT) is expected.
define float @lround_i32_f16_simd_exp(half %x) {
; CHECK-LABEL: lround_i32_f16_simd_exp:
; CHECK: // %bb.0:
; CHECK-NEXT: fcvtas s0, h0
; CHECK-NEXT: ret
%val = call i32 @llvm.experimental.constrained.lround.i32.f16(half %x, metadata !"fpexcept.strict")
%sum = bitcast i32 %val to float
ret float %sum
}

; lround half -> i64 kept in an FPR via bitcast to double.
define double @lround_i64_f16_simd_exp(half %x) {
; CHECK-LABEL: lround_i64_f16_simd_exp:
; CHECK: // %bb.0:
; CHECK-NEXT: fcvtas d0, h0
; CHECK-NEXT: ret
%val = call i64 @llvm.experimental.constrained.lround.i64.f16(half %x, metadata !"fpexcept.strict")
%bc = bitcast i64 %val to double
ret double %bc
}

; lround float -> i64 kept in an FPR via bitcast to double.
define double @lround_i64_f32_simd_exp(float %x) {
; CHECK-LABEL: lround_i64_f32_simd_exp:
; CHECK: // %bb.0:
; CHECK-NEXT: fcvtas d0, s0
; CHECK-NEXT: ret
%val = call i64 @llvm.experimental.constrained.lround.i64.f32(float %x, metadata !"fpexcept.strict")
%bc = bitcast i64 %val to double
ret double %bc
}

; lround double -> i32 kept in an FPR via bitcast to float.
define float @lround_i32_f64_simd_exp(double %x) {
; CHECK-LABEL: lround_i32_f64_simd_exp:
; CHECK: // %bb.0:
; CHECK-NEXT: fcvtas s0, d0
; CHECK-NEXT: ret
%val = call i32 @llvm.experimental.constrained.lround.i32.f64(double %x, metadata !"fpexcept.strict")
%bc = bitcast i32 %val to float
ret float %bc
}

; Same-width lround float -> i32: plain SIMD-scalar fcvtas, no FPRCVT needed.
define float @lround_i32_f32_simd_exp(float %x) {
; CHECK-LABEL: lround_i32_f32_simd_exp:
; CHECK: // %bb.0:
; CHECK-NEXT: fcvtas s0, s0
; CHECK-NEXT: ret
%val = call i32 @llvm.experimental.constrained.lround.i32.f32(float %x, metadata !"fpexcept.strict")
%bc = bitcast i32 %val to float
ret float %bc
}

; Same-width lround double -> i64: plain SIMD-scalar fcvtas.
define double @lround_i64_f64_simd_exp(double %x) {
; CHECK-LABEL: lround_i64_f64_simd_exp:
; CHECK: // %bb.0:
; CHECK-NEXT: fcvtas d0, d0
; CHECK-NEXT: ret
%val = call i64 @llvm.experimental.constrained.lround.i64.f64(double %x, metadata !"fpexcept.strict")
%bc = bitcast i64 %val to double
ret double %bc
}

; llround half -> i64 kept in an FPR via bitcast to double.
define double @llround_i64_f16_simd_exp(half %x) {
; CHECK-LABEL: llround_i64_f16_simd_exp:
; CHECK: // %bb.0:
; CHECK-NEXT: fcvtas d0, h0
; CHECK-NEXT: ret
%val = call i64 @llvm.experimental.constrained.llround.i64.f16(half %x, metadata !"fpexcept.strict")
%sum = bitcast i64 %val to double
ret double %sum
}

; llround float -> i64 kept in an FPR via bitcast to double.
define double @llround_i64_f32_simd_exp(float %x) {
; CHECK-LABEL: llround_i64_f32_simd_exp:
; CHECK: // %bb.0:
; CHECK-NEXT: fcvtas d0, s0
; CHECK-NEXT: ret
%val = call i64 @llvm.experimental.constrained.llround.i64.f32(float %x, metadata !"fpexcept.strict")
%bc = bitcast i64 %val to double
ret double %bc
}

; Same-width llround double -> i64: plain SIMD-scalar fcvtas.
define double @llround_i64_f64_simd_exp(double %x) {
; CHECK-LABEL: llround_i64_f64_simd_exp:
; CHECK: // %bb.0:
; CHECK-NEXT: fcvtas d0, d0
; CHECK-NEXT: ret
%val = call i64 @llvm.experimental.constrained.llround.i64.f64(double %x, metadata !"fpexcept.strict")
%bc = bitcast i64 %val to double
ret double %bc
}

;
; (L/LL)Rint constrained (strictfp)
;

; lrint half -> i32 kept in an FPR via bitcast to float: frintx (round to
; integral in the current mode, signalling inexact) then an exact fcvtzs.
define float @lrint_i32_f16_simd_exp(half %x) {
; CHECK-LABEL: lrint_i32_f16_simd_exp:
; CHECK: // %bb.0:
; CHECK-NEXT: frintx h0, h0
; CHECK-NEXT: fcvtzs s0, h0
; CHECK-NEXT: ret
%val = call i32 @llvm.experimental.constrained.lrint.i32.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict")
%sum = bitcast i32 %val to float
ret float %sum
}

; lrint half -> i64 kept in an FPR via bitcast to double.
; Fixed the intrinsic type suffix: it was ".i53.", which does not match the
; i64 return type of the call; constrained intrinsic names are mangled with
; the actual result/operand types (lrint.i64.f16).
define double @lrint_i64_f16_simd_exp(half %x) {
; CHECK-LABEL: lrint_i64_f16_simd_exp:
; CHECK: // %bb.0:
; CHECK-NEXT: frintx h0, h0
; CHECK-NEXT: fcvtzs d0, h0
; CHECK-NEXT: ret
%val = call i64 @llvm.experimental.constrained.lrint.i64.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict")
%bc = bitcast i64 %val to double
ret double %bc
}

; lrint float -> i64 kept in an FPR via bitcast to double.
define double @lrint_i64_f32_simd_exp(float %x) {
; CHECK-LABEL: lrint_i64_f32_simd_exp:
; CHECK: // %bb.0:
; CHECK-NEXT: frintx s0, s0
; CHECK-NEXT: fcvtzs d0, s0
; CHECK-NEXT: ret
%val = call i64 @llvm.experimental.constrained.lrint.i64.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict")
%bc = bitcast i64 %val to double
ret double %bc
}

; lrint double -> i32 kept in an FPR via bitcast to float.
define float @lrint_i32_f64_simd_exp(double %x) {
; CHECK-LABEL: lrint_i32_f64_simd_exp:
; CHECK: // %bb.0:
; CHECK-NEXT: frintx d0, d0
; CHECK-NEXT: fcvtzs s0, d0
; CHECK-NEXT: ret
%val = call i32 @llvm.experimental.constrained.lrint.i32.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict")
%bc = bitcast i32 %val to float
ret float %bc
}

; Same-width lrint float -> i32: plain SIMD-scalar fcvtzs, no FPRCVT needed.
define float @lrint_i32_f32_simd_exp(float %x) {
; CHECK-LABEL: lrint_i32_f32_simd_exp:
; CHECK: // %bb.0:
; CHECK-NEXT: frintx s0, s0
; CHECK-NEXT: fcvtzs s0, s0
; CHECK-NEXT: ret
%val = call i32 @llvm.experimental.constrained.lrint.i32.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict")
%bc = bitcast i32 %val to float
ret float %bc
}

; Same-width lrint double -> i64: plain SIMD-scalar fcvtzs.
define double @lrint_i64_f64_simd_exp(double %x) {
; CHECK-LABEL: lrint_i64_f64_simd_exp:
; CHECK: // %bb.0:
; CHECK-NEXT: frintx d0, d0
; CHECK-NEXT: fcvtzs d0, d0
; CHECK-NEXT: ret
%val = call i64 @llvm.experimental.constrained.lrint.i64.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict")
%bc = bitcast i64 %val to double
ret double %bc
}

; llrint half -> i64 kept in an FPR via bitcast to double.
define double @llrint_i64_f16_simd_exp(half %x) {
; CHECK-LABEL: llrint_i64_f16_simd_exp:
; CHECK: // %bb.0:
; CHECK-NEXT: frintx h0, h0
; CHECK-NEXT: fcvtzs d0, h0
; CHECK-NEXT: ret
%val = call i64 @llvm.experimental.constrained.llrint.i64.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict")
%sum = bitcast i64 %val to double
ret double %sum
}

; llrint float -> i64 kept in an FPR via bitcast to double.
define double @llrint_i64_f32_simd_exp(float %x) {
; CHECK-LABEL: llrint_i64_f32_simd_exp:
; CHECK: // %bb.0:
; CHECK-NEXT: frintx s0, s0
; CHECK-NEXT: fcvtzs d0, s0
; CHECK-NEXT: ret
%val = call i64 @llvm.experimental.constrained.llrint.i64.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict")
%bc = bitcast i64 %val to double
ret double %bc
}

; Same-width llrint double -> i64: plain SIMD-scalar fcvtzs.
define double @llrint_i64_f64_simd_exp(double %x) {
; CHECK-LABEL: llrint_i64_f64_simd_exp:
; CHECK: // %bb.0:
; CHECK-NEXT: frintx d0, d0
; CHECK-NEXT: fcvtzs d0, d0
; CHECK-NEXT: ret
%val = call i64 @llvm.experimental.constrained.llrint.i64.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict")
%bc = bitcast i64 %val to double
ret double %bc
}
Loading