[SelectionDAG] Optimize BSWAP yet again once more #165292
Open: AZero13 wants to merge 6 commits into llvm:main from AZero13:bswaps
Conversation
@llvm/pr-subscribers-llvm-selectiondag @llvm/pr-subscribers-backend-arm

Author: AZero13 (AZero13)

Changes

Patch is 36.00 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/165292.diff

14 Files Affected:
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index da4e40953b39a..03ea2d3158c5b 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -9901,15 +9901,36 @@ SDValue TargetLowering::expandBSWAP(SDNode *N, SelectionDAG &DAG) const {
case MVT::i32:
     // This is meant for ARM specifically, which has ROTR but no ROTL.
if (isOperationLegalOrCustom(ISD::ROTR, VT)) {
- SDValue Mask = DAG.getConstant(0x00FF00FF, dl, VT);
- // (x & 0x00FF00FF) rotr 8 | (x rotl 8) & 0x00FF00FF
- SDValue And = DAG.getNode(ISD::AND, dl, VT, Op, Mask);
- SDValue Rotr =
- DAG.getNode(ISD::ROTR, dl, VT, And, DAG.getConstant(8, dl, SHVT));
- SDValue Rotl =
- DAG.getNode(ISD::ROTR, dl, VT, Op, DAG.getConstant(24, dl, SHVT));
- SDValue And2 = DAG.getNode(ISD::AND, dl, VT, Rotl, Mask);
- return DAG.getNode(ISD::OR, dl, VT, Rotr, And2);
+ EVT OpVT = Op.getValueType();
+ SDValue Src = Op;
+
+ // ror rtmp, r0, #16
+ SDValue Ror16 = DAG.getNode(ISD::ROTR, dl, OpVT, Src,
+ DAG.getConstant(16, dl, SHVT));
+ // eor r1, r0, rtmp ; r1 = r0 ^ (r0 ror 16)
+ SDValue Xor1 = DAG.getNode(ISD::XOR, dl, OpVT, Src, Ror16);
+
+ // bic r1, r1, #0xff0000 (clear bits 16-23)
+ // BIC r1, r1, #0xff0000 becomes AND r1, r1, ~0x00ff0000
+ // So we need the negated value: ~0x00FF0000 = 0xFF00FFFF
+ SDValue Mask = DAG.getConstant(0xFF00FFFFu, dl, OpVT);
+ SDValue BicResult = DAG.getNode(ISD::AND, dl, OpVT, Xor1, Mask);
+
+ // mov r1, r1, lsr #8
+ SDValue Lsr8 = DAG.getNode(ISD::SRL, dl, OpVT, BicResult,
+ DAG.getConstant(8, dl, SHVT));
+
+ // ror r0, r0, #8
+ SDValue Ror8 = DAG.getNode(ISD::ROTR, dl, OpVT, Src,
+ DAG.getConstant(8, dl, SHVT));
+
+ // eor r0, Lsr8, Ror8
+ SDValue Result = DAG.getNode(ISD::XOR, dl, OpVT, Lsr8, Ror8);
+
+ if (OpVT != VT)
+ Result = DAG.getNode(ISD::TRUNCATE, dl, VT, Result);
+
+ return Result;
}
Tmp4 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(24, dl, SHVT));
Tmp3 = DAG.getNode(ISD::AND, dl, VT, Op,
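
For intuition, here is the same data flow as the expansion above, written as a standalone C++ sanity check; the helper names (rotr32, bswapViaRotations) are mine, not from the patch, and std::rotr (C++20 <bit>) could replace rotr32. Writing the input bytes MSB-first as a b c d shows why four operations suffice:

// Standalone sketch, not part of the patch: verifies that the
// four-operation sequence above computes a full byte swap.
#include <cassert>
#include <cstdint>

static uint32_t rotr32(uint32_t X, unsigned N) {
  return (X >> N) | (X << (32 - N)); // N is 8 or 16 here, never 0
}

// Input bytes (MSB first): a b c d.
static uint32_t bswapViaRotations(uint32_t X) {
  uint32_t T = X ^ rotr32(X, 16); // bytes: (a^c) (b^d) (a^c) (b^d)
  T &= 0xFF00FFFFu;               // the BIC: clear bits 16-23
  T >>= 8;                        // bytes: 0 (a^c) 0 (a^c)
  return T ^ rotr32(X, 8);        // x ror 8 is d a b c; XOR gives d c b a
}

int main() {
  assert(bswapViaRotations(0xAABBCCDDu) == 0xDDCCBBAAu);
  for (uint64_t X = 0; X <= 0xFFFFFFFFull; X += 0x01020304ull)
    assert(bswapViaRotations((uint32_t)X) == __builtin_bswap32((uint32_t)X));
  return 0;
}
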
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 313ae3d68fb83..6c994f36c9833 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -118,6 +118,7 @@ using namespace llvm;
#define DEBUG_TYPE "arm-isel"
STATISTIC(NumTailCalls, "Number of tail calls");
+STATISTIC(NumOptimizedImms, "Number of times immediates were optimized");
STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt");
STATISTIC(NumLoopByVals, "Number of loops generated for byval arguments");
STATISTIC(NumConstpoolPromoted,
@@ -142,6 +143,12 @@ static cl::opt<unsigned> ConstpoolPromotionMaxTotal(
cl::desc("Maximum size of ALL constants to promote into a constant pool"),
cl::init(128));
+static cl::opt<bool>
+ EnableOptimizeLogicalImm("arm-enable-logical-imm", cl::Hidden,
+ cl::desc("Enable ARM logical imm instruction "
+ "optimization"),
+ cl::init(true));
+
cl::opt<unsigned>
MVEMaxSupportedInterleaveFactor("mve-max-interleave-factor", cl::Hidden,
cl::desc("Maximum interleave factor for MVE VLDn to generate."),
@@ -20138,6 +20145,16 @@ void ARMTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
}
}
+static bool isLegalLogicalImmediate(unsigned Imm,
+ const ARMSubtarget *Subtarget) {
+ if (!Subtarget->isThumb())
+ return ARM_AM::getSOImmVal(Imm) != -1;
+ if (Subtarget->isThumb2())
+ return ARM_AM::getT2SOImmVal(Imm) != -1;
+ // Thumb1 only has 8-bit unsigned immediate.
+ return Imm <= 255;
+}
+
bool ARMTargetLowering::targetShrinkDemandedConstant(
SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
TargetLoweringOpt &TLO) const {
@@ -20146,8 +20163,7 @@ bool ARMTargetLowering::targetShrinkDemandedConstant(
if (!TLO.LegalOps)
return false;
- // Only optimize AND for now.
- if (Op.getOpcode() != ISD::AND)
+ if (!EnableOptimizeLogicalImm)
return false;
EVT VT = Op.getValueType();
@@ -20158,6 +20174,14 @@ bool ARMTargetLowering::targetShrinkDemandedConstant(
assert(VT == MVT::i32 && "Unexpected integer type");
+ // Exit early if we demand all bits.
+ if (DemandedBits.popcount() == 32)
+ return false;
+
+ // Only optimize AND for now.
+ if (Op.getOpcode() != ISD::AND)
+ return false;
+
// Make sure the RHS really is a constant.
ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
if (!C)
@@ -20165,21 +20189,13 @@ bool ARMTargetLowering::targetShrinkDemandedConstant(
unsigned Mask = C->getZExtValue();
+ if (Mask == 0 || Mask == ~0U)
+ return false;
+
unsigned Demanded = DemandedBits.getZExtValue();
unsigned ShrunkMask = Mask & Demanded;
unsigned ExpandedMask = Mask | ~Demanded;
- // If the mask is all zeros, let the target-independent code replace the
- // result with zero.
- if (ShrunkMask == 0)
- return false;
-
- // If the mask is all ones, erase the AND. (Currently, the target-independent
- // code won't do this, so we have to do it explicitly to avoid an infinite
- // loop in obscure cases.)
- if (ExpandedMask == ~0U)
- return TLO.CombineTo(Op, Op.getOperand(0));
-
auto IsLegalMask = [ShrunkMask, ExpandedMask](unsigned Mask) -> bool {
return (ShrunkMask & Mask) == ShrunkMask && (~ExpandedMask & Mask) == 0;
};
@@ -20192,30 +20208,61 @@ bool ARMTargetLowering::targetShrinkDemandedConstant(
return TLO.CombineTo(Op, NewOp);
};
- // Prefer uxtb mask.
- if (IsLegalMask(0xFF))
- return UseMask(0xFF);
+ // If the mask is all zeros, let the target-independent code replace the
+ // result with zero.
+ if (ShrunkMask == 0) {
+ ++NumOptimizedImms;
+ return UseMask(ShrunkMask);
+ }
- // Prefer uxth mask.
- if (IsLegalMask(0xFFFF))
- return UseMask(0xFFFF);
+ // If the mask is all ones, erase the AND. (Currently, the target-independent
+ // code won't do this, so we have to do it explicitly to avoid an infinite
+ // loop in obscure cases.)
+ if (ExpandedMask == ~0U) {
+ ++NumOptimizedImms;
+ return UseMask(ExpandedMask);
+ }
- // [1, 255] is Thumb1 movs+ands, legal immediate for ARM/Thumb2.
- // FIXME: Prefer a contiguous sequence of bits for other optimizations.
- if (ShrunkMask < 256)
+  // If Thumb1, check for uxtb and uxth masks first and foremost.
+ if (Subtarget->isThumb1Only() && Subtarget->hasV6Ops()) {
+ if (IsLegalMask(0xFF)) {
+ ++NumOptimizedImms;
+ return UseMask(0xFF);
+ }
+
+ if (IsLegalMask(0xFFFF)) {
+ ++NumOptimizedImms;
+ return UseMask(0xFFFF);
+ }
+ }
+
+ // Don't optimize if it is legal already.
+ if (isLegalLogicalImmediate(Mask, Subtarget))
+ return false;
+
+ if (isLegalLogicalImmediate(ShrunkMask, Subtarget)) {
+ ++NumOptimizedImms;
return UseMask(ShrunkMask);
+ }
- // [-256, -2] is Thumb1 movs+bics, legal immediate for ARM/Thumb2.
- // FIXME: Prefer a contiguous sequence of bits for other optimizations.
- if ((int)ExpandedMask <= -2 && (int)ExpandedMask >= -256)
+ // FIXME: The check for v6 is because this interferes with some ubfx
+ // optimizations
+ if (!Subtarget->hasV6Ops() &&
+ isLegalLogicalImmediate(~ExpandedMask, Subtarget)) {
+ ++NumOptimizedImms;
return UseMask(ExpandedMask);
+ }
+
+ if ((~ExpandedMask) < 256) {
+ ++NumOptimizedImms;
+ return UseMask(ExpandedMask);
+ }
// Potential improvements:
//
// We could try to recognize lsls+lsrs or lsrs+lsls pairs here.
// We could try to prefer Thumb1 immediates which can be lowered to a
// two-instruction sequence.
- // We could try to recognize more legal ARM/Thumb2 immediates here.
return false;
}
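
The legality helper above defers to ARM_AM::getSOImmVal in ARM mode; a modified immediate there is an 8-bit value rotated right by an even amount. The following standalone sketch (isARMModifiedImm is a hypothetical stand-in, not the LLVM implementation) replays the shrink/expand choice on the i24_insert_bit case from the tests further down:

// Standalone sketch, not the LLVM implementation: models what
// ARM_AM::getSOImmVal accepts.
#include <cstdint>
#include <cstdio>

static bool isARMModifiedImm(uint32_t Imm) {
  for (unsigned Rot = 0; Rot < 32; Rot += 2) {
    // Rotating left by Rot undoes an even rotate-right of an 8-bit value.
    uint32_t Undone = Rot ? ((Imm << Rot) | (Imm >> (32 - Rot))) : Imm;
    if (Undone <= 0xFFu)
      return true;
  }
  return false;
}

int main() {
  // Replays i24_insert_bit below: a halfword store demands the low
  // 16 bits, and the IR mask is 0xDFFF.
  uint32_t Mask = 0xDFFF, Demanded = 0xFFFF;
  uint32_t Shrunk = Mask & Demanded;    // 0xDFFF: not encodable
  uint32_t Expanded = Mask | ~Demanded; // 0xFFFFDFFF: its inverse is 0x2000
  printf("shrunk legal:    %d\n", isARMModifiedImm(Shrunk));    // 0
  printf("~expanded legal: %d\n", isARMModifiedImm(~Expanded)); // 1 -> bic #8192
  return 0;
}
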
diff --git a/llvm/lib/Target/ARM/README.txt b/llvm/lib/Target/ARM/README.txt
index ff84e07fa084a..0170cc9e4a17f 100644
--- a/llvm/lib/Target/ARM/README.txt
+++ b/llvm/lib/Target/ARM/README.txt
@@ -606,32 +606,6 @@ constant which was already loaded). Not sure what's necessary to do that.
//===---------------------------------------------------------------------===//
-The code generated for bswap on armv4/5 (CPUs without rev) is less than ideal:
-
-int a(int x) { return __builtin_bswap32(x); }
-
-a:
- mov r1, #255, 24
- mov r2, #255, 16
- and r1, r1, r0, lsr #8
- and r2, r2, r0, lsl #8
- orr r1, r1, r0, lsr #24
- orr r0, r2, r0, lsl #24
- orr r0, r0, r1
- bx lr
-
-Something like the following would be better (fewer instructions/registers):
- eor r1, r0, r0, ror #16
- bic r1, r1, #0xff0000
- mov r1, r1, lsr #8
- eor r0, r1, r0, ror #8
- bx lr
-
-A custom Thumb version would also be a slight improvement over the generic
-version.
-
-//===---------------------------------------------------------------------===//
-
Consider the following simple C code:
void foo(unsigned char *a, unsigned char *b, int *c) {
diff --git a/llvm/test/CodeGen/ARM/fpenv.ll b/llvm/test/CodeGen/ARM/fpenv.ll
index f5d87170d9153..57e264d97bc44 100644
--- a/llvm/test/CodeGen/ARM/fpenv.ll
+++ b/llvm/test/CodeGen/ARM/fpenv.ll
@@ -41,8 +41,8 @@ define void @func_05() {
; CHECK-LABEL: func_05:
; CHECK: @ %bb.0:
; CHECK-NEXT: vmrs r0, fpscr
-; CHECK-NEXT: bic r0, r0, #12582912
; CHECK-NEXT: orr r0, r0, #4194304
+; CHECK-NEXT: bic r0, r0, #8388608
; CHECK-NEXT: vmsr fpscr, r0
; CHECK-NEXT: mov pc, lr
call void @llvm.set.rounding(i32 2)
@@ -53,8 +53,8 @@ define void @func_06() {
; CHECK-LABEL: func_06:
; CHECK: @ %bb.0:
; CHECK-NEXT: vmrs r0, fpscr
-; CHECK-NEXT: bic r0, r0, #12582912
; CHECK-NEXT: orr r0, r0, #8388608
+; CHECK-NEXT: bic r0, r0, #4194304
; CHECK-NEXT: vmsr fpscr, r0
; CHECK-NEXT: mov pc, lr
call void @llvm.set.rounding(i32 3)
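
These fpenv diffs are a direct payoff of the early demanded-bits handling: the ORR already forces FPSCR bit 22 on, so the mask only needs to clear bit 23. A quick standalone identity check of the func_05 rewrite (a sketch under that assumption, not part of the patch):

// Standalone check: with bit 22 forced on by the ORR, clearing bit 23
// alone matches the old clear-both-then-set form.
#include <cassert>
#include <cstdint>
#include <initializer_list>

int main() {
  for (uint32_t Base : {0x00000000u, 0xFFFFFFFFu, 0xDEADBEEFu})
    for (uint32_t RMode : {0x000000u, 0x400000u, 0x800000u, 0xC00000u}) {
      uint32_t V = (Base & ~0xC00000u) | RMode;    // arbitrary FPSCR contents
      uint32_t Old = (V & ~0xC00000u) | 0x400000u; // bic #12582912; orr #4194304
      uint32_t New = (V | 0x400000u) & ~0x800000u; // orr #4194304; bic #8388608
      assert(Old == New);
    }
  return 0;
}
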
diff --git a/llvm/test/CodeGen/ARM/funnel-shift-rot.ll b/llvm/test/CodeGen/ARM/funnel-shift-rot.ll
index a1b6847d623d0..6f34a5fd00314 100644
--- a/llvm/test/CodeGen/ARM/funnel-shift-rot.ll
+++ b/llvm/test/CodeGen/ARM/funnel-shift-rot.ll
@@ -19,7 +19,7 @@ declare <4 x i32> @llvm.fshr.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
define i8 @rotl_i8_const_shift(i8 %x) {
; CHECK-LABEL: rotl_i8_const_shift:
; CHECK: @ %bb.0:
-; CHECK-NEXT: uxtb r1, r0
+; CHECK-NEXT: and r1, r0, #224
; CHECK-NEXT: lsl r0, r0, #3
; CHECK-NEXT: orr r0, r0, r1, lsr #5
; CHECK-NEXT: bx lr
@@ -161,8 +161,7 @@ define <4 x i32> @rotl_v4i32_rotl_const_shift(<4 x i32> %x) {
define i8 @rotr_i8_const_shift(i8 %x) {
; CHECK-LABEL: rotr_i8_const_shift:
; CHECK: @ %bb.0:
-; CHECK-NEXT: uxtb r1, r0
-; CHECK-NEXT: lsr r1, r1, #3
+; CHECK-NEXT: ubfx r1, r0, #3, #5
; CHECK-NEXT: orr r0, r1, r0, lsl #5
; CHECK-NEXT: bx lr
%f = call i8 @llvm.fshr.i8(i8 %x, i8 %x, i8 3)
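
The rotr_i8 change folds uxtb+lsr into a single ubfx, which extracts Width bits starting at bit Lsb. A standalone sketch confirming the two forms agree (the ubfx helper here is a hypothetical C model of the instruction, not from the patch):

#include <cassert>
#include <cstdint>

// Model of ubfx rd, rn, #Lsb, #Width.
static uint32_t ubfx(uint32_t X, unsigned Lsb, unsigned Width) {
  return (X >> Lsb) & ((1u << Width) - 1);
}

int main() {
  // For an i8 rotate only the low byte matters, so uxtb + lsr #3
  // (keep bits 7:3) collapses to ubfx r1, r0, #3, #5.
  for (uint32_t X = 0; X <= 0xFFFFu; ++X)
    assert(ubfx(X, 3, 5) == ((X & 0xFFu) >> 3));
  return 0;
}
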
diff --git a/llvm/test/CodeGen/ARM/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll b/llvm/test/CodeGen/ARM/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll
index 7cc623fb0a616..a21ac8944d7ad 100644
--- a/llvm/test/CodeGen/ARM/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll
+++ b/llvm/test/CodeGen/ARM/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll
@@ -21,9 +21,9 @@ define i1 @scalar_i8_signbit_eq(i8 %x, i8 %y) nounwind {
; ARM-LABEL: scalar_i8_signbit_eq:
; ARM: @ %bb.0:
; ARM-NEXT: uxtb r1, r1
-; ARM-NEXT: lsl r0, r0, r1
+; ARM-NEXT: mov r2, #128
+; ARM-NEXT: and r0, r2, r0, lsl r1
; ARM-NEXT: mov r1, #1
-; ARM-NEXT: uxtb r0, r0
; ARM-NEXT: eor r0, r1, r0, lsr #7
; ARM-NEXT: bx lr
;
@@ -42,7 +42,7 @@ define i1 @scalar_i8_signbit_eq(i8 %x, i8 %y) nounwind {
; THUMB78-NEXT: uxtb r1, r1
; THUMB78-NEXT: lsls r0, r1
; THUMB78-NEXT: movs r1, #1
-; THUMB78-NEXT: uxtb r0, r0
+; THUMB78-NEXT: and r0, r0, #128
; THUMB78-NEXT: eor.w r0, r1, r0, lsr #7
; THUMB78-NEXT: bx lr
%t0 = lshr i8 128, %y
@@ -122,9 +122,9 @@ define i1 @scalar_i16_signbit_eq(i16 %x, i16 %y) nounwind {
; ARM-LABEL: scalar_i16_signbit_eq:
; ARM: @ %bb.0:
; ARM-NEXT: uxth r1, r1
-; ARM-NEXT: lsl r0, r0, r1
+; ARM-NEXT: mov r2, #32768
+; ARM-NEXT: and r0, r2, r0, lsl r1
; ARM-NEXT: mov r1, #1
-; ARM-NEXT: uxth r0, r0
; ARM-NEXT: eor r0, r1, r0, lsr #15
; ARM-NEXT: bx lr
;
@@ -144,7 +144,7 @@ define i1 @scalar_i16_signbit_eq(i16 %x, i16 %y) nounwind {
; THUMB78-NEXT: uxth r1, r1
; THUMB78-NEXT: lsls r0, r1
; THUMB78-NEXT: movs r1, #1
-; THUMB78-NEXT: uxth r0, r0
+; THUMB78-NEXT: and r0, r0, #32768
; THUMB78-NEXT: eor.w r0, r1, r0, lsr #15
; THUMB78-NEXT: bx lr
%t0 = lshr i16 32768, %y
@@ -862,21 +862,35 @@ define <4 x i1> @vec_4xi32_nonsplat_undef2_eq(<4 x i32> %x, <4 x i32> %y) nounwi
;------------------------------------------------------------------------------;
define i1 @scalar_i8_signbit_ne(i8 %x, i8 %y) nounwind {
-; ARM-LABEL: scalar_i8_signbit_ne:
-; ARM: @ %bb.0:
-; ARM-NEXT: uxtb r1, r1
-; ARM-NEXT: lsl r0, r0, r1
-; ARM-NEXT: uxtb r0, r0
-; ARM-NEXT: lsr r0, r0, #7
-; ARM-NEXT: bx lr
+; ARM6-LABEL: scalar_i8_signbit_ne:
+; ARM6: @ %bb.0:
+; ARM6-NEXT: uxtb r1, r1
+; ARM6-NEXT: mov r2, #128
+; ARM6-NEXT: and r0, r2, r0, lsl r1
+; ARM6-NEXT: lsr r0, r0, #7
+; ARM6-NEXT: bx lr
;
-; THUMB-LABEL: scalar_i8_signbit_ne:
-; THUMB: @ %bb.0:
-; THUMB-NEXT: uxtb r1, r1
-; THUMB-NEXT: lsls r0, r1
-; THUMB-NEXT: uxtb r0, r0
-; THUMB-NEXT: lsrs r0, r0, #7
-; THUMB-NEXT: bx lr
+; ARM78-LABEL: scalar_i8_signbit_ne:
+; ARM78: @ %bb.0:
+; ARM78-NEXT: uxtb r1, r1
+; ARM78-NEXT: lsl r0, r0, r1
+; ARM78-NEXT: ubfx r0, r0, #7, #1
+; ARM78-NEXT: bx lr
+;
+; THUMB6-LABEL: scalar_i8_signbit_ne:
+; THUMB6: @ %bb.0:
+; THUMB6-NEXT: uxtb r1, r1
+; THUMB6-NEXT: lsls r0, r1
+; THUMB6-NEXT: uxtb r0, r0
+; THUMB6-NEXT: lsrs r0, r0, #7
+; THUMB6-NEXT: bx lr
+;
+; THUMB78-LABEL: scalar_i8_signbit_ne:
+; THUMB78: @ %bb.0:
+; THUMB78-NEXT: uxtb r1, r1
+; THUMB78-NEXT: lsls r0, r1
+; THUMB78-NEXT: ubfx r0, r0, #7, #1
+; THUMB78-NEXT: bx lr
%t0 = lshr i8 128, %y
%t1 = and i8 %t0, %x
%res = icmp ne i8 %t1, 0 ; we are perfectly happy with 'ne' predicate
@@ -1051,3 +1065,5 @@ define i1 @scalar_i8_signbit_eq_with_nonzero(i8 %x, i8 %y) nounwind {
%res = icmp eq i8 %t1, 1 ; should be comparing with 0
ret i1 %res
}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; THUMB: {{.*}}
diff --git a/llvm/test/CodeGen/ARM/illegal-bitfield-loadstore.ll b/llvm/test/CodeGen/ARM/illegal-bitfield-loadstore.ll
index 5dbf8dd86b891..822bb89ecf22a 100644
--- a/llvm/test/CodeGen/ARM/illegal-bitfield-loadstore.ll
+++ b/llvm/test/CodeGen/ARM/illegal-bitfield-loadstore.ll
@@ -53,10 +53,8 @@ define void @i24_and_or(ptr %a) {
define void @i24_insert_bit(ptr %a, i1 zeroext %bit) {
; LE-LABEL: i24_insert_bit:
; LE: @ %bb.0:
-; LE-NEXT: mov r3, #255
; LE-NEXT: ldrh r2, [r0]
-; LE-NEXT: orr r3, r3, #57088
-; LE-NEXT: and r2, r2, r3
+; LE-NEXT: bic r2, r2, #8192
; LE-NEXT: orr r1, r2, r1, lsl #13
; LE-NEXT: strh r1, [r0]
; LE-NEXT: mov pc, lr
@@ -64,8 +62,7 @@ define void @i24_insert_bit(ptr %a, i1 zeroext %bit) {
; BE-LABEL: i24_insert_bit:
; BE: @ %bb.0:
; BE-NEXT: ldrh r2, [r0]
-; BE-NEXT: mov r3, #57088
-; BE-NEXT: orr r3, r3, #16711680
+; BE-NEXT: mvn r3, #8192
; BE-NEXT: and r2, r3, r2, lsl #8
; BE-NEXT: orr r1, r2, r1, lsl #13
; BE-NEXT: lsr r1, r1, #8
@@ -144,8 +141,7 @@ define void @i56_insert_bit(ptr %a, i1 zeroext %bit) {
; BE-LABEL: i56_insert_bit:
; BE: @ %bb.0:
; BE-NEXT: ldrh r2, [r0, #4]!
-; BE-NEXT: mov r3, #57088
-; BE-NEXT: orr r3, r3, #16711680
+; BE-NEXT: mvn r3, #8192
; BE-NEXT: and r2, r3, r2, lsl #8
; BE-NEXT: orr r1, r2, r1, lsl #13
; BE-NEXT: lsr r1, r1, #8
diff --git a/llvm/test/CodeGen/ARM/load-combine-big-endian.ll b/llvm/test/CodeGen/ARM/load-combine-big-endian.ll
index 1d5c8589429a4..81cfd6fb00fb0 100644
--- a/llvm/test/CodeGen/ARM/load-combine-big-endian.ll
+++ b/llvm/test/CodeGen/ARM/load-combine-big-endian.ll
@@ -53,12 +53,11 @@ define i32 @load_i32_by_i8_bswap(ptr %arg) {
; BSWAP is not supported by 32 bit target
; CHECK-LABEL: load_i32_by_i8_bswap:
; CHECK: @ %bb.0:
-; CHECK-NEXT: mov r1, #255
; CHECK-NEXT: ldr r0, [r0]
-; CHECK-NEXT: orr r1, r1, #16711680
-; CHECK-NEXT: and r2, r0, r1
-; CHECK-NEXT: and r0, r1, r0, ror #24
-; CHECK-NEXT: orr r0, r0, r2, ror #8
+; CHECK-NEXT: eor r1, r0, r0, ror #16
+; CHECK-NEXT: bic r1, r1, #16711680
+; CHECK-NEXT: lsr r1, r1, #8
+; CHECK-NEXT: eor r0, r1, r0, ror #8
; CHECK-NEXT: mov pc, lr
;
; CHECK-ARMv6-LABEL: load_i32_by_i8_bswap:
@@ -221,16 +220,16 @@ define i32 @load_i32_by_i16_i8(ptr %arg) {
define i64 @load_i64_by_i8_bswap(ptr %arg) {
; CHECK-LABEL: load_i64_by_i8_bswap:
; CHECK: @ %bb.0:
-; CHECK-NEXT: mov r2, #255
; CHECK-NEXT: ldr r1, [r0]
; CHECK-NEXT: ldr r0, [r0, #4]
-; CHECK-NEXT: orr r2, r2, #16711680
-; CHECK-NEXT: and r3, r0, r2
-; CHECK-NEXT: and r0, r2, r0, ror #24
-; CHECK-NEXT: orr r0, r0, r3, ror #8
-; CHECK-NEXT: and r3, r1, r2
-; CHECK-NEXT: and r1, r2, r1, ror #24
-; CHECK-NEXT: orr r1, r1, r3, ror #8
+; CHECK-NEXT: eor r2, r0, r0, ror #16
+; CHECK-NEXT: bic r2, r2, #16711680
+; CHECK-NEXT: lsr r2, r2, #8
+; CHECK-NEXT: eor r0, r2, r0, ror #8
+; CHECK-NEXT: eor r2, r1, r1, ror #16
+; CHECK-NEXT: bic r2, r2, #16711680
+; CHECK-NEXT: lsr r2, r2, #8
+; CHECK-NEXT: eor r1, r2, r1, ror #8
; CHECK-NEXT: mov pc, lr
;
; CHECK-ARMv6-LABEL: load_i64_by_i8_bswap:
@@ -370,12 +369,11 @@ define i64 @load_i64_by_i8(ptr %arg) {
define i32 @load_i32_by_i8_nonzero_offset(ptr %arg) {
; CHECK-LABEL: load_i32_by_i8_nonzero_offset:
; CHECK: @ %bb.0:
-; CHECK-NEXT: mov r1, #255
; CHECK-NEXT: ldr r0, [r0, #1]
-; CHECK-NEXT: orr r1, r1, #16711680
-; CHECK-NEXT: and r2, r0, r1
-; CHECK-NEXT: and r0, r1, r0, ror #24
-; CHECK-NEXT: orr r0, r0, r2, ror #8
+; CHECK-NEXT: eor r1, r0, r0, ror #16
+; CHECK-NEXT: bic r1, r1, #16711680
+; CHECK-NEXT: lsr r1, r1, #8
+; CHECK-NEXT: eor r0, r1, r0, ror #8
; CHECK-NEXT: mov pc, lr
;
; CHECK-ARMv6-LABEL: load_i32_by_i8_nonzero_offset:
@@ -425,12 +423,11 @@ define i32 @load_i32_by_i8_nonzero_offset(ptr %arg) {
define i32 @load_i32_by_i8_neg_offset(ptr %arg) {
; CHECK-LABEL: load_i32_by_i8_neg_offset:
; CHECK: @ %bb.0:
-; CHECK-NEXT: mov r1, #255
; CHECK-NEXT: ldr r0, [r0, #-4]
-; CHECK-NEXT: orr r1, r1, #16711680
-; CHECK-NEXT: and r2, r0, r1
-; CHECK-NEXT: and r0, r1, r0, ror #24
-; CHECK-NEXT: orr r0, r0, r2, ror #8
+; CHECK-NEXT: eor r1, r0, r0, ror #16
+; CHECK-NEXT: bic r1, r1, #16711680
+; CHECK-NEXT: lsr r1, r1, #8
+; CHECK-NEXT: eor r0, r1, r0, ror #8
; CHECK-NEXT: mov pc, lr
;
; CHECK-ARMv6-LABEL: load_i32_by_i8_neg_offset:
@@ -576,12 +573,11 @@ declare i16 @llvm.bswap.i16(i16)
define i32 @load_i32_by_bswap_i16(ptr %arg) {
; CHECK-LABEL: load_i32_by_bswap_i16:
; CHECK: @ %bb.0:
-; CHECK-NEXT: mov r1, #255
; CHECK-NEXT: ldr r0, [r0]
-; CHECK-NEXT: orr r1, r1, #16711680
-; CHECK-NEXT: and r2, r0, r1
-; CHECK-NEXT: and r0, r1, r0, ror #24
-; CHECK-NEXT: orr r0, r0, r2, ror #8
+; CHECK-NEXT: eor r1, r0, r0, ror #16
+; CHECK-NEXT: bic r1, r1, #16711680
+; CHECK-NEXT: lsr r1, r1, #8
+; CHECK-NEXT: eor r0, r1, r0, ror #8
; CHECK-NEXT: mov pc, lr
;
; CHECK-ARMv6-LABEL: load_i32_by_bswap_i16:
@@ -654,12 +650,11 @@ define i32 @load_i32_by_i8_base_offset_index(ptr %arg, i32 %i) {
; CHECK-LABEL: load_i32_by_i8_base_offset_index:
; CHECK: @ %bb.0:
; CHECK-NEXT: add r0, r0, r1
-; CHECK-NEXT: mov r1, #255
-; CHECK-NEXT: orr r1, r1, #16711680
; CHECK-NEXT: ldr r0, [r0, #12]
-; CHECK-NEXT: and r2, r0, r1
-; CHECK-NEXT: and r0, r1, r0, ror #24
-; CHECK-NEXT: orr r0, r0, r2, ror #8
+; CHECK-NEXT: eor r1, r0, r0, ror #16
+; CHECK-NEXT: bic r1, r1, #16711680
+; CHECK-NEXT: l...
[truncated]
✅ With the latest revision this PR passed the C/C++ code formatter.
Force-pushed from 834b13a to 8e01b9b.
arsenm reviewed Nov 5, 2025
Also enable a switch to turn off enable-logical-imm.
Co-authored-by: Jay Foad <[email protected]>
Removed the option to enable logical immediate instruction optimization.
AZero13 (Author): Is this good?
Depends on #165106