Skip to content

Commit 4b18219

Browse files
committed
[AArch64] Optimize splat of extending loads to avoid GPR->FPR transfer
Load the data directly into a SIMD register, sparing a general-purpose register and a potentially costly GPR->FPR transfer. The patterns are consolidated into a template that also handles a similar bitconvert pattern.
1 parent 2e7afb1 commit 4b18219

File tree

5 files changed

+233
-35
lines changed

5 files changed

+233
-35
lines changed

llvm/lib/Target/AArch64/AArch64ISelLowering.cpp

Lines changed: 24 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -26665,11 +26665,34 @@ static SDValue performDUPCombine(SDNode *N,
2666526665
}
2666626666

2666726667
if (N->getOpcode() == AArch64ISD::DUP) {
26668+
SDValue Op = N->getOperand(0);
26669+
26670+
// Optimize DUP(extload/zextload i8/i16) to avoid GPR->FPR transfer.
26671+
// For example:
26672+
// v4i32 = DUP (i32 (zextloadi8 addr))
26673+
// =>
26674+
// v4i32 = SCALAR_TO_VECTOR (i32 (zextloadi8 addr)) ; Matches to ldr b0
26675+
// v4i32 = DUPLANE32 (v4i32), 0
26676+
if (auto *LD = dyn_cast<LoadSDNode>(Op)) {
26677+
ISD::LoadExtType ExtType = LD->getExtensionType();
26678+
EVT MemVT = LD->getMemoryVT();
26679+
EVT ElemVT = VT.getVectorElementType();
26680+
if ((ExtType == ISD::EXTLOAD || ExtType == ISD::ZEXTLOAD) &&
26681+
(MemVT == MVT::i8 || MemVT == MVT::i16) && ElemVT != MemVT &&
26682+
LD->hasOneUse()) {
26683+
EVT Vec128VT = EVT::getVectorVT(*DCI.DAG.getContext(), ElemVT,
26684+
128 / ElemVT.getSizeInBits());
26685+
SDValue ScalarToVec =
26686+
DCI.DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, Vec128VT, Op);
26687+
return DCI.DAG.getNode(getDUPLANEOp(ElemVT), DL, VT, ScalarToVec,
26688+
DCI.DAG.getConstant(0, DL, MVT::i64));
26689+
}
26690+
}
26691+
2666826692
// If the instruction is known to produce a scalar in SIMD registers, we can
2666926693
// duplicate it across the vector lanes using DUPLANE instead of moving it
2667026694
// to a GPR first. For example, this allows us to handle:
2667126695
// v4i32 = DUP (i32 (FCMGT (f32, f32)))
26672-
SDValue Op = N->getOperand(0);
2667326696
// FIXME: Ideally, we should be able to handle all instructions that
2667426697
// produce a scalar value in FPRs.
2667526698
if (Op.getOpcode() == AArch64ISD::FCMEQ ||

llvm/lib/Target/AArch64/AArch64InstrInfo.td

Lines changed: 58 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -4004,26 +4004,6 @@ defm LDRSW : LoadUI<0b10, 0, 0b10, GPR64, uimm12s4, "ldrsw",
40044004
def : Pat<(i64 (zextloadi32 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))),
40054005
(SUBREG_TO_REG (i64 0), (LDRWui GPR64sp:$Rn, uimm12s4:$offset), sub_32)>;
40064006

4007-
// load zero-extended i32, bitcast to f64
4008-
def : Pat <(f64 (bitconvert (i64 (zextloadi32 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
4009-
(SUBREG_TO_REG (i64 0), (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub)>;
4010-
4011-
// load zero-extended i16, bitcast to f64
4012-
def : Pat <(f64 (bitconvert (i64 (zextloadi16 (am_indexed32 GPR64sp:$Rn, uimm12s2:$offset))))),
4013-
(SUBREG_TO_REG (i64 0), (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub)>;
4014-
4015-
// load zero-extended i8, bitcast to f64
4016-
def : Pat <(f64 (bitconvert (i64 (zextloadi8 (am_indexed32 GPR64sp:$Rn, uimm12s1:$offset))))),
4017-
(SUBREG_TO_REG (i64 0), (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub)>;
4018-
4019-
// load zero-extended i16, bitcast to f32
4020-
def : Pat <(f32 (bitconvert (i32 (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
4021-
(SUBREG_TO_REG (i32 0), (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub)>;
4022-
4023-
// load zero-extended i8, bitcast to f32
4024-
def : Pat <(f32 (bitconvert (i32 (zextloadi8 (am_indexed16 GPR64sp:$Rn, uimm12s1:$offset))))),
4025-
(SUBREG_TO_REG (i32 0), (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub)>;
4026-
40274007
// Pre-fetch.
40284008
def PRFMui : PrefetchUI<0b11, 0, 0b10, "prfm",
40294009
[(AArch64Prefetch timm:$Rt,
@@ -4375,6 +4355,64 @@ def : Pat <(v1i64 (scalar_to_vector (i64
43754355
(load (ro64.Xpat GPR64sp:$Rn, GPR64:$Rm, ro64.Xext:$extend))))),
43764356
(LDRDroX GPR64sp:$Rn, GPR64:$Rm, ro64.Xext:$extend)>;
43774357

4358+
// Patterns for bitconvert or scalar_to_vector of load operations.
4359+
// Enables direct SIMD register loads for small integer types (i8/i16) that are
4360+
// naturally zero-extended to i32/i64.
4361+
multiclass ExtLoad8_16AllModes<ValueType OutTy, ValueType InnerTy,
4362+
SDPatternOperator OuterOp,
4363+
PatFrags LoadOp8, PatFrags LoadOp16> {
4364+
// 8-bit loads.
4365+
def : Pat<(OutTy (OuterOp (InnerTy (LoadOp8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
4366+
(SUBREG_TO_REG (i64 0), (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub)>;
4367+
def : Pat<(OutTy (OuterOp (InnerTy (LoadOp8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))))),
4368+
(SUBREG_TO_REG (i64 0), (LDURBi GPR64sp:$Rn, simm9:$offset), bsub)>;
4369+
def : Pat<(OutTy (OuterOp (InnerTy (LoadOp8 (ro8.Wpat GPR64sp:$Rn, GPR32:$Rm, ro8.Wext:$extend))))),
4370+
(SUBREG_TO_REG (i64 0), (LDRBroW GPR64sp:$Rn, GPR32:$Rm, ro8.Wext:$extend), bsub)>;
4371+
def : Pat<(OutTy (OuterOp (InnerTy (LoadOp8 (ro8.Xpat GPR64sp:$Rn, GPR64:$Rm, ro8.Xext:$extend))))),
4372+
(SUBREG_TO_REG (i64 0), (LDRBroX GPR64sp:$Rn, GPR64:$Rm, ro8.Xext:$extend), bsub)>;
4373+
4374+
// 16-bit loads.
4375+
def : Pat<(OutTy (OuterOp (InnerTy (LoadOp16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
4376+
(SUBREG_TO_REG (i64 0), (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub)>;
4377+
def : Pat<(OutTy (OuterOp (InnerTy (LoadOp16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))))),
4378+
(SUBREG_TO_REG (i64 0), (LDURHi GPR64sp:$Rn, simm9:$offset), hsub)>;
4379+
def : Pat<(OutTy (OuterOp (InnerTy (LoadOp16 (ro16.Wpat GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$extend))))),
4380+
(SUBREG_TO_REG (i64 0), (LDRHroW GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$extend), hsub)>;
4381+
def : Pat<(OutTy (OuterOp (InnerTy (LoadOp16 (ro16.Xpat GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$extend))))),
4382+
(SUBREG_TO_REG (i64 0), (LDRHroX GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$extend), hsub)>;
4383+
}
4384+
4385+
// Extended multiclass that includes 32-bit loads in addition to 8-bit and 16-bit.
4386+
multiclass ExtLoad8_16_32AllModes<ValueType OutTy, ValueType InnerTy,
4387+
SDPatternOperator OuterOp,
4388+
PatFrags LoadOp8, PatFrags LoadOp16, PatFrags LoadOp32> {
4389+
defm : ExtLoad8_16AllModes<OutTy, InnerTy, OuterOp, LoadOp8, LoadOp16>;
4390+
4391+
// 32-bit loads.
4392+
def : Pat<(OutTy (OuterOp (InnerTy (LoadOp32 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
4393+
(SUBREG_TO_REG (i64 0), (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub)>;
4394+
def : Pat<(OutTy (OuterOp (InnerTy (LoadOp32 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))))),
4395+
(SUBREG_TO_REG (i64 0), (LDURSi GPR64sp:$Rn, simm9:$offset), ssub)>;
4396+
def : Pat<(OutTy (OuterOp (InnerTy (LoadOp32 (ro32.Wpat GPR64sp:$Rn, GPR32:$Rm, ro32.Wext:$extend))))),
4397+
(SUBREG_TO_REG (i64 0), (LDRSroW GPR64sp:$Rn, GPR32:$Rm, ro32.Wext:$extend), ssub)>;
4398+
def : Pat<(OutTy (OuterOp (InnerTy (LoadOp32 (ro32.Xpat GPR64sp:$Rn, GPR64:$Rm, ro32.Xext:$extend))))),
4399+
(SUBREG_TO_REG (i64 0), (LDRSroX GPR64sp:$Rn, GPR64:$Rm, ro32.Xext:$extend), ssub)>;
4400+
}
4401+
4402+
// Instantiate bitconvert patterns for floating-point types.
4403+
defm : ExtLoad8_16AllModes<f32, i32, bitconvert, zextloadi8, zextloadi16>;
4404+
defm : ExtLoad8_16_32AllModes<f64, i64, bitconvert, zextloadi8, zextloadi16, zextloadi32>;
4405+
4406+
// Instantiate scalar_to_vector patterns for all vector types.
4407+
defm : ExtLoad8_16AllModes<v16i8, i32, scalar_to_vector, zextloadi8, zextloadi16>;
4408+
defm : ExtLoad8_16AllModes<v16i8, i32, scalar_to_vector, extloadi8, extloadi16>;
4409+
defm : ExtLoad8_16AllModes<v8i16, i32, scalar_to_vector, zextloadi8, zextloadi16>;
4410+
defm : ExtLoad8_16AllModes<v8i16, i32, scalar_to_vector, extloadi8, extloadi16>;
4411+
defm : ExtLoad8_16AllModes<v4i32, i32, scalar_to_vector, zextloadi8, zextloadi16>;
4412+
defm : ExtLoad8_16AllModes<v4i32, i32, scalar_to_vector, extloadi8, extloadi16>;
4413+
defm : ExtLoad8_16_32AllModes<v2i64, i64, scalar_to_vector, zextloadi8, zextloadi16, zextloadi32>;
4414+
defm : ExtLoad8_16_32AllModes<v2i64, i64, scalar_to_vector, extloadi8, extloadi16, extloadi32>;
4415+
43784416
// Pre-fetch.
43794417
defm PRFUM : PrefetchUnscaled<0b11, 0, 0b10, "prfum",
43804418
[(AArch64Prefetch timm:$Rt,

llvm/test/CodeGen/AArch64/aarch64-smull.ll

Lines changed: 6 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -222,22 +222,20 @@ define <4 x i32> @smull_zext_v4i16_v4i32(ptr %A, ptr %B) nounwind {
222222
define <2 x i64> @smull_zext_v2i32_v2i64(ptr %A, ptr %B) nounwind {
223223
; CHECK-NEON-LABEL: smull_zext_v2i32_v2i64:
224224
; CHECK-NEON: // %bb.0:
225-
; CHECK-NEON-NEXT: ldrh w8, [x0]
226-
; CHECK-NEON-NEXT: ldrh w9, [x0, #2]
225+
; CHECK-NEON-NEXT: ldrh w8, [x0, #2]
226+
; CHECK-NEON-NEXT: ldr h0, [x0]
227227
; CHECK-NEON-NEXT: ldr d1, [x1]
228-
; CHECK-NEON-NEXT: fmov d0, x8
229-
; CHECK-NEON-NEXT: mov v0.d[1], x9
228+
; CHECK-NEON-NEXT: mov v0.d[1], x8
230229
; CHECK-NEON-NEXT: xtn v0.2s, v0.2d
231230
; CHECK-NEON-NEXT: smull v0.2d, v0.2s, v1.2s
232231
; CHECK-NEON-NEXT: ret
233232
;
234233
; CHECK-SVE-LABEL: smull_zext_v2i32_v2i64:
235234
; CHECK-SVE: // %bb.0:
236-
; CHECK-SVE-NEXT: ldrh w8, [x0]
237-
; CHECK-SVE-NEXT: ldrh w9, [x0, #2]
235+
; CHECK-SVE-NEXT: ldrh w8, [x0, #2]
236+
; CHECK-SVE-NEXT: ldr h0, [x0]
238237
; CHECK-SVE-NEXT: ldr d1, [x1]
239-
; CHECK-SVE-NEXT: fmov d0, x8
240-
; CHECK-SVE-NEXT: mov v0.d[1], x9
238+
; CHECK-SVE-NEXT: mov v0.d[1], x8
241239
; CHECK-SVE-NEXT: xtn v0.2s, v0.2d
242240
; CHECK-SVE-NEXT: smull v0.2d, v0.2s, v1.2s
243241
; CHECK-SVE-NEXT: ret
Lines changed: 139 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,139 @@
1+
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2+
; RUN: llc -mtriple=aarch64-none-linux-gnu < %s | FileCheck %s
3+
4+
; Test optimization of DUP with extended narrow loads
5+
; This should avoid GPR->SIMD transfers by loading directly into vector registers
6+
7+
define <4 x i32> @test_dup_zextload_i8_v4i32(ptr %p) {
8+
; CHECK-LABEL: test_dup_zextload_i8_v4i32:
9+
; CHECK: // %bb.0:
10+
; CHECK-NEXT: ldr b0, [x0]
11+
; CHECK-NEXT: dup v0.4s, v0.s[0]
12+
; CHECK-NEXT: ret
13+
%load = load i8, ptr %p, align 1
14+
%ext = zext i8 %load to i32
15+
%vec = insertelement <4 x i32> poison, i32 %ext, i32 0
16+
%dup = shufflevector <4 x i32> %vec, <4 x i32> poison, <4 x i32> zeroinitializer
17+
ret <4 x i32> %dup
18+
}
19+
20+
define <4 x i32> @test_dup_zextload_i16_v4i32(ptr %p) {
21+
; CHECK-LABEL: test_dup_zextload_i16_v4i32:
22+
; CHECK: // %bb.0:
23+
; CHECK-NEXT: ldr h0, [x0]
24+
; CHECK-NEXT: dup v0.4s, v0.s[0]
25+
; CHECK-NEXT: ret
26+
%load = load i16, ptr %p, align 2
27+
%ext = zext i16 %load to i32
28+
%vec = insertelement <4 x i32> poison, i32 %ext, i32 0
29+
%dup = shufflevector <4 x i32> %vec, <4 x i32> poison, <4 x i32> zeroinitializer
30+
ret <4 x i32> %dup
31+
}
32+
33+
define <2 x i32> @test_dup_zextload_i8_v2i32(ptr %p) {
34+
; CHECK-LABEL: test_dup_zextload_i8_v2i32:
35+
; CHECK: // %bb.0:
36+
; CHECK-NEXT: ldr b0, [x0]
37+
; CHECK-NEXT: dup v0.2s, v0.s[0]
38+
; CHECK-NEXT: ret
39+
%load = load i8, ptr %p, align 1
40+
%ext = zext i8 %load to i32
41+
%vec = insertelement <2 x i32> poison, i32 %ext, i32 0
42+
%dup = shufflevector <2 x i32> %vec, <2 x i32> poison, <2 x i32> zeroinitializer
43+
ret <2 x i32> %dup
44+
}
45+
46+
define <2 x i32> @test_dup_zextload_i16_v2i32(ptr %p) {
47+
; CHECK-LABEL: test_dup_zextload_i16_v2i32:
48+
; CHECK: // %bb.0:
49+
; CHECK-NEXT: ldr h0, [x0]
50+
; CHECK-NEXT: dup v0.2s, v0.s[0]
51+
; CHECK-NEXT: ret
52+
%load = load i16, ptr %p, align 2
53+
%ext = zext i16 %load to i32
54+
%vec = insertelement <2 x i32> poison, i32 %ext, i32 0
55+
%dup = shufflevector <2 x i32> %vec, <2 x i32> poison, <2 x i32> zeroinitializer
56+
ret <2 x i32> %dup
57+
}
58+
59+
define <8 x i16> @test_dup_zextload_i8_v8i16(ptr %p) {
60+
; CHECK-LABEL: test_dup_zextload_i8_v8i16:
61+
; CHECK: // %bb.0:
62+
; CHECK-NEXT: ldr b0, [x0]
63+
; CHECK-NEXT: dup v0.8h, v0.h[0]
64+
; CHECK-NEXT: ret
65+
%load = load i8, ptr %p, align 1
66+
%ext = zext i8 %load to i16
67+
%vec = insertelement <8 x i16> poison, i16 %ext, i32 0
68+
%dup = shufflevector <8 x i16> %vec, <8 x i16> poison, <8 x i32> zeroinitializer
69+
ret <8 x i16> %dup
70+
}
71+
72+
define <4 x i16> @test_dup_zextload_i8_v4i16(ptr %p) {
73+
; CHECK-LABEL: test_dup_zextload_i8_v4i16:
74+
; CHECK: // %bb.0:
75+
; CHECK-NEXT: ldr b0, [x0]
76+
; CHECK-NEXT: dup v0.4h, v0.h[0]
77+
; CHECK-NEXT: ret
78+
%load = load i8, ptr %p, align 1
79+
%ext = zext i8 %load to i16
80+
%vec = insertelement <4 x i16> poison, i16 %ext, i32 0
81+
%dup = shufflevector <4 x i16> %vec, <4 x i16> poison, <4 x i32> zeroinitializer
82+
ret <4 x i16> %dup
83+
}
84+
85+
define <4 x i32> @test_dup_zextload_i8_v4i32_offset(ptr %p) {
86+
; CHECK-LABEL: test_dup_zextload_i8_v4i32_offset:
87+
; CHECK: // %bb.0:
88+
; CHECK-NEXT: ldr b0, [x0, #4]
89+
; CHECK-NEXT: dup v0.4s, v0.s[0]
90+
; CHECK-NEXT: ret
91+
%addr = getelementptr inbounds i8, ptr %p, i64 4
92+
%load = load i8, ptr %addr, align 1
93+
%ext = zext i8 %load to i32
94+
%vec = insertelement <4 x i32> poison, i32 %ext, i32 0
95+
%dup = shufflevector <4 x i32> %vec, <4 x i32> poison, <4 x i32> zeroinitializer
96+
ret <4 x i32> %dup
97+
}
98+
99+
define <4 x i32> @test_dup_zextload_i16_v4i32_offset(ptr %p) {
100+
; CHECK-LABEL: test_dup_zextload_i16_v4i32_offset:
101+
; CHECK: // %bb.0:
102+
; CHECK-NEXT: ldr h0, [x0, #8]
103+
; CHECK-NEXT: dup v0.4s, v0.s[0]
104+
; CHECK-NEXT: ret
105+
%addr = getelementptr inbounds i16, ptr %p, i64 4
106+
%load = load i16, ptr %addr, align 2
107+
%ext = zext i16 %load to i32
108+
%vec = insertelement <4 x i32> poison, i32 %ext, i32 0
109+
%dup = shufflevector <4 x i32> %vec, <4 x i32> poison, <4 x i32> zeroinitializer
110+
ret <4 x i32> %dup
111+
}
112+
113+
define <4 x i32> @test_dup_zextload_i8_v4i32_reg_offset(ptr %p, i64 %offset) {
114+
; CHECK-LABEL: test_dup_zextload_i8_v4i32_reg_offset:
115+
; CHECK: // %bb.0:
116+
; CHECK-NEXT: ldr b0, [x0, x1]
117+
; CHECK-NEXT: dup v0.4s, v0.s[0]
118+
; CHECK-NEXT: ret
119+
%addr = getelementptr inbounds i8, ptr %p, i64 %offset
120+
%load = load i8, ptr %addr, align 1
121+
%ext = zext i8 %load to i32
122+
%vec = insertelement <4 x i32> poison, i32 %ext, i32 0
123+
%dup = shufflevector <4 x i32> %vec, <4 x i32> poison, <4 x i32> zeroinitializer
124+
ret <4 x i32> %dup
125+
}
126+
127+
define <4 x i32> @test_dup_zextload_i16_v4i32_reg_offset(ptr %p, i64 %offset) {
128+
; CHECK-LABEL: test_dup_zextload_i16_v4i32_reg_offset:
129+
; CHECK: // %bb.0:
130+
; CHECK-NEXT: ldr h0, [x0, x1, lsl #1]
131+
; CHECK-NEXT: dup v0.4s, v0.s[0]
132+
; CHECK-NEXT: ret
133+
%addr = getelementptr inbounds i16, ptr %p, i64 %offset
134+
%load = load i16, ptr %addr, align 2
135+
%ext = zext i16 %load to i32
136+
%vec = insertelement <4 x i32> poison, i32 %ext, i32 0
137+
%dup = shufflevector <4 x i32> %vec, <4 x i32> poison, <4 x i32> zeroinitializer
138+
ret <4 x i32> %dup
139+
}

llvm/test/CodeGen/AArch64/dup.ll

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -32,8 +32,8 @@ entry:
3232
define <2 x i8> @loaddup_v2i8(ptr %p) {
3333
; CHECK-LABEL: loaddup_v2i8:
3434
; CHECK: // %bb.0: // %entry
35-
; CHECK-NEXT: ldrb w8, [x0]
36-
; CHECK-NEXT: dup v0.2s, w8
35+
; CHECK-NEXT: ldr b0, [x0]
36+
; CHECK-NEXT: dup v0.2s, v0.s[0]
3737
; CHECK-NEXT: ret
3838
entry:
3939
%a = load i8, ptr %p
@@ -189,8 +189,8 @@ entry:
189189
define <4 x i8> @loaddup_v4i8(ptr %p) {
190190
; CHECK-SD-LABEL: loaddup_v4i8:
191191
; CHECK-SD: // %bb.0: // %entry
192-
; CHECK-SD-NEXT: ldrb w8, [x0]
193-
; CHECK-SD-NEXT: dup v0.4h, w8
192+
; CHECK-SD-NEXT: ldr b0, [x0]
193+
; CHECK-SD-NEXT: dup v0.4h, v0.h[0]
194194
; CHECK-SD-NEXT: ret
195195
;
196196
; CHECK-GI-LABEL: loaddup_v4i8:
@@ -444,8 +444,8 @@ entry:
444444
define <2 x i16> @loaddup_v2i16(ptr %p) {
445445
; CHECK-SD-LABEL: loaddup_v2i16:
446446
; CHECK-SD: // %bb.0: // %entry
447-
; CHECK-SD-NEXT: ldrh w8, [x0]
448-
; CHECK-SD-NEXT: dup v0.2s, w8
447+
; CHECK-SD-NEXT: ldr h0, [x0]
448+
; CHECK-SD-NEXT: dup v0.2s, v0.s[0]
449449
; CHECK-SD-NEXT: ret
450450
;
451451
; CHECK-GI-LABEL: loaddup_v2i16:

0 commit comments

Comments
 (0)