Skip to content

Commit 3b3d7cc

Browse files
docularxugbtucker
authored and committed
Enable SVE in ISA-L erasure code for aarch64
This patch adds Arm (aarch64) SVE [1] variable-length vector assembly support into the ISA-L erasure code library. "Arm designed the Scalable Vector Extension (SVE) as a next-generation SIMD extension to AArch64. SVE allows flexible vector length implementations with a range of possible values in CPU implementations. The vector length can vary from a minimum of 128 bits up to a maximum of 2048 bits, at 128-bit increments. The SVE design guarantees that the same application can run on different implementations that support SVE, without the need to recompile the code." [3] Test method: - This patch was tested on Fujitsu's A64FX [2], and it passed all erasure code related test cases, including "make checks", "make test", and "make perf". - To ensure code testing coverage, parameters in files (erasure_code/erasure_code_test.c, erasure_code_update_test.c and gf_vect_mad_test.c) were modified to cover all _vect versions of the _mad_sve() / _dot_prod_sve() routines. Performance improvements over NEON: In general, SVE benchmarks (bandwidth in MB/s) are 40% ~ 100% higher than NEON when running _cold style (data uncached and pulled from memory) perfs. This includes the dot_prod, mad, and mul routines. Optimization points: This patch was tuned for the best performance on A64FX. Tuning points touched in this patch include: 1) Data prefetch into L2 cache before loading. See _sve.S files. 2) Instruction sequence orchestration, such as interleaving every two 'ld1b/st1b' instructions with other instructions. See _sve.S files. 3) To improve dest vector parallelism, at a high level, running gf_4vect_dot_prod_sve() twice is better than running gf_8vect_dot_prod_sve() once, and it is also better than running _7vect + _vect, _6vect + _2vect, or _5vect + _3vect. A similar idea is applied to improve the computation of 11 ~ 9 dest vector dot products as well.
The related change can be found in ec_encode_data_sve() of file: erasure_code/aarch64/ec_aarch64_highlevel_func.c Notes: 1) About vector length: A64FX has a vector register length of 512 bits. However, this patchset was written with variable-length assembly, so it works automatically on aarch64 machines with any SVE vector length, such as SVE-128, SVE-256, etc. 2) About optimization: Due to differences in microarchitecture and cache/memory design, to achieve optimum performance on SVE-capable CPUs other than A64FX, it is considered necessary to do microarchitecture-level tuning on those CPUs. [1] Introduction to SVE - Arm Developer. https://developer.arm.com/documentation/102476/latest/ [2] FUJITSU Processor A64FX. https://www.fujitsu.com/global/products/computing/servers/supercomputer/a64fx/ [3] Introducing SVE. https://developer.arm.com/documentation/102476/0001/Introducing-SVE Change-Id: If49eb8a956154d799dcda0ba4c9c6d979f5064a9 Signed-off-by: Guodong Xu <[email protected]>
1 parent 642ef36 commit 3b3d7cc

18 files changed

+3121
-5
lines changed

erasure_code/aarch64/Makefile.am

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -42,4 +42,19 @@ lsrc_aarch64 += \
4242
erasure_code/aarch64/gf_5vect_mad_neon.S \
4343
erasure_code/aarch64/gf_6vect_mad_neon.S \
4444
erasure_code/aarch64/gf_vect_mul_neon.S \
45+
erasure_code/aarch64/gf_vect_mad_sve.S \
46+
erasure_code/aarch64/gf_2vect_mad_sve.S \
47+
erasure_code/aarch64/gf_3vect_mad_sve.S \
48+
erasure_code/aarch64/gf_4vect_mad_sve.S \
49+
erasure_code/aarch64/gf_5vect_mad_sve.S \
50+
erasure_code/aarch64/gf_6vect_mad_sve.S \
51+
erasure_code/aarch64/gf_vect_dot_prod_sve.S \
52+
erasure_code/aarch64/gf_2vect_dot_prod_sve.S \
53+
erasure_code/aarch64/gf_3vect_dot_prod_sve.S \
54+
erasure_code/aarch64/gf_4vect_dot_prod_sve.S \
55+
erasure_code/aarch64/gf_5vect_dot_prod_sve.S \
56+
erasure_code/aarch64/gf_6vect_dot_prod_sve.S \
57+
erasure_code/aarch64/gf_7vect_dot_prod_sve.S \
58+
erasure_code/aarch64/gf_8vect_dot_prod_sve.S \
59+
erasure_code/aarch64/gf_vect_mul_sve.S \
4560
erasure_code/aarch64/ec_multibinary_arm.S

erasure_code/aarch64/ec_aarch64_dispatcher.c

Lines changed: 25 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -30,39 +30,59 @@
3030

3131
DEFINE_INTERFACE_DISPATCHER(gf_vect_dot_prod)
3232
{
33-
if (getauxval(AT_HWCAP) & HWCAP_ASIMD)
33+
unsigned long auxval = getauxval(AT_HWCAP);
34+
35+
if (auxval & HWCAP_SVE)
36+
return PROVIDER_INFO(gf_vect_dot_prod_sve);
37+
if (auxval & HWCAP_ASIMD)
3438
return PROVIDER_INFO(gf_vect_dot_prod_neon);
3539
return PROVIDER_BASIC(gf_vect_dot_prod);
3640

3741
}
3842

3943
DEFINE_INTERFACE_DISPATCHER(gf_vect_mad)
4044
{
41-
if (getauxval(AT_HWCAP) & HWCAP_ASIMD)
45+
unsigned long auxval = getauxval(AT_HWCAP);
46+
47+
if (auxval & HWCAP_SVE)
48+
return PROVIDER_INFO(gf_vect_mad_sve);
49+
if (auxval & HWCAP_ASIMD)
4250
return PROVIDER_INFO(gf_vect_mad_neon);
4351
return PROVIDER_BASIC(gf_vect_mad);
4452

4553
}
4654

4755
DEFINE_INTERFACE_DISPATCHER(ec_encode_data)
4856
{
49-
if (getauxval(AT_HWCAP) & HWCAP_ASIMD)
57+
unsigned long auxval = getauxval(AT_HWCAP);
58+
59+
if (auxval & HWCAP_SVE)
60+
return PROVIDER_INFO(ec_encode_data_sve);
61+
if (auxval & HWCAP_ASIMD)
5062
return PROVIDER_INFO(ec_encode_data_neon);
5163
return PROVIDER_BASIC(ec_encode_data);
5264

5365
}
5466

5567
DEFINE_INTERFACE_DISPATCHER(ec_encode_data_update)
5668
{
57-
if (getauxval(AT_HWCAP) & HWCAP_ASIMD)
69+
unsigned long auxval = getauxval(AT_HWCAP);
70+
71+
if (auxval & HWCAP_SVE)
72+
return PROVIDER_INFO(ec_encode_data_update_sve);
73+
if (auxval & HWCAP_ASIMD)
5874
return PROVIDER_INFO(ec_encode_data_update_neon);
5975
return PROVIDER_BASIC(ec_encode_data_update);
6076

6177
}
6278

6379
DEFINE_INTERFACE_DISPATCHER(gf_vect_mul)
6480
{
65-
if (getauxval(AT_HWCAP) & HWCAP_ASIMD)
81+
unsigned long auxval = getauxval(AT_HWCAP);
82+
83+
if (auxval & HWCAP_SVE)
84+
return PROVIDER_INFO(gf_vect_mul_sve);
85+
if (auxval & HWCAP_ASIMD)
6686
return PROVIDER_INFO(gf_vect_mul_neon);
6787
return PROVIDER_BASIC(gf_vect_mul);
6888

erasure_code/aarch64/ec_aarch64_highlevel_func.c

Lines changed: 137 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -125,3 +125,140 @@ void ec_encode_data_update_neon(int len, int k, int rows, int vec_i, unsigned ch
125125
break;
126126
}
127127
}
128+
129+
/* SVE */
130+
extern void gf_vect_dot_prod_sve(int len, int vlen, unsigned char *gftbls,
131+
unsigned char **src, unsigned char *dest);
132+
extern void gf_2vect_dot_prod_sve(int len, int vlen, unsigned char *gftbls,
133+
unsigned char **src, unsigned char **dest);
134+
extern void gf_3vect_dot_prod_sve(int len, int vlen, unsigned char *gftbls,
135+
unsigned char **src, unsigned char **dest);
136+
extern void gf_4vect_dot_prod_sve(int len, int vlen, unsigned char *gftbls,
137+
unsigned char **src, unsigned char **dest);
138+
extern void gf_5vect_dot_prod_sve(int len, int vlen, unsigned char *gftbls,
139+
unsigned char **src, unsigned char **dest);
140+
extern void gf_6vect_dot_prod_sve(int len, int vlen, unsigned char *gftbls,
141+
unsigned char **src, unsigned char **dest);
142+
extern void gf_7vect_dot_prod_sve(int len, int vlen, unsigned char *gftbls,
143+
unsigned char **src, unsigned char **dest);
144+
extern void gf_8vect_dot_prod_sve(int len, int vlen, unsigned char *gftbls,
145+
unsigned char **src, unsigned char **dest);
146+
extern void gf_vect_mad_sve(int len, int vec, int vec_i, unsigned char *gftbls,
147+
unsigned char *src, unsigned char *dest);
148+
extern void gf_2vect_mad_sve(int len, int vec, int vec_i, unsigned char *gftbls,
149+
unsigned char *src, unsigned char **dest);
150+
extern void gf_3vect_mad_sve(int len, int vec, int vec_i, unsigned char *gftbls,
151+
unsigned char *src, unsigned char **dest);
152+
extern void gf_4vect_mad_sve(int len, int vec, int vec_i, unsigned char *gftbls,
153+
unsigned char *src, unsigned char **dest);
154+
extern void gf_5vect_mad_sve(int len, int vec, int vec_i, unsigned char *gftbls,
155+
unsigned char *src, unsigned char **dest);
156+
extern void gf_6vect_mad_sve(int len, int vec, int vec_i, unsigned char *gftbls,
157+
unsigned char *src, unsigned char **dest);
158+
159+
/*
 * ec_encode_data_sve() - generate 'rows' coding blocks from 'k' data blocks
 * using the SVE GF(2^8) dot-product kernels.
 *
 * @len:    length of each block in bytes
 * @k:      number of source (data) blocks
 * @rows:   number of output (coding) blocks
 * @g_tbls: expanded GF tables, 32 bytes per (src, dest) pair
 * @data:   array of k source block pointers
 * @coding: array of rows destination block pointers
 */
void ec_encode_data_sve(int len, int k, int rows, unsigned char *g_tbls, unsigned char **data,
			unsigned char **coding)
{
	/* The SVE kernels refuse lengths below 16 bytes; use the C fallback. */
	if (len < 16) {
		ec_encode_data_base(len, k, rows, g_tbls, data, coding);
		return;
	}

	/* Peel off six outputs at a time until at most 11 remain. */
	while (rows > 11) {
		gf_6vect_dot_prod_sve(len, k, g_tbls, data, coding);
		g_tbls += 6 * k * 32;
		coding += 6;
		rows -= 6;
	}

	/*
	 * Splits below were tuned on A64FX (see commit message): for 8..11
	 * remaining outputs, two calls of (rows - 4) + 4 outperform one call
	 * of a single wider kernel.
	 */
	switch (rows) {
	case 11:
		/* 7 + 4 */
		gf_7vect_dot_prod_sve(len, k, g_tbls, data, coding);
		g_tbls += 7 * k * 32;
		coding += 7;
		gf_4vect_dot_prod_sve(len, k, g_tbls, data, coding);
		break;
	case 10:
		/* 6 + 4 */
		gf_6vect_dot_prod_sve(len, k, g_tbls, data, coding);
		g_tbls += 6 * k * 32;
		coding += 6;
		gf_4vect_dot_prod_sve(len, k, g_tbls, data, coding);
		break;
	case 9:
		/* 5 + 4 */
		gf_5vect_dot_prod_sve(len, k, g_tbls, data, coding);
		g_tbls += 5 * k * 32;
		coding += 5;
		gf_4vect_dot_prod_sve(len, k, g_tbls, data, coding);
		break;
	case 8:
		/* 4 + 4 */
		gf_4vect_dot_prod_sve(len, k, g_tbls, data, coding);
		g_tbls += 4 * k * 32;
		coding += 4;
		gf_4vect_dot_prod_sve(len, k, g_tbls, data, coding);
		break;
	case 7:
		gf_7vect_dot_prod_sve(len, k, g_tbls, data, coding);
		break;
	case 6:
		gf_6vect_dot_prod_sve(len, k, g_tbls, data, coding);
		break;
	case 5:
		gf_5vect_dot_prod_sve(len, k, g_tbls, data, coding);
		break;
	case 4:
		gf_4vect_dot_prod_sve(len, k, g_tbls, data, coding);
		break;
	case 3:
		gf_3vect_dot_prod_sve(len, k, g_tbls, data, coding);
		break;
	case 2:
		gf_2vect_dot_prod_sve(len, k, g_tbls, data, coding);
		break;
	case 1:
		/* Single destination kernel takes a plain pointer. */
		gf_vect_dot_prod_sve(len, k, g_tbls, data, *coding);
		break;
	default:
		break;
	}
}
228+
229+
/*
 * ec_encode_data_update_sve() - fold one additional source block (index
 * vec_i) into 'rows' existing coding blocks using the SVE mad kernels.
 *
 * @len:    length of each block in bytes
 * @k:      total number of source blocks
 * @rows:   number of output (coding) blocks
 * @vec_i:  index of the source block being folded in
 * @g_tbls: expanded GF tables, 32 bytes per (src, dest) pair
 * @data:   pointer to the single source block
 * @coding: array of rows destination block pointers (updated in place)
 */
void ec_encode_data_update_sve(int len, int k, int rows, int vec_i, unsigned char *g_tbls,
			       unsigned char *data, unsigned char **coding)
{
	/* The SVE kernels refuse lengths below 16 bytes; use the C fallback. */
	if (len < 16) {
		ec_encode_data_update_base(len, k, rows, vec_i, g_tbls, data, coding);
		return;
	}

	/* Peel off six outputs at a time until at most 6 remain. */
	while (rows > 6) {
		gf_6vect_mad_sve(len, k, vec_i, g_tbls, data, coding);
		g_tbls += 6 * k * 32;
		coding += 6;
		rows -= 6;
	}

	switch (rows) {
	case 6:
		gf_6vect_mad_sve(len, k, vec_i, g_tbls, data, coding);
		break;
	case 5:
		gf_5vect_mad_sve(len, k, vec_i, g_tbls, data, coding);
		break;
	case 4:
		gf_4vect_mad_sve(len, k, vec_i, g_tbls, data, coding);
		break;
	case 3:
		gf_3vect_mad_sve(len, k, vec_i, g_tbls, data, coding);
		break;
	case 2:
		gf_2vect_mad_sve(len, k, vec_i, g_tbls, data, coding);
		break;
	case 1:
		/* Single destination kernel takes a plain pointer. */
		gf_vect_mad_sve(len, k, vec_i, g_tbls, data, *coding);
		break;
	default:
		break;
	}
}
Lines changed: 164 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,164 @@
1+
/*************************************************************
2+
Copyright (c) 2021 Linaro Ltd.
3+
4+
Redistribution and use in source and binary forms, with or without
5+
modification, are permitted provided that the following conditions
6+
are met:
7+
* Redistributions of source code must retain the above copyright
8+
notice, this list of conditions and the following disclaimer.
9+
* Redistributions in binary form must reproduce the above copyright
10+
notice, this list of conditions and the following disclaimer in
11+
the documentation and/or other materials provided with the
12+
distribution.
13+
* Neither the name of Huawei Corporation nor the names of its
14+
contributors may be used to endorse or promote products derived
15+
from this software without specific prior written permission.
16+
17+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18+
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19+
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20+
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21+
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22+
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23+
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24+
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25+
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26+
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27+
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28+
**********************************************************************/
29+
.text
.align 6
.arch armv8-a+sve

.global gf_2vect_dot_prod_sve
.type gf_2vect_dot_prod_sve, %function
/* void gf_2vect_dot_prod_sve(int len, int vlen, unsigned char *gftbls,
                              unsigned char **src, unsigned char **dest);
 *
 * GF(2^8) dot product of 'vlen' source blocks into two destination blocks,
 * vector-length agnostic (predicated SVE; works for any SVE VL).
 * Returns 0 in w0 on success, 1 if len < 16.
 * Uses only caller-saved registers (x0-x10, z0-z5, z17/z18/z27, p0),
 * so no spills are required.
 */

/* arguments */
x_len	.req	x0	/* vector length */
x_vec	.req	x1	/* number of source vectors (ie. data blocks) */
x_tbl	.req	x2
x_src	.req	x3
x_dest	.req	x4

/* returns */
w_ret	.req	w0

/* local variables */
x_vec_i	.req	x5
x_ptr	.req	x6
x_pos	.req	x7

x_tbl1	.req	x8
x_tbl2	.req	x9
x_dest1	.req	x10
x_dest2	.req	x_dest	/* reused */

/* r16,r17,r18,r29,r30: special role registers, avoided */
/* r19..r29 and SP must be preserved */

/* vectors */
z_mask0f	.req	z0

z_src		.req	z1
z_src_lo	.req	z2
z_src_hi	.req	z_src

z_dest1		.req	z3

z_gft1_lo	.req	z4
z_gft1_hi	.req	z5
q_gft1_lo	.req	q4
q_gft1_hi	.req	q5

/* bottom 64-bit of v8..v15 must be preserved if used */
z_gft2_lo	.req	z17
z_gft2_hi	.req	z18
q_gft2_lo	.req	q17
q_gft2_hi	.req	q18

z_dest2		.req	z27

gf_2vect_dot_prod_sve:
	/* less than 16 bytes, return_fail */
	cmp	x_len, #16
	blt	.return_fail

	mov	z_mask0f.b, #0x0f	/* z_mask0f = 0x0F0F...0F */
	mov	x_pos, #0
	lsl	x_vec, x_vec, #3	/* x_vec *= 8: byte offset into the src pointer array */
	ldp	x_dest1, x_dest2, [x_dest, #8*0]

	/* Loop 1: x_len, vector length */
.Lloopsve_vl:
	whilelo	p0.b, x_pos, x_len	/* p0 masks the (possibly partial) tail */
	b.none	.return_pass

	mov	x_vec_i, #0		/* clear x_vec_i */
	ldr	x_ptr, [x_src, x_vec_i]	/* x_ptr: src base addr. */

	mov	z_dest1.b, #0		/* clear z_dest1 */
	mov	z_dest2.b, #0		/* clear z_dest2 */

	/* gf_tbl base = (x_tbl + dest_idx * x_vec * 32) */
	mov	x_tbl1, x_tbl		/* reset x_tbl1 */
	/* x_vec already holds vec*8, so LSL #2 adds vec*32 */
	add	x_tbl2, x_tbl1, x_vec, LSL #2	/* reset x_tbl2 */

	/* Loop 2: x_vec, number of source vectors (ie. data blocks) */
.Lloopsve_vl_vects:
	/* load src data, governed by p0 */
	ld1b	z_src.b, p0/z, [x_ptr, x_pos]	/* load from: src base + pos offset */
	/* split 4-bit lo; 4-bit hi (nibbles index the 16-entry GF tables) */
	and	z_src_lo.d, z_src.d, z_mask0f.d
	lsr	z_src_hi.b, z_src.b, #4

	/* gf_tbl addr: (x_tbl + dest_idx * x_vec * 32) + src_vec_idx * 32 */
	/* load gf_table's */
	ldp	q_gft1_lo, q_gft1_hi, [x_tbl1], #32	/* x_tbl1 is post-added by #32 for each src vect */
	ldp	q_gft2_lo, q_gft2_hi, [x_tbl2], #32

	/* prefetch next tables into L2 */
	prfb	pldl2keep, p0, [x_tbl1]
	prfb	pldl2keep, p0, [x_tbl2]

	/* calc for next iteration early, to overlap with the tbl/eor work */
	add	x_vec_i, x_vec_i, #8	/* move x_vec_i to next */
	ldr	x_ptr, [x_src, x_vec_i]	/* x_ptr: src base addr. */

	/* dest 1 */
	/* table indexing, ie. gf(2^8) multiplication */
	tbl	z_gft1_lo.b, {z_gft1_lo.b}, z_src_lo.b
	tbl	z_gft1_hi.b, {z_gft1_hi.b}, z_src_hi.b
	/* exclusive or, ie. gf(2^8) add */
	eor	z_dest1.d, z_gft1_lo.d, z_dest1.d
	eor	z_dest1.d, z_dest1.d, z_gft1_hi.d

	/* dest 2 */
	tbl	z_gft2_lo.b, {z_gft2_lo.b}, z_src_lo.b
	tbl	z_gft2_hi.b, {z_gft2_hi.b}, z_src_hi.b
	eor	z_dest2.d, z_gft2_lo.d, z_dest2.d
	eor	z_dest2.d, z_dest2.d, z_gft2_hi.d

	cmp	x_vec_i, x_vec
	blt	.Lloopsve_vl_vects
	/* end of Loop 2 */

	/* store dest data, governed by p0 */
	st1b	z_dest1.b, p0, [x_dest1, x_pos]
	st1b	z_dest2.b, p0, [x_dest2, x_pos]

	/* increment one vector length */
	incb	x_pos
	b	.Lloopsve_vl
	/* end of Loop 1 */

.return_pass:
	mov	w_ret, #0
	ret

.return_fail:
	mov	w_ret, #1
	ret

0 commit comments

Comments
 (0)