Commit 5777eae

rmurphy-arm authored and willdeacon committed
arm64: Implement optimised checksum routine
Apparently there exist certain workloads which rely heavily on software checksumming, for which the generic do_csum() implementation becomes a significant bottleneck. Therefore let's give arm64 its own optimised version - for ease of maintenance this foregoes assembly or intrinsics, and is thus not actually arm64-specific, but does rely heavily on C idioms that translate well to the A64 ISA and the typical load/store capabilities of most ARMv8 CPU cores. The resulting increase in checksum throughput scales nicely with buffer size, tending towards 4x for a small in-order core (Cortex-A53), and up to 6x or more for an aggressive big core (Ampere eMAG).

Reported-by: Lingyan Huang <[email protected]>
Tested-by: Lingyan Huang <[email protected]>
Signed-off-by: Robin Murphy <[email protected]>
Signed-off-by: Will Deacon <[email protected]>
1 parent 46cf053 commit 5777eae
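
A quick illustration of the kind of C idiom the message refers to: the new code widens 64-bit additions to GCC/Clang's __uint128_t (as the patch itself does) so the compiler can keep the ones' complement carry in the PSTATE flags, typically an adds followed by a cinc or adc on A64, rather than reconstructing it in a general-purpose register. The sketch below mirrors the accumulate() helper added in arch/arm64/lib/csum.c; the surrounding main() is a hypothetical user-space wrapper for inspection only and is not part of the patch.

#include <stdint.h>
#include <stdio.h>

/* Mirrors the accumulate() helper added in arch/arm64/lib/csum.c: widen the
 * addition to 128 bits, then fold the carry-out back into the low 64 bits
 * (an end-around carry, i.e. ones' complement addition). */
static uint64_t accumulate(uint64_t sum, uint64_t data)
{
        __uint128_t tmp = (__uint128_t)sum + data;

        /* tmp >> 64 is the carry bit (0 or 1) of the 64-bit addition */
        return tmp + (tmp >> 64);
}

/* Hypothetical wrapper, only so the idiom can be compiled and inspected */
int main(void)
{
        /* 0xffff...ffff + 2 overflows, so the carry folds back in: prints 2 */
        printf("%llu\n", (unsigned long long)accumulate(~0ULL, 2));
        return 0;
}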

File tree

3 files changed, 129 insertions(+), 3 deletions(-)

arch/arm64/include/asm/checksum.h
arch/arm64/lib/Makefile
arch/arm64/lib/csum.c

arch/arm64/include/asm/checksum.h

Lines changed: 3 additions & 0 deletions
@@ -35,6 +35,9 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 }
 #define ip_fast_csum ip_fast_csum
 
+extern unsigned int do_csum(const unsigned char *buff, int len);
+#define do_csum do_csum
+
 #include <asm-generic/checksum.h>
 
 #endif /* __ASM_CHECKSUM_H */
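
For context, declaring do_csum() and defining a macro of the same name is what lets the architecture override take effect: the generic fallback in lib/checksum.c is wrapped in an #ifndef guard keyed on that macro, so once <asm/checksum.h> defines it, csum_partial() and friends use the arm64 routine instead. A minimal stand-alone illustration of the pattern follows, with hypothetical stand-ins rather than the real kernel headers (the guard shown is a paraphrase from memory, not part of this diff).

/* Stand-in for the two lines added to <asm/checksum.h> by this patch */
extern unsigned int do_csum(const unsigned char *buff, int len);
#define do_csum do_csum

/* Stand-in for the generic fallback: lib/checksum.c guards its own do_csum()
 * roughly like this, so defining the macro above compiles the fallback out. */
#ifndef do_csum
static unsigned int do_csum(const unsigned char *buff, int len)
{
        unsigned int sum = 0;

        /* placeholder body; the real generic loop lives in lib/checksum.c */
        while (len-- > 0)
                sum += *buff++;
        return sum;
}
#endif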

arch/arm64/lib/Makefile

Lines changed: 3 additions & 3 deletions
@@ -1,9 +1,9 @@
 # SPDX-License-Identifier: GPL-2.0
 lib-y := clear_user.o delay.o copy_from_user.o \
         copy_to_user.o copy_in_user.o copy_page.o \
-        clear_page.o memchr.o memcpy.o memmove.o memset.o \
-        memcmp.o strcmp.o strncmp.o strlen.o strnlen.o \
-        strchr.o strrchr.o tishift.o
+        clear_page.o csum.o memchr.o memcpy.o memmove.o \
+        memset.o memcmp.o strcmp.o strncmp.o strlen.o \
+        strnlen.o strchr.o strrchr.o tishift.o
 
 ifeq ($(CONFIG_KERNEL_MODE_NEON), y)
 obj-$(CONFIG_XOR_BLOCKS) += xor-neon.o

arch/arm64/lib/csum.c

Lines changed: 123 additions & 0 deletions
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2019-2020 Arm Ltd.
+
+#include <linux/compiler.h>
+#include <linux/kasan-checks.h>
+#include <linux/kernel.h>
+
+#include <net/checksum.h>
+
+/* Looks dumb, but generates nice-ish code */
+static u64 accumulate(u64 sum, u64 data)
+{
+        __uint128_t tmp = (__uint128_t)sum + data;
+        return tmp + (tmp >> 64);
+}
+
+unsigned int do_csum(const unsigned char *buff, int len)
+{
+        unsigned int offset, shift, sum;
+        const u64 *ptr;
+        u64 data, sum64 = 0;
+
+        offset = (unsigned long)buff & 7;
+        /*
+         * This is to all intents and purposes safe, since rounding down cannot
+         * result in a different page or cache line being accessed, and @buff
+         * should absolutely not be pointing to anything read-sensitive. We do,
+         * however, have to be careful not to piss off KASAN, which means using
+         * unchecked reads to accommodate the head and tail, for which we'll
+         * compensate with an explicit check up-front.
+         */
+        kasan_check_read(buff, len);
+        ptr = (u64 *)(buff - offset);
+        len = len + offset - 8;
+
+        /*
+         * Head: zero out any excess leading bytes. Shifting back by the same
+         * amount should be at least as fast as any other way of handling the
+         * odd/even alignment, and means we can ignore it until the very end.
+         */
+        shift = offset * 8;
+        data = READ_ONCE_NOCHECK(*ptr++);
+#ifdef __LITTLE_ENDIAN
+        data = (data >> shift) << shift;
+#else
+        data = (data << shift) >> shift;
+#endif
+
+        /*
+         * Body: straightforward aligned loads from here on (the paired loads
+         * underlying the quadword type still only need dword alignment). The
+         * main loop strictly excludes the tail, so the second loop will always
+         * run at least once.
+         */
+        while (unlikely(len > 64)) {
+                __uint128_t tmp1, tmp2, tmp3, tmp4;
+
+                tmp1 = READ_ONCE_NOCHECK(*(__uint128_t *)ptr);
+                tmp2 = READ_ONCE_NOCHECK(*(__uint128_t *)(ptr + 2));
+                tmp3 = READ_ONCE_NOCHECK(*(__uint128_t *)(ptr + 4));
+                tmp4 = READ_ONCE_NOCHECK(*(__uint128_t *)(ptr + 6));
+
+                len -= 64;
+                ptr += 8;
+
+                /* This is the "don't dump the carry flag into a GPR" idiom */
+                tmp1 += (tmp1 >> 64) | (tmp1 << 64);
+                tmp2 += (tmp2 >> 64) | (tmp2 << 64);
+                tmp3 += (tmp3 >> 64) | (tmp3 << 64);
+                tmp4 += (tmp4 >> 64) | (tmp4 << 64);
+                tmp1 = ((tmp1 >> 64) << 64) | (tmp2 >> 64);
+                tmp1 += (tmp1 >> 64) | (tmp1 << 64);
+                tmp3 = ((tmp3 >> 64) << 64) | (tmp4 >> 64);
+                tmp3 += (tmp3 >> 64) | (tmp3 << 64);
+                tmp1 = ((tmp1 >> 64) << 64) | (tmp3 >> 64);
+                tmp1 += (tmp1 >> 64) | (tmp1 << 64);
+                tmp1 = ((tmp1 >> 64) << 64) | sum64;
+                tmp1 += (tmp1 >> 64) | (tmp1 << 64);
+                sum64 = tmp1 >> 64;
+        }
+        while (len > 8) {
+                __uint128_t tmp;
+
+                sum64 = accumulate(sum64, data);
+                tmp = READ_ONCE_NOCHECK(*(__uint128_t *)ptr);
+
+                len -= 16;
+                ptr += 2;
+
+#ifdef __LITTLE_ENDIAN
+                data = tmp >> 64;
+                sum64 = accumulate(sum64, tmp);
+#else
+                data = tmp;
+                sum64 = accumulate(sum64, tmp >> 64);
+#endif
+        }
+        if (len > 0) {
+                sum64 = accumulate(sum64, data);
+                data = READ_ONCE_NOCHECK(*ptr);
+                len -= 8;
+        }
+        /*
+         * Tail: zero any over-read bytes similarly to the head, again
+         * preserving odd/even alignment.
+         */
+        shift = len * -8;
+#ifdef __LITTLE_ENDIAN
+        data = (data << shift) >> shift;
+#else
+        data = (data >> shift) << shift;
+#endif
+        sum64 = accumulate(sum64, data);
+
+        /* Finally, folding */
+        sum64 += (sum64 >> 32) | (sum64 << 32);
+        sum = sum64 >> 32;
+        sum += (sum >> 16) | (sum << 16);
+        if (offset & 1)
+                return (u16)swab32(sum);
+
+        return sum >> 16;
+}
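
As a sanity check outside the kernel, csum.c can be compiled in user space with its kernel-only pieces stubbed out (READ_ONCE_NOCHECK() as a plain dereference, kasan_check_read() as a no-op, unlikely() as a pass-through, u64/u16 from <stdint.h>, swab32() as __builtin_bswap32(), and __LITTLE_ENDIAN defined) and compared against a straightforward RFC 1071-style reference fold. The harness below is a hypothetical little-endian-only sketch under those assumptions; it only exercises even start addresses and is not part of the commit.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

unsigned int do_csum(const unsigned char *buff, int len);  /* from csum.c */

/* Reference: sum little-endian 16-bit words (odd tail byte padded with zero)
 * and fold the carries at the end - the value do_csum() should produce for a
 * buffer starting at an even address. */
static unsigned int ref_csum(const unsigned char *p, int len)
{
        unsigned long sum = 0;
        int i;

        for (i = 0; i + 1 < len; i += 2)
                sum += p[i] | (p[i + 1] << 8);
        if (len & 1)
                sum += p[len - 1];
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);
        return sum;
}

int main(void)
{
        /* Oversized so do_csum()'s rounded-up tail read stays in bounds */
        static unsigned char buf[4096] __attribute__((aligned(16)));
        int i, len;

        srand(1);
        for (i = 0; i < (int)sizeof(buf); i++)
                buf[i] = rand();

        for (len = 1; len <= 2048; len++) {
                unsigned int fast = do_csum(buf, len);
                unsigned int slow = ref_csum(buf, len);

                if (fast != slow) {
                        printf("mismatch at len %d: %#x vs %#x\n", len, fast, slow);
                        return 1;
                }
        }
        printf("ok\n");
        return 0;
}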
