Commit 7ab7ea7

Sync mypy including the stubs

1 parent fefc9a5 · commit 7ab7ea7

58 files changed: +5911 -2 lines changed

lib-rt/base64/arch/avx/codec.c

Lines changed: 68 additions & 0 deletions

@@ -0,0 +1,68 @@
#include <stdint.h>
#include <stddef.h>
#include <stdlib.h>

#include "libbase64.h"
#include "../../tables/tables.h"
#include "../../codecs.h"
#include "config.h"
#include "../../env.h"

#if HAVE_AVX
#include <immintrin.h>

// Only enable inline assembly on supported compilers and on 64-bit CPUs.
#ifndef BASE64_AVX_USE_ASM
# if (defined(__GNUC__) || defined(__clang__)) && BASE64_WORDSIZE == 64
#  define BASE64_AVX_USE_ASM 1
# else
#  define BASE64_AVX_USE_ASM 0
# endif
#endif

#include "../ssse3/dec_reshuffle.c"
#include "../ssse3/dec_loop.c"

#if BASE64_AVX_USE_ASM
# include "enc_loop_asm.c"
#else
# include "../ssse3/enc_translate.c"
# include "../ssse3/enc_reshuffle.c"
# include "../ssse3/enc_loop.c"
#endif

#endif  // HAVE_AVX

void
base64_stream_encode_avx BASE64_ENC_PARAMS
{
#if HAVE_AVX
	#include "../generic/enc_head.c"

	// For supported compilers, use a hand-optimized inline assembly
	// encoder. Otherwise fall back on the SSSE3 encoder, but compiled with
	// AVX flags to generate better optimized AVX code.

	#if BASE64_AVX_USE_ASM
		enc_loop_avx(&s, &slen, &o, &olen);
	#else
		enc_loop_ssse3(&s, &slen, &o, &olen);
	#endif

	#include "../generic/enc_tail.c"
#else
	base64_enc_stub(state, src, srclen, out, outlen);
#endif
}

int
base64_stream_decode_avx BASE64_DEC_PARAMS
{
#if HAVE_AVX
	#include "../generic/dec_head.c"
	dec_loop_ssse3(&s, &slen, &o, &olen);
	#include "../generic/dec_tail.c"
#else
	return base64_dec_stub(state, src, srclen, out, outlen);
#endif
}
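
A note on the shape of these codec files: each stream function is assembled
by textually including generic head and tail fragments (enc_head.c,
enc_tail.c) around an architecture-specific bulk loop. The fragments
themselves are not part of this diff; the following minimal sketch, with
hypothetical names, illustrates the composition and why the bulk loop takes
pointer-to-pointer arguments:

#include <stddef.h>
#include <stdint.h>

// Sketch only. "bulk_loop" stands in for enc_loop_avx() and friends; the
// cursor names mirror those used by the calls above (s, slen, o, olen).
static void
encode_shape (const uint8_t *src, size_t srclen, uint8_t *out, size_t *outlen,
              void (*bulk_loop) (const uint8_t **, size_t *, uint8_t **, size_t *))
{
	const uint8_t *s = src;   // read cursor
	uint8_t *o = out;         // write cursor
	size_t slen = srclen;     // input bytes remaining
	size_t olen = 0;          // output bytes written

	// The loop consumes as many whole blocks as it safely can and
	// advances all four values through the pointers.
	bulk_loop(&s, &slen, &o, &olen);

	// A scalar tail (enc_tail.c's role) would encode the final slen
	// bytes here, then report the total:
	*outlen = olen;
}
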
lib-rt/base64/arch/avx/enc_loop_asm.c

Lines changed: 264 additions & 0 deletions

@@ -0,0 +1,264 @@
// Apologies in advance for combining the preprocessor with inline assembly,
// two notoriously gnarly parts of C, but it was necessary to avoid a lot of
// code repetition. The preprocessor is used to template large sections of
// inline assembly that differ only in the registers used. If the code was
// written out by hand, it would become very large and hard to audit.

// Generate a block of inline assembly that loads register R0 from memory. The
// offset at which the register is loaded is set by the given round.
#define LOAD(R0, ROUND) \
	"vlddqu ("#ROUND" * 12)(%[src]), %["R0"] \n\t"

// Generate a block of inline assembly that deinterleaves and shuffles register
// R0 using preloaded constants. Outputs in R0 and R1.
#define SHUF(R0, R1, R2) \
	"vpshufb  %[lut0], %["R0"], %["R1"] \n\t" \
	"vpand    %["R1"], %[msk0], %["R2"] \n\t" \
	"vpand    %["R1"], %[msk2], %["R1"] \n\t" \
	"vpmulhuw %["R2"], %[msk1], %["R2"] \n\t" \
	"vpmullw  %["R1"], %[msk3], %["R1"] \n\t" \
	"vpor     %["R1"], %["R2"], %["R1"] \n\t"

// Generate a block of inline assembly that takes R0 and R1 and translates
// their contents to the base64 alphabet, using preloaded constants.
#define TRAN(R0, R1, R2) \
	"vpsubusb %[n51], %["R1"], %["R0"] \n\t" \
	"vpcmpgtb %[n25], %["R1"], %["R2"] \n\t" \
	"vpsubb   %["R2"], %["R0"], %["R0"] \n\t" \
	"vpshufb  %["R0"], %[lut1], %["R2"] \n\t" \
	"vpaddb   %["R1"], %["R2"], %["R0"] \n\t"

// Generate a block of inline assembly that stores the given register R0 at an
// offset set by the given round.
#define STOR(R0, ROUND) \
	"vmovdqu %["R0"], ("#ROUND" * 16)(%[dst]) \n\t"

// Generate a block of inline assembly that generates a single self-contained
// encoder round: fetch the data, process it, and store the result. Then update
// the source and destination pointers.
#define ROUND() \
	LOAD("a", 0) \
	SHUF("a", "b", "c") \
	TRAN("a", "b", "c") \
	STOR("a", 0) \
	"add $12, %[src] \n\t" \
	"add $16, %[dst] \n\t"

// Define a macro that initiates a three-way interleaved encoding round by
// preloading registers a, b and c from memory.
// The register graph shows which registers are in use during each step, and
// is a visual aid for choosing registers for that step. Symbol index:
//
//  +  indicates that a register is loaded by that step.
//  |  indicates that a register is in use and must not be touched.
//  -  indicates that a register is decommissioned by that step.
//  x  indicates that a register is used as a temporary by that step.
//  V  indicates that a register is an input or output to the macro.
//
#define ROUND_3_INIT()			/*  a  b  c  d  e  f  */ \
	LOAD("a", 0)			/*  +                 */ \
	SHUF("a", "d", "e")		/*  |        +  x     */ \
	LOAD("b", 1)			/*  |  +     |        */ \
	TRAN("a", "d", "e")		/*  |  |     -  x     */ \
	LOAD("c", 2)			/*  V  V  V           */

// Define a macro that translates, shuffles and stores the input registers A, B
// and C, and preloads registers D, E and F for the next round.
// This macro can be arbitrarily daisy-chained by feeding output registers D, E
// and F back into the next round as input registers A, B and C. The macro
// carefully interleaves memory operations with data operations for optimal
// pipelined performance.

#define ROUND_3(ROUND, A,B,C,D,E,F)	/*  A  B  C  D  E  F  */ \
	LOAD(D, (ROUND + 3))		/*  V  V  V  +        */ \
	SHUF(B, E, F)			/*  |  |  |  |  +  x  */ \
	STOR(A, (ROUND + 0))		/*  -  |  |  |  |     */ \
	TRAN(B, E, F)			/*     |  |  |  -  x  */ \
	LOAD(E, (ROUND + 4))		/*     |  |  |  +     */ \
	SHUF(C, A, F)			/*  +  |  |  |  |  x  */ \
	STOR(B, (ROUND + 1))		/*  |  -  |  |  |     */ \
	TRAN(C, A, F)			/*  -     |  |  |  x  */ \
	LOAD(F, (ROUND + 5))		/*        |  |  |  +  */ \
	SHUF(D, A, B)			/*  +  x  |  |  |  |  */ \
	STOR(C, (ROUND + 2))		/*  |     -  |  |  |  */ \
	TRAN(D, A, B)			/*  -  x     V  V  V  */

// Define a macro that terminates a ROUND_3 macro by taking pre-loaded
// registers D, E and F, and translating, shuffling and storing them.
#define ROUND_3_END(ROUND, A,B,C,D,E,F)	/*  A  B  C  D  E  F  */ \
	SHUF(E, A, B)			/*  +  x     V  V  V  */ \
	STOR(D, (ROUND + 3))		/*  |        -  |  |  */ \
	TRAN(E, A, B)			/*  -  x        |  |  */ \
	SHUF(F, C, D)			/*        +  x  |  |  */ \
	STOR(E, (ROUND + 4))		/*        |     -  |  */ \
	TRAN(F, C, D)			/*        -  x     |  */ \
	STOR(F, (ROUND + 5))		/*                 -  */

// Define a type A round. Inputs are a, b, and c, outputs are d, e, and f.
#define ROUND_3_A(ROUND) \
	ROUND_3(ROUND, "a", "b", "c", "d", "e", "f")

// Define a type B round. Inputs and outputs are swapped with regard to type A.
#define ROUND_3_B(ROUND) \
	ROUND_3(ROUND, "d", "e", "f", "a", "b", "c")

// Terminating macro for a type A round.
#define ROUND_3_A_LAST(ROUND) \
	ROUND_3_A(ROUND) \
	ROUND_3_END(ROUND, "a", "b", "c", "d", "e", "f")

// Terminating macro for a type B round.
#define ROUND_3_B_LAST(ROUND) \
	ROUND_3_B(ROUND) \
	ROUND_3_END(ROUND, "d", "e", "f", "a", "b", "c")

// Suppress clang's warning that the literal string in the asm statement is
// overlong (longer than the ISO-mandated minimum size of 4095 bytes for C99
// compilers). It may be true, but the goal here is not C99 portability.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Woverlength-strings"

static inline void
enc_loop_avx (const uint8_t **s, size_t *slen, uint8_t **o, size_t *olen)
{
	// For a clearer explanation of the algorithm used by this function,
	// please refer to the plain (not inline assembly) implementation. This
	// function follows the same basic logic.

	if (*slen < 16) {
		return;
	}

	// Process blocks of 12 bytes at a time. Input is read in blocks of 16
	// bytes, so "reserve" four bytes from the input buffer to ensure that
	// we never read beyond the end of the input buffer.
	size_t rounds = (*slen - 4) / 12;

	*slen -= rounds * 12;   // 12 bytes consumed per round
	*olen += rounds * 16;   // 16 bytes produced per round

	// Number of times to go through the 36x loop.
	size_t loops = rounds / 36;

	// Number of rounds remaining after the 36x loop.
	rounds %= 36;
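
	// Worked example of the arithmetic above: for *slen == 100,
	// rounds = (100 - 4) / 12 = 8, consuming 96 bytes and producing
	// 128. The last round's 16-byte load starts at offset 7 * 12 = 84
	// and ends at byte 99, so the four "reserved" bytes guarantee the
	// load never runs past the end of a 100-byte buffer.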

	// Lookup tables.
	const __m128i lut0 = _mm_set_epi8(
		10, 11, 9, 10, 7, 8, 6, 7, 4, 5, 3, 4, 1, 2, 0, 1);

	const __m128i lut1 = _mm_setr_epi8(
		65, 71, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -19, -16, 0, 0);
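
	// TRAN classifies each 6-bit value v into an index (0 for 0..25,
	// 1 for 26..51, 2..13 for 52..63) and adds the offset it finds in
	// lut1 to reach ASCII: +65 ('A'), +71 ('a' - 26), -4 ('0' - 52),
	// -19 ('+' - 62), or -16 ('/' - 63).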

	// Temporary registers.
	__m128i a, b, c, d, e, f;

	__asm__ volatile (

		// If there are 36 rounds or more, enter a 36x unrolled loop of
		// interleaved encoding rounds. The rounds interleave memory
		// operations (load/store) with data operations (table lookups,
		// etc) to maximize pipeline throughput.
		"	test %[loops], %[loops] \n\t"
		"	jz   18f                \n\t"
		"	jmp  36f                \n\t"
		"	                        \n\t"
		".balign 64                     \n\t"
		"36:	" ROUND_3_INIT()
		"	" ROUND_3_A( 0)
		"	" ROUND_3_B( 3)
		"	" ROUND_3_A( 6)
		"	" ROUND_3_B( 9)
		"	" ROUND_3_A(12)
		"	" ROUND_3_B(15)
		"	" ROUND_3_A(18)
		"	" ROUND_3_B(21)
		"	" ROUND_3_A(24)
		"	" ROUND_3_B(27)
		"	" ROUND_3_A_LAST(30)
		"	add $(12 * 36), %[src]  \n\t"
		"	add $(16 * 36), %[dst]  \n\t"
		"	dec %[loops]            \n\t"
		"	jnz 36b                 \n\t"

		// Enter an 18x unrolled loop for rounds of 18 or more.
		"18:	cmp $18, %[rounds]      \n\t"
		"	jl  9f                  \n\t"
		"	" ROUND_3_INIT()
		"	" ROUND_3_A(0)
		"	" ROUND_3_B(3)
		"	" ROUND_3_A(6)
		"	" ROUND_3_B(9)
		"	" ROUND_3_A_LAST(12)
		"	sub $18, %[rounds]      \n\t"
		"	add $(12 * 18), %[src]  \n\t"
		"	add $(16 * 18), %[dst]  \n\t"

		// Enter a 9x unrolled loop for rounds of 9 or more.
		"9:	cmp $9, %[rounds]       \n\t"
		"	jl  6f                  \n\t"
		"	" ROUND_3_INIT()
		"	" ROUND_3_A(0)
		"	" ROUND_3_B_LAST(3)
		"	sub $9, %[rounds]       \n\t"
		"	add $(12 * 9), %[src]   \n\t"
		"	add $(16 * 9), %[dst]   \n\t"

		// Enter a 6x unrolled loop for rounds of 6 or more.
		"6:	cmp $6, %[rounds]       \n\t"
		"	jl  55f                 \n\t"
		"	" ROUND_3_INIT()
		"	" ROUND_3_A_LAST(0)
		"	sub $6, %[rounds]       \n\t"
		"	add $(12 * 6), %[src]   \n\t"
		"	add $(16 * 6), %[dst]   \n\t"

		// Dispatch the remaining rounds 0..5.
		"55:	cmp $3, %[rounds]       \n\t"
		"	jg  45f                 \n\t"
		"	je  3f                  \n\t"
		"	cmp $1, %[rounds]       \n\t"
		"	jg  2f                  \n\t"
		"	je  1f                  \n\t"
		"	jmp 0f                  \n\t"

		"45:	cmp $4, %[rounds]       \n\t"
		"	je  4f                  \n\t"

		// Block of non-interlaced encoding rounds, which can each
		// individually be jumped to. Rounds fall through to the next.
		"5:	" ROUND()
		"4:	" ROUND()
		"3:	" ROUND()
		"2:	" ROUND()
		"1:	" ROUND()
		"0:	\n\t"

		// Outputs (modified).
		: [rounds] "+r"  (rounds),
		  [loops]  "+r"  (loops),
		  [src]    "+r"  (*s),
		  [dst]    "+r"  (*o),
		  [a]      "=&x" (a),
		  [b]      "=&x" (b),
		  [c]      "=&x" (c),
		  [d]      "=&x" (d),
		  [e]      "=&x" (e),
		  [f]      "=&x" (f)

		// Inputs (not modified).
		: [lut0] "x" (lut0),
		  [lut1] "x" (lut1),
		  [msk0] "x" (_mm_set1_epi32(0x0FC0FC00)),
		  [msk1] "x" (_mm_set1_epi32(0x04000040)),
		  [msk2] "x" (_mm_set1_epi32(0x003F03F0)),
		  [msk3] "x" (_mm_set1_epi32(0x01000010)),
		  [n51]  "x" (_mm_set1_epi8(51)),
		  [n25]  "x" (_mm_set1_epi8(25))

		// Clobbers.
		: "cc", "memory"
	);
}

#pragma GCC diagnostic pop
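
Read from the labels down, the assembly above is a remainder cascade: a 36x
loop, then at most one each of the 18x, 9x and 6x blocks, then single rounds.
The same control flow in plain C (an illustrative outline only; do_rounds is
a hypothetical stand-in for the unrolled ROUND_3 blocks):

#include <stddef.h>

// Hypothetical stand-in for an n-round unrolled block.
static void do_rounds (size_t n) { (void) n; }

static void
enc_loop_outline (size_t rounds, size_t loops)
{
	while (loops--)                                      // "36:", jnz 36b
		do_rounds(36);
	if (rounds >= 18) { do_rounds(18); rounds -= 18; }   // "18:"
	if (rounds >= 9)  { do_rounds(9);  rounds -= 9;  }   // "9:"
	if (rounds >= 6)  { do_rounds(6);  rounds -= 6;  }   // "6:"
	while (rounds--)                                     // "5:" .. "1:"
		do_rounds(1);
}

Since rounds was already reduced modulo 36, each fixed-size block runs at
most once, and at most five single rounds remain for the final dispatch.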

lib-rt/base64/arch/avx2/codec.c

Lines changed: 58 additions & 0 deletions

@@ -0,0 +1,58 @@
#include <stdint.h>
#include <stddef.h>
#include <stdlib.h>

#include "libbase64.h"
#include "../../tables/tables.h"
#include "../../codecs.h"
#include "config.h"
#include "../../env.h"

#if HAVE_AVX2
#include <immintrin.h>

// Only enable inline assembly on supported compilers and on 64-bit CPUs.
#ifndef BASE64_AVX2_USE_ASM
# if (defined(__GNUC__) || defined(__clang__)) && BASE64_WORDSIZE == 64
#  define BASE64_AVX2_USE_ASM 1
# else
#  define BASE64_AVX2_USE_ASM 0
# endif
#endif

#include "dec_reshuffle.c"
#include "dec_loop.c"

#if BASE64_AVX2_USE_ASM
# include "enc_loop_asm.c"
#else
# include "enc_translate.c"
# include "enc_reshuffle.c"
# include "enc_loop.c"
#endif

#endif  // HAVE_AVX2

void
base64_stream_encode_avx2 BASE64_ENC_PARAMS
{
#if HAVE_AVX2
	#include "../generic/enc_head.c"
	enc_loop_avx2(&s, &slen, &o, &olen);
	#include "../generic/enc_tail.c"
#else
	base64_enc_stub(state, src, srclen, out, outlen);
#endif
}

int
base64_stream_decode_avx2 BASE64_DEC_PARAMS
{
#if HAVE_AVX2
	#include "../generic/dec_head.c"
	dec_loop_avx2(&s, &slen, &o, &olen);
	#include "../generic/dec_tail.c"
#else
	return base64_dec_stub(state, src, srclen, out, outlen);
#endif
}
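
Both codec.c files wrap their BASE64_*_USE_ASM defaults in #ifndef, so the
encoder choice can be forced from the build system; for instance, compiling
this file with -DBASE64_AVX2_USE_ASM=0 (an illustrative flag assignment, not
one taken from this tree's build files) selects the intrinsics-based
enc_loop_avx2 path even under GCC or Clang on 64-bit targets.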
