Commit bce5a1e

nickdesaulniers authored and hansendc committed

x86/mem: Move memmove to out of line assembler
When building ARCH=i386 with CONFIG_LTO_CLANG_FULL=y, it's possible
(depending on additional configs which I have not been able to isolate)
to observe a failure during register allocation:

  error: inline assembly requires more registers than available

when memmove is inlined into tcp_v4_fill_cb() or tcp_v6_fill_cb().

memmove is quite large and probably shouldn't be inlined due to size
alone. A noinline function attribute would be the simplest fix, but
there are a few things that stand out with the current definition:

In addition to having complex constraints that can't always be resolved,
the clobber list seems to be missing %bx. By using numbered operands
rather than symbolic operands, the constraints are quite obnoxious to
refactor.

Having a large function be 99% inline asm is a code smell that this
function should simply be written in stand-alone out-of-line assembler.
Moving this to out of line assembler guarantees that the compiler cannot
inline calls to memmove.

This has been done previously for 64b:

  commit 9599ec0 ("x86-64, mem: Convert memmove() to assembly file
  and fix return value bug")

That gives the opportunity for other cleanups like fixing the
inconsistent use of tabs vs spaces and instruction suffixes, and the
label 3 appearing twice. Symbolic operands, local labels, and
additional comments would provide this code with a fresh coat of paint.

Finally, add a test that tickles the `rep movsl` implementation to
test it for correctness, since it has implicit operands.

Suggested-by: Ingo Molnar <[email protected]>
Suggested-by: David Laight <[email protected]>
Signed-off-by: Nick Desaulniers <[email protected]>
Signed-off-by: Dave Hansen <[email protected]>
Reviewed-by: Kees Cook <[email protected]>
Tested-by: Kees Cook <[email protected]>
Tested-by: Nathan Chancellor <[email protected]>
Link: https://lore.kernel.org/all/20221018172155.287409-1-ndesaulniers%40google.com
1 parent 30a0b95 commit bce5a1e
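
A note on the "numbered operands rather than symbolic operands" complaint
above: GNU C inline asm can refer to operands by position (%0, %1, ...) or by
name (%[name]). A minimal illustration of the difference — this is not code
from the patch, just a sketch:

	/* Positional: adding or reordering operands renumbers every %N. */
	static inline int add_numbered(int a, int b)
	{
		int out;
		asm("addl %2, %1\n\t"
		    "movl %1, %0"
		    : "=r" (out), "+r" (a)
		    : "r" (b));
		return out;
	}

	/* Symbolic: each reference names its operand, so edits stay local. */
	static inline int add_symbolic(int a, int b)
	{
		int out;
		asm("addl %[b], %[a]\n\t"
		    "movl %[a], %[out]"
		    : [out] "=r" (out), [a] "+r" (a)
		    : [b] "r" (b));
		return out;
	}

The removed memmove below uses six positional outputs (%0-%5) plus tied
inputs, which is what makes its constraints hard to refactor; the .S rewrite
achieves a similar readability win with `.set` register aliases.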

File tree

4 files changed (+223 lines, -187 lines)


arch/x86/lib/Makefile

Lines changed: 1 addition & 0 deletions
@@ -60,6 +60,7 @@ ifeq ($(CONFIG_X86_32),y)
         lib-y += checksum_32.o
         lib-y += strstr_32.o
         lib-y += string_32.o
+        lib-y += memmove_32.o
 ifneq ($(CONFIG_X86_CMPXCHG64),y)
         lib-y += cmpxchg8b_emu.o atomic64_386_32.o
 endif

arch/x86/lib/memcpy_32.c

Lines changed: 0 additions & 187 deletions
@@ -17,190 +17,3 @@ __visible void *memset(void *s, int c, size_t count)
 	return __memset(s, c, count);
 }
 EXPORT_SYMBOL(memset);
-
-__visible void *memmove(void *dest, const void *src, size_t n)
-{
-	int d0,d1,d2,d3,d4,d5;
-	char *ret = dest;
-
-	__asm__ __volatile__(
-		/* Handle more 16 bytes in loop */
-		"cmp $0x10, %0\n\t"
-		"jb 1f\n\t"
-
-		/* Decide forward/backward copy mode */
-		"cmp %2, %1\n\t"
-		"jb 2f\n\t"
-
-		/*
-		 * movs instruction have many startup latency
-		 * so we handle small size by general register.
-		 */
-		"cmp $680, %0\n\t"
-		"jb 3f\n\t"
-		/*
-		 * movs instruction is only good for aligned case.
-		 */
-		"mov %1, %3\n\t"
-		"xor %2, %3\n\t"
-		"and $0xff, %3\n\t"
-		"jz 4f\n\t"
-		"3:\n\t"
-		"sub $0x10, %0\n\t"
-
-		/*
-		 * We gobble 16 bytes forward in each loop.
-		 */
-		"3:\n\t"
-		"sub $0x10, %0\n\t"
-		"mov 0*4(%1), %3\n\t"
-		"mov 1*4(%1), %4\n\t"
-		"mov %3, 0*4(%2)\n\t"
-		"mov %4, 1*4(%2)\n\t"
-		"mov 2*4(%1), %3\n\t"
-		"mov 3*4(%1), %4\n\t"
-		"mov %3, 2*4(%2)\n\t"
-		"mov %4, 3*4(%2)\n\t"
-		"lea 0x10(%1), %1\n\t"
-		"lea 0x10(%2), %2\n\t"
-		"jae 3b\n\t"
-		"add $0x10, %0\n\t"
-		"jmp 1f\n\t"
-
-		/*
-		 * Handle data forward by movs.
-		 */
-		".p2align 4\n\t"
-		"4:\n\t"
-		"mov -4(%1, %0), %3\n\t"
-		"lea -4(%2, %0), %4\n\t"
-		"shr $2, %0\n\t"
-		"rep movsl\n\t"
-		"mov %3, (%4)\n\t"
-		"jmp 11f\n\t"
-		/*
-		 * Handle data backward by movs.
-		 */
-		".p2align 4\n\t"
-		"6:\n\t"
-		"mov (%1), %3\n\t"
-		"mov %2, %4\n\t"
-		"lea -4(%1, %0), %1\n\t"
-		"lea -4(%2, %0), %2\n\t"
-		"shr $2, %0\n\t"
-		"std\n\t"
-		"rep movsl\n\t"
-		"mov %3,(%4)\n\t"
-		"cld\n\t"
-		"jmp 11f\n\t"
-
-		/*
-		 * Start to prepare for backward copy.
-		 */
-		".p2align 4\n\t"
-		"2:\n\t"
-		"cmp $680, %0\n\t"
-		"jb 5f\n\t"
-		"mov %1, %3\n\t"
-		"xor %2, %3\n\t"
-		"and $0xff, %3\n\t"
-		"jz 6b\n\t"
-
-		/*
-		 * Calculate copy position to tail.
-		 */
-		"5:\n\t"
-		"add %0, %1\n\t"
-		"add %0, %2\n\t"
-		"sub $0x10, %0\n\t"
-
-		/*
-		 * We gobble 16 bytes backward in each loop.
-		 */
-		"7:\n\t"
-		"sub $0x10, %0\n\t"
-
-		"mov -1*4(%1), %3\n\t"
-		"mov -2*4(%1), %4\n\t"
-		"mov %3, -1*4(%2)\n\t"
-		"mov %4, -2*4(%2)\n\t"
-		"mov -3*4(%1), %3\n\t"
-		"mov -4*4(%1), %4\n\t"
-		"mov %3, -3*4(%2)\n\t"
-		"mov %4, -4*4(%2)\n\t"
-		"lea -0x10(%1), %1\n\t"
-		"lea -0x10(%2), %2\n\t"
-		"jae 7b\n\t"
-		/*
-		 * Calculate copy position to head.
-		 */
-		"add $0x10, %0\n\t"
-		"sub %0, %1\n\t"
-		"sub %0, %2\n\t"
-
-		/*
-		 * Move data from 8 bytes to 15 bytes.
-		 */
-		".p2align 4\n\t"
-		"1:\n\t"
-		"cmp $8, %0\n\t"
-		"jb 8f\n\t"
-		"mov 0*4(%1), %3\n\t"
-		"mov 1*4(%1), %4\n\t"
-		"mov -2*4(%1, %0), %5\n\t"
-		"mov -1*4(%1, %0), %1\n\t"
-
-		"mov %3, 0*4(%2)\n\t"
-		"mov %4, 1*4(%2)\n\t"
-		"mov %5, -2*4(%2, %0)\n\t"
-		"mov %1, -1*4(%2, %0)\n\t"
-		"jmp 11f\n\t"
-
-		/*
-		 * Move data from 4 bytes to 7 bytes.
-		 */
-		".p2align 4\n\t"
-		"8:\n\t"
-		"cmp $4, %0\n\t"
-		"jb 9f\n\t"
-		"mov 0*4(%1), %3\n\t"
-		"mov -1*4(%1, %0), %4\n\t"
-		"mov %3, 0*4(%2)\n\t"
-		"mov %4, -1*4(%2, %0)\n\t"
-		"jmp 11f\n\t"
-
-		/*
-		 * Move data from 2 bytes to 3 bytes.
-		 */
-		".p2align 4\n\t"
-		"9:\n\t"
-		"cmp $2, %0\n\t"
-		"jb 10f\n\t"
-		"movw 0*2(%1), %%dx\n\t"
-		"movw -1*2(%1, %0), %%bx\n\t"
-		"movw %%dx, 0*2(%2)\n\t"
-		"movw %%bx, -1*2(%2, %0)\n\t"
-		"jmp 11f\n\t"
-
-		/*
-		 * Move data for 1 byte.
-		 */
-		".p2align 4\n\t"
-		"10:\n\t"
-		"cmp $1, %0\n\t"
-		"jb 11f\n\t"
-		"movb (%1), %%cl\n\t"
-		"movb %%cl, (%2)\n\t"
-		".p2align 4\n\t"
-		"11:"
-		: "=&c" (d0), "=&S" (d1), "=&D" (d2),
-		  "=r" (d3),"=r" (d4), "=r"(d5)
-		:"0" (n),
-		 "1" (src),
-		 "2" (dest)
-		:"memory");
-
-	return ret;
-
-}
-EXPORT_SYMBOL(memmove);
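
For orientation before reading the assembly version below: the control flow
that the deleted inline asm implements (and that memmove_32.S preserves) can
be paraphrased in C. This is an illustrative sketch only — the function name
is invented, and plain byte loops stand in for the 16-byte register loops, the
tail ladder, and the `rep movsl` fast paths:

	#include <stddef.h>

	void *memmove_sketch(void *dest, const void *src, size_t n)
	{
		unsigned char *d = dest;
		const unsigned char *s = src;
		size_t i;

		if (s >= (const unsigned char *)d) {
			/* Forward copy. For n >= 680, if src and dest agree
			 * in their low byte ((src ^ dest) & 0xff == 0), the
			 * asm uses `rep movsl`; otherwise it runs a
			 * 16-bytes-per-iteration register loop, then a tail
			 * ladder for 8-15, 4-7, 2-3, and 1 byte remainders. */
			for (i = 0; i < n; i++)
				d[i] = s[i];
		} else {
			/* Overlapping copy with dest above src: walk
			 * backward, which the asm does with
			 * `std; rep movsl; cld` in the large aligned case. */
			for (i = n; i-- > 0; )
				d[i] = s[i];
		}
		return dest;
	}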

arch/x86/lib/memmove_32.S

Lines changed: 200 additions & 0 deletions
@@ -0,0 +1,200 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <linux/linkage.h>
+#include <asm/export.h>
+
+SYM_FUNC_START(memmove)
+/*
+ * void *memmove(void *dest_in, const void *src_in, size_t n)
+ * -mregparm=3 passes these in registers:
+ * dest_in: %eax
+ * src_in: %edx
+ * n: %ecx
+ * See also: arch/x86/entry/calling.h for description of the calling convention.
+ *
+ * n can remain in %ecx, but for `rep movsl`, we'll need dest in %edi and src
+ * in %esi.
+ */
+.set dest_in, %eax
+.set dest, %edi
+.set src_in, %edx
+.set src, %esi
+.set n, %ecx
+.set tmp0, %edx
+.set tmp0w, %dx
+.set tmp1, %ebx
+.set tmp1w, %bx
+.set tmp2, %eax
+.set tmp3b, %cl
+
+/*
+ * Save all callee-saved registers, because this function is going to clobber
+ * all of them:
+ */
+	pushl	%ebp
+	movl	%esp, %ebp	// set standard frame pointer
+
+	pushl	%ebx
+	pushl	%edi
+	pushl	%esi
+	pushl	%eax		// save 'dest_in' parameter [eax] as the return value
+
+	movl	src_in, src
+	movl	dest_in, dest
+
+	/* Handle more 16 bytes in loop */
+	cmpl	$0x10, n
+	jb	.Lmove_16B
+
+	/* Decide forward/backward copy mode */
+	cmpl	dest, src
+	jb	.Lbackwards_header
+
+	/*
+	 * movs instruction have many startup latency
+	 * so we handle small size by general register.
+	 */
+	cmpl	$680, n
+	jb	.Ltoo_small_forwards
+	/* movs instruction is only good for aligned case. */
+	movl	src, tmp0
+	xorl	dest, tmp0
+	andl	$0xff, tmp0
+	jz	.Lforward_movs
+.Ltoo_small_forwards:
+	subl	$0x10, n
+
+	/* We gobble 16 bytes forward in each loop. */
+.Lmove_16B_forwards_loop:
+	subl	$0x10, n
+	movl	0*4(src), tmp0
+	movl	1*4(src), tmp1
+	movl	tmp0, 0*4(dest)
+	movl	tmp1, 1*4(dest)
+	movl	2*4(src), tmp0
+	movl	3*4(src), tmp1
+	movl	tmp0, 2*4(dest)
+	movl	tmp1, 3*4(dest)
+	leal	0x10(src), src
+	leal	0x10(dest), dest
+	jae	.Lmove_16B_forwards_loop
+	addl	$0x10, n
+	jmp	.Lmove_16B
+
+	/* Handle data forward by movs. */
+.p2align 4
+.Lforward_movs:
+	movl	-4(src, n), tmp0
+	leal	-4(dest, n), tmp1
+	shrl	$2, n
+	rep movsl
+	movl	tmp0, (tmp1)
+	jmp	.Ldone
+
+	/* Handle data backward by movs. */
+.p2align 4
+.Lbackwards_movs:
+	movl	(src), tmp0
+	movl	dest, tmp1
+	leal	-4(src, n), src
+	leal	-4(dest, n), dest
+	shrl	$2, n
+	std
+	rep movsl
+	movl	tmp0,(tmp1)
+	cld
+	jmp	.Ldone
+
+	/* Start to prepare for backward copy. */
+.p2align 4
+.Lbackwards_header:
+	cmpl	$680, n
+	jb	.Ltoo_small_backwards
+	movl	src, tmp0
+	xorl	dest, tmp0
+	andl	$0xff, tmp0
+	jz	.Lbackwards_movs
+
+	/* Calculate copy position to tail. */
+.Ltoo_small_backwards:
+	addl	n, src
+	addl	n, dest
+	subl	$0x10, n
+
+	/* We gobble 16 bytes backward in each loop. */
+.Lmove_16B_backwards_loop:
+	subl	$0x10, n
+
+	movl	-1*4(src), tmp0
+	movl	-2*4(src), tmp1
+	movl	tmp0, -1*4(dest)
+	movl	tmp1, -2*4(dest)
+	movl	-3*4(src), tmp0
+	movl	-4*4(src), tmp1
+	movl	tmp0, -3*4(dest)
+	movl	tmp1, -4*4(dest)
+	leal	-0x10(src), src
+	leal	-0x10(dest), dest
+	jae	.Lmove_16B_backwards_loop
+	/* Calculate copy position to head. */
+	addl	$0x10, n
+	subl	n, src
+	subl	n, dest
+
+	/* Move data from 8 bytes to 15 bytes. */
+.p2align 4
+.Lmove_16B:
+	cmpl	$8, n
+	jb	.Lmove_8B
+	movl	0*4(src), tmp0
+	movl	1*4(src), tmp1
+	movl	-2*4(src, n), tmp2
+	movl	-1*4(src, n), src
+
+	movl	tmp0, 0*4(dest)
+	movl	tmp1, 1*4(dest)
+	movl	tmp2, -2*4(dest, n)
+	movl	src, -1*4(dest, n)
+	jmp	.Ldone
+
+	/* Move data from 4 bytes to 7 bytes. */
+.p2align 4
+.Lmove_8B:
+	cmpl	$4, n
+	jb	.Lmove_4B
+	movl	0*4(src), tmp0
+	movl	-1*4(src, n), tmp1
+	movl	tmp0, 0*4(dest)
+	movl	tmp1, -1*4(dest, n)
+	jmp	.Ldone
+
+	/* Move data from 2 bytes to 3 bytes. */
+.p2align 4
+.Lmove_4B:
+	cmpl	$2, n
+	jb	.Lmove_1B
+	movw	0*2(src), tmp0w
+	movw	-1*2(src, n), tmp1w
+	movw	tmp0w, 0*2(dest)
+	movw	tmp1w, -1*2(dest, n)
+	jmp	.Ldone
+
+	/* Move data for 1 byte. */
+.p2align 4
+.Lmove_1B:
+	cmpl	$1, n
+	jb	.Ldone
+	movb	(src), tmp3b
+	movb	tmp3b, (dest)
+.p2align 4
+.Ldone:
+	popl	dest_in		// restore 'dest_in' [eax] as the return value
+	/* Restore all callee-saved registers: */
+	popl	%esi
+	popl	%edi
+	popl	%ebx
+	popl	%ebp
+
+	RET
+SYM_FUNC_END(memmove)
+EXPORT_SYMBOL(memmove)
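
The fourth changed file — the test that "tickles the `rep movsl`
implementation", per the commit message — is not shown in this capture. As a
rough idea of what such a check exercises, here is a hypothetical userspace
sketch (not the actual kernel test): it forces an overlapping copy that is
large enough (>= 680 bytes) and has matching low address bytes, so the
`std; rep movsl; cld` backward path is eligible, then verifies the result:

	#include <assert.h>
	#include <stdlib.h>
	#include <string.h>

	int main(void)
	{
		enum { N = 2048 };	/* >= 680: rep movsl path is eligible */
		unsigned char *buf = malloc(N + 256);
		size_t i;

		assert(buf);
		for (i = 0; i < N; i++)
			buf[i] = (unsigned char)i;

		/* dest - src = 256, so (src ^ dest) & 0xff == 0 passes the
		 * alignment check; dest > src forces the backward copy. */
		memmove(buf + 256, buf, N);
		for (i = 0; i < N; i++)
			assert(buf[256 + i] == (unsigned char)i);

		free(buf);
		return 0;
	}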
