Skip to content

Commit f69ca62

Browse files
ricardon and Ingo Molnar
authored and committed
x86/cpu: Refactor sync_core() for readability
Instead of having #ifdef/#endif blocks inside sync_core() for X86_64 and X86_32, implement the new function iret_to_self() with two versions. In this manner, avoid having to use even more #ifdef/#endif blocks when adding support for SERIALIZE in sync_core(). Co-developed-by: Tony Luck <[email protected]> Signed-off-by: Tony Luck <[email protected]> Signed-off-by: Ricardo Neri <[email protected]> Signed-off-by: Ingo Molnar <[email protected]> Link: https://lore.kernel.org/r/[email protected]
1 parent 9998a98 commit f69ca62

File tree

2 files changed

+32
-25
lines changed

2 files changed

+32
-25
lines changed

arch/x86/include/asm/special_insns.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -234,7 +234,6 @@ static inline void clwb(volatile void *__p)
234234

235235
#define nop() asm volatile ("nop")
236236

237-
238237
#endif /* __KERNEL__ */
239238

240239
#endif /* _ASM_X86_SPECIAL_INSNS_H */

arch/x86/include/asm/sync_core.h

Lines changed: 32 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,37 @@
66
#include <asm/processor.h>
77
#include <asm/cpufeature.h>
88

9+
#ifdef CONFIG_X86_32
10+
static inline void iret_to_self(void)
11+
{
12+
asm volatile (
13+
"pushfl\n\t"
14+
"pushl %%cs\n\t"
15+
"pushl $1f\n\t"
16+
"iret\n\t"
17+
"1:"
18+
: ASM_CALL_CONSTRAINT : : "memory");
19+
}
20+
#else
21+
static inline void iret_to_self(void)
22+
{
23+
unsigned int tmp;
24+
25+
asm volatile (
26+
"mov %%ss, %0\n\t"
27+
"pushq %q0\n\t"
28+
"pushq %%rsp\n\t"
29+
"addq $8, (%%rsp)\n\t"
30+
"pushfq\n\t"
31+
"mov %%cs, %0\n\t"
32+
"pushq %q0\n\t"
33+
"pushq $1f\n\t"
34+
"iretq\n\t"
35+
"1:"
36+
: "=&r" (tmp), ASM_CALL_CONSTRAINT : : "cc", "memory");
37+
}
38+
#endif /* CONFIG_X86_32 */
39+
940
/*
1041
* This function forces the icache and prefetched instruction stream to
1142
* catch up with reality in two very specific cases:
@@ -44,30 +75,7 @@ static inline void sync_core(void)
4475
* Like all of Linux's memory ordering operations, this is a
4576
* compiler barrier as well.
4677
*/
47-
#ifdef CONFIG_X86_32
48-
asm volatile (
49-
"pushfl\n\t"
50-
"pushl %%cs\n\t"
51-
"pushl $1f\n\t"
52-
"iret\n\t"
53-
"1:"
54-
: ASM_CALL_CONSTRAINT : : "memory");
55-
#else
56-
unsigned int tmp;
57-
58-
asm volatile (
59-
"mov %%ss, %0\n\t"
60-
"pushq %q0\n\t"
61-
"pushq %%rsp\n\t"
62-
"addq $8, (%%rsp)\n\t"
63-
"pushfq\n\t"
64-
"mov %%cs, %0\n\t"
65-
"pushq %q0\n\t"
66-
"pushq $1f\n\t"
67-
"iretq\n\t"
68-
"1:"
69-
: "=&r" (tmp), ASM_CALL_CONSTRAINT : : "cc", "memory");
70-
#endif
78+
iret_to_self();
7179
}
7280

7381
/*

0 commit comments

Comments (0)