
Commit f8b1f54

jgross1 authored and gregkh committed
x86/static-call: provide a way to do very early static-call updates
commit 0ef8047b737d7480a5d4c46d956e97c190f13050 upstream.

Add static_call_update_early() for updating static-call targets in
very early boot.

This will be needed for support of Xen guest type specific hypercall
functions.

This is part of XSA-466 / CVE-2024-53241.

Reported-by: Andrew Cooper <[email protected]>
Signed-off-by: Juergen Gross <[email protected]>
Co-developed-by: Peter Zijlstra <[email protected]>
Co-developed-by: Josh Poimboeuf <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
1 parent 054f07a commit f8b1f54

File tree

arch/x86/include/asm/static_call.h
arch/x86/include/asm/sync_core.h
arch/x86/kernel/static_call.c
include/linux/compiler.h
include/linux/static_call.h
kernel/static_call_inline.c

6 files changed: +55 / -15 lines changed

arch/x86/include/asm/static_call.h

Lines changed: 15 additions & 0 deletions
@@ -65,4 +65,19 @@
 
 extern bool __static_call_fixup(void *tramp, u8 op, void *dest);
 
+extern void __static_call_update_early(void *tramp, void *func);
+
+#define static_call_update_early(name, _func)				\
+({									\
+	typeof(&STATIC_CALL_TRAMP(name)) __F = (_func);			\
+	if (static_call_initialized) {					\
+		__static_call_update(&STATIC_CALL_KEY(name),		\
+				     STATIC_CALL_TRAMP_ADDR(name), __F);\
+	} else {							\
+		WRITE_ONCE(STATIC_CALL_KEY(name).func, _func);		\
+		__static_call_update_early(STATIC_CALL_TRAMP_ADDR(name),\
+					   __F);			\
+	}								\
+})
+
 #endif /* _ASM_STATIC_CALL_H */
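
As a minimal usage sketch (the static call and both functions below are hypothetical, not part of this commit; the real users arrive with the follow-up Xen patches), the macro picks the patching path based on how far boot has progressed:

/*
 * Illustrative sketch only: 'my_hypercall', 'hc_default' and 'hc_xen'
 * are hypothetical names. Assumes <linux/static_call.h> and
 * <linux/errno.h> are included.
 */
static int hc_default(unsigned long op)
{
	return -ENOSYS;			/* placeholder implementation */
}

static int hc_xen(unsigned long op)
{
	return 0;			/* placeholder implementation */
}

DEFINE_STATIC_CALL(my_hypercall, hc_default);

void __init early_guest_init(void)
{
	/*
	 * Before static_call_init() has run, this takes the
	 * __static_call_update_early() branch and rewrites the
	 * trampoline directly, with no locking or text_poke machinery.
	 */
	static_call_update_early(my_hypercall, hc_xen);
}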

arch/x86/include/asm/sync_core.h

Lines changed: 3 additions & 3 deletions
@@ -8,7 +8,7 @@
 #include <asm/special_insns.h>
 
 #ifdef CONFIG_X86_32
-static inline void iret_to_self(void)
+static __always_inline void iret_to_self(void)
 {
 	asm volatile (
 		"pushfl\n\t"
@@ -19,7 +19,7 @@ static inline void iret_to_self(void)
 	: ASM_CALL_CONSTRAINT : : "memory");
 }
 #else
-static inline void iret_to_self(void)
+static __always_inline void iret_to_self(void)
 {
 	unsigned int tmp;
 
@@ -55,7 +55,7 @@ static inline void iret_to_self(void)
  * Like all of Linux's memory ordering operations, this is a
  * compiler barrier as well.
  */
-static inline void sync_core(void)
+static __always_inline void sync_core(void)
 {
 	/*
 	 * The SERIALIZE instruction is the most straightforward way to
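
These __always_inline changes are needed because sync_core() is now reached from noinstr code: a plain 'static inline' is only a hint, and if the compiler emits the function out of line, the noinstr caller would contain a call into instrumentable text, which objtool rejects. A sketch of the constraint (the function name is illustrative):

/* A noinstr function must not call out of the noinstr section. */
noinstr void early_patch_example(void)
{
	sync_core();	/* __always_inline: expanded in place, no call emitted */
}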

arch/x86/kernel/static_call.c

Lines changed: 9 additions & 0 deletions
@@ -170,6 +170,15 @@ void arch_static_call_transform(void *site, void *tramp, void *func, bool tail)
 }
 EXPORT_SYMBOL_GPL(arch_static_call_transform);
 
+noinstr void __static_call_update_early(void *tramp, void *func)
+{
+	BUG_ON(system_state != SYSTEM_BOOTING);
+	BUG_ON(!early_boot_irqs_disabled);
+	BUG_ON(static_call_initialized);
+	__text_gen_insn(tramp, JMP32_INSN_OPCODE, tramp, func, JMP32_INSN_SIZE);
+	sync_core();
+}
+
 #ifdef CONFIG_RETHUNK
 /*
  * This is called by apply_returns() to fix up static call trampolines,
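
The BUG_ON()s document why writing the trampoline directly is safe without the usual text-poking machinery: patching happens on the boot CPU only, with interrupts still disabled, before the static-call infrastructure is up, so no other context can execute the trampoline mid-update. The generated instruction is the 5-byte near jump: opcode 0xE9 followed by a rel32 displacement counted from the end of the instruction. A standalone sketch of that encoding (illustrative only, not the kernel helper itself):

#include <stdint.h>
#include <string.h>

/* Encode 'jmp dest' into buf, as if the instruction were placed at ip. */
static void encode_jmp32(uint8_t buf[5], uint64_t ip, uint64_t dest)
{
	int32_t rel32 = (int32_t)(dest - (ip + 5));	/* from next insn */

	buf[0] = 0xE9;			/* JMP rel32 opcode */
	memcpy(&buf[1], &rel32, sizeof(rel32));
}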

include/linux/compiler.h

Lines changed: 26 additions & 11 deletions
@@ -205,28 +205,43 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
 
 #endif /* __KERNEL__ */
 
+/**
+ * offset_to_ptr - convert a relative memory offset to an absolute pointer
+ * @off:	the address of the 32-bit offset value
+ */
+static inline void *offset_to_ptr(const int *off)
+{
+	return (void *)((unsigned long)off + *off);
+}
+
+#endif /* __ASSEMBLY__ */
+
+#ifdef CONFIG_64BIT
+#define ARCH_SEL(a,b) a
+#else
+#define ARCH_SEL(a,b) b
+#endif
+
 /*
  * Force the compiler to emit 'sym' as a symbol, so that we can reference
  * it from inline assembler. Necessary in case 'sym' could be inlined
  * otherwise, or eliminated entirely due to lack of references that are
  * visible to the compiler.
  */
-#define ___ADDRESSABLE(sym, __attrs) \
-	static void * __used __attrs \
+#define ___ADDRESSABLE(sym, __attrs)					\
+	static void * __used __attrs					\
 	__UNIQUE_ID(__PASTE(__addressable_,sym)) = (void *)&sym;
+
 #define __ADDRESSABLE(sym) \
 	___ADDRESSABLE(sym, __section(".discard.addressable"))
 
-/**
- * offset_to_ptr - convert a relative memory offset to an absolute pointer
- * @off:	the address of the 32-bit offset value
- */
-static inline void *offset_to_ptr(const int *off)
-{
-	return (void *)((unsigned long)off + *off);
-}
+#define __ADDRESSABLE_ASM(sym)						\
+	.pushsection .discard.addressable,"aw";				\
+	.align ARCH_SEL(8,4);						\
+	ARCH_SEL(.quad, .long) __stringify(sym);			\
+	.popsection;
 
-#endif /* __ASSEMBLY__ */
+#define __ADDRESSABLE_ASM_STR(sym) __stringify(__ADDRESSABLE_ASM(sym))
 
 /* &a[0] degrades to a pointer: a different type from an array */
 #define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
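
__ADDRESSABLE_ASM_STR() is the asm-template counterpart of __ADDRESSABLE(): it expands to a string of assembler directives that record a pointer-sized reference to the symbol in .discard.addressable, so the symbol survives even when its only use is inside an asm template. A hedged sketch of a caller (the symbol name is hypothetical; the intended user is the later Xen hypercall rework):

extern void my_target(void);

static inline void keep_my_target(void)
{
	/* Emit a .quad/.long entry for 'my_target' in .discard.addressable. */
	asm (__ADDRESSABLE_ASM_STR(my_target));
}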

include/linux/static_call.h

Lines changed: 1 addition & 0 deletions
@@ -138,6 +138,7 @@
 #ifdef CONFIG_HAVE_STATIC_CALL
 #include <asm/static_call.h>
 
+extern bool static_call_initialized;
 /*
  * Either @site or @tramp can be NULL.
 */

kernel/static_call_inline.c

Lines changed: 1 addition & 1 deletion
@@ -15,7 +15,7 @@ extern struct static_call_site __start_static_call_sites[],
 extern struct static_call_tramp_key __start_static_call_tramp_key[],
 			__stop_static_call_tramp_key[];
 
-static bool static_call_initialized;
+bool static_call_initialized;
 
 /* mutex to protect key modules/sites */
 static DEFINE_MUTEX(static_call_mutex);
