
Commit 9020d39

KAGA-KOKO authored and suryasaimadhu committed
x86/alternatives: Move temporary_mm helpers into C
The only user of these inlines is the text poke code and this must not be
exposed to the world.

No functional change.

Signed-off-by: Thomas Gleixner <[email protected]>
Signed-off-by: Borislav Petkov <[email protected]>
Reviewed-by: Alexandre Chartre <[email protected]>
Acked-by: Peter Zijlstra (Intel) <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
1 parent cb2a023 commit 9020d39

File tree

2 files changed, 55 insertions(+), 55 deletions(-)


arch/x86/include/asm/mmu_context.h

Lines changed: 0 additions & 55 deletions
@@ -215,59 +215,4 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
 
 unsigned long __get_current_cr3_fast(void);
 
-typedef struct {
-	struct mm_struct *mm;
-} temp_mm_state_t;
-
-/*
- * Using a temporary mm allows to set temporary mappings that are not accessible
- * by other CPUs. Such mappings are needed to perform sensitive memory writes
- * that override the kernel memory protections (e.g., W^X), without exposing the
- * temporary page-table mappings that are required for these write operations to
- * other CPUs. Using a temporary mm also allows to avoid TLB shootdowns when the
- * mapping is torn down.
- *
- * Context: The temporary mm needs to be used exclusively by a single core. To
- *          harden security IRQs must be disabled while the temporary mm is
- *          loaded, thereby preventing interrupt handler bugs from overriding
- *          the kernel memory protection.
- */
-static inline temp_mm_state_t use_temporary_mm(struct mm_struct *mm)
-{
-	temp_mm_state_t temp_state;
-
-	lockdep_assert_irqs_disabled();
-	temp_state.mm = this_cpu_read(cpu_tlbstate.loaded_mm);
-	switch_mm_irqs_off(NULL, mm, current);
-
-	/*
-	 * If breakpoints are enabled, disable them while the temporary mm is
-	 * used. Userspace might set up watchpoints on addresses that are used
-	 * in the temporary mm, which would lead to wrong signals being sent or
-	 * crashes.
-	 *
-	 * Note that breakpoints are not disabled selectively, which also causes
-	 * kernel breakpoints (e.g., perf's) to be disabled. This might be
-	 * undesirable, but still seems reasonable as the code that runs in the
-	 * temporary mm should be short.
-	 */
-	if (hw_breakpoint_active())
-		hw_breakpoint_disable();
-
-	return temp_state;
-}
-
-static inline void unuse_temporary_mm(temp_mm_state_t prev_state)
-{
-	lockdep_assert_irqs_disabled();
-	switch_mm_irqs_off(NULL, prev_state.mm, current);
-
-	/*
-	 * Restore the breakpoints if they were disabled before the temporary mm
-	 * was loaded.
-	 */
-	if (hw_breakpoint_active())
-		hw_breakpoint_restore();
-}
-
 #endif /* _ASM_X86_MMU_CONTEXT_H */
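
The "Context:" requirement in the removed comment is enforced at runtime by the lockdep_assert_irqs_disabled() calls: both helpers must run with interrupts off. As a minimal illustrative sketch (not part of this commit; poke_byte_example is a hypothetical caller and it assumes @mm already maps @addr writably), the pairing looks like:

	/* Illustrative sketch only: assumes @mm already maps @addr for writing. */
	static void poke_byte_example(struct mm_struct *mm, u8 *addr, u8 val)
	{
		temp_mm_state_t prev;
		unsigned long flags;

		local_irq_save(flags);		/* satisfy lockdep_assert_irqs_disabled() */
		prev = use_temporary_mm(mm);	/* switch this CPU onto the private mm */

		*addr = val;			/* write via a mapping other CPUs cannot see */

		unuse_temporary_mm(prev);	/* switch back; breakpoints are restored */
		local_irq_restore(flags);
	}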

arch/x86/kernel/alternative.c

Lines changed: 55 additions & 0 deletions
@@ -783,6 +783,61 @@ void __init_or_module text_poke_early(void *addr, const void *opcode,
 	}
 }
 
+typedef struct {
+	struct mm_struct *mm;
+} temp_mm_state_t;
+
+/*
+ * Using a temporary mm allows to set temporary mappings that are not accessible
+ * by other CPUs. Such mappings are needed to perform sensitive memory writes
+ * that override the kernel memory protections (e.g., W^X), without exposing the
+ * temporary page-table mappings that are required for these write operations to
+ * other CPUs. Using a temporary mm also allows to avoid TLB shootdowns when the
+ * mapping is torn down.
+ *
+ * Context: The temporary mm needs to be used exclusively by a single core. To
+ *          harden security IRQs must be disabled while the temporary mm is
+ *          loaded, thereby preventing interrupt handler bugs from overriding
+ *          the kernel memory protection.
+ */
+static inline temp_mm_state_t use_temporary_mm(struct mm_struct *mm)
+{
+	temp_mm_state_t temp_state;
+
+	lockdep_assert_irqs_disabled();
+	temp_state.mm = this_cpu_read(cpu_tlbstate.loaded_mm);
+	switch_mm_irqs_off(NULL, mm, current);
+
+	/*
+	 * If breakpoints are enabled, disable them while the temporary mm is
+	 * used. Userspace might set up watchpoints on addresses that are used
+	 * in the temporary mm, which would lead to wrong signals being sent or
+	 * crashes.
+	 *
+	 * Note that breakpoints are not disabled selectively, which also causes
+	 * kernel breakpoints (e.g., perf's) to be disabled. This might be
+	 * undesirable, but still seems reasonable as the code that runs in the
+	 * temporary mm should be short.
+	 */
+	if (hw_breakpoint_active())
+		hw_breakpoint_disable();
+
+	return temp_state;
+}
+
+static inline void unuse_temporary_mm(temp_mm_state_t prev_state)
+{
+	lockdep_assert_irqs_disabled();
+	switch_mm_irqs_off(NULL, prev_state.mm, current);
+
+	/*
+	 * Restore the breakpoints if they were disabled before the temporary mm
+	 * was loaded.
+	 */
+	if (hw_breakpoint_active())
+		hw_breakpoint_restore();
+}
+
 __ro_after_init struct mm_struct *poking_mm;
 __ro_after_init unsigned long poking_addr;
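
With the move, the helpers are file-local to alternative.c, next to their only user: the text poking code that writes through the poking_mm/poking_addr declared above. For orientation, a heavily abridged sketch of that pairing (text_poke_outline is a hypothetical name; the PTE install/teardown, locking, cross-page handling and error paths of the real __text_poke() are elided, so treat this as an outline, not the actual implementation):

	static void text_poke_outline(void *addr, const void *opcode, size_t len)
	{
		temp_mm_state_t prev;
		unsigned long flags;

		/* ... install a PTE for the target page at poking_addr in poking_mm ... */

		local_irq_save(flags);
		prev = use_temporary_mm(poking_mm);

		/* The write goes through a mapping only this CPU's temporary mm has. */
		memcpy((u8 *)poking_addr + offset_in_page(addr), opcode, len);

		/* ... clear the PTE and flush the TLB while still on poking_mm ... */

		unuse_temporary_mm(prev);
		local_irq_restore(flags);
	}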
