Skip to content

Commit 371b09c

Browse files
balbir-aws authored and KAGA-KOKO committed
x86/mm: Refactor cond_ibpb() to support other use cases
cond_ibpb() has the necessary bits required to track the previous mm in switch_mm_irqs_off(). This can be reused for other use cases like L1D flushing on context switch.

Suggested-by: Thomas Gleixner <[email protected]>
Signed-off-by: Balbir Singh <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent c52787b commit 371b09c

File tree

2 files changed

+30
-25
lines changed

2 files changed

+30
-25
lines changed

arch/x86/include/asm/tlbflush.h

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -83,7 +83,7 @@ struct tlb_state {
8383
/* Last user mm for optimizing IBPB */
8484
union {
8585
struct mm_struct *last_user_mm;
86-
unsigned long last_user_mm_ibpb;
86+
unsigned long last_user_mm_spec;
8787
};
8888

8989
u16 loaded_mm_asid;

arch/x86/mm/tlb.c

Lines changed: 29 additions & 24 deletions
Original file line number · Diff line number · Diff line change
@@ -43,10 +43,14 @@
4343
*/
4444

4545
/*
46-
* Use bit 0 to mangle the TIF_SPEC_IB state into the mm pointer which is
47-
* stored in cpu_tlb_state.last_user_mm_ibpb.
46+
* Bits to mangle the TIF_SPEC_IB state into the mm pointer which is
47+
* stored in cpu_tlb_state.last_user_mm_spec.
4848
*/
4949
#define LAST_USER_MM_IBPB 0x1UL
50+
#define LAST_USER_MM_SPEC_MASK (LAST_USER_MM_IBPB)
51+
52+
/* Bits to set when tlbstate and flush is (re)initialized */
53+
#define LAST_USER_MM_INIT LAST_USER_MM_IBPB
5054

5155
/*
5256
* The x86 feature is called PCID (Process Context IDentifier). It is similar
@@ -317,20 +321,29 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
317321
local_irq_restore(flags);
318322
}
319323

320-
static unsigned long mm_mangle_tif_spec_ib(struct task_struct *next)
324+
static unsigned long mm_mangle_tif_spec_bits(struct task_struct *next)
321325
{
322326
unsigned long next_tif = task_thread_info(next)->flags;
323-
unsigned long ibpb = (next_tif >> TIF_SPEC_IB) & LAST_USER_MM_IBPB;
327+
unsigned long spec_bits = (next_tif >> TIF_SPEC_IB) & LAST_USER_MM_SPEC_MASK;
324328

325-
return (unsigned long)next->mm | ibpb;
329+
return (unsigned long)next->mm | spec_bits;
326330
}
327331

328-
static void cond_ibpb(struct task_struct *next)
332+
static void cond_mitigation(struct task_struct *next)
329333
{
334+
unsigned long prev_mm, next_mm;
335+
330336
if (!next || !next->mm)
331337
return;
332338

339+
next_mm = mm_mangle_tif_spec_bits(next);
340+
prev_mm = this_cpu_read(cpu_tlbstate.last_user_mm_spec);
341+
333342
/*
343+
* Avoid user/user BTB poisoning by flushing the branch predictor
344+
* when switching between processes. This stops one process from
345+
* doing Spectre-v2 attacks on another.
346+
*
334347
* Both, the conditional and the always IBPB mode use the mm
335348
* pointer to avoid the IBPB when switching between tasks of the
336349
* same process. Using the mm pointer instead of mm->context.ctx_id
@@ -340,8 +353,6 @@ static void cond_ibpb(struct task_struct *next)
340353
* exposed data is not really interesting.
341354
*/
342355
if (static_branch_likely(&switch_mm_cond_ibpb)) {
343-
unsigned long prev_mm, next_mm;
344-
345356
/*
346357
* This is a bit more complex than the always mode because
347358
* it has to handle two cases:
@@ -371,20 +382,14 @@ static void cond_ibpb(struct task_struct *next)
371382
* Optimize this with reasonably small overhead for the
372383
* above cases. Mangle the TIF_SPEC_IB bit into the mm
373384
* pointer of the incoming task which is stored in
374-
* cpu_tlbstate.last_user_mm_ibpb for comparison.
375-
*/
376-
next_mm = mm_mangle_tif_spec_ib(next);
377-
prev_mm = this_cpu_read(cpu_tlbstate.last_user_mm_ibpb);
378-
379-
/*
385+
* cpu_tlbstate.last_user_mm_spec for comparison.
386+
*
380387
* Issue IBPB only if the mm's are different and one or
381388
* both have the IBPB bit set.
382389
*/
383390
if (next_mm != prev_mm &&
384391
(next_mm | prev_mm) & LAST_USER_MM_IBPB)
385392
indirect_branch_prediction_barrier();
386-
387-
this_cpu_write(cpu_tlbstate.last_user_mm_ibpb, next_mm);
388393
}
389394

390395
if (static_branch_unlikely(&switch_mm_always_ibpb)) {
@@ -393,11 +398,12 @@ static void cond_ibpb(struct task_struct *next)
393398
* different context than the user space task which ran
394399
* last on this CPU.
395400
*/
396-
if (this_cpu_read(cpu_tlbstate.last_user_mm) != next->mm) {
401+
if ((prev_mm & ~LAST_USER_MM_SPEC_MASK) !=
402+
(unsigned long)next->mm)
397403
indirect_branch_prediction_barrier();
398-
this_cpu_write(cpu_tlbstate.last_user_mm, next->mm);
399-
}
400404
}
405+
406+
this_cpu_write(cpu_tlbstate.last_user_mm_spec, next_mm);
401407
}
402408

403409
#ifdef CONFIG_PERF_EVENTS
@@ -531,11 +537,10 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
531537
need_flush = true;
532538
} else {
533539
/*
534-
* Avoid user/user BTB poisoning by flushing the branch
535-
* predictor when switching between processes. This stops
536-
* one process from doing Spectre-v2 attacks on another.
540+
* Apply process to process speculation vulnerability
541+
* mitigations if applicable.
537542
*/
538-
cond_ibpb(tsk);
543+
cond_mitigation(tsk);
539544

540545
/*
541546
* Stop remote flushes for the previous mm.
@@ -643,7 +648,7 @@ void initialize_tlbstate_and_flush(void)
643648
write_cr3(build_cr3(mm->pgd, 0));
644649

645650
/* Reinitialize tlbstate. */
646-
this_cpu_write(cpu_tlbstate.last_user_mm_ibpb, LAST_USER_MM_IBPB);
651+
this_cpu_write(cpu_tlbstate.last_user_mm_spec, LAST_USER_MM_INIT);
647652
this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0);
648653
this_cpu_write(cpu_tlbstate.next_asid, 1);
649654
this_cpu_write(cpu_tlbstate.ctxs[0].ctx_id, mm->context.ctx_id);

0 commit comments

Comments (0)