Skip to content

Commit 60f07c8

Browse files
author
Martin Schwidefsky
committed
s390/mm: fix race on mm->context.flush_mm
The order in __tlb_flush_mm_lazy is to flush TLB first and then clear the mm->context.flush_mm bit. This can lead to missed flushes as the bit can be set anytime, the order needs to be the other way around. But this leads to a different race, __tlb_flush_mm_lazy may be called on two CPUs concurrently. If mm->context.flush_mm is cleared first then another CPU can bypass __tlb_flush_mm_lazy although the first CPU has not done the flush yet. In a virtualized environment the time until the flush is finally completed can be arbitrarily long. Add a spinlock to serialize __tlb_flush_mm_lazy and use the function in finish_arch_post_lock_switch as well. Cc: <[email protected]> Reviewed-by: Heiko Carstens <[email protected]> Signed-off-by: Martin Schwidefsky <[email protected]>
1 parent b3e5dc4 commit 60f07c8

File tree

3 files changed

+7
-3
lines changed

3 files changed

+7
-3
lines changed

arch/s390/include/asm/mmu.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@
55
#include <linux/errno.h>
66

77
typedef struct {
8+
spinlock_t lock;
89
cpumask_t cpu_attach_mask;
910
atomic_t flush_count;
1011
unsigned int flush_mm;
@@ -27,6 +28,7 @@ typedef struct {
2728
} mm_context_t;
2829

2930
#define INIT_MM_CONTEXT(name) \
31+
.context.lock = __SPIN_LOCK_UNLOCKED(name.context.lock), \
3032
.context.pgtable_lock = \
3133
__SPIN_LOCK_UNLOCKED(name.context.pgtable_lock), \
3234
.context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list), \

arch/s390/include/asm/mmu_context.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,7 @@
1717
static inline int init_new_context(struct task_struct *tsk,
1818
struct mm_struct *mm)
1919
{
20+
spin_lock_init(&mm->context.lock);
2021
spin_lock_init(&mm->context.pgtable_lock);
2122
INIT_LIST_HEAD(&mm->context.pgtable_list);
2223
spin_lock_init(&mm->context.gmap_lock);
@@ -121,8 +122,7 @@ static inline void finish_arch_post_lock_switch(void)
121122
while (atomic_read(&mm->context.flush_count))
122123
cpu_relax();
123124
cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
124-
if (mm->context.flush_mm)
125-
__tlb_flush_mm(mm);
125+
__tlb_flush_mm_lazy(mm);
126126
preempt_enable();
127127
}
128128
set_fs(current->thread.mm_segment);

arch/s390/include/asm/tlbflush.h

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -101,10 +101,12 @@ static inline void __tlb_flush_kernel(void)
101101

102102
static inline void __tlb_flush_mm_lazy(struct mm_struct * mm)
103103
{
104+
spin_lock(&mm->context.lock);
104105
if (mm->context.flush_mm) {
105-
__tlb_flush_mm(mm);
106106
mm->context.flush_mm = 0;
107+
__tlb_flush_mm(mm);
107108
}
109+
spin_unlock(&mm->context.lock);
108110
}
109111

110112
/*

0 commit comments

Comments
 (0)