
Commit b3e5dc4

Martin Schwidefsky authored and committed
s390/mm: fix local TLB flushing vs. detach of an mm address space
The local TLB flushing code keeps an additional mask in the mm.context,
the cpu_attach_mask. At the time a global flush of an address space is
done the cpu_attach_mask is copied to the mm_cpumask in order to avoid
future global flushes in case the mm is used by a single CPU only after
the flush.

Trouble is that the reset of the mm_cpumask is racy against the detach
of an mm address space by switch_mm. The current order is first the
global TLB flush and then the copy of the cpu_attach_mask to the
mm_cpumask. The order needs to be the other way around.

Cc: <[email protected]>
Reviewed-by: Heiko Carstens <[email protected]>
Signed-off-by: Martin Schwidefsky <[email protected]>
1 parent 46fde9a commit b3e5dc4
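
To make the race concrete, here is an illustrative interleaving sketched
in C comments. The CPU labels and the narration are mine, not part of
the patch; the calls are the ones visible in the diffs below:

/*
 * Old order in __tlb_flush_mm(): flush first, copy second.
 *
 *   CPU A (flusher)                    CPU B (attached to the mm)
 *   ---------------                    --------------------------
 *   __tlb_flush_global();
 *                                      creates fresh TLB entries
 *                                      switch_mm() detaches the mm,
 *                                      clears B in cpu_attach_mask
 *   cpumask_copy(mm_cpumask(mm),
 *                &mm->context.cpu_attach_mask);
 *
 * B has now vanished from mm_cpumask although it still caches TLB
 * entries created after the global flush. A later flush that consults
 * mm_cpumask may stay local and leave those entries stale. Copying the
 * mask before the flush closes the window: every entry that existed
 * when the mask was copied is covered by the flush that follows.
 */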

File tree

2 files changed: +7 -23 lines changed

arch/s390/include/asm/mmu_context.h

Lines changed: 2 additions & 2 deletions
@@ -103,7 +103,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	if (prev == next)
 		return;
 	cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
-	cpumask_set_cpu(cpu, mm_cpumask(next));
 	/* Clear old ASCE by loading the kernel ASCE. */
 	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
 	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
@@ -121,7 +120,7 @@ static inline void finish_arch_post_lock_switch(void)
 		preempt_disable();
 		while (atomic_read(&mm->context.flush_count))
 			cpu_relax();
-
+		cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
 		if (mm->context.flush_mm)
 			__tlb_flush_mm(mm);
 		preempt_enable();
@@ -136,6 +135,7 @@ static inline void activate_mm(struct mm_struct *prev,
 			       struct mm_struct *next)
 {
 	switch_mm(prev, next, current);
+	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
 	set_user_asce(next);
 }
 
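
Read together, the second hunk is the attach side of the fix. A CPU that
(re)attaches an mm now publishes itself in mm_cpumask only after any
in-flight flush has drained, so the bit cannot be lost to that flush's
cpumask_copy(). The resulting lines, with explanatory comments added
here (the comments are mine, not in the source):

		preempt_disable();
		/* Wait for a concurrent __tlb_flush_mm() to finish. */
		while (atomic_read(&mm->context.flush_count))
			cpu_relax();
		/* No flush is running, so its cpumask_copy() cannot
		 * overwrite the bit we set now. */
		cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
		if (mm->context.flush_mm)
			__tlb_flush_mm(mm);
		preempt_enable();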

arch/s390/include/asm/tlbflush.h

Lines changed: 5 additions & 21 deletions
@@ -48,23 +48,6 @@ static inline void __tlb_flush_global(void)
  * Flush TLB entries for a specific mm on all CPUs (in case gmap is used
  * this implicates multiple ASCEs!).
  */
-static inline void __tlb_flush_full(struct mm_struct *mm)
-{
-	preempt_disable();
-	atomic_inc(&mm->context.flush_count);
-	if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
-		/* Local TLB flush */
-		__tlb_flush_local();
-	} else {
-		/* Global TLB flush */
-		__tlb_flush_global();
-		/* Reset TLB flush mask */
-		cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
-	}
-	atomic_dec(&mm->context.flush_count);
-	preempt_enable();
-}
-
 static inline void __tlb_flush_mm(struct mm_struct *mm)
 {
 	unsigned long gmap_asce;
@@ -76,16 +59,18 @@ static inline void __tlb_flush_mm(struct mm_struct *mm)
 	 */
 	preempt_disable();
 	atomic_inc(&mm->context.flush_count);
+	/* Reset TLB flush mask */
+	cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
+	barrier();
 	gmap_asce = READ_ONCE(mm->context.gmap_asce);
 	if (MACHINE_HAS_IDTE && gmap_asce != -1UL) {
 		if (gmap_asce)
 			__tlb_flush_idte(gmap_asce);
 		__tlb_flush_idte(mm->context.asce);
 	} else {
-		__tlb_flush_full(mm);
+		/* Global TLB flush */
+		__tlb_flush_global();
 	}
-	/* Reset TLB flush mask */
-	cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
 	atomic_dec(&mm->context.flush_count);
 	preempt_enable();
 }
@@ -99,7 +84,6 @@ static inline void __tlb_flush_kernel(void)
 }
 #else
 #define __tlb_flush_global()	__tlb_flush_local()
-#define __tlb_flush_full(mm)	__tlb_flush_local()
 
 /*
  * Flush TLB entries for a specific ASCE on all CPUs.