Skip to content

Commit 21ee33d

Browse files
torvalds authored and gregkh committed
mm/fault: convert remaining simple cases to lock_mm_and_find_vma()
commit a050ba1 upstream.

This does the simple pattern conversion of alpha, arc, csky, hexagon,
loongarch, nios2, sh, sparc32, and xtensa to the lock_mm_and_find_vma()
helper. They all have the regular fault handling pattern without odd
special cases.

The remaining architectures all have something that keeps us from a
straightforward conversion: ia64 and parisc have stacks that can grow
both up as well as down (and ia64 has special address region checks).

And m68k, microblaze, openrisc, sparc64, and um end up having extra
rules about only expanding the stack down a limited amount below the
user space stack pointer. That is something that x86 used to do too
(long long ago), and it probably could just be skipped, but it still
makes the conversion less than trivial.

Note that this conversion was done manually and with the exception of
alpha without any build testing, because I have a fairly limited cross-
building environment. The cases are all simple, and I went through the
changes several times, but...

Signed-off-by: Linus Torvalds <[email protected]>
Signed-off-by: Samuel Mendoza-Jonas <[email protected]>
Signed-off-by: David Woodhouse <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
1 parent 1f4197f commit 21ee33d

File tree

18 files changed

+45
-124
lines changed

18 files changed

+45
-124
lines changed

arch/alpha/Kconfig

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -28,6 +28,7 @@ config ALPHA
2828
select GENERIC_SMP_IDLE_THREAD
2929
select HAVE_ARCH_AUDITSYSCALL
3030
select HAVE_MOD_ARCH_SPECIFIC
31+
select LOCK_MM_AND_FIND_VMA
3132
select MODULES_USE_ELF_RELA
3233
select ODD_RT_SIGACTION
3334
select OLD_SIGSUSPEND

arch/alpha/mm/fault.c

Lines changed: 3 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -119,20 +119,12 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
119119
flags |= FAULT_FLAG_USER;
120120
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
121121
retry:
122-
mmap_read_lock(mm);
123-
vma = find_vma(mm, address);
122+
vma = lock_mm_and_find_vma(mm, address, regs);
124123
if (!vma)
125-
goto bad_area;
126-
if (vma->vm_start <= address)
127-
goto good_area;
128-
if (!(vma->vm_flags & VM_GROWSDOWN))
129-
goto bad_area;
130-
if (expand_stack(vma, address))
131-
goto bad_area;
124+
goto bad_area_nosemaphore;
132125

133126
/* Ok, we have a good vm_area for this memory access, so
134127
we can handle it. */
135-
good_area:
136128
si_code = SEGV_ACCERR;
137129
if (cause < 0) {
138130
if (!(vma->vm_flags & VM_EXEC))
@@ -189,6 +181,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
189181
bad_area:
190182
mmap_read_unlock(mm);
191183

184+
bad_area_nosemaphore:
192185
if (user_mode(regs))
193186
goto do_sigsegv;
194187

arch/arc/Kconfig

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -41,6 +41,7 @@ config ARC
4141
select HAVE_PERF_EVENTS
4242
select HAVE_SYSCALL_TRACEPOINTS
4343
select IRQ_DOMAIN
44+
select LOCK_MM_AND_FIND_VMA
4445
select MODULES_USE_ELF_RELA
4546
select OF
4647
select OF_EARLY_FLATTREE

arch/arc/mm/fault.c

Lines changed: 3 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -113,15 +113,9 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
113113

114114
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
115115
retry:
116-
mmap_read_lock(mm);
117-
118-
vma = find_vma(mm, address);
116+
vma = lock_mm_and_find_vma(mm, address, regs);
119117
if (!vma)
120-
goto bad_area;
121-
if (unlikely(address < vma->vm_start)) {
122-
if (!(vma->vm_flags & VM_GROWSDOWN) || expand_stack(vma, address))
123-
goto bad_area;
124-
}
118+
goto bad_area_nosemaphore;
125119

126120
/*
127121
* vm_area is good, now check permissions for this memory access
@@ -161,6 +155,7 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
161155
bad_area:
162156
mmap_read_unlock(mm);
163157

158+
bad_area_nosemaphore:
164159
/*
165160
* Major/minor page fault accounting
166161
* (in case of retry we only land here once)

arch/csky/Kconfig

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -96,6 +96,7 @@ config CSKY
9696
select HAVE_RSEQ
9797
select HAVE_STACKPROTECTOR
9898
select HAVE_SYSCALL_TRACEPOINTS
99+
select LOCK_MM_AND_FIND_VMA
99100
select MAY_HAVE_SPARSE_IRQ
100101
select MODULES_USE_ELF_RELA if MODULES
101102
select OF

arch/csky/mm/fault.c

Lines changed: 5 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -97,13 +97,12 @@ static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_f
9797
BUG();
9898
}
9999

100-
static inline void bad_area(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
100+
static inline void bad_area_nosemaphore(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
101101
{
102102
/*
103103
* Something tried to access memory that isn't in our memory map.
104104
* Fix it, but check if it's kernel or user first.
105105
*/
106-
mmap_read_unlock(mm);
107106
/* User mode accesses just cause a SIGSEGV */
108107
if (user_mode(regs)) {
109108
do_trap(regs, SIGSEGV, code, addr);
@@ -238,32 +237,21 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
238237
if (is_write(regs))
239238
flags |= FAULT_FLAG_WRITE;
240239
retry:
241-
mmap_read_lock(mm);
242-
vma = find_vma(mm, addr);
240+
vma = lock_mm_and_find_vma(mm, address, regs);
243241
if (unlikely(!vma)) {
244-
bad_area(regs, mm, code, addr);
245-
return;
246-
}
247-
if (likely(vma->vm_start <= addr))
248-
goto good_area;
249-
if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
250-
bad_area(regs, mm, code, addr);
251-
return;
252-
}
253-
if (unlikely(expand_stack(vma, addr))) {
254-
bad_area(regs, mm, code, addr);
242+
bad_area_nosemaphore(regs, mm, code, addr);
255243
return;
256244
}
257245

258246
/*
259247
* Ok, we have a good vm_area for this memory access, so
260248
* we can handle it.
261249
*/
262-
good_area:
263250
code = SEGV_ACCERR;
264251

265252
if (unlikely(access_error(regs, vma))) {
266-
bad_area(regs, mm, code, addr);
253+
mmap_read_unlock(mm);
254+
bad_area_nosemaphore(regs, mm, code, addr);
267255
return;
268256
}
269257

arch/hexagon/Kconfig

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -28,6 +28,7 @@ config HEXAGON
2828
select GENERIC_SMP_IDLE_THREAD
2929
select STACKTRACE_SUPPORT
3030
select GENERIC_CLOCKEVENTS_BROADCAST
31+
select LOCK_MM_AND_FIND_VMA
3132
select MODULES_USE_ELF_RELA
3233
select GENERIC_CPU_DEVICES
3334
select ARCH_WANT_LD_ORPHAN_WARN

arch/hexagon/mm/vm_fault.c

Lines changed: 4 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -57,21 +57,10 @@ void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
5757

5858
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
5959
retry:
60-
mmap_read_lock(mm);
61-
vma = find_vma(mm, address);
62-
if (!vma)
63-
goto bad_area;
60+
vma = lock_mm_and_find_vma(mm, address, regs);
61+
if (unlikely(!vma))
62+
goto bad_area_nosemaphore;
6463

65-
if (vma->vm_start <= address)
66-
goto good_area;
67-
68-
if (!(vma->vm_flags & VM_GROWSDOWN))
69-
goto bad_area;
70-
71-
if (expand_stack(vma, address))
72-
goto bad_area;
73-
74-
good_area:
7564
/* Address space is OK. Now check access rights. */
7665
si_code = SEGV_ACCERR;
7766

@@ -140,6 +129,7 @@ void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
140129
bad_area:
141130
mmap_read_unlock(mm);
142131

132+
bad_area_nosemaphore:
143133
if (user_mode(regs)) {
144134
force_sig_fault(SIGSEGV, si_code, (void __user *)address);
145135
return;

arch/loongarch/Kconfig

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -107,6 +107,7 @@ config LOONGARCH
107107
select HAVE_VIRT_CPU_ACCOUNTING_GEN if !SMP
108108
select IRQ_FORCED_THREADING
109109
select IRQ_LOONGARCH_CPU
110+
select LOCK_MM_AND_FIND_VMA
110111
select MMU_GATHER_MERGE_VMAS if MMU
111112
select MODULES_USE_ELF_RELA if MODULES
112113
select NEED_PER_CPU_EMBED_FIRST_CHUNK

arch/loongarch/mm/fault.c

Lines changed: 6 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -166,22 +166,18 @@ static void __kprobes __do_page_fault(struct pt_regs *regs,
166166

167167
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
168168
retry:
169-
mmap_read_lock(mm);
170-
vma = find_vma(mm, address);
171-
if (!vma)
172-
goto bad_area;
173-
if (vma->vm_start <= address)
174-
goto good_area;
175-
if (!(vma->vm_flags & VM_GROWSDOWN))
176-
goto bad_area;
177-
if (!expand_stack(vma, address))
178-
goto good_area;
169+
vma = lock_mm_and_find_vma(mm, address, regs);
170+
if (unlikely(!vma))
171+
goto bad_area_nosemaphore;
172+
goto good_area;
173+
179174
/*
180175
* Something tried to access memory that isn't in our memory map..
181176
* Fix it, but check if it's kernel or user first..
182177
*/
183178
bad_area:
184179
mmap_read_unlock(mm);
180+
bad_area_nosemaphore:
185181
do_sigsegv(regs, write, address, si_code);
186182
return;
187183

0 commit comments

Comments (0)