Commit a050ba1

mm/fault: convert remaining simple cases to lock_mm_and_find_vma()
This does the simple pattern conversion of alpha, arc, csky, hexagon, loongarch, nios2, sh, sparc32, and xtensa to the lock_mm_and_find_vma() helper. They all have the regular fault handling pattern without odd special cases.

The remaining architectures all have something that keeps us from a straightforward conversion: ia64 and parisc have stacks that can grow both up as well as down (and ia64 has special address region checks).

And m68k, microblaze, openrisc, sparc64, and um end up having extra rules about only expanding the stack down a limited amount below the user space stack pointer. That is something that x86 used to do too (long long ago), and it probably could just be skipped, but it still makes the conversion less than trivial.

Note that this conversion was done manually, and with the exception of alpha, without any build testing, because I have a fairly limited cross-building environment. The cases are all simple, and I went through the changes several times, but...

Signed-off-by: Linus Torvalds <[email protected]>
1 parent 8b35ca3 commit a050ba1
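For readers who don't have the helper in front of them: the contract the converted callers rely on can be reconstructed from the open-coded sequence each hunk below removes. What follows is a simplified sketch of that contract only, not the actual mm/memory.c implementation (the real helper also handles details such as retrying the stack expansion under the mmap write lock):

/*
 * Simplified sketch -- reconstructed from the pattern this commit
 * removes, NOT the real mm/memory.c implementation.  Contract:
 *   success: returns the vma covering addr, mmap lock held for read;
 *   failure: returns NULL with the mmap lock already dropped, which
 *            is why converted callers jump to bad_area_nosemaphore.
 */
struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
		unsigned long addr, struct pt_regs *regs)
{
	struct vm_area_struct *vma;

	mmap_read_lock(mm);
	vma = find_vma(mm, addr);
	if (likely(vma && vma->vm_start <= addr))
		return vma;		/* common case: addr inside a mapped vma */

	/* addr below the vma: try growing a VM_GROWSDOWN stack down to it */
	if (vma && (vma->vm_flags & VM_GROWSDOWN) && !expand_stack(vma, addr))
		return vma;

	mmap_read_unlock(mm);		/* callers must not unlock again */
	return NULL;
}

With that contract, the per-architecture hunks below are mostly mechanical: replace the open-coded lookup with one call, and route the NULL case to a new bad_area_nosemaphore label that skips the mmap_read_unlock().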

18 files changed, +45 -124 lines changed

arch/alpha/Kconfig

Lines changed: 1 addition & 0 deletions
@@ -30,6 +30,7 @@ config ALPHA
 	select HAS_IOPORT
 	select HAVE_ARCH_AUDITSYSCALL
 	select HAVE_MOD_ARCH_SPECIFIC
+	select LOCK_MM_AND_FIND_VMA
 	select MODULES_USE_ELF_RELA
 	select ODD_RT_SIGACTION
 	select OLD_SIGSUSPEND

arch/alpha/mm/fault.c

Lines changed: 3 additions & 10 deletions
@@ -119,20 +119,12 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
 	flags |= FAULT_FLAG_USER;
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 retry:
-	mmap_read_lock(mm);
-	vma = find_vma(mm, address);
+	vma = lock_mm_and_find_vma(mm, address, regs);
 	if (!vma)
-		goto bad_area;
-	if (vma->vm_start <= address)
-		goto good_area;
-	if (!(vma->vm_flags & VM_GROWSDOWN))
-		goto bad_area;
-	if (expand_stack(vma, address))
-		goto bad_area;
+		goto bad_area_nosemaphore;
 
 	/* Ok, we have a good vm_area for this memory access, so
 	   we can handle it.  */
- good_area:
 	si_code = SEGV_ACCERR;
 	if (cause < 0) {
 		if (!(vma->vm_flags & VM_EXEC))
@@ -192,6 +184,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
  bad_area:
 	mmap_read_unlock(mm);
 
+ bad_area_nosemaphore:
 	if (user_mode(regs))
 		goto do_sigsegv;

arch/arc/Kconfig

Lines changed: 1 addition & 0 deletions
@@ -41,6 +41,7 @@ config ARC
 	select HAVE_PERF_EVENTS
 	select HAVE_SYSCALL_TRACEPOINTS
 	select IRQ_DOMAIN
+	select LOCK_MM_AND_FIND_VMA
 	select MODULES_USE_ELF_RELA
 	select OF
 	select OF_EARLY_FLATTREE

arch/arc/mm/fault.c

Lines changed: 3 additions & 8 deletions
@@ -113,15 +113,9 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 retry:
-	mmap_read_lock(mm);
-
-	vma = find_vma(mm, address);
+	vma = lock_mm_and_find_vma(mm, address, regs);
 	if (!vma)
-		goto bad_area;
-	if (unlikely(address < vma->vm_start)) {
-		if (!(vma->vm_flags & VM_GROWSDOWN) || expand_stack(vma, address))
-			goto bad_area;
-	}
+		goto bad_area_nosemaphore;
 
 	/*
 	 * vm_area is good, now check permissions for this memory access
@@ -161,6 +155,7 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
 bad_area:
 	mmap_read_unlock(mm);
 
+bad_area_nosemaphore:
 	/*
 	 * Major/minor page fault accounting
 	 * (in case of retry we only land here once)

arch/csky/Kconfig

Lines changed: 1 addition & 0 deletions
@@ -96,6 +96,7 @@ config CSKY
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_STACKPROTECTOR
 	select HAVE_SYSCALL_TRACEPOINTS
+	select LOCK_MM_AND_FIND_VMA
 	select MAY_HAVE_SPARSE_IRQ
 	select MODULES_USE_ELF_RELA if MODULES
 	select OF

arch/csky/mm/fault.c

Lines changed: 5 additions & 17 deletions
@@ -97,13 +97,12 @@ static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_f
 	BUG();
 }
 
-static inline void bad_area(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
+static inline void bad_area_nosemaphore(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
 {
 	/*
 	 * Something tried to access memory that isn't in our memory map.
 	 * Fix it, but check if it's kernel or user first.
 	 */
-	mmap_read_unlock(mm);
 	/* User mode accesses just cause a SIGSEGV */
 	if (user_mode(regs)) {
 		do_trap(regs, SIGSEGV, code, addr);
@@ -238,32 +237,21 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
 	if (is_write(regs))
 		flags |= FAULT_FLAG_WRITE;
 retry:
-	mmap_read_lock(mm);
-	vma = find_vma(mm, addr);
+	vma = lock_mm_and_find_vma(mm, addr, regs);
 	if (unlikely(!vma)) {
-		bad_area(regs, mm, code, addr);
-		return;
-	}
-	if (likely(vma->vm_start <= addr))
-		goto good_area;
-	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
-		bad_area(regs, mm, code, addr);
-		return;
-	}
-	if (unlikely(expand_stack(vma, addr))) {
-		bad_area(regs, mm, code, addr);
+		bad_area_nosemaphore(regs, mm, code, addr);
 		return;
 	}
 
 	/*
 	 * Ok, we have a good vm_area for this memory access, so
 	 * we can handle it.
 	 */
-good_area:
 	code = SEGV_ACCERR;
 
 	if (unlikely(access_error(regs, vma))) {
-		bad_area(regs, mm, code, addr);
+		mmap_read_unlock(mm);
+		bad_area_nosemaphore(regs, mm, code, addr);
 		return;
 	}
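An editorial gloss on the csky hunk above (not part of the commit message): csky wraps its error path in a helper, and the old bad_area() dropped the mmap lock itself. After the rename to bad_area_nosemaphore() the unlock responsibility moves to the callers, which leaves the two failure paths asymmetric, as the annotated excerpt of the new code shows:

	/* lookup failure: lock_mm_and_find_vma() already dropped the lock */
	if (unlikely(!vma)) {
		bad_area_nosemaphore(regs, mm, code, addr);
		return;
	}
	...
	/* permission failure: the lock is still held, so unlock explicitly */
	if (unlikely(access_error(regs, vma))) {
		mmap_read_unlock(mm);
		bad_area_nosemaphore(regs, mm, code, addr);
		return;
	}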

arch/hexagon/Kconfig

Lines changed: 1 addition & 0 deletions
@@ -28,6 +28,7 @@ config HEXAGON
 	select GENERIC_SMP_IDLE_THREAD
 	select STACKTRACE_SUPPORT
 	select GENERIC_CLOCKEVENTS_BROADCAST
+	select LOCK_MM_AND_FIND_VMA
 	select MODULES_USE_ELF_RELA
 	select GENERIC_CPU_DEVICES
 	select ARCH_WANT_LD_ORPHAN_WARN

arch/hexagon/mm/vm_fault.c

Lines changed: 4 additions & 14 deletions
@@ -57,21 +57,10 @@ void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 retry:
-	mmap_read_lock(mm);
-	vma = find_vma(mm, address);
-	if (!vma)
-		goto bad_area;
+	vma = lock_mm_and_find_vma(mm, address, regs);
+	if (unlikely(!vma))
+		goto bad_area_nosemaphore;
 
-	if (vma->vm_start <= address)
-		goto good_area;
-
-	if (!(vma->vm_flags & VM_GROWSDOWN))
-		goto bad_area;
-
-	if (expand_stack(vma, address))
-		goto bad_area;
-
-good_area:
 	/* Address space is OK.  Now check access rights. */
 	si_code = SEGV_ACCERR;
 
@@ -143,6 +132,7 @@ void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
 bad_area:
 	mmap_read_unlock(mm);
 
+bad_area_nosemaphore:
 	if (user_mode(regs)) {
 		force_sig_fault(SIGSEGV, si_code, (void __user *)address);
 		return;

arch/loongarch/Kconfig

Lines changed: 1 addition & 0 deletions
@@ -130,6 +130,7 @@ config LOONGARCH
 	select HAVE_VIRT_CPU_ACCOUNTING_GEN if !SMP
 	select IRQ_FORCED_THREADING
 	select IRQ_LOONGARCH_CPU
+	select LOCK_MM_AND_FIND_VMA
 	select MMU_GATHER_MERGE_VMAS if MMU
 	select MODULES_USE_ELF_RELA if MODULES
 	select NEED_PER_CPU_EMBED_FIRST_CHUNK

arch/loongarch/mm/fault.c

Lines changed: 6 additions & 10 deletions
@@ -169,22 +169,18 @@ static void __kprobes __do_page_fault(struct pt_regs *regs,
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 retry:
-	mmap_read_lock(mm);
-	vma = find_vma(mm, address);
-	if (!vma)
-		goto bad_area;
-	if (vma->vm_start <= address)
-		goto good_area;
-	if (!(vma->vm_flags & VM_GROWSDOWN))
-		goto bad_area;
-	if (!expand_stack(vma, address))
-		goto good_area;
+	vma = lock_mm_and_find_vma(mm, address, regs);
+	if (unlikely(!vma))
+		goto bad_area_nosemaphore;
+	goto good_area;
+
 /*
  * Something tried to access memory that isn't in our memory map..
  * Fix it, but check if it's kernel or user first..
  */
 bad_area:
 	mmap_read_unlock(mm);
+bad_area_nosemaphore:
 	do_sigsegv(regs, write, address, si_code);
 	return;
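The loongarch hunk is the one conversion here that keeps an explicit goto good_area;. The reason is visible in the context lines: loongarch places its bad_area error block between the VMA lookup and the good_area label, so without the jump the success path would fall straight into the error handling. A sketch of the resulting control flow (editorial, reconstructed from the hunk above):

retry:
	vma = lock_mm_and_find_vma(mm, address, regs);
	if (unlikely(!vma))
		goto bad_area_nosemaphore;
	goto good_area;			/* hop over the error block below */

bad_area:
	mmap_read_unlock(mm);
bad_area_nosemaphore:
	do_sigsegv(regs, write, address, si_code);
	return;

good_area:
	/* access checks and handle_mm_fault() follow */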
