Skip to content

Commit 8d7071a

Browse files
committed
mm: always expand the stack with the mmap write lock held
This finishes the job of always holding the mmap write lock when extending the user stack vma, and removes the 'write_locked' argument from the vm helper functions again.

For some cases, we just avoid expanding the stack at all: drivers and page pinning really shouldn't be extending any stacks. Let's see if any strange users really wanted that.

It's worth noting that architectures that weren't converted to the new lock_mm_and_find_vma() helper function are left using the legacy "expand_stack()" function, but it has been changed to drop the mmap_lock and take it for writing while expanding the vma. This makes it fairly straightforward to convert the remaining architectures.

As a result of dropping and re-taking the lock, the calling conventions for this function have also changed, since the old vma may no longer be valid. So it will now return the new vma if successful, and NULL - and the lock dropped - if the area could not be extended.

Tested-by: Vegard Nossum <[email protected]>
Tested-by: John Paul Adrian Glaubitz <[email protected]> # ia64
Tested-by: Frank Scheiner <[email protected]> # ia64
Signed-off-by: Linus Torvalds <[email protected]>
1 parent f313c51 commit 8d7071a

File tree

17 files changed

+169
-116
lines changed

17 files changed

+169
-116
lines changed

arch/ia64/mm/fault.c

Lines changed: 6 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -110,10 +110,12 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
110110
* register backing store that needs to expand upwards, in
111111
* this case vma will be null, but prev_vma will ne non-null
112112
*/
113-
if (( !vma && prev_vma ) || (address < vma->vm_start) )
114-
goto check_expansion;
113+
if (( !vma && prev_vma ) || (address < vma->vm_start) ) {
114+
vma = expand_stack(mm, address);
115+
if (!vma)
116+
goto bad_area_nosemaphore;
117+
}
115118

116-
good_area:
117119
code = SEGV_ACCERR;
118120

119121
/* OK, we've got a good vm_area for this memory area. Check the access permissions: */
@@ -177,35 +179,9 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
177179
mmap_read_unlock(mm);
178180
return;
179181

180-
check_expansion:
181-
if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) {
182-
if (!vma)
183-
goto bad_area;
184-
if (!(vma->vm_flags & VM_GROWSDOWN))
185-
goto bad_area;
186-
if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
187-
|| REGION_OFFSET(address) >= RGN_MAP_LIMIT)
188-
goto bad_area;
189-
if (expand_stack(vma, address))
190-
goto bad_area;
191-
} else {
192-
vma = prev_vma;
193-
if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
194-
|| REGION_OFFSET(address) >= RGN_MAP_LIMIT)
195-
goto bad_area;
196-
/*
197-
* Since the register backing store is accessed sequentially,
198-
* we disallow growing it by more than a page at a time.
199-
*/
200-
if (address > vma->vm_end + PAGE_SIZE - sizeof(long))
201-
goto bad_area;
202-
if (expand_upwards(vma, address))
203-
goto bad_area;
204-
}
205-
goto good_area;
206-
207182
bad_area:
208183
mmap_read_unlock(mm);
184+
bad_area_nosemaphore:
209185
if ((isr & IA64_ISR_SP)
210186
|| ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
211187
{

arch/m68k/mm/fault.c

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -105,8 +105,9 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
105105
if (address + 256 < rdusp())
106106
goto map_err;
107107
}
108-
if (expand_stack(vma, address))
109-
goto map_err;
108+
vma = expand_stack(mm, address);
109+
if (!vma)
110+
goto map_err_nosemaphore;
110111

111112
/*
112113
* Ok, we have a good vm_area for this memory access, so
@@ -196,10 +197,12 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
196197
goto send_sig;
197198

198199
map_err:
200+
mmap_read_unlock(mm);
201+
map_err_nosemaphore:
199202
current->thread.signo = SIGSEGV;
200203
current->thread.code = SEGV_MAPERR;
201204
current->thread.faddr = address;
202-
goto send_sig;
205+
return send_fault_sig(regs);
203206

204207
acc_err:
205208
current->thread.signo = SIGSEGV;

arch/microblaze/mm/fault.c

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -192,8 +192,9 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
192192
&& (kernel_mode(regs) || !store_updates_sp(regs)))
193193
goto bad_area;
194194
}
195-
if (expand_stack(vma, address))
196-
goto bad_area;
195+
vma = expand_stack(mm, address);
196+
if (!vma)
197+
goto bad_area_nosemaphore;
197198

198199
good_area:
199200
code = SEGV_ACCERR;

arch/openrisc/mm/fault.c

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -127,8 +127,9 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address,
127127
if (address + PAGE_SIZE < regs->sp)
128128
goto bad_area;
129129
}
130-
if (expand_stack(vma, address))
131-
goto bad_area;
130+
vma = expand_stack(mm, address);
131+
if (!vma)
132+
goto bad_area_nosemaphore;
132133

133134
/*
134135
* Ok, we have a good vm_area for this memory access, so

arch/parisc/mm/fault.c

Lines changed: 11 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -288,15 +288,19 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
288288
retry:
289289
mmap_read_lock(mm);
290290
vma = find_vma_prev(mm, address, &prev_vma);
291-
if (!vma || address < vma->vm_start)
292-
goto check_expansion;
291+
if (!vma || address < vma->vm_start) {
292+
if (!prev_vma || !(prev_vma->vm_flags & VM_GROWSUP))
293+
goto bad_area;
294+
vma = expand_stack(mm, address);
295+
if (!vma)
296+
goto bad_area_nosemaphore;
297+
}
298+
293299
/*
294300
* Ok, we have a good vm_area for this memory access. We still need to
295301
* check the access permissions.
296302
*/
297303

298-
good_area:
299-
300304
if ((vma->vm_flags & acc_type) != acc_type)
301305
goto bad_area;
302306

@@ -347,17 +351,13 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
347351
mmap_read_unlock(mm);
348352
return;
349353

350-
check_expansion:
351-
vma = prev_vma;
352-
if (vma && (expand_stack(vma, address) == 0))
353-
goto good_area;
354-
355354
/*
356355
* Something tried to access memory that isn't in our memory map..
357356
*/
358357
bad_area:
359358
mmap_read_unlock(mm);
360359

360+
bad_area_nosemaphore:
361361
if (user_mode(regs)) {
362362
int signo, si_code;
363363

@@ -449,7 +449,7 @@ handle_nadtlb_fault(struct pt_regs *regs)
449449
{
450450
unsigned long insn = regs->iir;
451451
int breg, treg, xreg, val = 0;
452-
struct vm_area_struct *vma, *prev_vma;
452+
struct vm_area_struct *vma;
453453
struct task_struct *tsk;
454454
struct mm_struct *mm;
455455
unsigned long address;
@@ -485,7 +485,7 @@ handle_nadtlb_fault(struct pt_regs *regs)
485485
/* Search for VMA */
486486
address = regs->ior;
487487
mmap_read_lock(mm);
488-
vma = find_vma_prev(mm, address, &prev_vma);
488+
vma = vma_lookup(mm, address);
489489
mmap_read_unlock(mm);
490490

491491
/*
@@ -494,7 +494,6 @@ handle_nadtlb_fault(struct pt_regs *regs)
494494
*/
495495
acc_type = (insn & 0x40) ? VM_WRITE : VM_READ;
496496
if (vma
497-
&& address >= vma->vm_start
498497
&& (vma->vm_flags & acc_type) == acc_type)
499498
val = 1;
500499
}

arch/s390/mm/fault.c

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -457,8 +457,9 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
457457
if (unlikely(vma->vm_start > address)) {
458458
if (!(vma->vm_flags & VM_GROWSDOWN))
459459
goto out_up;
460-
if (expand_stack(vma, address))
461-
goto out_up;
460+
vma = expand_stack(mm, address);
461+
if (!vma)
462+
goto out;
462463
}
463464

464465
/*

arch/sparc/mm/fault_64.c

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -383,8 +383,9 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
383383
goto bad_area;
384384
}
385385
}
386-
if (expand_stack(vma, address))
387-
goto bad_area;
386+
vma = expand_stack(mm, address);
387+
if (!vma)
388+
goto bad_area_nosemaphore;
388389
/*
389390
* Ok, we have a good vm_area for this memory access, so
390391
* we can handle it..
@@ -487,8 +488,9 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
487488
* Fix it, but check if it's kernel or user first..
488489
*/
489490
bad_area:
490-
insn = get_fault_insn(regs, insn);
491491
mmap_read_unlock(mm);
492+
bad_area_nosemaphore:
493+
insn = get_fault_insn(regs, insn);
492494

493495
handle_kernel_fault:
494496
do_kernel_fault(regs, si_code, fault_code, insn, address);

arch/um/kernel/trap.c

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -47,14 +47,15 @@ int handle_page_fault(unsigned long address, unsigned long ip,
4747
vma = find_vma(mm, address);
4848
if (!vma)
4949
goto out;
50-
else if (vma->vm_start <= address)
50+
if (vma->vm_start <= address)
5151
goto good_area;
52-
else if (!(vma->vm_flags & VM_GROWSDOWN))
52+
if (!(vma->vm_flags & VM_GROWSDOWN))
5353
goto out;
54-
else if (is_user && !ARCH_IS_STACKGROW(address))
55-
goto out;
56-
else if (expand_stack(vma, address))
54+
if (is_user && !ARCH_IS_STACKGROW(address))
5755
goto out;
56+
vma = expand_stack(mm, address);
57+
if (!vma)
58+
goto out_nosemaphore;
5859

5960
good_area:
6061
*code_out = SEGV_ACCERR;

drivers/iommu/amd/iommu_v2.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -485,8 +485,8 @@ static void do_fault(struct work_struct *work)
485485
flags |= FAULT_FLAG_REMOTE;
486486

487487
mmap_read_lock(mm);
488-
vma = find_extend_vma(mm, address);
489-
if (!vma || address < vma->vm_start)
488+
vma = vma_lookup(mm, address);
489+
if (!vma)
490490
/* failed to get a vma in the right range */
491491
goto out;
492492

drivers/iommu/iommu-sva.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -175,7 +175,7 @@ iommu_sva_handle_iopf(struct iommu_fault *fault, void *data)
175175

176176
mmap_read_lock(mm);
177177

178-
vma = find_extend_vma(mm, prm->addr);
178+
vma = vma_lookup(mm, prm->addr);
179179
if (!vma)
180180
/* Unmapped area */
181181
goto out_put_mm;

0 commit comments

Comments
 (0)