
Commit bb523b4

Author: Andreas Gruenbacher (authored and committed)
gup: Turn fault_in_pages_{readable,writeable} into fault_in_{readable,writeable}
Turn fault_in_pages_{readable,writeable} into versions that return the
number of bytes not faulted in, similar to copy_to_user, instead of
returning a non-zero value when any of the requested pages couldn't be
faulted in.  This supports the existing users that require all pages to
be faulted in as well as new users that are happy if any pages can be
faulted in.

Rename the functions to fault_in_{readable,writeable} to make sure this
change doesn't silently break things.

Neither of these functions is entirely trivial and it doesn't seem
useful to inline them, so move them to mm/gup.c.

Signed-off-by: Andreas Gruenbacher <[email protected]>
1 parent 0c8eb28 commit bb523b4
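
[Editorial note, not part of the commit] The caller conversion pattern used throughout this patch is sketched below: the old helpers returned 0 or -EFAULT, while the new ones return the number of bytes not faulted in, so all-or-nothing callers still just test for a non-zero result. The function and variable names here (example_copy_in, ubuf, len) are hypothetical.

#include <linux/pagemap.h>	/* fault_in_readable() is declared here by this patch */

/* Sketch only: a hypothetical all-or-nothing caller. */
static int example_copy_in(const char __user *ubuf, size_t len)
{
        /* Old: fault_in_pages_readable() returned 0 on success, -EFAULT on failure. */
        /* if (fault_in_pages_readable(ubuf, len)) return -EFAULT; */

        /* New: returns the number of bytes NOT faulted in; non-zero still means
         * the range is not fully accessible, so the error path is unchanged. */
        if (fault_in_readable(ubuf, len))
                return -EFAULT;
        return 0;
}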

File tree

10 files changed: +93 lines, -76 lines


arch/powerpc/kernel/kvm.c

Lines changed: 2 additions & 1 deletion
@@ -669,7 +669,8 @@ static void __init kvm_use_magic_page(void)
         on_each_cpu(kvm_map_magic_page, &features, 1);
 
         /* Quick self-test to see if the mapping works */
-        if (fault_in_pages_readable((const char *)KVM_MAGIC_PAGE, sizeof(u32))) {
+        if (fault_in_readable((const char __user *)KVM_MAGIC_PAGE,
+                              sizeof(u32))) {
                 kvm_patching_worked = false;
                 return;
         }

arch/powerpc/kernel/signal_32.c

Lines changed: 2 additions & 2 deletions
@@ -1048,7 +1048,7 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
         if (new_ctx == NULL)
                 return 0;
         if (!access_ok(new_ctx, ctx_size) ||
-            fault_in_pages_readable((u8 __user *)new_ctx, ctx_size))
+            fault_in_readable((char __user *)new_ctx, ctx_size))
                 return -EFAULT;
 
         /*
@@ -1237,7 +1237,7 @@ SYSCALL_DEFINE3(debug_setcontext, struct ucontext __user *, ctx,
 #endif
 
         if (!access_ok(ctx, sizeof(*ctx)) ||
-            fault_in_pages_readable((u8 __user *)ctx, sizeof(*ctx)))
+            fault_in_readable((char __user *)ctx, sizeof(*ctx)))
                 return -EFAULT;
 
         /*

arch/powerpc/kernel/signal_64.c

Lines changed: 1 addition & 1 deletion
@@ -688,7 +688,7 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
         if (new_ctx == NULL)
                 return 0;
         if (!access_ok(new_ctx, ctx_size) ||
-            fault_in_pages_readable((u8 __user *)new_ctx, ctx_size))
+            fault_in_readable((char __user *)new_ctx, ctx_size))
                 return -EFAULT;
 
         /*

arch/x86/kernel/fpu/signal.c

Lines changed: 3 additions & 4 deletions
@@ -205,7 +205,7 @@ int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
         fpregs_unlock();
 
         if (ret) {
-                if (!fault_in_pages_writeable(buf_fx, fpu_user_xstate_size))
+                if (!fault_in_writeable(buf_fx, fpu_user_xstate_size))
                         goto retry;
                 return -EFAULT;
         }
@@ -278,10 +278,9 @@ static int restore_fpregs_from_user(void __user *buf, u64 xrestore,
                 if (ret != -EFAULT)
                         return -EINVAL;
 
-                ret = fault_in_pages_readable(buf, size);
-                if (!ret)
+                if (!fault_in_readable(buf, size))
                         goto retry;
-                return ret;
+                return -EFAULT;
         }
 
         /*

drivers/gpu/drm/armada/armada_gem.c

Lines changed: 3 additions & 4 deletions
@@ -336,7 +336,7 @@ int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
         struct drm_armada_gem_pwrite *args = data;
         struct armada_gem_object *dobj;
         char __user *ptr;
-        int ret;
+        int ret = 0;
 
         DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n",
                 args->handle, args->offset, args->size, args->ptr);
@@ -349,9 +349,8 @@ int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
         if (!access_ok(ptr, args->size))
                 return -EFAULT;
 
-        ret = fault_in_pages_readable(ptr, args->size);
-        if (ret)
-                return ret;
+        if (fault_in_readable(ptr, args->size))
+                return -EFAULT;
 
         dobj = armada_gem_object_lookup(file, args->handle);
         if (dobj == NULL)

fs/btrfs/ioctl.c

Lines changed: 2 additions & 3 deletions
@@ -2261,9 +2261,8 @@ static noinline int search_ioctl(struct inode *inode,
         key.offset = sk->min_offset;
 
         while (1) {
-                ret = fault_in_pages_writeable(ubuf + sk_offset,
-                                               *buf_size - sk_offset);
-                if (ret)
+                ret = -EFAULT;
+                if (fault_in_writeable(ubuf + sk_offset, *buf_size - sk_offset))
                         break;
 
                 ret = btrfs_search_forward(root, &key, path, sk->min_transid);

include/linux/pagemap.h

Lines changed: 3 additions & 54 deletions
@@ -733,61 +733,10 @@ int wait_on_page_private_2_killable(struct page *page);
 extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter);
 
 /*
- * Fault everything in given userspace address range in.
+ * Fault in userspace address range.
  */
-static inline int fault_in_pages_writeable(char __user *uaddr, size_t size)
-{
-        char __user *end = uaddr + size - 1;
-
-        if (unlikely(size == 0))
-                return 0;
-
-        if (unlikely(uaddr > end))
-                return -EFAULT;
-        /*
-         * Writing zeroes into userspace here is OK, because we know that if
-         * the zero gets there, we'll be overwriting it.
-         */
-        do {
-                if (unlikely(__put_user(0, uaddr) != 0))
-                        return -EFAULT;
-                uaddr += PAGE_SIZE;
-        } while (uaddr <= end);
-
-        /* Check whether the range spilled into the next page. */
-        if (((unsigned long)uaddr & PAGE_MASK) ==
-            ((unsigned long)end & PAGE_MASK))
-                return __put_user(0, end);
-
-        return 0;
-}
-
-static inline int fault_in_pages_readable(const char __user *uaddr, size_t size)
-{
-        volatile char c;
-        const char __user *end = uaddr + size - 1;
-
-        if (unlikely(size == 0))
-                return 0;
-
-        if (unlikely(uaddr > end))
-                return -EFAULT;
-
-        do {
-                if (unlikely(__get_user(c, uaddr) != 0))
-                        return -EFAULT;
-                uaddr += PAGE_SIZE;
-        } while (uaddr <= end);
-
-        /* Check whether the range spilled into the next page. */
-        if (((unsigned long)uaddr & PAGE_MASK) ==
-            ((unsigned long)end & PAGE_MASK)) {
-                return __get_user(c, end);
-        }
-
-        (void)c;
-        return 0;
-}
+size_t fault_in_writeable(char __user *uaddr, size_t size);
+size_t fault_in_readable(const char __user *uaddr, size_t size);
 
 int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
                                 pgoff_t index, gfp_t gfp_mask);

lib/iov_iter.c

Lines changed: 4 additions & 6 deletions
@@ -191,7 +191,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b
         buf = iov->iov_base + skip;
         copy = min(bytes, iov->iov_len - skip);
 
-        if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
+        if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_writeable(buf, copy)) {
                 kaddr = kmap_atomic(page);
                 from = kaddr + offset;
 
@@ -275,7 +275,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t
         buf = iov->iov_base + skip;
         copy = min(bytes, iov->iov_len - skip);
 
-        if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
+        if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_readable(buf, copy)) {
                 kaddr = kmap_atomic(page);
                 to = kaddr + offset;
 
@@ -446,13 +446,11 @@ int iov_iter_fault_in_readable(const struct iov_iter *i, size_t bytes)
                         bytes = i->count;
                 for (p = i->iov, skip = i->iov_offset; bytes; p++, skip = 0) {
                         size_t len = min(bytes, p->iov_len - skip);
-                        int err;
 
                         if (unlikely(!len))
                                 continue;
-                        err = fault_in_pages_readable(p->iov_base + skip, len);
-                        if (unlikely(err))
-                                return err;
+                        if (fault_in_readable(p->iov_base + skip, len))
+                                return -EFAULT;
                         bytes -= len;
                 }

mm/filemap.c

Lines changed: 1 addition & 1 deletion
@@ -90,7 +90,7 @@
  *    ->lock_page               (filemap_fault, access_process_vm)
  *
  *  ->i_rwsem                   (generic_perform_write)
- *    ->mmap_lock               (fault_in_pages_readable->do_page_fault)
+ *    ->mmap_lock               (fault_in_readable->do_page_fault)
  *
  *  bdi->wb.list_lock
  *    sb_lock                   (fs/fs-writeback.c)

mm/gup.c

Lines changed: 72 additions & 0 deletions
@@ -1656,6 +1656,78 @@ static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start,
 }
 #endif /* !CONFIG_MMU */
 
+/**
+ * fault_in_writeable - fault in userspace address range for writing
+ * @uaddr: start of address range
+ * @size: size of address range
+ *
+ * Returns the number of bytes not faulted in (like copy_to_user() and
+ * copy_from_user()).
+ */
+size_t fault_in_writeable(char __user *uaddr, size_t size)
+{
+        char __user *start = uaddr, *end;
+
+        if (unlikely(size == 0))
+                return 0;
+        if (!PAGE_ALIGNED(uaddr)) {
+                if (unlikely(__put_user(0, uaddr) != 0))
+                        return size;
+                uaddr = (char __user *)PAGE_ALIGN((unsigned long)uaddr);
+        }
+        end = (char __user *)PAGE_ALIGN((unsigned long)start + size);
+        if (unlikely(end < start))
+                end = NULL;
+        while (uaddr != end) {
+                if (unlikely(__put_user(0, uaddr) != 0))
+                        goto out;
+                uaddr += PAGE_SIZE;
+        }
+
+out:
+        if (size > uaddr - start)
+                return size - (uaddr - start);
+        return 0;
+}
+EXPORT_SYMBOL(fault_in_writeable);
+
+/**
+ * fault_in_readable - fault in userspace address range for reading
+ * @uaddr: start of user address range
+ * @size: size of user address range
+ *
+ * Returns the number of bytes not faulted in (like copy_to_user() and
+ * copy_from_user()).
+ */
+size_t fault_in_readable(const char __user *uaddr, size_t size)
+{
+        const char __user *start = uaddr, *end;
+        volatile char c;
+
+        if (unlikely(size == 0))
+                return 0;
+        if (!PAGE_ALIGNED(uaddr)) {
+                if (unlikely(__get_user(c, uaddr) != 0))
+                        return size;
+                uaddr = (const char __user *)PAGE_ALIGN((unsigned long)uaddr);
+        }
+        end = (const char __user *)PAGE_ALIGN((unsigned long)start + size);
+        if (unlikely(end < start))
+                end = NULL;
+        while (uaddr != end) {
+                if (unlikely(__get_user(c, uaddr) != 0))
+                        goto out;
+                uaddr += PAGE_SIZE;
+        }
+
+out:
+        (void)c;
+        if (size > uaddr - start)
+                return size - (uaddr - start);
+        return 0;
+}
+EXPORT_SYMBOL(fault_in_readable);
+
 /**
  * get_dump_page() - pin user page in memory while writing it to core dump
  * @addr: user address
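
[Editorial note, not part of the commit] The byte-count return value enables the second kind of user the commit message mentions: a caller that is content if only a leading portion of the range can be faulted in. The sketch below illustrates that usage; the function and variable names (example_partial_write, ubuf, len) are hypothetical.

#include <linux/pagemap.h>	/* declares fault_in_writeable() as of this patch */

/* Sketch: shrink the request to the prefix that could be faulted in. */
static ssize_t example_partial_write(char __user *ubuf, size_t len)
{
        size_t not_faulted = fault_in_writeable(ubuf, len);	/* bytes NOT faulted in */

        if (not_faulted == len)
                return -EFAULT;			/* nothing could be faulted in */
        len -= not_faulted;			/* operate on the resident prefix */
        /* ... copy up to len bytes to ubuf here ... */
        return len;
}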
