Commit a414741

Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "13 patches.

  Subsystems affected by this patch series: resource, squashfs, hfsplus,
  modprobe, and mm (hugetlb, slub, userfaultfd, ksm, pagealloc, kasan,
  pagemap, and ioremap)"

* emailed patches from Andrew Morton <[email protected]>:
  mm/ioremap: fix iomap_max_page_shift
  docs: admin-guide: update description for kernel.modprobe sysctl
  hfsplus: prevent corruption in shrinking truncate
  mm/filemap: fix readahead return types
  kasan: fix unit tests with CONFIG_UBSAN_LOCAL_BOUNDS enabled
  mm: fix struct page layout on 32-bit systems
  ksm: revert "use GET_KSM_PAGE_NOLOCK to get ksm page in remove_rmap_item_from_tree()"
  userfaultfd: release page in error path to avoid BUG_ON
  squashfs: fix divide error in calculate_skip()
  kernel/resource: fix return code check in __request_free_mem_region
  mm, slub: move slub_debug static key enabling outside slab_mutex
  mm/hugetlb: fix cow where page writtable in child
  mm/hugetlb: fix F_SEAL_FUTURE_WRITE
2 parents f36edc5 + 86d0c16 commit a414741

18 files changed, +129 -62 lines changed

Documentation/admin-guide/sysctl/kernel.rst

Lines changed: 5 additions & 4 deletions
@@ -483,10 +483,11 @@ modprobe
 ========
 
 The full path to the usermode helper for autoloading kernel modules,
-by default "/sbin/modprobe". This binary is executed when the kernel
-requests a module. For example, if userspace passes an unknown
-filesystem type to mount(), then the kernel will automatically request
-the corresponding filesystem module by executing this usermode helper.
+by default ``CONFIG_MODPROBE_PATH``, which in turn defaults to
+"/sbin/modprobe". This binary is executed when the kernel requests a
+module. For example, if userspace passes an unknown filesystem type
+to mount(), then the kernel will automatically request the
+corresponding filesystem module by executing this usermode helper.
 This usermode helper should insert the needed module into the kernel.
 
 This sysctl only affects module autoloading. It has no effect on the
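
The effective path can be checked from userspace through procfs. Below is a
minimal sketch in plain C (not part of this commit), assuming the standard
/proc/sys/kernel/modprobe mapping of the kernel.modprobe sysctl; on a kernel
built with a non-default CONFIG_MODPROBE_PATH it prints that configured path
instead of /sbin/modprobe.

#include <stdio.h>

int main(void)
{
	char path[256];
	FILE *f = fopen("/proc/sys/kernel/modprobe", "r");

	if (!f) {
		perror("/proc/sys/kernel/modprobe");
		return 1;
	}
	if (fgets(path, sizeof(path), f))
		printf("usermode module loader: %s", path);
	fclose(f);
	return 0;
}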

fs/hfsplus/extents.c

Lines changed: 4 additions & 3 deletions
@@ -598,21 +598,22 @@ void hfsplus_file_truncate(struct inode *inode)
 		res = __hfsplus_ext_cache_extent(&fd, inode, alloc_cnt);
 		if (res)
 			break;
-		hfs_brec_remove(&fd);
 
-		mutex_unlock(&fd.tree->tree_lock);
 		start = hip->cached_start;
+		if (blk_cnt <= start)
+			hfs_brec_remove(&fd);
+		mutex_unlock(&fd.tree->tree_lock);
 		hfsplus_free_extents(sb, hip->cached_extents,
 				     alloc_cnt - start, alloc_cnt - blk_cnt);
 		hfsplus_dump_extent(hip->cached_extents);
+		mutex_lock(&fd.tree->tree_lock);
 		if (blk_cnt > start) {
 			hip->extent_state |= HFSPLUS_EXT_DIRTY;
 			break;
 		}
 		alloc_cnt = start;
 		hip->cached_start = hip->cached_blocks = 0;
 		hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
-		mutex_lock(&fd.tree->tree_lock);
 	}
 	hfs_find_exit(&fd);
 

fs/hugetlbfs/inode.c

Lines changed: 5 additions & 0 deletions
@@ -131,6 +131,7 @@ static void huge_pagevec_release(struct pagevec *pvec)
 static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 {
 	struct inode *inode = file_inode(file);
+	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
 	loff_t len, vma_len;
 	int ret;
 	struct hstate *h = hstate_file(file);
@@ -146,6 +147,10 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 	vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
 	vma->vm_ops = &hugetlb_vm_ops;
 
+	ret = seal_check_future_write(info->seals, vma);
+	if (ret)
+		return ret;
+
 	/*
 	 * page based offset in vm_pgoff could be sufficiently large to
 	 * overflow a loff_t when converted to byte offset. This can

fs/iomap/buffered-io.c

Lines changed: 2 additions & 2 deletions
@@ -394,15 +394,15 @@ void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
 {
 	struct inode *inode = rac->mapping->host;
 	loff_t pos = readahead_pos(rac);
-	loff_t length = readahead_length(rac);
+	size_t length = readahead_length(rac);
 	struct iomap_readpage_ctx ctx = {
 		.rac	= rac,
 	};
 
 	trace_iomap_readahead(inode, readahead_count(rac));
 
 	while (length > 0) {
-		loff_t ret = iomap_apply(inode, pos, length, 0, ops,
+		ssize_t ret = iomap_apply(inode, pos, length, 0, ops,
 				&ctx, iomap_readahead_actor);
 		if (ret <= 0) {
 			WARN_ON_ONCE(ret == 0);

fs/squashfs/file.c

Lines changed: 3 additions & 3 deletions
@@ -211,11 +211,11 @@ static long long read_indexes(struct super_block *sb, int n,
  * If the skip factor is limited in this way then the file will use multiple
  * slots.
  */
-static inline int calculate_skip(int blocks)
+static inline int calculate_skip(u64 blocks)
 {
-	int skip = blocks / ((SQUASHFS_META_ENTRIES + 1)
+	u64 skip = blocks / ((SQUASHFS_META_ENTRIES + 1)
 		 * SQUASHFS_META_INDEXES);
-	return min(SQUASHFS_CACHED_BLKS - 1, skip + 1);
+	return min((u64) SQUASHFS_CACHED_BLKS - 1, skip + 1);
 }
 
 
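To see why widening to u64 matters, here is a standalone sketch of the fixed
helper (plain C, not part of this commit). The constant values are
illustrative assumptions rather than the real squashfs_fs.h definitions, and
the kernel's min() is written out by hand; the point is only that an
oversized block count (e.g. from a corrupted inode) is now handled entirely
in 64-bit unsigned arithmetic and simply clamps to the cache limit.

#include <assert.h>
#include <stdint.h>

typedef uint64_t u64;

#define SQUASHFS_META_ENTRIES	127	/* assumed value */
#define SQUASHFS_META_INDEXES	2048	/* assumed value */
#define SQUASHFS_CACHED_BLKS	8	/* assumed value */

/* Same shape as the fixed calculate_skip(); min() replaced by a conditional. */
static inline int calculate_skip(u64 blocks)
{
	u64 skip = blocks / ((SQUASHFS_META_ENTRIES + 1)
		 * SQUASHFS_META_INDEXES);

	return skip + 1 < (u64)(SQUASHFS_CACHED_BLKS - 1) ?
		(int)(skip + 1) : SQUASHFS_CACHED_BLKS - 1;
}

int main(void)
{
	/* A block count far beyond INT_MAX clamps to the cache limit. */
	assert(calculate_skip(0xffffffffffULL) == SQUASHFS_CACHED_BLKS - 1);
	assert(calculate_skip(0) == 1);
	return 0;
}
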
include/linux/mm.h

Lines changed: 32 additions & 0 deletions
@@ -3216,5 +3216,37 @@ void mem_dump_obj(void *object);
 static inline void mem_dump_obj(void *object) {}
 #endif
 
+/**
+ * seal_check_future_write - Check for F_SEAL_FUTURE_WRITE flag and handle it
+ * @seals: the seals to check
+ * @vma: the vma to operate on
+ *
+ * Check whether F_SEAL_FUTURE_WRITE is set; if so, do proper check/handling on
+ * the vma flags. Return 0 if check pass, or <0 for errors.
+ */
+static inline int seal_check_future_write(int seals, struct vm_area_struct *vma)
+{
+	if (seals & F_SEAL_FUTURE_WRITE) {
+		/*
+		 * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
+		 * "future write" seal active.
+		 */
+		if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
+			return -EPERM;
+
+		/*
+		 * Since an F_SEAL_FUTURE_WRITE sealed memfd can be mapped as
+		 * MAP_SHARED and read-only, take care to not allow mprotect to
+		 * revert protections on such mappings. Do this only for shared
+		 * mappings. For private mappings, don't need to mask
+		 * VM_MAYWRITE as we still want them to be COW-writable.
+		 */
+		if (vma->vm_flags & VM_SHARED)
+			vma->vm_flags &= ~(VM_MAYWRITE);
+	}
+
+	return 0;
+}
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
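
The helper above is what the hugetlbfs mmap path now calls, extending the
F_SEAL_FUTURE_WRITE semantics already honoured by shmem-backed memfds to
hugetlbfs-backed ones. A userspace sketch of the expected behaviour follows;
it is not part of this commit and assumes a glibc recent enough to expose
memfd_create() and F_SEAL_FUTURE_WRITE (Linux 5.1+).

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	/* MFD_ALLOW_SEALING is required so seals can be added later. */
	int fd = memfd_create("sealed", MFD_ALLOW_SEALING);

	if (fd < 0 || ftruncate(fd, 4096) < 0)
		return 1;

	/* Seal against *future* writes; pre-existing mappings keep working. */
	if (fcntl(fd, F_ADD_SEALS, F_SEAL_FUTURE_WRITE) < 0)
		return 1;

	/* A new shared writable mapping must now fail with EPERM... */
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	printf("MAP_SHARED|PROT_WRITE: %s\n",
	       p == MAP_FAILED ? strerror(errno) : "unexpectedly succeeded");

	/*
	 * ...while a read-only shared mapping is still allowed; the helper
	 * also clears VM_MAYWRITE on it, so a later mprotect(PROT_WRITE)
	 * cannot re-enable writes.
	 */
	p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
	printf("MAP_SHARED|PROT_READ: %s\n",
	       p == MAP_FAILED ? strerror(errno) : "mapped read-only");
	return 0;
}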

include/linux/mm_types.h

Lines changed: 2 additions & 2 deletions
@@ -97,10 +97,10 @@ struct page {
 		};
 		struct {	/* page_pool used by netstack */
 			/**
-			 * @dma_addr: might require a 64-bit value even on
+			 * @dma_addr: might require a 64-bit value on
 			 * 32-bit architectures.
 			 */
-			dma_addr_t dma_addr;
+			unsigned long dma_addr[2];
 		};
 		struct {	/* slab, slob and slub */
 			union {

include/linux/pagemap.h

Lines changed: 3 additions & 3 deletions
@@ -997,9 +997,9 @@ static inline loff_t readahead_pos(struct readahead_control *rac)
  * readahead_length - The number of bytes in this readahead request.
  * @rac: The readahead request.
  */
-static inline loff_t readahead_length(struct readahead_control *rac)
+static inline size_t readahead_length(struct readahead_control *rac)
 {
-	return (loff_t)rac->_nr_pages * PAGE_SIZE;
+	return rac->_nr_pages * PAGE_SIZE;
 }
 
 /**
@@ -1024,7 +1024,7 @@ static inline unsigned int readahead_count(struct readahead_control *rac)
  * readahead_batch_length - The number of bytes in the current batch.
  * @rac: The readahead request.
  */
-static inline loff_t readahead_batch_length(struct readahead_control *rac)
+static inline size_t readahead_batch_length(struct readahead_control *rac)
 {
 	return rac->_batch_count * PAGE_SIZE;
 }

include/net/page_pool.h

Lines changed: 11 additions & 1 deletion
@@ -198,7 +198,17 @@ static inline void page_pool_recycle_direct(struct page_pool *pool,
 
 static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
 {
-	return page->dma_addr;
+	dma_addr_t ret = page->dma_addr[0];
+	if (sizeof(dma_addr_t) > sizeof(unsigned long))
+		ret |= (dma_addr_t)page->dma_addr[1] << 16 << 16;
+	return ret;
+}
+
+static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
+{
+	page->dma_addr[0] = addr;
+	if (sizeof(dma_addr_t) > sizeof(unsigned long))
+		page->dma_addr[1] = upper_32_bits(addr);
 }
 
 static inline bool is_page_pool_compiled_in(void)
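
For reference, a standalone sketch (plain userspace C, not kernel code) of the
split-field scheme used above: the DMA address is stored as two unsigned longs,
and the "<< 16 << 16" double shift stands in for "<< 32" so the expression
still compiles cleanly on configurations where dma_addr_t itself is only 32
bits wide. The fake_page type and the fixed 64-bit dma_addr_t are assumptions
made for the demo only.

#include <assert.h>
#include <stdint.h>

typedef uint64_t dma_addr_t;	/* assumed 64-bit for the demo */

struct fake_page {		/* stand-in for the page_pool words of struct page */
	unsigned long dma_addr[2];
};

static dma_addr_t get_dma_addr(const struct fake_page *page)
{
	dma_addr_t ret = page->dma_addr[0];

	/* Mirrors the kernel helper: the second word is only used when
	 * dma_addr_t is wider than unsigned long, i.e. a 32-bit kernel
	 * with 64-bit DMA addressing. */
	if (sizeof(dma_addr_t) > sizeof(unsigned long))
		ret |= (dma_addr_t)page->dma_addr[1] << 16 << 16;
	return ret;
}

static void set_dma_addr(struct fake_page *page, dma_addr_t addr)
{
	page->dma_addr[0] = (unsigned long)addr;
	if (sizeof(dma_addr_t) > sizeof(unsigned long))
		/* stand-in for the kernel's upper_32_bits(addr) */
		page->dma_addr[1] = (unsigned long)(addr >> 16 >> 16);
}

int main(void)
{
	struct fake_page page = { { 0, 0 } };

	/* Round-trips whether unsigned long is 32 or 64 bits wide. */
	set_dma_addr(&page, 0x123456789aULL);
	assert(get_dma_addr(&page) == 0x123456789aULL);
	return 0;
}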

kernel/resource.c

Lines changed: 1 addition & 1 deletion
@@ -1805,7 +1805,7 @@ static struct resource *__request_free_mem_region(struct device *dev,
 				REGION_DISJOINT)
 			continue;
 
-		if (!__request_region_locked(res, &iomem_resource, addr, size,
+		if (__request_region_locked(res, &iomem_resource, addr, size,
 					     name, 0))
 			break;
 