Commit dd9c7df

Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "13 patches.

  Subsystems affected by this patch series: mm (kasan, pagealloc, rmap,
  hmm, and hugetlb), and hfs"

* emailed patches from Andrew Morton <[email protected]>:
  mm/hugetlb: fix refs calculation from unaligned @vaddr
  hfs: add lock nesting notation to hfs_find_init
  hfs: fix high memory mapping in hfs_bnode_read
  hfs: add missing clean-up in hfs_fill_super
  lib/test_hmm: remove set but unused page variable
  mm: fix the try_to_unmap prototype for !CONFIG_MMU
  mm/page_alloc: further fix __alloc_pages_bulk() return value
  mm/page_alloc: correct return value when failing at preparing
  mm/page_alloc: avoid page allocator recursion with pagesets.lock held
  Revert "mm/page_alloc: make should_fail_alloc_page() static"
  kasan: fix build by including kernel.h
  kasan: add memzero init for unaligned size at DEBUG
  mm: move helper to check slub_debug_enabled
2 parents: a1c9ca5 + d08af0a

File tree: 12 files changed, +96 -41 lines

fs/hfs/bfind.c (13 additions, 1 deletion)

```diff
@@ -25,7 +25,19 @@ int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd)
 	fd->key = ptr + tree->max_key_len + 2;
 	hfs_dbg(BNODE_REFS, "find_init: %d (%p)\n",
 		tree->cnid, __builtin_return_address(0));
-	mutex_lock(&tree->tree_lock);
+	switch (tree->cnid) {
+	case HFS_CAT_CNID:
+		mutex_lock_nested(&tree->tree_lock, CATALOG_BTREE_MUTEX);
+		break;
+	case HFS_EXT_CNID:
+		mutex_lock_nested(&tree->tree_lock, EXTENTS_BTREE_MUTEX);
+		break;
+	case HFS_ATTR_CNID:
+		mutex_lock_nested(&tree->tree_lock, ATTR_BTREE_MUTEX);
+		break;
+	default:
+		return -EINVAL;
+	}
 	return 0;
 }
 
```
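This change replaces a plain mutex_lock() with mutex_lock_nested() so that lockdep can tell the catalog, extents, and attributes B-tree locks apart: all three locks share one lock class, so taking one tree's lock while holding another's would otherwise register as recursive locking on a single class. A minimal sketch of the same lockdep-subclass pattern; demo_tree, demo_nested_lock, and the enum values are illustration names, not kernel APIs:

```c
#include <linux/mutex.h>

/* Subclass IDs for same-class locks that may legitimately nest. */
enum demo_mutex_classes {
	DEMO_PARENT_MUTEX,	/* taken first */
	DEMO_CHILD_MUTEX,	/* may be taken while the parent is held */
};

struct demo_tree {
	struct mutex lock;	/* both trees share one lock class */
};

static void demo_nested_lock(struct demo_tree *parent, struct demo_tree *child)
{
	/*
	 * Without the _nested annotation, lockdep treats both locks as
	 * the same class and flags parent->child nesting as a potential
	 * deadlock. The subclass argument records the intended order.
	 */
	mutex_lock_nested(&parent->lock, DEMO_PARENT_MUTEX);
	mutex_lock_nested(&child->lock, DEMO_CHILD_MUTEX);

	/* ... work on both trees ... */

	mutex_unlock(&child->lock);
	mutex_unlock(&parent->lock);
}
```

The enum added in fs/hfs/btree.h below supplies exactly these subclass IDs, one per B-tree CNID.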

fs/hfs/bnode.c (20 additions, 5 deletions)

```diff
@@ -15,16 +15,31 @@
 
 #include "btree.h"
 
-void hfs_bnode_read(struct hfs_bnode *node, void *buf,
-		int off, int len)
+void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
 {
 	struct page *page;
+	int pagenum;
+	int bytes_read;
+	int bytes_to_read;
+	void *vaddr;
 
 	off += node->page_offset;
-	page = node->page[0];
+	pagenum = off >> PAGE_SHIFT;
+	off &= ~PAGE_MASK; /* compute page offset for the first page */
 
-	memcpy(buf, kmap(page) + off, len);
-	kunmap(page);
+	for (bytes_read = 0; bytes_read < len; bytes_read += bytes_to_read) {
+		if (pagenum >= node->tree->pages_per_bnode)
+			break;
+		page = node->page[pagenum];
+		bytes_to_read = min_t(int, len - bytes_read, PAGE_SIZE - off);
+
+		vaddr = kmap_atomic(page);
+		memcpy(buf + bytes_read, vaddr + off, bytes_to_read);
+		kunmap_atomic(vaddr);
+
+		pagenum++;
+		off = 0; /* page offset only applies to the first page */
+	}
 }
 
 u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
```
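The rewritten hfs_bnode_read() maps and copies one page at a time, so a read that spans a page boundary no longer runs off the end of the single kmap()ed first page. The same loop shape in a self-contained userspace sketch; demo_node, demo_node_read, and DEMO_PAGE_SIZE are made-up stand-ins for the bnode's page array:

```c
#include <stddef.h>
#include <string.h>

#define DEMO_PAGE_SIZE 4096

/* Hypothetical node: data split across an array of page-sized buffers. */
struct demo_node {
	char *pages[8];
	int npages;
};

/* Copy len bytes starting at byte offset off, crossing page boundaries. */
static void demo_node_read(struct demo_node *node, void *buf, int off, int len)
{
	int pagenum = off / DEMO_PAGE_SIZE;
	int done, chunk;

	off %= DEMO_PAGE_SIZE;	/* offset applies to the first page only */

	for (done = 0; done < len; done += chunk) {
		if (pagenum >= node->npages)
			break;	/* clamp instead of reading out of bounds */
		chunk = len - done;
		if (chunk > DEMO_PAGE_SIZE - off)
			chunk = DEMO_PAGE_SIZE - off;
		memcpy((char *)buf + done, node->pages[pagenum] + off, chunk);
		pagenum++;
		off = 0;
	}
}
```

The diff also switches kmap()/kunmap() to kmap_atomic()/kunmap_atomic(), keeping each highmem mapping short-lived and strictly per-page.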

fs/hfs/btree.h (7 additions, 0 deletions)

```diff
@@ -13,6 +13,13 @@ typedef int (*btree_keycmp)(const btree_key *, const btree_key *);
 
 #define NODE_HASH_SIZE	256
 
+/* B-tree mutex nested subclasses */
+enum hfs_btree_mutex_classes {
+	CATALOG_BTREE_MUTEX,
+	EXTENTS_BTREE_MUTEX,
+	ATTR_BTREE_MUTEX,
+};
+
 /* A HFS BTree held in memory */
 struct hfs_btree {
 	struct super_block *sb;
```

fs/hfs/super.c (5 additions, 5 deletions)

```diff
@@ -420,14 +420,12 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
 	if (!res) {
 		if (fd.entrylength > sizeof(rec) || fd.entrylength < 0) {
 			res = -EIO;
-			goto bail;
+			goto bail_hfs_find;
 		}
 		hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, fd.entrylength);
 	}
-	if (res) {
-		hfs_find_exit(&fd);
-		goto bail_no_root;
-	}
+	if (res)
+		goto bail_hfs_find;
 	res = -EINVAL;
 	root_inode = hfs_iget(sb, &fd.search_key->cat, &rec);
 	hfs_find_exit(&fd);
@@ -443,6 +441,8 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
 	/* everything's okay */
 	return 0;
 
+bail_hfs_find:
+	hfs_find_exit(&fd);
 bail_no_root:
 	pr_err("get root inode failed\n");
 bail:
```
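The new bail_hfs_find label gives hfs_fill_super() a single place that calls hfs_find_exit() on every failure path taken after hfs_find_init() has succeeded, instead of freeing inline at one site and leaking at another. The general goto-ladder shape, sketched with hypothetical acquire/release helpers:

```c
/* Hypothetical resources standing in for hfs_find_init()/hfs_find_exit(). */
static int acquire_a(void) { return 0; }
static void release_a(void) { }
static int acquire_b(void) { return 0; }
static void release_b(void) { }
static int use_both(void) { return -1; }	/* force the error path */

static int demo_setup(void)
{
	int err;

	err = acquire_a();
	if (err)
		goto bail;		/* nothing to undo yet */

	err = acquire_b();
	if (err)
		goto bail_release_a;	/* undo only what succeeded */

	err = use_both();
	if (err)
		goto bail_release_b;	/* the label the fix adds */

	return 0;

bail_release_b:
	release_b();
bail_release_a:
	release_a();
bail:
	return err;
}
```

Because the labels fall through in reverse acquisition order, each error site jumps to exactly the unwind steps it needs and nothing is released twice.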

include/linux/kasan.h (1 addition, 0 deletions)

```diff
@@ -3,6 +3,7 @@
 #define _LINUX_KASAN_H
 
 #include <linux/bug.h>
+#include <linux/kernel.h>
 #include <linux/static_key.h>
 #include <linux/types.h>
 
```

include/linux/rmap.h (3 additions, 1 deletion)

```diff
@@ -291,7 +291,9 @@ static inline int page_referenced(struct page *page, int is_locked,
 	return 0;
 }
 
-#define try_to_unmap(page, refs) false
+static inline void try_to_unmap(struct page *page, enum ttu_flags flags)
+{
+}
 
 static inline int page_mkclean(struct page *page)
 {
```
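Replacing the !CONFIG_MMU macro with an empty static inline gives the stub the same prototype as the real try_to_unmap() (which returns void after this series), so call sites compile identically, and with full type checking, in both configurations. A small self-contained illustration of the difference; all demo_* names are hypothetical:

```c
/*
 * Macro stub like the old code: expands to `false`, never inspects its
 * arguments, so even demo_unmap_macro(42, "junk") compiles -- and its
 * value silently differs from the real function's void return.
 */
#define demo_unmap_macro(page, flags) false

struct demo_page;
enum demo_flags { DEMO_FLAG_NONE };

/*
 * Inline stub as in the fix: same signature as the real function, so
 * argument types are checked and the call site is identical whether or
 * not the feature is built in.
 */
static inline void demo_unmap_inline(struct demo_page *page,
				     enum demo_flags flags)
{
	(void)page;
	(void)flags;	/* no-op when the feature is compiled out */
}
```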

lib/test_hmm.c (0 additions, 2 deletions)

```diff
@@ -628,10 +628,8 @@ static int dmirror_check_atomic(struct dmirror *dmirror, unsigned long start,
 
 	for (pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++) {
 		void *entry;
-		struct page *page;
 
 		entry = xa_load(&dmirror->pt, pfn);
-		page = xa_untag_pointer(entry);
 		if (xa_pointer_tag(entry) == DPT_XA_TAG_ATOMIC)
 			return -EPERM;
 	}
```

mm/hugetlb.c (3 additions, 2 deletions)

```diff
@@ -5440,8 +5440,9 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			continue;
 		}
 
-		refs = min3(pages_per_huge_page(h) - pfn_offset,
-			    (vma->vm_end - vaddr) >> PAGE_SHIFT, remainder);
+		/* vaddr may not be aligned to PAGE_SIZE */
+		refs = min3(pages_per_huge_page(h) - pfn_offset, remainder,
+		    (vma->vm_end - ALIGN_DOWN(vaddr, PAGE_SIZE)) >> PAGE_SHIFT);
 
 		if (pages || vmas)
 			record_subpages_vmas(mem_map_offset(page, pfn_offset),
```
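The hugetlb fix matters when vaddr is not page aligned: shifting (vm_end - vaddr) right by PAGE_SHIFT floors away the partially consumed page, so the page containing vaddr can be dropped from the refs count. A worked example assuming 4 KiB pages; the DEMO_* macros stand in for the kernel's PAGE_SIZE/PAGE_SHIFT/ALIGN_DOWN:

```c
#include <assert.h>

#define DEMO_PAGE_SIZE	4096UL
#define DEMO_PAGE_SHIFT	12
#define DEMO_ALIGN_DOWN(x, a)	((x) & ~((a) - 1))

int main(void)
{
	unsigned long vm_end = 0x11000UL;	/* VMA ends here */
	unsigned long vaddr  = 0x10c00UL;	/* mid-page, unaligned */

	/* Old formula: floors away the partially consumed page -> 0. */
	assert(((vm_end - vaddr) >> DEMO_PAGE_SHIFT) == 0);

	/* Fixed formula: still counts the page containing vaddr -> 1. */
	assert(((vm_end - DEMO_ALIGN_DOWN(vaddr, DEMO_PAGE_SIZE))
		>> DEMO_PAGE_SHIFT) == 1);
	return 0;
}
```

Aligning vaddr down to the page boundary before subtracting guarantees the count covers every page that record_subpages_vmas() is about to record, starting from pfn_offset.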

mm/kasan/kasan.h (12 additions, 0 deletions)

```diff
@@ -9,6 +9,7 @@
 #ifdef CONFIG_KASAN_HW_TAGS
 
 #include <linux/static_key.h>
+#include "../slab.h"
 
 DECLARE_STATIC_KEY_FALSE(kasan_flag_stacktrace);
 extern bool kasan_flag_async __ro_after_init;
@@ -387,6 +388,17 @@ static inline void kasan_unpoison(const void *addr, size_t size, bool init)
 
 	if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
 		return;
+	/*
+	 * Explicitly initialize the memory with the precise object size to
+	 * avoid overwriting the SLAB redzone. This disables initialization in
+	 * the arch code and may thus lead to performance penalty. The penalty
+	 * is accepted since SLAB redzones aren't enabled in production builds.
+	 */
+	if (__slub_debug_enabled() &&
+	    init && ((unsigned long)size & KASAN_GRANULE_MASK)) {
+		init = false;
+		memzero_explicit((void *)addr, size);
+	}
 	size = round_up(size, KASAN_GRANULE_SIZE);
 
 	hw_set_mem_tag_range((void *)addr, size, tag, init);
```
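This kasan change exists because kasan_unpoison() rounds size up to whole granules before calling hw_set_mem_tag_range(); with hardware init enabled, the rounded-up tail would also be zeroed, clobbering the SLUB redzone bytes that sit right after the object. Worked numbers, assuming a 16-byte granule (the arm64 MTE granule size); the DEMO_* names are illustrative:

```c
#include <assert.h>

#define DEMO_GRANULE		16UL	/* assumed KASAN_GRANULE_SIZE */
#define DEMO_ROUND_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long object_size = 40;	/* not granule-aligned */
	unsigned long tagged = DEMO_ROUND_UP(object_size, DEMO_GRANULE);

	/* Tagging must cover whole granules: 40 -> 48 bytes. */
	assert(tagged == 48);

	/*
	 * Hardware init over `tagged` bytes would zero bytes 40..47,
	 * which belong to the SLUB redzone, not the object. Hence the
	 * patch zeroes exactly object_size bytes in software
	 * (memzero_explicit) and passes init == false to the arch hook.
	 */
	return 0;
}
```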

mm/page_alloc.c (21 additions, 7 deletions)

```diff
@@ -3820,7 +3820,7 @@ static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
 
 #endif /* CONFIG_FAIL_PAGE_ALLOC */
 
-static noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
+noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
 {
 	return __should_fail_alloc_page(gfp_mask, order);
 }
@@ -5221,29 +5221,42 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
 	unsigned int alloc_flags = ALLOC_WMARK_LOW;
 	int nr_populated = 0, nr_account = 0;
 
-	if (unlikely(nr_pages <= 0))
-		return 0;
-
 	/*
 	 * Skip populated array elements to determine if any pages need
 	 * to be allocated before disabling IRQs.
 	 */
 	while (page_array && nr_populated < nr_pages && page_array[nr_populated])
 		nr_populated++;
 
+	/* No pages requested? */
+	if (unlikely(nr_pages <= 0))
+		goto out;
+
 	/* Already populated array? */
 	if (unlikely(page_array && nr_pages - nr_populated == 0))
-		return nr_populated;
+		goto out;
 
 	/* Use the single page allocator for one page. */
 	if (nr_pages - nr_populated == 1)
 		goto failed;
 
+#ifdef CONFIG_PAGE_OWNER
+	/*
+	 * PAGE_OWNER may recurse into the allocator to allocate space to
+	 * save the stack with pagesets.lock held. Releasing/reacquiring
+	 * removes much of the performance benefit of bulk allocation so
+	 * force the caller to allocate one page at a time as it'll have
+	 * similar performance to added complexity to the bulk allocator.
+	 */
+	if (static_branch_unlikely(&page_owner_inited))
+		goto failed;
+#endif
+
 	/* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */
 	gfp &= gfp_allowed_mask;
 	alloc_gfp = gfp;
 	if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags))
-		return 0;
+		goto out;
 	gfp = alloc_gfp;
 
 	/* Find an allowed local zone that meets the low watermark. */
@@ -5311,6 +5324,7 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
 	__count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
 	zone_statistics(ac.preferred_zoneref->zone, zone, nr_account);
 
+out:
 	return nr_populated;
 
 failed_irq:
@@ -5326,7 +5340,7 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
 		nr_populated++;
 	}
 
-	return nr_populated;
+	goto out;
 }
 EXPORT_SYMBOL_GPL(__alloc_pages_bulk);
 
```
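Two of the page_alloc fixes funnel every exit of __alloc_pages_bulk() through the single out: label, so the function consistently reports nr_populated, which can be nonzero even when no new page was allocated because the array was partially populated on entry; the PAGE_OWNER branch additionally avoids recursing into the allocator while pagesets.lock is held by falling back to the single-page path. A sketch of what the return-value contract means for a caller; the sizes and pre-populated slots are hypothetical:

```c
/* Caller-side sketch (kernel context assumed; error handling elided). */
#include <linux/gfp.h>

static void demo_bulk_fill(void)
{
	struct page *pages[8] = { NULL };
	unsigned long got;

	/* Pretend slots 0 and 1 survived from an earlier round. */
	pages[0] = alloc_page(GFP_KERNEL);
	pages[1] = alloc_page(GFP_KERNEL);

	/*
	 * alloc_pages_bulk_array() returns the number of populated
	 * slots, counting the ones that were already filled. After the
	 * fix, every early exit (zero pages requested, array already
	 * full, prepare_alloc_pages() failure) reports this same
	 * quantity instead of returning 0 and making slots 0-1 look
	 * lost to the caller.
	 */
	got = alloc_pages_bulk_array(GFP_KERNEL, ARRAY_SIZE(pages), pages);
	/* got >= 2 here whenever the two eager allocations succeeded. */
}
```

The revert of "make should_fail_alloc_page() static" in the first hunk restores the non-static definition that error-injection (ALLOW_ERROR_INJECTION) relies on.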
