Skip to content

Commit 68845a5

Browse files
committed
Merge branch 'akpm' into master (patches from Andrew)
Merge misc fixes from Andrew Morton: "Subsystems affected by this patch series: mm/pagemap, mm/shmem, mm/hotfixes, mm/memcg, mm/hugetlb, mailmap, squashfs, scripts, io-mapping, MAINTAINERS, and gdb" * emailed patches from Andrew Morton <[email protected]>: scripts/gdb: fix lx-symbols 'gdb.error' while loading modules MAINTAINERS: add KCOV section io-mapping: indicate mapping failure scripts/decode_stacktrace: strip basepath from all paths squashfs: fix length field overlap check in metadata reading mailmap: add entry for Mike Rapoport khugepaged: fix null-pointer dereference due to race mm/hugetlb: avoid hardcoding while checking if cma is enabled mm: memcg/slab: fix memory leak at non-root kmem_cache destroy mm/memcg: fix refcount error while moving and swapping mm/memcontrol: fix OOPS inside mem_cgroup_get_nr_swap_pages() mm: initialize return of vm_insert_pages vfs/xattr: mm/shmem: kernfs: release simple xattr entry in a right way mm/mmap.c: close race between munmap() and expand_upwards()/downwards()
2 parents c953d60 + 7359608 commit 68845a5

File tree

14 files changed

+91
-25
lines changed

14 files changed

+91
-25
lines changed

.mailmap

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -198,6 +198,9 @@ Maxime Ripard <[email protected]> <[email protected]>
198198
Mayuresh Janorkar <[email protected]>
199199
Michael Buesch <[email protected]>
200200
Michel Dänzer <[email protected]>
201+
202+
203+
201204
202205
203206
Mitesh shah <[email protected]>

MAINTAINERS

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9306,6 +9306,17 @@ F: Documentation/kbuild/kconfig*
93069306
F: scripts/Kconfig.include
93079307
F: scripts/kconfig/
93089308

9309+
KCOV
9310+
R: Dmitry Vyukov <[email protected]>
9311+
R: Andrey Konovalov <[email protected]>
9312+
9313+
S: Maintained
9314+
F: Documentation/dev-tools/kcov.rst
9315+
F: include/linux/kcov.h
9316+
F: include/uapi/linux/kcov.h
9317+
F: kernel/kcov.c
9318+
F: scripts/Makefile.kcov
9319+
93099320
KCSAN
93109321
M: Marco Elver <[email protected]>
93119322
R: Dmitry Vyukov <[email protected]>

fs/squashfs/block.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -175,7 +175,7 @@ int squashfs_read_data(struct super_block *sb, u64 index, int length,
175175
/* Extract the length of the metadata block */
176176
data = page_address(bvec->bv_page) + bvec->bv_offset;
177177
length = data[offset];
178-
if (offset <= bvec->bv_len - 1) {
178+
if (offset < bvec->bv_len - 1) {
179179
length |= data[offset + 1] << 8;
180180
} else {
181181
if (WARN_ON_ONCE(!bio_next_segment(bio, &iter_all))) {

include/linux/io-mapping.h

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -107,9 +107,12 @@ io_mapping_init_wc(struct io_mapping *iomap,
107107
resource_size_t base,
108108
unsigned long size)
109109
{
110+
iomap->iomem = ioremap_wc(base, size);
111+
if (!iomap->iomem)
112+
return NULL;
113+
110114
iomap->base = base;
111115
iomap->size = size;
112-
iomap->iomem = ioremap_wc(base, size);
113116
#if defined(pgprot_noncached_wc) /* archs can't agree on a name ... */
114117
iomap->prot = pgprot_noncached_wc(PAGE_KERNEL);
115118
#elif defined(pgprot_writecombine)

include/linux/xattr.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@
1515
#include <linux/slab.h>
1616
#include <linux/types.h>
1717
#include <linux/spinlock.h>
18+
#include <linux/mm.h>
1819
#include <uapi/linux/xattr.h>
1920

2021
struct inode;
@@ -94,7 +95,7 @@ static inline void simple_xattrs_free(struct simple_xattrs *xattrs)
9495

9596
list_for_each_entry_safe(xattr, node, &xattrs->head, list) {
9697
kfree(xattr->name);
97-
kfree(xattr);
98+
kvfree(xattr);
9899
}
99100
}
100101

mm/hugetlb.c

Lines changed: 10 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -45,7 +45,10 @@ int hugetlb_max_hstate __read_mostly;
4545
unsigned int default_hstate_idx;
4646
struct hstate hstates[HUGE_MAX_HSTATE];
4747

48+
#ifdef CONFIG_CMA
4849
static struct cma *hugetlb_cma[MAX_NUMNODES];
50+
#endif
51+
static unsigned long hugetlb_cma_size __initdata;
4952

5053
/*
5154
* Minimum page order among possible hugepage sizes, set to a proper value
@@ -1235,9 +1238,10 @@ static void free_gigantic_page(struct page *page, unsigned int order)
12351238
* If the page isn't allocated using the cma allocator,
12361239
* cma_release() returns false.
12371240
*/
1238-
if (IS_ENABLED(CONFIG_CMA) &&
1239-
cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order))
1241+
#ifdef CONFIG_CMA
1242+
if (cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order))
12401243
return;
1244+
#endif
12411245

12421246
free_contig_range(page_to_pfn(page), 1 << order);
12431247
}
@@ -1248,7 +1252,8 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
12481252
{
12491253
unsigned long nr_pages = 1UL << huge_page_order(h);
12501254

1251-
if (IS_ENABLED(CONFIG_CMA)) {
1255+
#ifdef CONFIG_CMA
1256+
{
12521257
struct page *page;
12531258
int node;
12541259

@@ -1262,6 +1267,7 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
12621267
return page;
12631268
}
12641269
}
1270+
#endif
12651271

12661272
return alloc_contig_pages(nr_pages, gfp_mask, nid, nodemask);
12671273
}
@@ -2571,7 +2577,7 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
25712577

25722578
for (i = 0; i < h->max_huge_pages; ++i) {
25732579
if (hstate_is_gigantic(h)) {
2574-
if (IS_ENABLED(CONFIG_CMA) && hugetlb_cma[0]) {
2580+
if (hugetlb_cma_size) {
25752581
pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n");
25762582
break;
25772583
}
@@ -5654,7 +5660,6 @@ void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
56545660
}
56555661

56565662
#ifdef CONFIG_CMA
5657-
static unsigned long hugetlb_cma_size __initdata;
56585663
static bool cma_reserve_called __initdata;
56595664

56605665
static int __init cmdline_parse_hugetlb_cma(char *p)

mm/khugepaged.c

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -958,6 +958,9 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
958958
return SCAN_ADDRESS_RANGE;
959959
if (!hugepage_vma_check(vma, vma->vm_flags))
960960
return SCAN_VMA_CHECK;
961+
/* Anon VMA expected */
962+
if (!vma->anon_vma || vma->vm_ops)
963+
return SCAN_VMA_CHECK;
961964
return 0;
962965
}
963966

mm/memcontrol.c

Lines changed: 10 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -5669,7 +5669,6 @@ static void __mem_cgroup_clear_mc(void)
56695669
if (!mem_cgroup_is_root(mc.to))
56705670
page_counter_uncharge(&mc.to->memory, mc.moved_swap);
56715671

5672-
mem_cgroup_id_get_many(mc.to, mc.moved_swap);
56735672
css_put_many(&mc.to->css, mc.moved_swap);
56745673

56755674
mc.moved_swap = 0;
@@ -5860,7 +5859,8 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
58605859
ent = target.ent;
58615860
if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
58625861
mc.precharge--;
5863-
/* we fixup refcnts and charges later. */
5862+
mem_cgroup_id_get_many(mc.to, 1);
5863+
/* we fixup other refcnts and charges later. */
58645864
mc.moved_swap++;
58655865
}
58665866
break;
@@ -7186,6 +7186,13 @@ static struct cftype memsw_files[] = {
71867186
{ }, /* terminate */
71877187
};
71887188

7189+
/*
7190+
* If mem_cgroup_swap_init() is implemented as a subsys_initcall()
7191+
* instead of a core_initcall(), this could mean cgroup_memory_noswap still
7192+
* remains set to false even when memcg is disabled via "cgroup_disable=memory"
7193+
* boot parameter. This may result in premature OOPS inside
7194+
* mem_cgroup_get_nr_swap_pages() function in corner cases.
7195+
*/
71897196
static int __init mem_cgroup_swap_init(void)
71907197
{
71917198
/* No memory control -> no swap control */
@@ -7200,6 +7207,6 @@ static int __init mem_cgroup_swap_init(void)
72007207

72017208
return 0;
72027209
}
7203-
subsys_initcall(mem_cgroup_swap_init);
7210+
core_initcall(mem_cgroup_swap_init);
72047211

72057212
#endif /* CONFIG_MEMCG_SWAP */

mm/memory.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1601,7 +1601,7 @@ int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
16011601
return insert_pages(vma, addr, pages, num, vma->vm_page_prot);
16021602
#else
16031603
unsigned long idx = 0, pgcount = *num;
1604-
int err;
1604+
int err = -EINVAL;
16051605

16061606
for (; idx < pgcount; ++idx) {
16071607
err = vm_insert_page(vma, addr + (PAGE_SIZE * idx), pages[idx]);

mm/mmap.c

Lines changed: 14 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2620,7 +2620,7 @@ static void unmap_region(struct mm_struct *mm,
26202620
* Create a list of vma's touched by the unmap, removing them from the mm's
26212621
* vma list as we go..
26222622
*/
2623-
static void
2623+
static bool
26242624
detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
26252625
struct vm_area_struct *prev, unsigned long end)
26262626
{
@@ -2645,6 +2645,17 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
26452645

26462646
/* Kill the cache */
26472647
vmacache_invalidate(mm);
2648+
2649+
/*
2650+
* Do not downgrade mmap_lock if we are next to VM_GROWSDOWN or
2651+
* VM_GROWSUP VMA. Such VMAs can change their size under
2652+
* down_read(mmap_lock) and collide with the VMA we are about to unmap.
2653+
*/
2654+
if (vma && (vma->vm_flags & VM_GROWSDOWN))
2655+
return false;
2656+
if (prev && (prev->vm_flags & VM_GROWSUP))
2657+
return false;
2658+
return true;
26482659
}
26492660

26502661
/*
@@ -2825,7 +2836,8 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
28252836
}
28262837

28272838
/* Detach vmas from rbtree */
2828-
detach_vmas_to_be_unmapped(mm, vma, prev, end);
2839+
if (!detach_vmas_to_be_unmapped(mm, vma, prev, end))
2840+
downgrade = false;
28292841

28302842
if (downgrade)
28312843
mmap_write_downgrade(mm);

0 commit comments

Comments (0)