Skip to content

Commit f9aaa5b

Browse files
committed
Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
  "10 patches. Subsystems affected by this patch series: ipc, MAINTAINERS, and mm (vmscan, debug, pagemap, kmemleak, and selftests)"

* emailed patches from Andrew Morton <[email protected]>:
  kselftest/vm: revert "tools/testing/selftests/vm/userfaultfd.c: use swap() to make code cleaner"
  MAINTAINERS: update rppt's email
  mm/kmemleak: avoid scanning potential huge holes
  ipc/sem: do not sleep with a spin lock held
  mm/pgtable: define pte_index so that preprocessor could recognize it
  mm/page_table_check: check entries at pmd levels
  mm/khugepaged: unify collapse pmd clear, flush and free
  mm/page_table_check: use unsigned long for page counters and cleanup
  mm/debug_vm_pgtable: remove pte entry from the page table
  Revert "mm/page_isolation: unset migratetype directly for non Buddy page"
2 parents cff7f22 + 07d2505 commit f9aaa5b

File tree

10 files changed: +88 additions, −56 deletions

MAINTAINERS

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12399,7 +12399,7 @@ F: include/uapi/linux/membarrier.h
1239912399
F: kernel/sched/membarrier.c
1240012400

1240112401
MEMBLOCK
12402-
M: Mike Rapoport <rppt@linux.ibm.com>
12402+
M: Mike Rapoport <rppt@kernel.org>
1240312403
1240412404
S: Maintained
1240512405
F: Documentation/core-api/boot-time-mm.rst

include/linux/page_table_check.h

Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,9 @@ void __page_table_check_pmd_set(struct mm_struct *mm, unsigned long addr,
2626
pmd_t *pmdp, pmd_t pmd);
2727
void __page_table_check_pud_set(struct mm_struct *mm, unsigned long addr,
2828
pud_t *pudp, pud_t pud);
29+
void __page_table_check_pte_clear_range(struct mm_struct *mm,
30+
unsigned long addr,
31+
pmd_t pmd);
2932

3033
static inline void page_table_check_alloc(struct page *page, unsigned int order)
3134
{
@@ -100,6 +103,16 @@ static inline void page_table_check_pud_set(struct mm_struct *mm,
100103
__page_table_check_pud_set(mm, addr, pudp, pud);
101104
}
102105

106+
static inline void page_table_check_pte_clear_range(struct mm_struct *mm,
107+
unsigned long addr,
108+
pmd_t pmd)
109+
{
110+
if (static_branch_likely(&page_table_check_disabled))
111+
return;
112+
113+
__page_table_check_pte_clear_range(mm, addr, pmd);
114+
}
115+
103116
#else
104117

105118
static inline void page_table_check_alloc(struct page *page, unsigned int order)
@@ -143,5 +156,11 @@ static inline void page_table_check_pud_set(struct mm_struct *mm,
143156
{
144157
}
145158

159+
static inline void page_table_check_pte_clear_range(struct mm_struct *mm,
160+
unsigned long addr,
161+
pmd_t pmd)
162+
{
163+
}
164+
146165
#endif /* CONFIG_PAGE_TABLE_CHECK */
147166
#endif /* __LINUX_PAGE_TABLE_CHECK_H */

include/linux/pgtable.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -62,6 +62,7 @@ static inline unsigned long pte_index(unsigned long address)
6262
{
6363
return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
6464
}
65+
#define pte_index pte_index
6566

6667
#ifndef pmd_index
6768
static inline unsigned long pmd_index(unsigned long address)

ipc/sem.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1964,6 +1964,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
19641964
*/
19651965
un = lookup_undo(ulp, semid);
19661966
if (un) {
1967+
spin_unlock(&ulp->lock);
19671968
kvfree(new);
19681969
goto success;
19691970
}
@@ -1976,9 +1977,8 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
19761977
ipc_assert_locked_object(&sma->sem_perm);
19771978
list_add(&new->list_id, &sma->list_id);
19781979
un = new;
1979-
1980-
success:
19811980
spin_unlock(&ulp->lock);
1981+
success:
19821982
sem_unlock(sma, -1);
19831983
out:
19841984
return un;

mm/debug_vm_pgtable.c

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -171,6 +171,8 @@ static void __init pte_advanced_tests(struct pgtable_debug_args *args)
171171
ptep_test_and_clear_young(args->vma, args->vaddr, args->ptep);
172172
pte = ptep_get(args->ptep);
173173
WARN_ON(pte_young(pte));
174+
175+
ptep_get_and_clear_full(args->mm, args->vaddr, args->ptep, 1);
174176
}
175177

176178
static void __init pte_savedwrite_tests(struct pgtable_debug_args *args)

mm/khugepaged.c

Lines changed: 21 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,7 @@
1616
#include <linux/hashtable.h>
1717
#include <linux/userfaultfd_k.h>
1818
#include <linux/page_idle.h>
19+
#include <linux/page_table_check.h>
1920
#include <linux/swapops.h>
2021
#include <linux/shmem_fs.h>
2122

@@ -1416,6 +1417,21 @@ static int khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
14161417
return 0;
14171418
}
14181419

1420+
static void collapse_and_free_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
1421+
unsigned long addr, pmd_t *pmdp)
1422+
{
1423+
spinlock_t *ptl;
1424+
pmd_t pmd;
1425+
1426+
mmap_assert_write_locked(mm);
1427+
ptl = pmd_lock(vma->vm_mm, pmdp);
1428+
pmd = pmdp_collapse_flush(vma, addr, pmdp);
1429+
spin_unlock(ptl);
1430+
mm_dec_nr_ptes(mm);
1431+
page_table_check_pte_clear_range(mm, addr, pmd);
1432+
pte_free(mm, pmd_pgtable(pmd));
1433+
}
1434+
14191435
/**
14201436
* collapse_pte_mapped_thp - Try to collapse a pte-mapped THP for mm at
14211437
* address haddr.
@@ -1433,7 +1449,7 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
14331449
struct vm_area_struct *vma = find_vma(mm, haddr);
14341450
struct page *hpage;
14351451
pte_t *start_pte, *pte;
1436-
pmd_t *pmd, _pmd;
1452+
pmd_t *pmd;
14371453
spinlock_t *ptl;
14381454
int count = 0;
14391455
int i;
@@ -1509,12 +1525,7 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
15091525
}
15101526

15111527
/* step 4: collapse pmd */
1512-
ptl = pmd_lock(vma->vm_mm, pmd);
1513-
_pmd = pmdp_collapse_flush(vma, haddr, pmd);
1514-
spin_unlock(ptl);
1515-
mm_dec_nr_ptes(mm);
1516-
pte_free(mm, pmd_pgtable(_pmd));
1517-
1528+
collapse_and_free_pmd(mm, vma, haddr, pmd);
15181529
drop_hpage:
15191530
unlock_page(hpage);
15201531
put_page(hpage);
@@ -1552,7 +1563,7 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
15521563
struct vm_area_struct *vma;
15531564
struct mm_struct *mm;
15541565
unsigned long addr;
1555-
pmd_t *pmd, _pmd;
1566+
pmd_t *pmd;
15561567

15571568
i_mmap_lock_write(mapping);
15581569
vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
@@ -1591,14 +1602,8 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
15911602
* reverse order. Trylock is a way to avoid deadlock.
15921603
*/
15931604
if (mmap_write_trylock(mm)) {
1594-
if (!khugepaged_test_exit(mm)) {
1595-
spinlock_t *ptl = pmd_lock(mm, pmd);
1596-
/* assume page table is clear */
1597-
_pmd = pmdp_collapse_flush(vma, addr, pmd);
1598-
spin_unlock(ptl);
1599-
mm_dec_nr_ptes(mm);
1600-
pte_free(mm, pmd_pgtable(_pmd));
1601-
}
1605+
if (!khugepaged_test_exit(mm))
1606+
collapse_and_free_pmd(mm, vma, addr, pmd);
16021607
mmap_write_unlock(mm);
16031608
} else {
16041609
/* Try again later */

mm/kmemleak.c

Lines changed: 7 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1410,7 +1410,8 @@ static void kmemleak_scan(void)
14101410
{
14111411
unsigned long flags;
14121412
struct kmemleak_object *object;
1413-
int i;
1413+
struct zone *zone;
1414+
int __maybe_unused i;
14141415
int new_leaks = 0;
14151416

14161417
jiffies_last_scan = jiffies;
@@ -1450,9 +1451,9 @@ static void kmemleak_scan(void)
14501451
* Struct page scanning for each node.
14511452
*/
14521453
get_online_mems();
1453-
for_each_online_node(i) {
1454-
unsigned long start_pfn = node_start_pfn(i);
1455-
unsigned long end_pfn = node_end_pfn(i);
1454+
for_each_populated_zone(zone) {
1455+
unsigned long start_pfn = zone->zone_start_pfn;
1456+
unsigned long end_pfn = zone_end_pfn(zone);
14561457
unsigned long pfn;
14571458

14581459
for (pfn = start_pfn; pfn < end_pfn; pfn++) {
@@ -1461,8 +1462,8 @@ static void kmemleak_scan(void)
14611462
if (!page)
14621463
continue;
14631464

1464-
/* only scan pages belonging to this node */
1465-
if (page_to_nid(page) != i)
1465+
/* only scan pages belonging to this zone */
1466+
if (page_zone(page) != zone)
14661467
continue;
14671468
/* only scan if page is in use */
14681469
if (page_count(page) == 0)

mm/page_isolation.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -115,7 +115,7 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
115115
* onlining - just onlined memory won't immediately be considered for
116116
* allocation.
117117
*/
118-
if (!isolated_page && PageBuddy(page)) {
118+
if (!isolated_page) {
119119
nr_pages = move_freepages_block(zone, page, migratetype, NULL);
120120
__mod_zone_freepage_state(zone, nr_pages, migratetype);
121121
}

mm/page_table_check.c

Lines changed: 27 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -86,8 +86,8 @@ static void page_table_check_clear(struct mm_struct *mm, unsigned long addr,
8686
{
8787
struct page_ext *page_ext;
8888
struct page *page;
89+
unsigned long i;
8990
bool anon;
90-
int i;
9191

9292
if (!pfn_valid(pfn))
9393
return;
@@ -121,8 +121,8 @@ static void page_table_check_set(struct mm_struct *mm, unsigned long addr,
121121
{
122122
struct page_ext *page_ext;
123123
struct page *page;
124+
unsigned long i;
124125
bool anon;
125-
int i;
126126

127127
if (!pfn_valid(pfn))
128128
return;
@@ -152,10 +152,10 @@ static void page_table_check_set(struct mm_struct *mm, unsigned long addr,
152152
void __page_table_check_zero(struct page *page, unsigned int order)
153153
{
154154
struct page_ext *page_ext = lookup_page_ext(page);
155-
int i;
155+
unsigned long i;
156156

157157
BUG_ON(!page_ext);
158-
for (i = 0; i < (1 << order); i++) {
158+
for (i = 0; i < (1ul << order); i++) {
159159
struct page_table_check *ptc = get_page_table_check(page_ext);
160160

161161
BUG_ON(atomic_read(&ptc->anon_map_count));
@@ -206,17 +206,10 @@ EXPORT_SYMBOL(__page_table_check_pud_clear);
206206
void __page_table_check_pte_set(struct mm_struct *mm, unsigned long addr,
207207
pte_t *ptep, pte_t pte)
208208
{
209-
pte_t old_pte;
210-
211209
if (&init_mm == mm)
212210
return;
213211

214-
old_pte = *ptep;
215-
if (pte_user_accessible_page(old_pte)) {
216-
page_table_check_clear(mm, addr, pte_pfn(old_pte),
217-
PAGE_SIZE >> PAGE_SHIFT);
218-
}
219-
212+
__page_table_check_pte_clear(mm, addr, *ptep);
220213
if (pte_user_accessible_page(pte)) {
221214
page_table_check_set(mm, addr, pte_pfn(pte),
222215
PAGE_SIZE >> PAGE_SHIFT,
@@ -228,17 +221,10 @@ EXPORT_SYMBOL(__page_table_check_pte_set);
228221
void __page_table_check_pmd_set(struct mm_struct *mm, unsigned long addr,
229222
pmd_t *pmdp, pmd_t pmd)
230223
{
231-
pmd_t old_pmd;
232-
233224
if (&init_mm == mm)
234225
return;
235226

236-
old_pmd = *pmdp;
237-
if (pmd_user_accessible_page(old_pmd)) {
238-
page_table_check_clear(mm, addr, pmd_pfn(old_pmd),
239-
PMD_PAGE_SIZE >> PAGE_SHIFT);
240-
}
241-
227+
__page_table_check_pmd_clear(mm, addr, *pmdp);
242228
if (pmd_user_accessible_page(pmd)) {
243229
page_table_check_set(mm, addr, pmd_pfn(pmd),
244230
PMD_PAGE_SIZE >> PAGE_SHIFT,
@@ -250,21 +236,34 @@ EXPORT_SYMBOL(__page_table_check_pmd_set);
250236
void __page_table_check_pud_set(struct mm_struct *mm, unsigned long addr,
251237
pud_t *pudp, pud_t pud)
252238
{
253-
pud_t old_pud;
254-
255239
if (&init_mm == mm)
256240
return;
257241

258-
old_pud = *pudp;
259-
if (pud_user_accessible_page(old_pud)) {
260-
page_table_check_clear(mm, addr, pud_pfn(old_pud),
261-
PUD_PAGE_SIZE >> PAGE_SHIFT);
262-
}
263-
242+
__page_table_check_pud_clear(mm, addr, *pudp);
264243
if (pud_user_accessible_page(pud)) {
265244
page_table_check_set(mm, addr, pud_pfn(pud),
266245
PUD_PAGE_SIZE >> PAGE_SHIFT,
267246
pud_write(pud));
268247
}
269248
}
270249
EXPORT_SYMBOL(__page_table_check_pud_set);
250+
251+
void __page_table_check_pte_clear_range(struct mm_struct *mm,
252+
unsigned long addr,
253+
pmd_t pmd)
254+
{
255+
if (&init_mm == mm)
256+
return;
257+
258+
if (!pmd_bad(pmd) && !pmd_leaf(pmd)) {
259+
pte_t *ptep = pte_offset_map(&pmd, addr);
260+
unsigned long i;
261+
262+
pte_unmap(ptep);
263+
for (i = 0; i < PTRS_PER_PTE; i++) {
264+
__page_table_check_pte_clear(mm, addr, *ptep);
265+
addr += PAGE_SIZE;
266+
ptep++;
267+
}
268+
}
269+
}

tools/testing/selftests/vm/userfaultfd.c

Lines changed: 7 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1417,6 +1417,7 @@ static void userfaultfd_pagemap_test(unsigned int test_pgsize)
14171417
static int userfaultfd_stress(void)
14181418
{
14191419
void *area;
1420+
char *tmp_area;
14201421
unsigned long nr;
14211422
struct uffdio_register uffdio_register;
14221423
struct uffd_stats uffd_stats[nr_cpus];
@@ -1527,9 +1528,13 @@ static int userfaultfd_stress(void)
15271528
count_verify[nr], nr);
15281529

15291530
/* prepare next bounce */
1530-
swap(area_src, area_dst);
1531+
tmp_area = area_src;
1532+
area_src = area_dst;
1533+
area_dst = tmp_area;
15311534

1532-
swap(area_src_alias, area_dst_alias);
1535+
tmp_area = area_src_alias;
1536+
area_src_alias = area_dst_alias;
1537+
area_dst_alias = tmp_area;
15331538

15341539
uffd_stats_report(uffd_stats, nr_cpus);
15351540
}

0 commit comments

Comments (0)