
Commit 95607ad

Merge tag 'mm-hotfixes-stable-2022-08-22' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull misc fixes from Andrew Morton:
 "Thirteen fixes, almost all for MM. Seven of these are cc:stable and
  the remainder fix up the changes which went into this -rc cycle"

* tag 'mm-hotfixes-stable-2022-08-22' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  kprobes: don't call disarm_kprobe() for disabled kprobes
  mm/shmem: shmem_replace_page() remember NR_SHMEM
  mm/shmem: tmpfs fallocate use file_modified()
  mm/shmem: fix chattr fsflags support in tmpfs
  mm/hugetlb: support write-faults in shared mappings
  mm/hugetlb: fix hugetlb not supporting softdirty tracking
  mm/uffd: reset write protection when unregister with wp-mode
  mm/smaps: don't access young/dirty bit if pte unpresent
  mm: add DEVICE_ZONE to FOR_ALL_ZONES
  kernel/sys_ni: add compat entry for fadvise64_64
  mm/gup: fix FOLL_FORCE COW security issue and remove FOLL_COW
  Revert "zram: remove double compression logic"
  get_maintainer: add Alan to .get_maintainer.ignore
2 parents: 6234806 + 9c80e79


18 files changed (+240, -119 lines)


.get_maintainer.ignore

Lines changed: 2 additions & 0 deletions
@@ -1,2 +1,4 @@
+Alan Cox <[email protected]>
+Alan Cox <[email protected]>
 Christoph Hellwig <[email protected]>
 Marc Gonzalez <[email protected]>

drivers/block/zram/zram_drv.c

Lines changed: 32 additions & 10 deletions
@@ -1146,14 +1146,15 @@ static ssize_t bd_stat_show(struct device *dev,
 static ssize_t debug_stat_show(struct device *dev,
         struct device_attribute *attr, char *buf)
 {
-    int version = 2;
+    int version = 1;
     struct zram *zram = dev_to_zram(dev);
     ssize_t ret;
 
     down_read(&zram->init_lock);
     ret = scnprintf(buf, PAGE_SIZE,
-            "version: %d\n%8llu\n",
+            "version: %d\n%8llu %8llu\n",
             version,
+            (u64)atomic64_read(&zram->stats.writestall),
             (u64)atomic64_read(&zram->stats.miss_free));
     up_read(&zram->init_lock);
 
@@ -1351,7 +1352,7 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
 {
     int ret = 0;
     unsigned long alloced_pages;
-    unsigned long handle = 0;
+    unsigned long handle = -ENOMEM;
     unsigned int comp_len = 0;
     void *src, *dst, *mem;
     struct zcomp_strm *zstrm;
@@ -1369,6 +1370,7 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
     }
     kunmap_atomic(mem);
 
+compress_again:
     zstrm = zcomp_stream_get(zram->comp);
     src = kmap_atomic(page);
     ret = zcomp_compress(zstrm, src, &comp_len);
@@ -1377,20 +1379,39 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
     if (unlikely(ret)) {
         zcomp_stream_put(zram->comp);
         pr_err("Compression failed! err=%d\n", ret);
+        zs_free(zram->mem_pool, handle);
         return ret;
     }
 
     if (comp_len >= huge_class_size)
         comp_len = PAGE_SIZE;
-
-    handle = zs_malloc(zram->mem_pool, comp_len,
-            __GFP_KSWAPD_RECLAIM |
-            __GFP_NOWARN |
-            __GFP_HIGHMEM |
-            __GFP_MOVABLE);
-
+    /*
+     * handle allocation has 2 paths:
+     * a) fast path is executed with preemption disabled (for
+     *  per-cpu streams) and has __GFP_DIRECT_RECLAIM bit clear,
+     *  since we can't sleep;
+     * b) slow path enables preemption and attempts to allocate
+     *  the page with __GFP_DIRECT_RECLAIM bit set. we have to
+     *  put per-cpu compression stream and, thus, to re-do
+     *  the compression once handle is allocated.
+     *
+     * if we have a 'non-null' handle here then we are coming
+     * from the slow path and handle has already been allocated.
+     */
+    if (IS_ERR((void *)handle))
+        handle = zs_malloc(zram->mem_pool, comp_len,
+                __GFP_KSWAPD_RECLAIM |
+                __GFP_NOWARN |
+                __GFP_HIGHMEM |
+                __GFP_MOVABLE);
     if (IS_ERR((void *)handle)) {
         zcomp_stream_put(zram->comp);
+        atomic64_inc(&zram->stats.writestall);
+        handle = zs_malloc(zram->mem_pool, comp_len,
+                GFP_NOIO | __GFP_HIGHMEM |
+                __GFP_MOVABLE);
+        if (!IS_ERR((void *)handle))
+            goto compress_again;
         return PTR_ERR((void *)handle);
     }
 
@@ -1948,6 +1969,7 @@ static int zram_add(void)
     if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
         blk_queue_max_write_zeroes_sectors(zram->disk->queue, UINT_MAX);
 
+    blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, zram->disk->queue);
    ret = device_add_disk(NULL, zram->disk, zram_disk_groups);
    if (ret)
        goto out_cleanup_disk;
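The block comment restored above documents a two-path handle allocation: a fast path that must not sleep (no __GFP_DIRECT_RECLAIM, taken while the per-CPU compression stream is held) and a slow path that drops the stream, allocates with GFP_NOIO, and jumps back to compress_again to redo the compression. Below is a rough userspace sketch of that retry shape, not the zram code itself; write_page(), try_alloc_atomic(), alloc_blocking() and compress_page() are invented stand-ins.

#include <errno.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for the non-sleeping zs_malloc() call (no direct reclaim). */
static void *try_alloc_atomic(size_t len)
{
    return malloc(len);
}

/* Stand-in for the sleeping GFP_NOIO zs_malloc() call. */
static void *alloc_blocking(size_t len)
{
    return malloc(len);
}

/* Stand-in for zcomp_compress(); it just copies the data. */
static size_t compress_page(const char *src, char *dst, size_t dst_len)
{
    size_t n = strlen(src) + 1;

    if (n > dst_len)
        n = dst_len;
    memcpy(dst, src, n);
    return n;
}

static int write_page(const char *src)
{
    char buf[4096];
    size_t comp_len;
    void *handle = NULL;

compress_again:
    /* The compression must be redone after the slow path, because the
     * (per-CPU) stream holding the previous result had to be released. */
    comp_len = compress_page(src, buf, sizeof(buf));

    if (!handle)
        handle = try_alloc_atomic(comp_len);    /* fast path */
    if (!handle) {
        handle = alloc_blocking(comp_len);      /* slow path, may sleep */
        if (handle)
            goto compress_again;                /* handle kept across retry */
        return -ENOMEM;
    }

    memcpy(handle, buf, comp_len);
    free(handle);
    return 0;
}

int main(void)
{
    return write_page("example page") ? 1 : 0;
}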

drivers/block/zram/zram_drv.h

Lines changed: 1 addition & 0 deletions
@@ -81,6 +81,7 @@ struct zram_stats {
     atomic64_t huge_pages_since;    /* no. of huge pages since zram set up */
     atomic64_t pages_stored;        /* no. of pages currently stored */
     atomic_long_t max_used_pages;   /* no. of maximum pages stored */
+    atomic64_t writestall;          /* no. of write slow paths */
     atomic64_t miss_free;           /* no. of missed free */
 #ifdef CONFIG_ZRAM_WRITEBACK
     atomic64_t bd_count;            /* no. of pages in backing device */

fs/proc/task_mmu.c

Lines changed: 4 additions & 3 deletions
@@ -527,10 +527,12 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
     struct vm_area_struct *vma = walk->vma;
     bool locked = !!(vma->vm_flags & VM_LOCKED);
     struct page *page = NULL;
-    bool migration = false;
+    bool migration = false, young = false, dirty = false;
 
     if (pte_present(*pte)) {
         page = vm_normal_page(vma, addr, *pte);
+        young = pte_young(*pte);
+        dirty = pte_dirty(*pte);
     } else if (is_swap_pte(*pte)) {
         swp_entry_t swpent = pte_to_swp_entry(*pte);
 
@@ -560,8 +562,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
     if (!page)
         return;
 
-    smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte),
-              locked, migration);
+    smaps_account(mss, page, false, young, dirty, locked, migration);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
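These hunks read the young and dirty bits only while the PTE is known to be present; when the entry is a swap or migration entry, those bit positions encode swap data, so pte_young()/pte_dirty() would return garbage. A self-contained toy model of that guard is sketched below; the toy_pte layout and account_entry() helper are invented for illustration and are not the kernel types.

#include <stdbool.h>
#include <stdio.h>

struct toy_pte {
    bool present;   /* entry maps a physical page */
    bool young;     /* accessed bit: meaningful only when present */
    bool dirty;     /* dirty bit: meaningful only when present */
};

static void account_entry(const struct toy_pte *pte)
{
    bool young = false, dirty = false;

    if (pte->present) {
        /* Safe to read: the bits describe a mapped page. */
        young = pte->young;
        dirty = pte->dirty;
    }
    /* Non-present entries keep the false defaults, mirroring the
     * cached values passed to smaps_account() above. */
    printf("young=%d dirty=%d\n", young, dirty);
}

int main(void)
{
    struct toy_pte mapped  = { .present = true,  .young = true, .dirty = false };
    struct toy_pte swapped = { .present = false, .young = true, .dirty = true };  /* stale bits */

    account_entry(&mapped);
    account_entry(&swapped);
    return 0;
}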

fs/userfaultfd.c

Lines changed: 4 additions & 0 deletions
@@ -1601,6 +1601,10 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
             wake_userfault(vma->vm_userfaultfd_ctx.ctx, &range);
         }
 
+        /* Reset ptes for the whole vma range if wr-protected */
+        if (userfaultfd_wp(vma))
+            uffd_wp_range(mm, vma, start, vma_end - start, false);
+
         new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS;
         prev = vma_merge(mm, prev, start, vma_end, new_flags,
                  vma->anon_vma, vma->vm_file, vma->vm_pgoff,

include/linux/mm.h

Lines changed: 0 additions & 1 deletion
@@ -2885,7 +2885,6 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
 #define FOLL_MIGRATION  0x400   /* wait for page to replace migration entry */
 #define FOLL_TRIED      0x800   /* a retry, previous pass started an IO */
 #define FOLL_REMOTE     0x2000  /* we are working on non-current tsk/mm */
-#define FOLL_COW        0x4000  /* internal GUP flag */
 #define FOLL_ANON       0x8000  /* don't do file mappings */
 #define FOLL_LONGTERM   0x10000 /* mapping lifetime is indefinite: see below */
 #define FOLL_SPLIT_PMD  0x20000 /* split huge pmd before returning */

include/linux/shmem_fs.h

Lines changed: 4 additions & 9 deletions
@@ -29,15 +29,10 @@ struct shmem_inode_info {
     struct inode        vfs_inode;
 };
 
-#define SHMEM_FL_USER_VISIBLE FS_FL_USER_VISIBLE
-#define SHMEM_FL_USER_MODIFIABLE FS_FL_USER_MODIFIABLE
-#define SHMEM_FL_INHERITED FS_FL_USER_MODIFIABLE
-
-/* Flags that are appropriate for regular files (all but dir-specific ones). */
-#define SHMEM_REG_FLMASK (~(FS_DIRSYNC_FL | FS_TOPDIR_FL))
-
-/* Flags that are appropriate for non-directories/regular files. */
-#define SHMEM_OTHER_FLMASK (FS_NODUMP_FL | FS_NOATIME_FL)
+#define SHMEM_FL_USER_VISIBLE       FS_FL_USER_VISIBLE
+#define SHMEM_FL_USER_MODIFIABLE \
+    (FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NODUMP_FL | FS_NOATIME_FL)
+#define SHMEM_FL_INHERITED          (FS_NODUMP_FL | FS_NOATIME_FL)
 
 struct shmem_sb_info {
     unsigned long max_blocks;   /* How many blocks are allowed */

include/linux/userfaultfd_k.h

Lines changed: 2 additions & 0 deletions
@@ -73,6 +73,8 @@ extern ssize_t mcopy_continue(struct mm_struct *dst_mm, unsigned long dst_start,
 extern int mwriteprotect_range(struct mm_struct *dst_mm,
                    unsigned long start, unsigned long len,
                    bool enable_wp, atomic_t *mmap_changing);
+extern void uffd_wp_range(struct mm_struct *dst_mm, struct vm_area_struct *vma,
+              unsigned long start, unsigned long len, bool enable_wp);
 
 /* mm helpers */
 static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,

include/linux/vm_event_item.h

Lines changed: 11 additions & 4 deletions
@@ -20,12 +20,19 @@
 #define HIGHMEM_ZONE(xx)
 #endif
 
-#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL, HIGHMEM_ZONE(xx) xx##_MOVABLE
+#ifdef CONFIG_ZONE_DEVICE
+#define DEVICE_ZONE(xx) xx##_DEVICE,
+#else
+#define DEVICE_ZONE(xx)
+#endif
+
+#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL, \
+    HIGHMEM_ZONE(xx) xx##_MOVABLE, DEVICE_ZONE(xx)
 
 enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
-        FOR_ALL_ZONES(PGALLOC),
-        FOR_ALL_ZONES(ALLOCSTALL),
-        FOR_ALL_ZONES(PGSCAN_SKIP),
+        FOR_ALL_ZONES(PGALLOC)
+        FOR_ALL_ZONES(ALLOCSTALL)
+        FOR_ALL_ZONES(PGSCAN_SKIP)
         PGFREE, PGACTIVATE, PGDEACTIVATE, PGLAZYFREE,
         PGFAULT, PGMAJFAULT,
         PGLAZYFREED,
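FOR_ALL_ZONES() is an X-macro: each invocation expands into one counter per zone, and with this change DEVICE_ZONE() supplies its own trailing comma only when CONFIG_ZONE_DEVICE is enabled, which is why the commas after the FOR_ALL_ZONES(...) invocations are dropped. Below is a trimmed, standalone illustration of the expansion; the zone list is shortened and the config switch is simulated with an invented HAVE_DEVICE_ZONE macro.

#include <stdio.h>

#define HAVE_DEVICE_ZONE 1  /* stand-in for CONFIG_ZONE_DEVICE */

#if HAVE_DEVICE_ZONE
#define DEVICE_ZONE(xx) xx##_DEVICE,
#else
#define DEVICE_ZONE(xx)
#endif

/* Every expansion ends with a comma (after _MOVABLE, or inside
 * DEVICE_ZONE), so the invocations below must not add their own. */
#define FOR_ALL_ZONES(xx) xx##_NORMAL, xx##_MOVABLE, DEVICE_ZONE(xx)

enum toy_vm_event_item {
    FOR_ALL_ZONES(PGALLOC)
    FOR_ALL_ZONES(PGSCAN_SKIP)
    PGFREE,
    NR_TOY_VM_EVENT_ITEMS
};

int main(void)
{
    /* With the device zone enabled, each FOR_ALL_ZONES() adds 3 counters:
     * 3 + 3 + PGFREE = 7. */
    printf("NR_TOY_VM_EVENT_ITEMS = %d\n", NR_TOY_VM_EVENT_ITEMS);
    return 0;
}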

kernel/kprobes.c

Lines changed: 5 additions & 4 deletions
@@ -1707,11 +1707,12 @@ static struct kprobe *__disable_kprobe(struct kprobe *p)
     /* Try to disarm and disable this/parent probe */
     if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
         /*
-         * If 'kprobes_all_disarmed' is set, 'orig_p'
-         * should have already been disarmed, so
-         * skip unneed disarming process.
+         * Don't be lazy here. Even if 'kprobes_all_disarmed'
+         * is false, 'orig_p' might not have been armed yet.
+         * Note arm_all_kprobes() __tries__ to arm all kprobes
+         * on the best effort basis.
          */
-        if (!kprobes_all_disarmed) {
+        if (!kprobes_all_disarmed && !kprobe_disabled(orig_p)) {
             ret = disarm_kprobe(orig_p, true);
             if (ret) {
                 p->flags &= ~KPROBE_FLAG_DISABLED;
