Skip to content

Commit 8faccfe

Browse files
committed
Merge tag 'mm-hotfixes-stable-2024-07-03-22-23' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull misc fixes from Andrew Morton: "6 hotfixes, all cc:stable. Some fixes for longstanding nilfs2 issues and three unrelated MM fixes" * tag 'mm-hotfixes-stable-2024-07-03-22-23' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: nilfs2: fix incorrect inode allocation from reserved inodes nilfs2: add missing check for inode numbers on directory entries nilfs2: fix inode number range checks mm: avoid overflows in dirty throttling logic Revert "mm/writeback: fix possible divide-by-zero in wb_dirty_limits(), again" mm: optimize the redundant loop of mm_update_owner_next()
2 parents 795c58e + 93aef9e commit 8faccfe

File tree

10 files changed

+70
-20
lines changed

10 files changed

+70
-20
lines changed

fs/nilfs2/alloc.c

Lines changed: 15 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -377,11 +377,12 @@ void *nilfs_palloc_block_get_entry(const struct inode *inode, __u64 nr,
377377
* @target: offset number of an entry in the group (start point)
378378
* @bsize: size in bits
379379
* @lock: spin lock protecting @bitmap
380+
* @wrap: whether to wrap around
380381
*/
381382
static int nilfs_palloc_find_available_slot(unsigned char *bitmap,
382383
unsigned long target,
383384
unsigned int bsize,
384-
spinlock_t *lock)
385+
spinlock_t *lock, bool wrap)
385386
{
386387
int pos, end = bsize;
387388

@@ -397,6 +398,8 @@ static int nilfs_palloc_find_available_slot(unsigned char *bitmap,
397398

398399
end = target;
399400
}
401+
if (!wrap)
402+
return -ENOSPC;
400403

401404
/* wrap around */
402405
for (pos = 0; pos < end; pos++) {
@@ -495,9 +498,10 @@ int nilfs_palloc_count_max_entries(struct inode *inode, u64 nused, u64 *nmaxp)
495498
* nilfs_palloc_prepare_alloc_entry - prepare to allocate a persistent object
496499
* @inode: inode of metadata file using this allocator
497500
* @req: nilfs_palloc_req structure exchanged for the allocation
501+
* @wrap: whether to wrap around
498502
*/
499503
int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
500-
struct nilfs_palloc_req *req)
504+
struct nilfs_palloc_req *req, bool wrap)
501505
{
502506
struct buffer_head *desc_bh, *bitmap_bh;
503507
struct nilfs_palloc_group_desc *desc;
@@ -516,7 +520,7 @@ int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
516520
entries_per_group = nilfs_palloc_entries_per_group(inode);
517521

518522
for (i = 0; i < ngroups; i += n) {
519-
if (group >= ngroups) {
523+
if (group >= ngroups && wrap) {
520524
/* wrap around */
521525
group = 0;
522526
maxgroup = nilfs_palloc_group(inode, req->pr_entry_nr,
@@ -550,7 +554,14 @@ int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
550554
bitmap_kaddr = kmap_local_page(bitmap_bh->b_page);
551555
bitmap = bitmap_kaddr + bh_offset(bitmap_bh);
552556
pos = nilfs_palloc_find_available_slot(
553-
bitmap, group_offset, entries_per_group, lock);
557+
bitmap, group_offset, entries_per_group, lock,
558+
wrap);
559+
/*
560+
* Since the search for a free slot in the second and
561+
* subsequent bitmap blocks always starts from the
562+
* beginning, the wrap flag only has an effect on the
563+
* first search.
564+
*/
554565
kunmap_local(bitmap_kaddr);
555566
if (pos >= 0)
556567
goto found;

fs/nilfs2/alloc.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -50,8 +50,8 @@ struct nilfs_palloc_req {
5050
struct buffer_head *pr_entry_bh;
5151
};
5252

53-
int nilfs_palloc_prepare_alloc_entry(struct inode *,
54-
struct nilfs_palloc_req *);
53+
int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
54+
struct nilfs_palloc_req *req, bool wrap);
5555
void nilfs_palloc_commit_alloc_entry(struct inode *,
5656
struct nilfs_palloc_req *);
5757
void nilfs_palloc_abort_alloc_entry(struct inode *, struct nilfs_palloc_req *);

fs/nilfs2/dat.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -75,7 +75,7 @@ int nilfs_dat_prepare_alloc(struct inode *dat, struct nilfs_palloc_req *req)
7575
{
7676
int ret;
7777

78-
ret = nilfs_palloc_prepare_alloc_entry(dat, req);
78+
ret = nilfs_palloc_prepare_alloc_entry(dat, req, true);
7979
if (ret < 0)
8080
return ret;
8181

fs/nilfs2/dir.c

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -135,6 +135,9 @@ static bool nilfs_check_folio(struct folio *folio, char *kaddr)
135135
goto Enamelen;
136136
if (((offs + rec_len - 1) ^ offs) & ~(chunk_size-1))
137137
goto Espan;
138+
if (unlikely(p->inode &&
139+
NILFS_PRIVATE_INODE(le64_to_cpu(p->inode))))
140+
goto Einumber;
138141
}
139142
if (offs != limit)
140143
goto Eend;
@@ -160,6 +163,9 @@ static bool nilfs_check_folio(struct folio *folio, char *kaddr)
160163
goto bad_entry;
161164
Espan:
162165
error = "directory entry across blocks";
166+
goto bad_entry;
167+
Einumber:
168+
error = "disallowed inode number";
163169
bad_entry:
164170
nilfs_error(sb,
165171
"bad entry in directory #%lu: %s - offset=%lu, inode=%lu, rec_len=%zd, name_len=%d",

fs/nilfs2/ifile.c

Lines changed: 2 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -56,13 +56,10 @@ int nilfs_ifile_create_inode(struct inode *ifile, ino_t *out_ino,
5656
struct nilfs_palloc_req req;
5757
int ret;
5858

59-
req.pr_entry_nr = 0; /*
60-
* 0 says find free inode from beginning
61-
* of a group. dull code!!
62-
*/
59+
req.pr_entry_nr = NILFS_FIRST_INO(ifile->i_sb);
6360
req.pr_entry_bh = NULL;
6461

65-
ret = nilfs_palloc_prepare_alloc_entry(ifile, &req);
62+
ret = nilfs_palloc_prepare_alloc_entry(ifile, &req, false);
6663
if (!ret) {
6764
ret = nilfs_palloc_get_entry_block(ifile, req.pr_entry_nr, 1,
6865
&req.pr_entry_bh);

fs/nilfs2/nilfs.h

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -116,9 +116,15 @@ enum {
116116
#define NILFS_FIRST_INO(sb) (((struct the_nilfs *)sb->s_fs_info)->ns_first_ino)
117117

118118
#define NILFS_MDT_INODE(sb, ino) \
119-
((ino) < NILFS_FIRST_INO(sb) && (NILFS_MDT_INO_BITS & BIT(ino)))
119+
((ino) < NILFS_USER_INO && (NILFS_MDT_INO_BITS & BIT(ino)))
120120
#define NILFS_VALID_INODE(sb, ino) \
121-
((ino) >= NILFS_FIRST_INO(sb) || (NILFS_SYS_INO_BITS & BIT(ino)))
121+
((ino) >= NILFS_FIRST_INO(sb) || \
122+
((ino) < NILFS_USER_INO && (NILFS_SYS_INO_BITS & BIT(ino))))
123+
124+
#define NILFS_PRIVATE_INODE(ino) ({ \
125+
ino_t __ino = (ino); \
126+
((__ino) < NILFS_USER_INO && (__ino) != NILFS_ROOT_INO && \
127+
(__ino) != NILFS_SKETCH_INO); })
122128

123129
/**
124130
* struct nilfs_transaction_info: context information for synchronization

fs/nilfs2/the_nilfs.c

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -452,6 +452,12 @@ static int nilfs_store_disk_layout(struct the_nilfs *nilfs,
452452
}
453453

454454
nilfs->ns_first_ino = le32_to_cpu(sbp->s_first_ino);
455+
if (nilfs->ns_first_ino < NILFS_USER_INO) {
456+
nilfs_err(nilfs->ns_sb,
457+
"too small lower limit for non-reserved inode numbers: %u",
458+
nilfs->ns_first_ino);
459+
return -EINVAL;
460+
}
455461

456462
nilfs->ns_blocks_per_segment = le32_to_cpu(sbp->s_blocks_per_segment);
457463
if (nilfs->ns_blocks_per_segment < NILFS_SEG_MIN_BLOCKS) {

fs/nilfs2/the_nilfs.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -182,7 +182,7 @@ struct the_nilfs {
182182
unsigned long ns_nrsvsegs;
183183
unsigned long ns_first_data_block;
184184
int ns_inode_size;
185-
int ns_first_ino;
185+
unsigned int ns_first_ino;
186186
u32 ns_crc_seed;
187187

188188
/* /sys/fs/<nilfs>/<device> */

kernel/exit.c

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -484,6 +484,8 @@ void mm_update_next_owner(struct mm_struct *mm)
484484
* Search through everything else, we should not get here often.
485485
*/
486486
for_each_process(g) {
487+
if (atomic_read(&mm->mm_users) <= 1)
488+
break;
487489
if (g->flags & PF_KTHREAD)
488490
continue;
489491
for_each_thread(g, c) {

mm/page-writeback.c

Lines changed: 27 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -415,13 +415,20 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc)
415415
else
416416
bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE;
417417

418-
if (bg_thresh >= thresh)
419-
bg_thresh = thresh / 2;
420418
tsk = current;
421419
if (rt_task(tsk)) {
422420
bg_thresh += bg_thresh / 4 + global_wb_domain.dirty_limit / 32;
423421
thresh += thresh / 4 + global_wb_domain.dirty_limit / 32;
424422
}
423+
/*
424+
* Dirty throttling logic assumes the limits in page units fit into
425+
* 32-bits. This gives 16TB dirty limits max which is hopefully enough.
426+
*/
427+
if (thresh > UINT_MAX)
428+
thresh = UINT_MAX;
429+
/* This makes sure bg_thresh is within 32-bits as well */
430+
if (bg_thresh >= thresh)
431+
bg_thresh = thresh / 2;
425432
dtc->thresh = thresh;
426433
dtc->bg_thresh = bg_thresh;
427434

@@ -471,7 +478,11 @@ static unsigned long node_dirty_limit(struct pglist_data *pgdat)
471478
if (rt_task(tsk))
472479
dirty += dirty / 4;
473480

474-
return dirty;
481+
/*
482+
* Dirty throttling logic assumes the limits in page units fit into
483+
* 32-bits. This gives 16TB dirty limits max which is hopefully enough.
484+
*/
485+
return min_t(unsigned long, dirty, UINT_MAX);
475486
}
476487

477488
/**
@@ -508,10 +519,17 @@ static int dirty_background_bytes_handler(struct ctl_table *table, int write,
508519
void *buffer, size_t *lenp, loff_t *ppos)
509520
{
510521
int ret;
522+
unsigned long old_bytes = dirty_background_bytes;
511523

512524
ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
513-
if (ret == 0 && write)
525+
if (ret == 0 && write) {
526+
if (DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE) >
527+
UINT_MAX) {
528+
dirty_background_bytes = old_bytes;
529+
return -ERANGE;
530+
}
514531
dirty_background_ratio = 0;
532+
}
515533
return ret;
516534
}
517535

@@ -537,6 +555,10 @@ static int dirty_bytes_handler(struct ctl_table *table, int write,
537555

538556
ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
539557
if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
558+
if (DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE) > UINT_MAX) {
559+
vm_dirty_bytes = old_bytes;
560+
return -ERANGE;
561+
}
540562
writeback_set_ratelimit();
541563
vm_dirty_ratio = 0;
542564
}
@@ -1660,7 +1682,7 @@ static inline void wb_dirty_limits(struct dirty_throttle_control *dtc)
16601682
*/
16611683
dtc->wb_thresh = __wb_calc_thresh(dtc, dtc->thresh);
16621684
dtc->wb_bg_thresh = dtc->thresh ?
1663-
div64_u64(dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;
1685+
div_u64((u64)dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;
16641686

16651687
/*
16661688
* In order to avoid the stacked BDI deadlock we need

0 commit comments

Comments
 (0)