Skip to content

Commit 9518ae6

Browse files
committed
Merge tag 'gfs2-for-v6.10' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2
Pull gfs2 updates from Andreas Gruenbacher:

 - Properly fix the glock shrinker this time: it broke in commit "gfs2: Make glock lru list scanning safer" and commit "gfs2: fix glock shrinker ref issues" wasn't actually enough to fix it

 - On unmount, keep glocks around long enough that no more dlm callbacks can occur on them

 - Some more folio conversion patches from Matthew Wilcox

 - Lots of other smaller fixes and cleanups

* tag 'gfs2-for-v6.10' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2: (27 commits)
  gfs2: make timeout values more explicit
  gfs2: Convert gfs2_aspace_writepage() to use a folio
  gfs2: Add a migrate_folio operation for journalled files
  gfs2: Simplify gfs2_read_super
  gfs2: Convert gfs2_page_mkwrite() to use a folio
  gfs2: gfs2_freeze_unlock cleanup
  gfs2: Remove and replace gfs2_glock_queue_work
  gfs2: do_xmote fixes
  gfs2: finish_xmote cleanup
  gfs2: Unlock fewer glocks on unmount
  gfs2: Fix potential glock use-after-free on unmount
  gfs2: Remove ill-placed consistency check
  gfs2: Fix lru_count accounting
  gfs2: Fix "Make glock lru list scanning safer"
  Revert "gfs2: fix glock shrinker ref issues"
  gfs2: Fix "ignore unlock failures after withdraw"
  gfs2: Get rid of unnecessary test_and_set_bit
  gfs2: Don't set GLF_LOCK in gfs2_dispose_glock_lru
  gfs2: Replace gfs2_glock_queue_put with gfs2_glock_put_async
  gfs2: Get rid of gfs2_glock_queue_put in signal_our_withdraw
  ...
2 parents 6fffab6 + c1c53c2 commit 9518ae6

File tree

18 files changed

+320
-258
lines changed

18 files changed

+320
-258
lines changed

fs/gfs2/aops.c

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -116,8 +116,7 @@ static int gfs2_write_jdata_folio(struct folio *folio,
116116
* @folio: The folio to write
117117
* @wbc: The writeback control
118118
*
119-
* This is shared between writepage and writepages and implements the
120-
* core of the writepage operation. If a transaction is required then
119+
* Implements the core of write back. If a transaction is required then
121120
* the checked flag will have been set and the transaction will have
122121
* already been started before this is called.
123122
*/
@@ -755,6 +754,7 @@ static const struct address_space_operations gfs2_jdata_aops = {
755754
.readahead = gfs2_readahead,
756755
.dirty_folio = jdata_dirty_folio,
757756
.bmap = gfs2_bmap,
757+
.migrate_folio = buffer_migrate_folio,
758758
.invalidate_folio = gfs2_invalidate_folio,
759759
.release_folio = gfs2_release_folio,
760760
.is_partially_uptodate = block_is_partially_uptodate,

fs/gfs2/bmap.c

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -1827,7 +1827,7 @@ static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
18271827
gfs2_assert_withdraw(sdp, bh);
18281828
if (gfs2_assert_withdraw(sdp,
18291829
prev_bnr != bh->b_blocknr)) {
1830-
fs_emerg(sdp, "inode %llu, block:%llu, i_h:%u,"
1830+
fs_emerg(sdp, "inode %llu, block:%llu, i_h:%u, "
18311831
"s_h:%u, mp_h:%u\n",
18321832
(unsigned long long)ip->i_no_addr,
18331833
prev_bnr, ip->i_height, strip_h, mp_h);

fs/gfs2/dir.c

Lines changed: 17 additions & 14 deletions
Original file line number | Diff line number | Diff line change
@@ -562,15 +562,18 @@ static struct gfs2_dirent *gfs2_dirent_scan(struct inode *inode, void *buf,
562562
int ret = 0;
563563

564564
ret = gfs2_dirent_offset(GFS2_SB(inode), buf);
565-
if (ret < 0)
566-
goto consist_inode;
567-
565+
if (ret < 0) {
566+
gfs2_consist_inode(GFS2_I(inode));
567+
return ERR_PTR(-EIO);
568+
}
568569
offset = ret;
569570
prev = NULL;
570571
dent = buf + offset;
571572
size = be16_to_cpu(dent->de_rec_len);
572-
if (gfs2_check_dirent(GFS2_SB(inode), dent, offset, size, len, 1))
573-
goto consist_inode;
573+
if (gfs2_check_dirent(GFS2_SB(inode), dent, offset, size, len, 1)) {
574+
gfs2_consist_inode(GFS2_I(inode));
575+
return ERR_PTR(-EIO);
576+
}
574577
do {
575578
ret = scan(dent, name, opaque);
576579
if (ret)
@@ -582,8 +585,10 @@ static struct gfs2_dirent *gfs2_dirent_scan(struct inode *inode, void *buf,
582585
dent = buf + offset;
583586
size = be16_to_cpu(dent->de_rec_len);
584587
if (gfs2_check_dirent(GFS2_SB(inode), dent, offset, size,
585-
len, 0))
586-
goto consist_inode;
588+
len, 0)) {
589+
gfs2_consist_inode(GFS2_I(inode));
590+
return ERR_PTR(-EIO);
591+
}
587592
} while(1);
588593

589594
switch(ret) {
@@ -597,10 +602,6 @@ static struct gfs2_dirent *gfs2_dirent_scan(struct inode *inode, void *buf,
597602
BUG_ON(ret > 0);
598603
return ERR_PTR(ret);
599604
}
600-
601-
consist_inode:
602-
gfs2_consist_inode(GFS2_I(inode));
603-
return ERR_PTR(-EIO);
604605
}
605606

606607
static int dirent_check_reclen(struct gfs2_inode *dip,
@@ -609,14 +610,16 @@ static int dirent_check_reclen(struct gfs2_inode *dip,
609610
const void *ptr = d;
610611
u16 rec_len = be16_to_cpu(d->de_rec_len);
611612

612-
if (unlikely(rec_len < sizeof(struct gfs2_dirent)))
613-
goto broken;
613+
if (unlikely(rec_len < sizeof(struct gfs2_dirent))) {
614+
gfs2_consist_inode(dip);
615+
return -EIO;
616+
}
614617
ptr += rec_len;
615618
if (ptr < end_p)
616619
return rec_len;
617620
if (ptr == end_p)
618621
return -ENOENT;
619-
broken:
622+
620623
gfs2_consist_inode(dip);
621624
return -EIO;
622625
}

fs/gfs2/file.c

Lines changed: 30 additions & 29 deletions
Original file line number | Diff line number | Diff line change
@@ -376,23 +376,23 @@ static void gfs2_size_hint(struct file *filep, loff_t offset, size_t size)
376376
}
377377

378378
/**
379-
* gfs2_allocate_page_backing - Allocate blocks for a write fault
380-
* @page: The (locked) page to allocate backing for
379+
* gfs2_allocate_folio_backing - Allocate blocks for a write fault
380+
* @folio: The (locked) folio to allocate backing for
381381
* @length: Size of the allocation
382382
*
383-
* We try to allocate all the blocks required for the page in one go. This
383+
* We try to allocate all the blocks required for the folio in one go. This
384384
* might fail for various reasons, so we keep trying until all the blocks to
385-
* back this page are allocated. If some of the blocks are already allocated,
385+
* back this folio are allocated. If some of the blocks are already allocated,
386386
* that is ok too.
387387
*/
388-
static int gfs2_allocate_page_backing(struct page *page, unsigned int length)
388+
static int gfs2_allocate_folio_backing(struct folio *folio, size_t length)
389389
{
390-
u64 pos = page_offset(page);
390+
u64 pos = folio_pos(folio);
391391

392392
do {
393393
struct iomap iomap = { };
394394

395-
if (gfs2_iomap_alloc(page->mapping->host, pos, length, &iomap))
395+
if (gfs2_iomap_alloc(folio->mapping->host, pos, length, &iomap))
396396
return -EIO;
397397

398398
if (length < iomap.length)
@@ -414,16 +414,16 @@ static int gfs2_allocate_page_backing(struct page *page, unsigned int length)
414414

415415
static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
416416
{
417-
struct page *page = vmf->page;
417+
struct folio *folio = page_folio(vmf->page);
418418
struct inode *inode = file_inode(vmf->vma->vm_file);
419419
struct gfs2_inode *ip = GFS2_I(inode);
420420
struct gfs2_sbd *sdp = GFS2_SB(inode);
421421
struct gfs2_alloc_parms ap = {};
422-
u64 offset = page_offset(page);
422+
u64 pos = folio_pos(folio);
423423
unsigned int data_blocks, ind_blocks, rblocks;
424424
vm_fault_t ret = VM_FAULT_LOCKED;
425425
struct gfs2_holder gh;
426-
unsigned int length;
426+
size_t length;
427427
loff_t size;
428428
int err;
429429

@@ -436,23 +436,23 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
436436
goto out_uninit;
437437
}
438438

439-
/* Check page index against inode size */
439+
/* Check folio index against inode size */
440440
size = i_size_read(inode);
441-
if (offset >= size) {
441+
if (pos >= size) {
442442
ret = VM_FAULT_SIGBUS;
443443
goto out_unlock;
444444
}
445445

446-
/* Update file times before taking page lock */
446+
/* Update file times before taking folio lock */
447447
file_update_time(vmf->vma->vm_file);
448448

449-
/* page is wholly or partially inside EOF */
450-
if (size - offset < PAGE_SIZE)
451-
length = size - offset;
449+
/* folio is wholly or partially inside EOF */
450+
if (size - pos < folio_size(folio))
451+
length = size - pos;
452452
else
453-
length = PAGE_SIZE;
453+
length = folio_size(folio);
454454

455-
gfs2_size_hint(vmf->vma->vm_file, offset, length);
455+
gfs2_size_hint(vmf->vma->vm_file, pos, length);
456456

457457
set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
458458
set_bit(GIF_SW_PAGED, &ip->i_flags);
@@ -463,11 +463,12 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
463463
*/
464464

465465
if (!gfs2_is_stuffed(ip) &&
466-
!gfs2_write_alloc_required(ip, offset, length)) {
467-
lock_page(page);
468-
if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
466+
!gfs2_write_alloc_required(ip, pos, length)) {
467+
folio_lock(folio);
468+
if (!folio_test_uptodate(folio) ||
469+
folio->mapping != inode->i_mapping) {
469470
ret = VM_FAULT_NOPAGE;
470-
unlock_page(page);
471+
folio_unlock(folio);
471472
}
472473
goto out_unlock;
473474
}
@@ -504,7 +505,7 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
504505
goto out_trans_fail;
505506
}
506507

507-
/* Unstuff, if required, and allocate backing blocks for page */
508+
/* Unstuff, if required, and allocate backing blocks for folio */
508509
if (gfs2_is_stuffed(ip)) {
509510
err = gfs2_unstuff_dinode(ip);
510511
if (err) {
@@ -513,22 +514,22 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
513514
}
514515
}
515516

516-
lock_page(page);
517+
folio_lock(folio);
517518
/* If truncated, we must retry the operation, we may have raced
518519
* with the glock demotion code.
519520
*/
520-
if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
521+
if (!folio_test_uptodate(folio) || folio->mapping != inode->i_mapping) {
521522
ret = VM_FAULT_NOPAGE;
522523
goto out_page_locked;
523524
}
524525

525-
err = gfs2_allocate_page_backing(page, length);
526+
err = gfs2_allocate_folio_backing(folio, length);
526527
if (err)
527528
ret = vmf_fs_error(err);
528529

529530
out_page_locked:
530531
if (ret != VM_FAULT_LOCKED)
531-
unlock_page(page);
532+
folio_unlock(folio);
532533
out_trans_end:
533534
gfs2_trans_end(sdp);
534535
out_trans_fail:
@@ -540,8 +541,8 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
540541
out_uninit:
541542
gfs2_holder_uninit(&gh);
542543
if (ret == VM_FAULT_LOCKED) {
543-
set_page_dirty(page);
544-
wait_for_stable_page(page);
544+
folio_mark_dirty(folio);
545+
folio_wait_stable(folio);
545546
}
546547
sb_end_pagefault(inode->i_sb);
547548
return ret;

0 commit comments

Comments
 (0)