
fs/afs/write.c

Lines changed: 67 additions & 42 deletions
@@ -490,47 +490,25 @@ static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter,
 }
 
 /*
- * Synchronously write back the locked page and any subsequent non-locked dirty
- * pages.
+ * Extend the region to be written back to include subsequent contiguously
+ * dirty pages if possible, but don't sleep while doing so.
+ *
+ * If this page holds new content, then we can include filler zeros in the
+ * writeback.
  */
-static int afs_write_back_from_locked_page(struct address_space *mapping,
-					   struct writeback_control *wbc,
-					   struct page *primary_page,
-					   pgoff_t final_page)
+static void afs_extend_writeback(struct address_space *mapping,
+				 struct afs_vnode *vnode,
+				 long *_count,
+				 pgoff_t start,
+				 pgoff_t final_page,
+				 unsigned *_offset,
+				 unsigned *_to,
+				 bool new_content)
 {
-	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
-	struct iov_iter iter;
 	struct page *pages[8], *page;
-	unsigned long count, priv;
-	unsigned n, offset, to, f, t;
-	pgoff_t start, first, last;
-	loff_t i_size, pos, end;
-	int loop, ret;
-
-	_enter(",%lx", primary_page->index);
-
-	count = 1;
-	if (test_set_page_writeback(primary_page))
-		BUG();
-
-	/* Find all consecutive lockable dirty pages that have contiguous
-	 * written regions, stopping when we find a page that is not
-	 * immediately lockable, is not dirty or is missing, or we reach the
-	 * end of the range.
-	 */
-	start = primary_page->index;
-	priv = page_private(primary_page);
-	offset = afs_page_dirty_from(primary_page, priv);
-	to = afs_page_dirty_to(primary_page, priv);
-	trace_afs_page_dirty(vnode, tracepoint_string("store"), primary_page);
-
-	WARN_ON(offset == to);
-	if (offset == to)
-		trace_afs_page_dirty(vnode, tracepoint_string("WARN"), primary_page);
-
-	if (start >= final_page ||
-	    (to < PAGE_SIZE && !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)))
-		goto no_more;
+	unsigned long count = *_count, priv;
+	unsigned offset = *_offset, to = *_to, n, f, t;
+	int loop;
 
 	start++;
 	do {
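
The mechanical heart of this hunk is that the gather loop's running state (count, offset, to) no longer lives only in afs_write_back_from_locked_page's stack frame: it crosses into the extracted helper through pointer parameters, is worked on as plain locals, and is published back just before return (see the no_more: epilogue in the last hunk). A standalone sketch of that in/out-parameter shape, with hypothetical names (extend_run, dirty[]), not the kernel code:

/* Sketch only (not kernel code): state enters through pointers, is
 * handled as locals, and is stored back once at the end, mirroring
 * afs_extend_writeback()'s *_count/*_offset/*_to handling.
 */
#include <stdbool.h>
#include <stdio.h>

static void extend_run(const bool *dirty, long n, long start, long *_count)
{
	long count = *_count;			/* pull state into a local */

	while (start < n && dirty[start]) {	/* gather contiguous pages */
		count++;
		start++;
	}

	*_count = count;			/* publish the result once */
}

int main(void)
{
	bool dirty[] = { true, true, true, false, true };
	long count = 1;		/* the primary page is already counted */

	extend_run(dirty, 5, 1, &count);
	printf("pages to write back: %ld\n", count);	/* prints 3 */
	return 0;
}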
@@ -551,8 +529,7 @@ static int afs_write_back_from_locked_page(struct address_space *mapping,
 
 		for (loop = 0; loop < n; loop++) {
 			page = pages[loop];
-			if (to != PAGE_SIZE &&
-			    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags))
+			if (to != PAGE_SIZE && !new_content)
 				break;
 			if (page->index > final_page)
 				break;
@@ -566,8 +543,7 @@ static int afs_write_back_from_locked_page(struct address_space *mapping,
 			priv = page_private(page);
 			f = afs_page_dirty_from(page, priv);
 			t = afs_page_dirty_to(page, priv);
-			if (f != 0 &&
-			    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)) {
+			if (f != 0 && !new_content) {
 				unlock_page(page);
 				break;
 			}
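
This hunk and the previous one make the same substitution: the per-page test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags) becomes the new_content bool that the caller samples once and passes down, so the atomic flags word is read once per writeback pass rather than once per page. A standalone sketch of that hoist, with hypothetical names, not the kernel code:

/* Sketch only (not kernel code): the flag is sampled once before the
 * loop instead of being re-read from the atomic flags word per page.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NEW_CONTENT_BIT 0x1UL
#define PAGE_SZ 4096u

struct vnode { _Atomic unsigned long flags; };

static long count_writable(struct vnode *vnode, const unsigned *to, long n)
{
	bool new_content = atomic_load(&vnode->flags) & NEW_CONTENT_BIT;
	long i;

	for (i = 0; i < n; i++)
		if (to[i] != PAGE_SZ && !new_content)
			break;		/* was: ... && !test_bit(...) */
	return i;
}

int main(void)
{
	struct vnode v = { .flags = 0 };
	unsigned to[] = { PAGE_SZ, PAGE_SZ, 1024, PAGE_SZ };

	printf("%ld\n", count_writable(&v, to, 4));	/* prints 2 */
	return 0;
}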
@@ -593,6 +569,55 @@ static int afs_write_back_from_locked_page(struct address_space *mapping,
 	} while (start <= final_page && count < 65536);
 
 no_more:
+	*_count = count;
+	*_offset = offset;
+	*_to = to;
+}
+
+/*
+ * Synchronously write back the locked page and any subsequent non-locked dirty
+ * pages.
+ */
+static int afs_write_back_from_locked_page(struct address_space *mapping,
+					   struct writeback_control *wbc,
+					   struct page *primary_page,
+					   pgoff_t final_page)
+{
+	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
+	struct iov_iter iter;
+	unsigned long count, priv;
+	unsigned offset, to;
+	pgoff_t start, first, last;
+	loff_t i_size, pos, end;
+	bool new_content = test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
+	int ret;
+
+	_enter(",%lx", primary_page->index);
+
+	count = 1;
+	if (test_set_page_writeback(primary_page))
+		BUG();
+
+	/* Find all consecutive lockable dirty pages that have contiguous
+	 * written regions, stopping when we find a page that is not
+	 * immediately lockable, is not dirty or is missing, or we reach the
+	 * end of the range.
+	 */
+	start = primary_page->index;
+	priv = page_private(primary_page);
+	offset = afs_page_dirty_from(primary_page, priv);
+	to = afs_page_dirty_to(primary_page, priv);
+	trace_afs_page_dirty(vnode, tracepoint_string("store"), primary_page);
+
+	WARN_ON(offset == to);
+	if (offset == to)
+		trace_afs_page_dirty(vnode, tracepoint_string("WARN"), primary_page);
+
+	if (start < final_page &&
+	    (to == PAGE_SIZE || new_content))
+		afs_extend_writeback(mapping, vnode, &count, start, final_page,
+				     &offset, &to, new_content);
+
 	/* We now have a contiguous set of dirty pages, each with writeback
 	 * set; the first page is still locked at this point, but all the rest
 	 * have been unlocked.
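
Note that the old early exit (if (start >= final_page || (to < PAGE_SIZE && !test_bit(...))) goto no_more;) from the first hunk reappears here inverted, as the caller-side guard on afs_extend_writeback(). Since the dirty range ends at to <= PAGE_SIZE, "to < PAGE_SIZE" is exactly the negation of "to == PAGE_SIZE", so the two conditions are De Morgan complements and behaviour is preserved. A quick standalone check of that equivalence (not kernel code; PAGE_SIZE fixed at 4096 for the test):

/* Exhaustively verify: old skip condition == negation of new guard. */
#include <assert.h>
#include <stdbool.h>

int main(void)
{
	const unsigned PAGE_SIZE = 4096;

	for (unsigned long start = 0; start <= 2; start++)
		for (unsigned long final_page = 0; final_page <= 2; final_page++)
			for (unsigned to = 0; to <= PAGE_SIZE; to += 512)
				for (int new_content = 0; new_content <= 1; new_content++) {
					bool skip = start >= final_page ||
						(to < PAGE_SIZE && !new_content);
					bool extend = start < final_page &&
						(to == PAGE_SIZE || new_content);
					assert(skip == !extend);
				}
	return 0;
}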
