Skip to content

Commit 324d116

Browse files
Author and committer: Andreas Gruenbacher

gfs2: Align read and write chunks to the page cache

Align the chunks that reads and writes are carried out in to the page cache
rather than the user buffers. This will be more efficient in general,
especially for allocating writes. Optimizing the case that the user buffer is
gfs2 backed isn't very useful; we only need to make sure we won't deadlock.

Signed-off-by: Andreas Gruenbacher <[email protected]>

1 parent 7238226 commit 324d116

File tree

1 file changed

+7
-8
lines changed

1 file changed

+7
-8
lines changed

fs/gfs2/file.c

Lines changed: 7 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -771,6 +771,7 @@ static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
 }
 
 static inline bool should_fault_in_pages(struct iov_iter *i,
+					 struct kiocb *iocb,
 					 size_t *prev_count,
 					 size_t *window_size)
 {
@@ -783,15 +784,13 @@ static inline bool should_fault_in_pages(struct iov_iter *i,
 		return false;
 
 	size = PAGE_SIZE;
-	offs = offset_in_page(i->iov[0].iov_base + i->iov_offset);
+	offs = offset_in_page(iocb->ki_pos);
 	if (*prev_count != count || !*window_size) {
 		size_t nr_dirtied;
 
-		size = ALIGN(offs + count, PAGE_SIZE);
-		size = min_t(size_t, size, SZ_1M);
 		nr_dirtied = max(current->nr_dirtied_pause -
 				 current->nr_dirtied, 8);
-		size = min(size, nr_dirtied << PAGE_SHIFT);
+		size = min_t(size_t, SZ_1M, nr_dirtied << PAGE_SHIFT);
 	}
 
 	*prev_count = count;
@@ -845,7 +844,7 @@ static ssize_t gfs2_file_direct_read(struct kiocb *iocb, struct iov_iter *to,
 	if (ret > 0)
 		read = ret;
 
-	if (should_fault_in_pages(to, &prev_count, &window_size)) {
+	if (should_fault_in_pages(to, iocb, &prev_count, &window_size)) {
 		gfs2_holder_allow_demote(gh);
 		window_size -= fault_in_iov_iter_writeable(to, window_size);
 		gfs2_holder_disallow_demote(gh);
@@ -916,7 +915,7 @@ static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from,
 	if (ret > 0)
 		written = ret;
 
-	if (should_fault_in_pages(from, &prev_count, &window_size)) {
+	if (should_fault_in_pages(from, iocb, &prev_count, &window_size)) {
 		gfs2_holder_allow_demote(gh);
 		window_size -= fault_in_iov_iter_readable(from, window_size);
 		gfs2_holder_disallow_demote(gh);
@@ -984,7 +983,7 @@ static ssize_t gfs2_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
 	if (ret > 0)
 		read += ret;
 
-	if (should_fault_in_pages(to, &prev_count, &window_size)) {
+	if (should_fault_in_pages(to, iocb, &prev_count, &window_size)) {
 		gfs2_holder_allow_demote(&gh);
 		window_size -= fault_in_iov_iter_writeable(to, window_size);
 		gfs2_holder_disallow_demote(&gh);
@@ -1061,7 +1060,7 @@ static ssize_t gfs2_file_buffered_write(struct kiocb *iocb,
 		goto out_unlock;
 
 	from->count = orig_count - written;
-	if (should_fault_in_pages(from, &prev_count, &window_size)) {
+	if (should_fault_in_pages(from, iocb, &prev_count, &window_size)) {
 		gfs2_holder_allow_demote(gh);
 		window_size -= fault_in_iov_iter_readable(from, window_size);
 		gfs2_holder_disallow_demote(gh);

Comments: 0 commit comments