Skip to content

Commit ed9832b

Browse files
Kundan Kumar authored and axboe committed
block: introduce folio awareness and add a bigger size from folio
Add a bigger size from the folio to the bio and skip merge processing for pages. Fetch the offset of the page within a folio. Depending on the size of the folio and folio_offset, fetch a larger length. This length may consist of multiple contiguous pages if the folio is multi-order. Using the length, calculate the number of pages which will be added to the bio and increment the loop counter to skip those pages. This technique helps to avoid the overhead of merging pages which belong to the same large-order folio. Also folio-ize the functions bio_iov_add_page() and bio_iov_add_zone_append_page(). Signed-off-by: Kundan Kumar <[email protected]> Tested-by: Luis Chamberlain <[email protected]> Reviewed-by: Christoph Hellwig <[email protected]> Reviewed-by: Matthew Wilcox (Oracle) <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Jens Axboe <[email protected]>
1 parent 7de9895 commit ed9832b

File tree

1 file changed

+61
-18
lines changed

1 file changed

+61
-18
lines changed

block/bio.c

Lines changed: 61 additions & 18 deletions
Original file line number · Diff line number · Diff line change
@@ -931,7 +931,8 @@ static bool bvec_try_merge_page(struct bio_vec *bv, struct page *page,
931931
if (!zone_device_pages_have_same_pgmap(bv->bv_page, page))
932932
return false;
933933

934-
*same_page = ((vec_end_addr & PAGE_MASK) == page_addr);
934+
*same_page = ((vec_end_addr & PAGE_MASK) == ((page_addr + off) &
935+
PAGE_MASK));
935936
if (!*same_page) {
936937
if (IS_ENABLED(CONFIG_KMSAN))
937938
return false;
@@ -1227,8 +1228,8 @@ void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
12271228
bio_set_flag(bio, BIO_CLONED);
12281229
}
12291230

1230-
static int bio_iov_add_page(struct bio *bio, struct page *page,
1231-
unsigned int len, unsigned int offset)
1231+
static int bio_iov_add_folio(struct bio *bio, struct folio *folio, size_t len,
1232+
size_t offset)
12321233
{
12331234
bool same_page = false;
12341235

@@ -1237,30 +1238,61 @@ static int bio_iov_add_page(struct bio *bio, struct page *page,
12371238

12381239
if (bio->bi_vcnt > 0 &&
12391240
bvec_try_merge_page(&bio->bi_io_vec[bio->bi_vcnt - 1],
1240-
page, len, offset, &same_page)) {
1241+
folio_page(folio, 0), len, offset,
1242+
&same_page)) {
12411243
bio->bi_iter.bi_size += len;
12421244
if (same_page)
1243-
bio_release_page(bio, page);
1245+
bio_release_page(bio, folio_page(folio, 0));
12441246
return 0;
12451247
}
1246-
__bio_add_page(bio, page, len, offset);
1248+
bio_add_folio_nofail(bio, folio, len, offset);
12471249
return 0;
12481250
}
12491251

1250-
static int bio_iov_add_zone_append_page(struct bio *bio, struct page *page,
1251-
unsigned int len, unsigned int offset)
1252+
static int bio_iov_add_zone_append_folio(struct bio *bio, struct folio *folio,
1253+
size_t len, size_t offset)
12521254
{
12531255
struct request_queue *q = bdev_get_queue(bio->bi_bdev);
12541256
bool same_page = false;
12551257

1256-
if (bio_add_hw_page(q, bio, page, len, offset,
1258+
if (bio_add_hw_folio(q, bio, folio, len, offset,
12571259
queue_max_zone_append_sectors(q), &same_page) != len)
12581260
return -EINVAL;
12591261
if (same_page)
1260-
bio_release_page(bio, page);
1262+
bio_release_page(bio, folio_page(folio, 0));
12611263
return 0;
12621264
}
12631265

1266+
static unsigned int get_contig_folio_len(unsigned int *num_pages,
1267+
struct page **pages, unsigned int i,
1268+
struct folio *folio, size_t left,
1269+
size_t offset)
1270+
{
1271+
size_t bytes = left;
1272+
size_t contig_sz = min_t(size_t, PAGE_SIZE - offset, bytes);
1273+
unsigned int j;
1274+
1275+
/*
1276+
* We might COW a single page in the middle of
1277+
* a large folio, so we have to check that all
1278+
* pages belong to the same folio.
1279+
*/
1280+
bytes -= contig_sz;
1281+
for (j = i + 1; j < i + *num_pages; j++) {
1282+
size_t next = min_t(size_t, PAGE_SIZE, bytes);
1283+
1284+
if (page_folio(pages[j]) != folio ||
1285+
pages[j] != pages[j - 1] + 1) {
1286+
break;
1287+
}
1288+
contig_sz += next;
1289+
bytes -= next;
1290+
}
1291+
*num_pages = j - i;
1292+
1293+
return contig_sz;
1294+
}
1295+
12641296
#define PAGE_PTRS_PER_BVEC (sizeof(struct bio_vec) / sizeof(struct page *))
12651297

12661298
/**
@@ -1280,9 +1312,9 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
12801312
unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
12811313
struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
12821314
struct page **pages = (struct page **)bv;
1283-
ssize_t size, left;
1284-
unsigned len, i = 0;
1285-
size_t offset;
1315+
ssize_t size;
1316+
unsigned int num_pages, i = 0;
1317+
size_t offset, folio_offset, left, len;
12861318
int ret = 0;
12871319

12881320
/*
@@ -1322,17 +1354,28 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
13221354
goto out;
13231355
}
13241356

1325-
for (left = size, i = 0; left > 0; left -= len, i++) {
1357+
for (left = size, i = 0; left > 0; left -= len, i += num_pages) {
13261358
struct page *page = pages[i];
1359+
struct folio *folio = page_folio(page);
1360+
1361+
folio_offset = ((size_t)folio_page_idx(folio, page) <<
1362+
PAGE_SHIFT) + offset;
1363+
1364+
len = min(folio_size(folio) - folio_offset, left);
1365+
1366+
num_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
1367+
1368+
if (num_pages > 1)
1369+
len = get_contig_folio_len(&num_pages, pages, i,
1370+
folio, left, offset);
13271371

1328-
len = min_t(size_t, PAGE_SIZE - offset, left);
13291372
if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
1330-
ret = bio_iov_add_zone_append_page(bio, page, len,
1331-
offset);
1373+
ret = bio_iov_add_zone_append_folio(bio, folio, len,
1374+
folio_offset);
13321375
if (ret)
13331376
break;
13341377
} else
1335-
bio_iov_add_page(bio, page, len, offset);
1378+
bio_iov_add_folio(bio, folio, len, folio_offset);
13361379

13371380
offset = 0;
13381381
}

0 commit comments

Comments (0)