Skip to content

Commit 6ded703

Browse files
committed
brd: check for REQ_NOWAIT and set correct page allocation mask
If REQ_NOWAIT is set, then do a non-blocking allocation if the operation is a write and we need to insert a new page. Currently REQ_NOWAIT cannot be set as the queue isn't marked as supporting nowait, this change is in preparation for allowing that. radix_tree_preload() warns on attempting to call it with an allocation mask that doesn't allow blocking. While that warning could arguably be removed, we need to handle radix insertion failures anyway as they are more likely if we cannot block to get memory. Remove legacy BUG_ON()'s and turn them into proper errors instead, one for the allocation failure and one for finding a page that doesn't match the correct index. Cc: [email protected] # 5.10+ Reviewed-by: Christoph Hellwig <[email protected]> Signed-off-by: Jens Axboe <[email protected]>
1 parent db0ccc4 commit 6ded703

File tree

1 file changed

+28
-20
lines changed

1 file changed

+28
-20
lines changed

drivers/block/brd.c

Lines changed: 28 additions & 20 deletions
Original file line number | Diff line number | Diff line change
@@ -80,26 +80,21 @@ static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
8080
/*
8181
* Insert a new page for a given sector, if one does not already exist.
8282
*/
83-
static int brd_insert_page(struct brd_device *brd, sector_t sector)
83+
static int brd_insert_page(struct brd_device *brd, sector_t sector, gfp_t gfp)
8484
{
8585
pgoff_t idx;
8686
struct page *page;
87-
gfp_t gfp_flags;
87+
int ret = 0;
8888

8989
page = brd_lookup_page(brd, sector);
9090
if (page)
9191
return 0;
9292

93-
/*
94-
* Must use NOIO because we don't want to recurse back into the
95-
* block or filesystem layers from page reclaim.
96-
*/
97-
gfp_flags = GFP_NOIO | __GFP_ZERO | __GFP_HIGHMEM;
98-
page = alloc_page(gfp_flags);
93+
page = alloc_page(gfp | __GFP_ZERO | __GFP_HIGHMEM);
9994
if (!page)
10095
return -ENOMEM;
10196

102-
if (radix_tree_preload(GFP_NOIO)) {
97+
if (gfpflags_allow_blocking(gfp) && radix_tree_preload(gfp)) {
10398
__free_page(page);
10499
return -ENOMEM;
105100
}
@@ -110,15 +105,17 @@ static int brd_insert_page(struct brd_device *brd, sector_t sector)
110105
if (radix_tree_insert(&brd->brd_pages, idx, page)) {
111106
__free_page(page);
112107
page = radix_tree_lookup(&brd->brd_pages, idx);
113-
BUG_ON(!page);
114-
BUG_ON(page->index != idx);
108+
if (!page)
109+
ret = -ENOMEM;
110+
else if (page->index != idx)
111+
ret = -EIO;
115112
} else {
116113
brd->brd_nr_pages++;
117114
}
118115
spin_unlock(&brd->brd_lock);
119116

120117
radix_tree_preload_end();
121-
return 0;
118+
return ret;
122119
}
123120

124121
/*
@@ -167,19 +164,20 @@ static void brd_free_pages(struct brd_device *brd)
167164
/*
168165
* copy_to_brd_setup must be called before copy_to_brd. It may sleep.
169166
*/
170-
static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n)
167+
static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n,
168+
gfp_t gfp)
171169
{
172170
unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
173171
size_t copy;
174172
int ret;
175173

176174
copy = min_t(size_t, n, PAGE_SIZE - offset);
177-
ret = brd_insert_page(brd, sector);
175+
ret = brd_insert_page(brd, sector, gfp);
178176
if (ret)
179177
return ret;
180178
if (copy < n) {
181179
sector += copy >> SECTOR_SHIFT;
182-
ret = brd_insert_page(brd, sector);
180+
ret = brd_insert_page(brd, sector, gfp);
183181
}
184182
return ret;
185183
}
@@ -254,20 +252,26 @@ static void copy_from_brd(void *dst, struct brd_device *brd,
254252
* Process a single bvec of a bio.
255253
*/
256254
static int brd_do_bvec(struct brd_device *brd, struct page *page,
257-
unsigned int len, unsigned int off, enum req_op op,
255+
unsigned int len, unsigned int off, blk_opf_t opf,
258256
sector_t sector)
259257
{
260258
void *mem;
261259
int err = 0;
262260

263-
if (op_is_write(op)) {
264-
err = copy_to_brd_setup(brd, sector, len);
261+
if (op_is_write(opf)) {
262+
/*
263+
* Must use NOIO because we don't want to recurse back into the
264+
* block or filesystem layers from page reclaim.
265+
*/
266+
gfp_t gfp = opf & REQ_NOWAIT ? GFP_NOWAIT : GFP_NOIO;
267+
268+
err = copy_to_brd_setup(brd, sector, len, gfp);
265269
if (err)
266270
goto out;
267271
}
268272

269273
mem = kmap_atomic(page);
270-
if (!op_is_write(op)) {
274+
if (!op_is_write(opf)) {
271275
copy_from_brd(mem + off, brd, sector, len);
272276
flush_dcache_page(page);
273277
} else {
@@ -296,8 +300,12 @@ static void brd_submit_bio(struct bio *bio)
296300
(len & (SECTOR_SIZE - 1)));
297301

298302
err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset,
299-
bio_op(bio), sector);
303+
bio->bi_opf, sector);
300304
if (err) {
305+
if (err == -ENOMEM && bio->bi_opf & REQ_NOWAIT) {
306+
bio_wouldblock_error(bio);
307+
return;
308+
}
301309
bio_io_error(bio);
302310
return;
303311
}

0 commit comments

Comments
 (0)