
Commit 3798754

Authored by Kanchan Joshi, committed by Jens Axboe (axboe)
block: extend functionality to map bvec iterator
Extend blk_rq_map_user_iov so that it can handle a bvec iterator, using the new blk_rq_map_user_bvec function. That helper maps the pages from the bvec iterator into a bio and places the bio into the request. It will be used by nvme for the uring-passthrough path when IO is done using pre-mapped buffers.

Signed-off-by: Kanchan Joshi <[email protected]>
Signed-off-by: Anuj Gupta <[email protected]>
Suggested-by: Christoph Hellwig <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jens Axboe <[email protected]>
1 parent ab89e8e commit 3798754
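For orientation, here is a minimal sketch of how an in-kernel caller holding pre-mapped pages could hand them to blk_rq_map_user_iov() through a bvec iterator once this change is in place. The submit_premapped() wrapper, its parameters, and the WRITE direction are illustrative assumptions, not code from this commit; only iov_iter_bvec() and blk_rq_map_user_iov() are existing kernel interfaces.

#include <linux/blk-mq.h>
#include <linux/uio.h>

/*
 * Hypothetical caller: the pages are already pinned and described by a
 * bvec array built elsewhere (e.g. io_uring fixed buffers).
 */
static int submit_premapped(struct request *rq, struct bio_vec *bvec,
			    unsigned int nr_segs, size_t len)
{
	struct iov_iter iter;

	/* Build an ITER_BVEC iterator over the pre-mapped pages. */
	iov_iter_bvec(&iter, WRITE, bvec, nr_segs, len);

	/*
	 * After this commit, blk_rq_map_user_iov() detects the bvec
	 * iterator and maps it zero-copy via blk_rq_map_user_bvec(),
	 * falling back to a bounce copy only on limits mismatches.
	 */
	return blk_rq_map_user_iov(rq->q, rq, NULL, &iter, GFP_KERNEL);
}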


block/blk-map.c

Lines changed: 71 additions & 4 deletions
@@ -548,6 +548,62 @@ int blk_rq_append_bio(struct request *rq, struct bio *bio)
 }
 EXPORT_SYMBOL(blk_rq_append_bio);
 
+/* Prepare bio for passthrough IO given ITER_BVEC iter */
+static int blk_rq_map_user_bvec(struct request *rq, const struct iov_iter *iter)
+{
+	struct request_queue *q = rq->q;
+	size_t nr_iter = iov_iter_count(iter);
+	size_t nr_segs = iter->nr_segs;
+	struct bio_vec *bvecs, *bvprvp = NULL;
+	struct queue_limits *lim = &q->limits;
+	unsigned int nsegs = 0, bytes = 0;
+	struct bio *bio;
+	size_t i;
+
+	if (!nr_iter || (nr_iter >> SECTOR_SHIFT) > queue_max_hw_sectors(q))
+		return -EINVAL;
+	if (nr_segs > queue_max_segments(q))
+		return -EINVAL;
+
+	/* no iovecs to alloc, as we already have a BVEC iterator */
+	bio = blk_rq_map_bio_alloc(rq, 0, GFP_KERNEL);
+	if (bio == NULL)
+		return -ENOMEM;
+
+	bio_iov_bvec_set(bio, (struct iov_iter *)iter);
+	blk_rq_bio_prep(rq, bio, nr_segs);
+
+	/* loop to perform a bunch of sanity checks */
+	bvecs = (struct bio_vec *)iter->bvec;
+	for (i = 0; i < nr_segs; i++) {
+		struct bio_vec *bv = &bvecs[i];
+
+		/*
+		 * If the queue doesn't support SG gaps and adding this
+		 * offset would create a gap, fallback to copy.
+		 */
+		if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv->bv_offset)) {
+			blk_mq_map_bio_put(bio);
+			return -EREMOTEIO;
+		}
+		/* check full condition */
+		if (nsegs >= nr_segs || bytes > UINT_MAX - bv->bv_len)
+			goto put_bio;
+		if (bytes + bv->bv_len > nr_iter)
+			goto put_bio;
+		if (bv->bv_offset + bv->bv_len > PAGE_SIZE)
+			goto put_bio;
+
+		nsegs++;
+		bytes += bv->bv_len;
+		bvprvp = bv;
+	}
+	return 0;
+put_bio:
+	blk_mq_map_bio_put(bio);
+	return -EINVAL;
+}
+
 /**
  * blk_rq_map_user_iov - map user data to a request, for passthrough requests
  * @q:		request queue where request should be inserted
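The -EREMOTEIO return above is what drives the copy fallback added in the next hunk: a bvec whose placement would create an SG gap against the queue's virt boundary cannot be mapped directly. As a rough, standalone illustration of the condition bvec_gap_to_prev() checks (a paraphrase of the limits logic, not kernel source), two adjacent segments are only gap-free when the previous one ends on the boundary mask and the next one starts on it:

#include <stdbool.h>
#include <stdio.h>

/*
 * Simplified model of the virt-boundary gap test. With a boundary mask
 * such as 0xfff (NVMe's 4K PRP rule), the previous segment must end on
 * a boundary and the next one must start on a boundary, or the pair
 * cannot be expressed without a gap. Exact kernel semantics may differ
 * in detail; this only shows the shape of the check.
 */
static bool creates_gap(unsigned long mask, unsigned int prev_offset,
			unsigned int prev_len, unsigned int next_offset)
{
	if (!mask)
		return false;	/* queue has no virt boundary */
	return ((prev_offset + prev_len) & mask) || (next_offset & mask);
}

int main(void)
{
	unsigned long mask = 0xfff;	/* 4K virt boundary */

	/* previous vector ends 4K-aligned, next starts at offset 0: no gap */
	printf("%d\n", creates_gap(mask, 0, 4096, 0));

	/* next vector starts 512 bytes into its page: gap, forces a copy */
	printf("%d\n", creates_gap(mask, 0, 4096, 512));
	return 0;
}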
@@ -567,24 +623,35 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 		struct rq_map_data *map_data,
 		const struct iov_iter *iter, gfp_t gfp_mask)
 {
-	bool copy = false;
+	bool copy = false, map_bvec = false;
 	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
 	struct bio *bio = NULL;
 	struct iov_iter i;
 	int ret = -EINVAL;
 
-	if (!iter_is_iovec(iter))
-		goto fail;
-
 	if (map_data)
 		copy = true;
 	else if (blk_queue_may_bounce(q))
 		copy = true;
 	else if (iov_iter_alignment(iter) & align)
 		copy = true;
+	else if (iov_iter_is_bvec(iter))
+		map_bvec = true;
+	else if (!iter_is_iovec(iter))
+		copy = true;
 	else if (queue_virt_boundary(q))
 		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);
 
+	if (map_bvec) {
+		ret = blk_rq_map_user_bvec(rq, iter);
+		if (!ret)
+			return 0;
+		if (ret != -EREMOTEIO)
+			goto fail;
+		/* fall back to copying the data on limits mismatches */
+		copy = true;
+	}
+
 	i = *iter;
 	do {
 		if (copy)
