
Commit 755a184

calebsander authored and axboe committed
ublk: don't access ublk_queue in ublk_unmap_io()
For ublk servers with many ublk queues, accessing the ublk_queue in ublk_unmap_io() is a frequent cache miss. Pass to __ublk_complete_rq() whether the ublk server's data buffer needs to be copied to the request. In the callers __ublk_fail_req() and ublk_ch_uring_cmd_local(), get the flags from the ublk_device instead, as its flags have just been read. In ublk_put_req_ref(), pass false, since all the features that require reference counting disable copying of the data buffer upon completion.

Signed-off-by: Caleb Sander Mateos <[email protected]>
Reviewed-by: Ming Lei <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
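The diff below calls ublk_dev_need_map_io(ub), a device-level counterpart of the queue-level ublk_need_map_io() that lives elsewhere in ublk_drv.c and is not shown in this diff. A minimal sketch of what such a helper plausibly looks like, assuming the driver copies the data buffer only when no server-side buffer-access feature is enabled; the exact flag set is an assumption, not taken from this commit:

/*
 * Sketch, not from this commit: device-level counterpart of
 * ublk_need_map_io(). The driver copies ("maps") the data buffer only
 * when the ublk server does not access it directly; the feature flags
 * checked here are an assumption for illustration.
 */
static inline bool ublk_dev_need_map_io(const struct ublk_device *ub)
{
	return !(ub->dev_info.flags & (UBLK_F_USER_COPY |
				       UBLK_F_SUPPORT_ZERO_COPY |
				       UBLK_F_AUTO_BUF_REG));
}

Reading the flags from the ublk_device, whose fields the callers have just touched, keeps the completion path off the ublk_queue's cache line, which is the frequent miss the commit message describes.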
1 parent: 97a02be

File tree

1 file changed

+14 -10 lines changed


drivers/block/ublk_drv.c

Lines changed: 14 additions & 10 deletions
@@ -529,7 +529,8 @@ static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq,
 
 #endif
 
-static inline void __ublk_complete_rq(struct request *req, struct ublk_io *io);
+static inline void __ublk_complete_rq(struct request *req, struct ublk_io *io,
+				      bool need_map);
 
 static dev_t ublk_chr_devt;
 static const struct class ublk_chr_class = {
@@ -737,8 +738,11 @@ static inline bool ublk_get_req_ref(struct ublk_io *io)
 
 static inline void ublk_put_req_ref(struct ublk_io *io, struct request *req)
 {
-	if (refcount_dec_and_test(&io->ref))
-		__ublk_complete_rq(req, io);
+	if (!refcount_dec_and_test(&io->ref))
+		return;
+
+	/* ublk_need_map_io() and ublk_need_req_ref() are mutually exclusive */
+	__ublk_complete_rq(req, io, false);
 }
 
 static inline bool ublk_sub_req_ref(struct ublk_io *io)
@@ -1048,13 +1052,13 @@ static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
 	return rq_bytes;
 }
 
-static int ublk_unmap_io(const struct ublk_queue *ubq,
+static int ublk_unmap_io(bool need_map,
 			 const struct request *req,
 			 const struct ublk_io *io)
 {
 	const unsigned int rq_bytes = blk_rq_bytes(req);
 
-	if (!ublk_need_map_io(ubq))
+	if (!need_map)
 		return rq_bytes;
 
 	if (ublk_need_unmap_req(req)) {
@@ -1146,9 +1150,9 @@ static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu(
 }
 
 /* todo: handle partial completion */
-static inline void __ublk_complete_rq(struct request *req, struct ublk_io *io)
+static inline void __ublk_complete_rq(struct request *req, struct ublk_io *io,
+				      bool need_map)
 {
-	struct ublk_queue *ubq = req->mq_hctx->driver_data;
 	unsigned int unmapped_bytes;
 	blk_status_t res = BLK_STS_OK;
 
@@ -1172,7 +1176,7 @@ static inline void __ublk_complete_rq(struct request *req, struct ublk_io *io)
 		goto exit;
 
 	/* for READ request, writing data in iod->addr to rq buffers */
-	unmapped_bytes = ublk_unmap_io(ubq, req, io);
+	unmapped_bytes = ublk_unmap_io(need_map, req, io);
 
 	/*
 	 * Extremely impossible since we got data filled in just before
@@ -1749,7 +1753,7 @@ static void __ublk_fail_req(struct ublk_device *ub, struct ublk_io *io,
 		blk_mq_requeue_request(req, false);
 	else {
 		io->res = -EIO;
-		__ublk_complete_rq(req, io);
+		__ublk_complete_rq(req, io, ublk_dev_need_map_io(ub));
 	}
 }
 
@@ -2394,7 +2398,7 @@ static int ublk_ch_uring_cmd_local(struct io_uring_cmd *cmd,
 	if (req_op(req) == REQ_OP_ZONE_APPEND)
 		req->__sector = addr;
 	if (compl)
-		__ublk_complete_rq(req, io);
+		__ublk_complete_rq(req, io, ublk_dev_need_map_io(ub));
 
 	if (ret)
 		goto out;
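The comment added in ublk_put_req_ref() ("ublk_need_map_io() and ublk_need_req_ref() are mutually exclusive") is what makes the hard-coded need_map = false safe there. As a hedged illustration of that invariant (the exact flag sets are assumptions, not taken from this commit), the two queue-level predicates plausibly look like:

/* Sketch, not from this commit: the driver copies the buffer only
 * when the ublk server does not own it. */
static inline bool ublk_need_map_io(const struct ublk_queue *ubq)
{
	return !(ubq->flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY |
			       UBLK_F_AUTO_BUF_REG));
}

/* Sketch, not from this commit: request references are taken exactly
 * when the server accesses the buffer directly. */
static inline bool ublk_need_req_ref(const struct ublk_queue *ubq)
{
	return ubq->flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY |
			     UBLK_F_AUTO_BUF_REG);
}

Under this reading, any request that reaches ublk_put_req_ref() has ublk_need_req_ref() true and therefore ublk_need_map_io() false, so completing it with need_map = false never skips a copy that would otherwise have happened.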
