
Commit 9e6b475

Ming Lei authored and Jens Axboe committed
ublk: prepare for supporting to register request buffer automatically
UBLK_F_SUPPORT_ZERO_COPY requires the ublk server to issue an explicit buffer register/unregister uring_cmd for each IO. This is not only inefficient, but also introduces a dependency between the buffer consumer and the buffer register/unregister uring_cmds; see tools/testing/selftests/ublk/stripe.c, in which backing file IO has to be issued one by one via IOSQE_IO_LINK.

Prepare for adding the feature UBLK_F_AUTO_BUF_REG to address the existing zero copy limitation:

- register the request buffer automatically to the ublk uring_cmd's io_uring context before delivering the io command to the ublk server

- unregister the request buffer automatically from the ublk uring_cmd's io_uring context when completing the request

- io_uring will unregister the buffer automatically when the ring is exiting, so we needn't worry about accidental exit

To use this feature, the ublk server has to create one sparse buffer table.

Reviewed-by: Caleb Sander Mateos <[email protected]>
Signed-off-by: Ming Lei <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jens Axboe <[email protected]>
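For context, the explicit flow this patch prepares to replace looks roughly like the sketch below: under UBLK_F_SUPPORT_ZERO_COPY the server chains a register uring_cmd, the backing-file IO, and an unregister uring_cmd with IOSQE_IO_LINK for every request. This is an illustrative sketch loosely modeled on the ublk selftests, not code from this commit; in particular, passing the buffer-table index via ublksrv_io_cmd.addr and using addr NULL with the fixed-buffer read are assumptions here.

/*
 * Illustrative sketch (not from this commit): the explicit zero-copy flow
 * under UBLK_F_SUPPORT_ZERO_COPY. Each backing IO is chained behind a
 * register uring_cmd and followed by an unregister uring_cmd.
 */
#include <string.h>
#include <liburing.h>
#include <linux/ublk_cmd.h>

static void prep_buf_cmd(struct io_uring_sqe *sqe, int ublk_ch_fd,
			 __u32 cmd_op, __u16 q_id, __u16 tag, __u64 buf_idx)
{
	struct ublksrv_io_cmd *cmd = (struct ublksrv_io_cmd *)&sqe->cmd;

	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_URING_CMD;
	sqe->fd = ublk_ch_fd;
	sqe->cmd_op = cmd_op;
	cmd->q_id = q_id;
	cmd->tag = tag;
	cmd->addr = buf_idx;	/* buffer-table slot (assumed encoding) */
}

static void queue_zc_read(struct io_uring *ring, int ublk_ch_fd,
			  int backing_fd, __u16 q_id, __u16 tag,
			  __u64 off, __u32 len, unsigned buf_idx)
{
	struct io_uring_sqe *sqe;

	/* 1) register the request buffer into the ring's buffer table */
	sqe = io_uring_get_sqe(ring);
	prep_buf_cmd(sqe, ublk_ch_fd, UBLK_U_IO_REGISTER_IO_BUF,
		     q_id, tag, buf_idx);
	sqe->flags |= IOSQE_IO_LINK;

	/* 2) backing-file IO consuming the registered buffer; addr NULL
	 * means offset 0 into the registered (kernel bvec) buffer */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_read_fixed(sqe, backing_fd, NULL, len, off, buf_idx);
	sqe->flags |= IOSQE_IO_LINK;

	/* 3) unregister once the backing IO completes */
	sqe = io_uring_get_sqe(ring);
	prep_buf_cmd(sqe, ublk_ch_fd, UBLK_U_IO_UNREGISTER_IO_BUF,
		     q_id, tag, buf_idx);
}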
1 parent: b1c3b46 · commit: 9e6b475

File tree: 1 file changed (+64, -6 lines)

drivers/block/ublk_drv.c

Lines changed: 64 additions & 6 deletions
@@ -133,6 +133,14 @@ struct ublk_uring_cmd_pdu {
  */
 #define UBLK_IO_FLAG_NEED_GET_DATA 0x08
 
+/*
+ * request buffer is registered automatically, so we have to unregister it
+ * before completing this request.
+ *
+ * io_uring will unregister buffer automatically for us during exiting.
+ */
+#define UBLK_IO_FLAG_AUTO_BUF_REG 0x10
+
 /* atomic RW with ubq->cancel_lock */
 #define UBLK_IO_FLAG_CANCELED 0x80000000
 
@@ -205,6 +213,7 @@ struct ublk_params_header {
 	__u32 types;
 };
 
+static void ublk_io_release(void *priv);
 static void ublk_stop_dev_unlocked(struct ublk_device *ub);
 static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq);
 static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
@@ -619,14 +628,20 @@ static inline bool ublk_support_zero_copy(const struct ublk_queue *ubq)
 	return ubq->flags & UBLK_F_SUPPORT_ZERO_COPY;
 }
 
+static inline bool ublk_support_auto_buf_reg(const struct ublk_queue *ubq)
+{
+	return false;
+}
+
 static inline bool ublk_support_user_copy(const struct ublk_queue *ubq)
 {
 	return ubq->flags & UBLK_F_USER_COPY;
 }
 
 static inline bool ublk_need_map_io(const struct ublk_queue *ubq)
 {
-	return !ublk_support_user_copy(ubq) && !ublk_support_zero_copy(ubq);
+	return !ublk_support_user_copy(ubq) && !ublk_support_zero_copy(ubq) &&
+		!ublk_support_auto_buf_reg(ubq);
 }
 
 static inline bool ublk_need_req_ref(const struct ublk_queue *ubq)
@@ -637,8 +652,13 @@ static inline bool ublk_need_req_ref(const struct ublk_queue *ubq)
 	 *
 	 * for zero copy, request buffer need to be registered to io_uring
 	 * buffer table, so reference is needed
+	 *
+	 * For auto buffer register, ublk server still may issue
+	 * UBLK_IO_COMMIT_AND_FETCH_REQ before one registered buffer is used up,
+	 * so reference is required too.
 	 */
-	return ublk_support_user_copy(ubq) || ublk_support_zero_copy(ubq);
+	return ublk_support_user_copy(ubq) || ublk_support_zero_copy(ubq) ||
+		ublk_support_auto_buf_reg(ubq);
 }
 
 static inline void ublk_init_req_ref(const struct ublk_queue *ubq,
@@ -1155,6 +1175,35 @@ static inline void __ublk_abort_rq(struct ublk_queue *ubq,
 	blk_mq_end_request(rq, BLK_STS_IOERR);
 }
 
+static bool ublk_auto_buf_reg(struct request *req, struct ublk_io *io,
+			      unsigned int issue_flags)
+{
+	struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
+	int ret;
+
+	ret = io_buffer_register_bvec(io->cmd, req, ublk_io_release, 0,
+				      issue_flags);
+	if (ret) {
+		blk_mq_end_request(req, BLK_STS_IOERR);
+		return false;
+	}
+	/* one extra reference is dropped by ublk_io_release */
+	refcount_set(&data->ref, 2);
+	io->flags |= UBLK_IO_FLAG_AUTO_BUF_REG;
+	return true;
+}
+
+static bool ublk_prep_auto_buf_reg(struct ublk_queue *ubq,
+				   struct request *req, struct ublk_io *io,
+				   unsigned int issue_flags)
+{
+	if (ublk_support_auto_buf_reg(ubq) && ublk_rq_has_data(req))
+		return ublk_auto_buf_reg(req, io, issue_flags);
+
+	ublk_init_req_ref(ubq, req);
+	return true;
+}
+
 static bool ublk_start_io(const struct ublk_queue *ubq, struct request *req,
 			  struct ublk_io *io)
 {
@@ -1180,7 +1229,6 @@ static bool ublk_start_io(const struct ublk_queue *ubq, struct request *req,
 			mapped_bytes >> 9;
 	}
 
-	ublk_init_req_ref(ubq, req);
 	return true;
 }
 
@@ -1226,7 +1274,8 @@ static void ublk_dispatch_req(struct ublk_queue *ubq,
 	if (!ublk_start_io(ubq, req, io))
 		return;
 
-	ublk_complete_io_cmd(io, req, UBLK_IO_RES_OK, issue_flags);
+	if (ublk_prep_auto_buf_reg(ubq, req, io, issue_flags))
+		ublk_complete_io_cmd(io, req, UBLK_IO_RES_OK, issue_flags);
 }
 
 static void ublk_cmd_tw_cb(struct io_uring_cmd *cmd,
@@ -1994,7 +2043,8 @@ static int ublk_fetch(struct io_uring_cmd *cmd, struct ublk_queue *ubq,
 
 static int ublk_commit_and_fetch(const struct ublk_queue *ubq,
 				 struct ublk_io *io, struct io_uring_cmd *cmd,
-				 const struct ublksrv_io_cmd *ub_cmd)
+				 const struct ublksrv_io_cmd *ub_cmd,
+				 unsigned int issue_flags)
 {
 	struct request *req = io->req;
 
@@ -2014,6 +2064,14 @@ static int ublk_commit_and_fetch(const struct ublk_queue *ubq,
 		return -EINVAL;
 	}
 
+	if (ublk_support_auto_buf_reg(ubq)) {
+		if (io->flags & UBLK_IO_FLAG_AUTO_BUF_REG) {
+			WARN_ON_ONCE(io_buffer_unregister_bvec(cmd, 0,
+						issue_flags));
+			io->flags &= ~UBLK_IO_FLAG_AUTO_BUF_REG;
+		}
+	}
+
 	ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
 
 	/* now this cmd slot is owned by ublk driver */
@@ -2110,7 +2168,7 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
 		goto out;
 		break;
 	case UBLK_IO_COMMIT_AND_FETCH_REQ:
-		ret = ublk_commit_and_fetch(ubq, io, cmd, ub_cmd);
+		ret = ublk_commit_and_fetch(ubq, io, cmd, ub_cmd, issue_flags);
 		if (ret)
 			goto out;
 		break;
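Once the follow-up UBLK_F_AUTO_BUF_REG patches land, the server side reduces to something like the sketch below: create the sparse buffer table at ring setup (as the commit message requires), then consume the auto-registered slot directly, with no linked register/unregister uring_cmds. This is a forward-looking sketch, not code from this commit; the one-slot-per-tag convention is an assumption.

/*
 * Forward-looking sketch (assumes the UBLK_F_AUTO_BUF_REG follow-ups):
 * the driver registers the request buffer into the ring's sparse table
 * before delivering the io command, so the server consumes it directly.
 */
#include <liburing.h>

static int setup_server_ring(struct io_uring *ring, unsigned queue_depth)
{
	int ret = io_uring_queue_init(queue_depth * 4, ring, 0);

	if (ret)
		return ret;
	/* the sparse buffer table the ublk server has to create;
	 * one slot per in-flight IO tag is an assumption here */
	return io_uring_register_buffers_sparse(ring, queue_depth);
}

static void queue_backing_read(struct io_uring *ring, int backing_fd,
			       __u16 tag, __u64 off, __u32 len)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	/* buffer assumed auto-registered at slot 'tag'; no register/
	 * unregister uring_cmds and no IOSQE_IO_LINK chain needed */
	io_uring_prep_read_fixed(sqe, backing_fd, NULL, len, off, tag);
}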
