@@ -66,7 +66,8 @@
 		| UBLK_F_USER_COPY \
 		| UBLK_F_ZONED \
 		| UBLK_F_USER_RECOVERY_FAIL_IO \
-		| UBLK_F_UPDATE_SIZE)
+		| UBLK_F_UPDATE_SIZE \
+		| UBLK_F_AUTO_BUF_REG)
 
 #define UBLK_F_ALL_RECOVERY_FLAGS (UBLK_F_USER_RECOVERY \
 		| UBLK_F_USER_RECOVERY_REISSUE \
@@ -80,6 +81,9 @@
 
 struct ublk_rq_data {
 	refcount_t ref;
+
+	/* for auto-unregister buffer in case of UBLK_F_AUTO_BUF_REG */
+	u16 buf_index;
 };
 
 struct ublk_uring_cmd_pdu {
@@ -101,6 +105,9 @@ struct ublk_uring_cmd_pdu {
 	 * setup in ublk uring_cmd handler
 	 */
 	struct ublk_queue *ubq;
+
+	struct ublk_auto_buf_reg buf;
+
 	u16 tag;
 };
 
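Note: the UAPI definition of struct ublk_auto_buf_reg is not shown in this diff. A sketch consistent with what the patch relies on (a 16-bit buffer index, reserved fields that must be zero, and an overall size of 8 bytes, enforced by the BUILD_BUG_ON at the end of the patch):

```c
#include <linux/types.h>

/* sketch only: field names and widths are assumptions inferred from
 * this patch (pdu->buf.index, the reserved0/reserved1 checks, and the
 * sizeof == 8 assertion), not the authoritative <linux/ublk_cmd.h>.
 */
struct ublk_auto_buf_reg {
	__u16	index;		/* fixed-buffer slot to register the request into */
	__u8	flags;
	__u8	reserved0;	/* must be zero */
	__u32	reserved1;	/* must be zero */
};
```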
@@ -630,7 +637,7 @@ static inline bool ublk_support_zero_copy(const struct ublk_queue *ubq)
 
 static inline bool ublk_support_auto_buf_reg(const struct ublk_queue *ubq)
 {
-	return false;
+	return ubq->flags & UBLK_F_AUTO_BUF_REG;
 }
 
 static inline bool ublk_support_user_copy(const struct ublk_queue *ubq)
@@ -1178,17 +1185,20 @@ static inline void __ublk_abort_rq(struct ublk_queue *ubq,
 static bool ublk_auto_buf_reg(struct request *req, struct ublk_io *io,
 			      unsigned int issue_flags)
 {
+	struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(io->cmd);
 	struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
 	int ret;
 
-	ret = io_buffer_register_bvec(io->cmd, req, ublk_io_release, 0,
-				      issue_flags);
+	ret = io_buffer_register_bvec(io->cmd, req, ublk_io_release,
+				      pdu->buf.index, issue_flags);
 	if (ret) {
 		blk_mq_end_request(req, BLK_STS_IOERR);
 		return false;
 	}
 	/* one extra reference is dropped by ublk_io_release */
 	refcount_set(&data->ref, 2);
+	/* store buffer index in request payload */
+	data->buf_index = pdu->buf.index;
 	io->flags |= UBLK_IO_FLAG_AUTO_BUF_REG;
 	return true;
 }
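Once io_buffer_register_bvec() succeeds, the request's pages sit in the ring's fixed-buffer table at pdu->buf.index, so the daemon can address them like any other registered buffer. A rough liburing sketch of the consuming side (queue_backing_read(), backing_fd, and the sparse-table sizing are illustrative assumptions, not part of this patch):

```c
#include <liburing.h>

/* hypothetical daemon-side helper: read from the backing file straight
 * into the buffer the kernel auto-registered for this request. Assumes
 * the ring was prepared once with a sparse table large enough for every
 * index handed to the kernel, e.g.:
 *
 *	io_uring_register_buffers_sparse(ring, queue_depth);
 */
static void queue_backing_read(struct io_uring *ring, int backing_fd,
			       unsigned int len, __u64 off, int buf_index)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	/* for a kernel-registered bvec buffer, the address argument is
	 * taken as an offset into that buffer; NULL means its start
	 * (assumption, matching how zero-copy ublk servers consume
	 * kernel-registered buffers)
	 */
	io_uring_prep_read_fixed(sqe, backing_fd, NULL, len, off, buf_index);
}
```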
@@ -1952,6 +1962,18 @@ static inline void ublk_prep_cancel(struct io_uring_cmd *cmd,
 	io_uring_cmd_mark_cancelable(cmd, issue_flags);
 }
 
+static inline int ublk_set_auto_buf_reg(struct io_uring_cmd *cmd)
+{
+	struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
+
+	pdu->buf = ublk_sqe_addr_to_auto_buf_reg(READ_ONCE(cmd->sqe->addr));
+
+	if (pdu->buf.reserved0 || pdu->buf.reserved1)
+		return -EINVAL;
+
+	return 0;
+}
+
 static void ublk_io_release(void *priv)
 {
 	struct request *rq = priv;
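ublk_sqe_addr_to_auto_buf_reg() (a UAPI helper, not shown in this diff) unpacks the descriptor that userspace encoded into the 64-bit sqe->addr field, which is otherwise unused by ublk uring_cmds. A hypothetical userspace encoder for the layout sketched earlier, with the exact bit packing (index in bits 0-15, flags in 16-23, reserved bytes above) being an assumption:

```c
/* hypothetical inverse of the kernel's decoder; bit layout assumed */
static inline __u64 ublk_auto_buf_reg_to_sqe_addr(const struct ublk_auto_buf_reg *reg)
{
	return (__u64)reg->index |
	       ((__u64)reg->flags << 16) |
	       ((__u64)reg->reserved0 << 24) |
	       ((__u64)reg->reserved1 << 32);
}
```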
@@ -2034,6 +2056,12 @@ static int ublk_fetch(struct io_uring_cmd *cmd, struct ublk_queue *ubq,
 		goto out;
 	}
 
+	if (ublk_support_auto_buf_reg(ubq)) {
+		ret = ublk_set_auto_buf_reg(cmd);
+		if (ret)
+			return ret;
+	}
+
 	ublk_fill_io_cmd(io, cmd, buf_addr);
 	ublk_mark_io_ready(ub, ubq);
 out:
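On the daemon side the descriptor rides along with the initial FETCH_REQ. A rough sketch of arming one tag, with manual SQE setup in the style of typical ublk servers (cdev_fd, the one-slot-per-tag policy, and the encoder from the previous sketch are assumptions):

```c
#include <string.h>
#include <liburing.h>
#include <linux/ublk_cmd.h>

/* hypothetical: arm one FETCH_REQ and request auto buffer registration */
static void queue_fetch(struct io_uring *ring, int cdev_fd,
			__u16 q_id, __u16 tag)
{
	struct ublk_auto_buf_reg reg = { .index = tag };  /* one slot per tag */
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct ublksrv_io_cmd *cmd;

	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_URING_CMD;
	sqe->fd = cdev_fd;			/* /dev/ublkcN */
	sqe->cmd_op = UBLK_U_IO_FETCH_REQ;
	sqe->addr = ublk_auto_buf_reg_to_sqe_addr(&reg);

	cmd = (struct ublksrv_io_cmd *)sqe->cmd;
	cmd->q_id = q_id;
	cmd->tag = tag;
	cmd->addr = 0;				/* no copy buffer needed */

	io_uring_sqe_set_data64(sqe, tag);
}
```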
@@ -2065,11 +2093,20 @@ static int ublk_commit_and_fetch(const struct ublk_queue *ubq,
 	}
 
 	if (ublk_support_auto_buf_reg(ubq)) {
+		int ret;
+
 		if (io->flags & UBLK_IO_FLAG_AUTO_BUF_REG) {
-			WARN_ON_ONCE(io_buffer_unregister_bvec(cmd, 0,
+			struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
+
+			WARN_ON_ONCE(io_buffer_unregister_bvec(cmd,
+						data->buf_index,
 						issue_flags));
 			io->flags &= ~UBLK_IO_FLAG_AUTO_BUF_REG;
 		}
+
+		ret = ublk_set_auto_buf_reg(cmd);
+		if (ret)
+			return ret;
 	}
 
 	ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
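This is why the index is stashed in the request pdu above: the unregister must use whatever index was registered for the I/O just completed, while the freshly decoded pdu->buf may name a different slot for the next request. The re-arm mirrors queue_fetch() from the earlier sketch (again illustrative, not part of this patch; same includes as above):

```c
/* hypothetical: complete one I/O and fetch the next in a single SQE;
 * the new reg.index need not match the one used for the completed I/O
 */
static void queue_commit_and_fetch(struct io_uring *ring, int cdev_fd,
				   __u16 q_id, __u16 tag, __s32 result,
				   __u16 next_index)
{
	struct ublk_auto_buf_reg reg = { .index = next_index };
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct ublksrv_io_cmd *cmd;

	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_URING_CMD;
	sqe->fd = cdev_fd;
	sqe->cmd_op = UBLK_U_IO_COMMIT_AND_FETCH_REQ;
	sqe->addr = ublk_auto_buf_reg_to_sqe_addr(&reg);

	cmd = (struct ublksrv_io_cmd *)sqe->cmd;
	cmd->q_id = q_id;
	cmd->tag = tag;
	cmd->result = result;		/* bytes transferred or -errno */

	io_uring_sqe_set_data64(sqe, tag);
}
```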
@@ -2791,8 +2828,11 @@ static int ublk_ctrl_add_dev(const struct ublksrv_ctrl_cmd *header)
 		 * For USER_COPY, we depends on userspace to fill request
 		 * buffer by pwrite() to ublk char device, which can't be
 		 * used for unprivileged device
+		 *
+		 * Same with zero copy or auto buffer register.
 		 */
-		if (info.flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY))
+		if (info.flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY |
+					UBLK_F_AUTO_BUF_REG))
 			return -EINVAL;
 	}
 
@@ -2850,7 +2890,8 @@ static int ublk_ctrl_add_dev(const struct ublksrv_ctrl_cmd *header)
 		UBLK_F_URING_CMD_COMP_IN_TASK;
 
 	/* GET_DATA isn't needed any more with USER_COPY or ZERO COPY */
-	if (ub->dev_info.flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY))
+	if (ub->dev_info.flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY |
+				UBLK_F_AUTO_BUF_REG))
 		ub->dev_info.flags &= ~UBLK_F_NEED_GET_DATA;
 
 	/*
@@ -3377,6 +3418,7 @@ static int __init ublk_init(void)
 
 	BUILD_BUG_ON((u64)UBLKSRV_IO_BUF_OFFSET +
 			UBLKSRV_IO_BUF_TOTAL_SIZE < UBLKSRV_IO_BUF_OFFSET);
+	BUILD_BUG_ON(sizeof(struct ublk_auto_buf_reg) != 8);
 
 	init_waitqueue_head(&ublk_idr_wq);
 
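Since the descriptor must pack into the 64-bit sqe->addr field, the size check is an ABI guarantee, and userspace can mirror it. A minimal sketch, assuming the struct from the first sketch (or <linux/ublk_cmd.h> once the feature lands):

```c
#include <assert.h>
#include <linux/types.h>

static_assert(sizeof(struct ublk_auto_buf_reg) == sizeof(__u64),
	      "ublk_auto_buf_reg must fit in sqe->addr");
```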