49
49
/* All UBLK_F_* have to be included into UBLK_F_ALL */
#define UBLK_F_ALL (UBLK_F_SUPPORT_ZERO_COPY | UBLK_F_URING_CMD_COMP_IN_TASK)

/* All UBLK_PARAM_TYPE_* should be included here */
#define UBLK_PARAM_TYPE_ALL (UBLK_PARAM_TYPE_BASIC | UBLK_PARAM_TYPE_DISCARD)
52
55
/* Per-request driver payload, allocated by blk-mq for each request. */
struct ublk_rq_data {
	/*
	 * NOTE(review): presumably queued via task_work_add() to complete
	 * the uring cmd in task context (cf. UBLK_F_URING_CMD_COMP_IN_TASK);
	 * the queuing site is outside this view — confirm.
	 */
	struct callback_head work;
};
@@ -137,6 +140,8 @@ struct ublk_device {
137
140
spinlock_t mm_lock ;
138
141
struct mm_struct * mm ;
139
142
143
+ struct ublk_params params ;
144
+
140
145
struct completion completion ;
141
146
unsigned int nr_queues_ready ;
142
147
atomic_t nr_aborted_queues ;
@@ -149,6 +154,12 @@ struct ublk_device {
149
154
struct work_struct stop_work ;
150
155
};
151
156
157
/*
 * Header of ublk_params: leading part of the userspace buffer exchanged
 * by UBLK_CMD_GET_PARAMS/UBLK_CMD_SET_PARAMS.
 */
struct ublk_params_header {
	__u32	len;	/* size in bytes of the caller's params buffer */
	__u32	types;	/* bitmask of UBLK_PARAM_TYPE_* carried in the buffer */
};
152
163
static dev_t ublk_chr_devt ;
153
164
static struct class * ublk_chr_class ;
154
165
@@ -160,6 +171,91 @@ static DEFINE_MUTEX(ublk_ctl_mutex);
160
171
161
172
static struct miscdevice ublk_misc ;
162
173
174
/*
 * Apply the mandatory basic parameters from ub->params.basic to the
 * device's request queue and gendisk: block sizes, I/O limits, cache/
 * rotational/read-only attributes, and disk capacity.
 */
static void ublk_dev_param_basic_apply(struct ublk_device *ub)
{
	struct request_queue *q = ub->ub_disk->queue;
	const struct ublk_param_basic *p = &ub->params.basic;

	/* block-size fields are stored as shifts; convert to bytes */
	blk_queue_logical_block_size(q, 1 << p->logical_bs_shift);
	blk_queue_physical_block_size(q, 1 << p->physical_bs_shift);
	blk_queue_io_min(q, 1 << p->io_min_shift);
	blk_queue_io_opt(q, 1 << p->io_opt_shift);

	blk_queue_write_cache(q, p->attrs & UBLK_ATTR_VOLATILE_CACHE,
			p->attrs & UBLK_ATTR_FUA);
	if (p->attrs & UBLK_ATTR_ROTATIONAL)
		blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
	else
		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);

	blk_queue_max_hw_sectors(q, p->max_sectors);
	blk_queue_chunk_sectors(q, p->chunk_sectors);
	blk_queue_virt_boundary(q, p->virt_boundary_mask);

	/* read-only is only ever set here, never cleared back */
	if (p->attrs & UBLK_ATTR_READ_ONLY)
		set_disk_ro(ub->ub_disk, true);

	set_capacity(ub->ub_disk, p->dev_sectors);
}
200
+
201
/*
 * Apply the optional discard parameters from ub->params.discard to the
 * device's request queue: discard/write-zeroes limits and alignment.
 */
static void ublk_dev_param_discard_apply(struct ublk_device *ub)
{
	struct request_queue *q = ub->ub_disk->queue;
	const struct ublk_param_discard *p = &ub->params.discard;

	q->limits.discard_alignment = p->discard_alignment;
	q->limits.discard_granularity = p->discard_granularity;
	blk_queue_max_discard_sectors(q, p->max_discard_sectors);
	blk_queue_max_write_zeroes_sectors(q,
			p->max_write_zeroes_sectors);
	blk_queue_max_discard_segments(q, p->max_discard_segments);
}
213
+
214
+ static int ublk_validate_params (const struct ublk_device * ub )
215
+ {
216
+ /* basic param is the only one which must be set */
217
+ if (ub -> params .types & UBLK_PARAM_TYPE_BASIC ) {
218
+ const struct ublk_param_basic * p = & ub -> params .basic ;
219
+
220
+ if (p -> logical_bs_shift > PAGE_SHIFT )
221
+ return - EINVAL ;
222
+
223
+ if (p -> logical_bs_shift > p -> physical_bs_shift )
224
+ return - EINVAL ;
225
+
226
+ if (p -> max_sectors > (ub -> dev_info .rq_max_blocks <<
227
+ (ub -> bs_shift - 9 )))
228
+ return - EINVAL ;
229
+ } else
230
+ return - EINVAL ;
231
+
232
+ if (ub -> params .types & UBLK_PARAM_TYPE_DISCARD ) {
233
+ const struct ublk_param_discard * p = & ub -> params .discard ;
234
+
235
+ /* So far, only support single segment discard */
236
+ if (p -> max_discard_sectors && p -> max_discard_segments != 1 )
237
+ return - EINVAL ;
238
+
239
+ if (!p -> discard_granularity )
240
+ return - EINVAL ;
241
+ }
242
+
243
+ return 0 ;
244
+ }
245
+
246
+ static int ublk_apply_params (struct ublk_device * ub )
247
+ {
248
+ if (!(ub -> params .types & UBLK_PARAM_TYPE_BASIC ))
249
+ return - EINVAL ;
250
+
251
+ ublk_dev_param_basic_apply (ub );
252
+
253
+ if (ub -> params .types & UBLK_PARAM_TYPE_DISCARD )
254
+ ublk_dev_param_discard_apply (ub );
255
+
256
+ return 0 ;
257
+ }
258
+
163
259
static inline bool ublk_can_use_task_work (const struct ublk_queue * ubq )
164
260
{
165
261
if (IS_BUILTIN (CONFIG_BLK_DEV_UBLK ) &&
@@ -1138,7 +1234,6 @@ static int ublk_ctrl_start_dev(struct io_uring_cmd *cmd)
1138
1234
{
1139
1235
struct ublksrv_ctrl_cmd * header = (struct ublksrv_ctrl_cmd * )cmd -> cmd ;
1140
1236
int ublksrv_pid = (int )header -> data [0 ];
1141
- unsigned long dev_blocks = header -> data [1 ];
1142
1237
struct ublk_device * ub ;
1143
1238
struct gendisk * disk ;
1144
1239
int ret = - EINVAL ;
@@ -1161,10 +1256,6 @@ static int ublk_ctrl_start_dev(struct io_uring_cmd *cmd)
1161
1256
goto out_unlock ;
1162
1257
}
1163
1258
1164
- /* We may get disk size updated */
1165
- if (dev_blocks )
1166
- ub -> dev_info .dev_blocks = dev_blocks ;
1167
-
1168
1259
disk = blk_mq_alloc_disk (& ub -> tag_set , ub );
1169
1260
if (IS_ERR (disk )) {
1170
1261
ret = PTR_ERR (disk );
@@ -1174,19 +1265,13 @@ static int ublk_ctrl_start_dev(struct io_uring_cmd *cmd)
1174
1265
disk -> fops = & ub_fops ;
1175
1266
disk -> private_data = ub ;
1176
1267
1177
- blk_queue_logical_block_size (disk -> queue , ub -> dev_info .block_size );
1178
- blk_queue_physical_block_size (disk -> queue , ub -> dev_info .block_size );
1179
- blk_queue_io_min (disk -> queue , ub -> dev_info .block_size );
1180
- blk_queue_max_hw_sectors (disk -> queue ,
1181
- ub -> dev_info .rq_max_blocks << (ub -> bs_shift - 9 ));
1182
- disk -> queue -> limits .discard_granularity = PAGE_SIZE ;
1183
- blk_queue_max_discard_sectors (disk -> queue , UINT_MAX >> 9 );
1184
- blk_queue_max_write_zeroes_sectors (disk -> queue , UINT_MAX >> 9 );
1185
-
1186
- set_capacity (disk , ub -> dev_info .dev_blocks << (ub -> bs_shift - 9 ));
1187
-
1188
1268
ub -> dev_info .ublksrv_pid = ublksrv_pid ;
1189
1269
ub -> ub_disk = disk ;
1270
+
1271
+ ret = ublk_apply_params (ub );
1272
+ if (ret )
1273
+ goto out_put_disk ;
1274
+
1190
1275
get_device (& ub -> cdev_dev );
1191
1276
ret = add_disk (disk );
1192
1277
if (ret ) {
@@ -1195,11 +1280,13 @@ static int ublk_ctrl_start_dev(struct io_uring_cmd *cmd)
1195
1280
* called in case of add_disk failure.
1196
1281
*/
1197
1282
ublk_put_device (ub );
1198
- put_disk (disk );
1199
- goto out_unlock ;
1283
+ goto out_put_disk ;
1200
1284
}
1201
1285
set_bit (UB_STATE_USED , & ub -> state );
1202
1286
ub -> dev_info .state = UBLK_S_DEV_LIVE ;
1287
+ out_put_disk :
1288
+ if (ret )
1289
+ put_disk (disk );
1203
1290
out_unlock :
1204
1291
mutex_unlock (& ub -> mutex );
1205
1292
ublk_put_device (ub );
@@ -1447,6 +1534,82 @@ static int ublk_ctrl_get_dev_info(struct io_uring_cmd *cmd)
1447
1534
return ret ;
1448
1535
}
1449
1536
1537
+ static int ublk_ctrl_get_params (struct io_uring_cmd * cmd )
1538
+ {
1539
+ struct ublksrv_ctrl_cmd * header = (struct ublksrv_ctrl_cmd * )cmd -> cmd ;
1540
+ void __user * argp = (void __user * )(unsigned long )header -> addr ;
1541
+ struct ublk_params_header ph ;
1542
+ struct ublk_device * ub ;
1543
+ int ret ;
1544
+
1545
+ if (header -> len <= sizeof (ph ) || !header -> addr )
1546
+ return - EINVAL ;
1547
+
1548
+ if (copy_from_user (& ph , argp , sizeof (ph )))
1549
+ return - EFAULT ;
1550
+
1551
+ if (ph .len > header -> len || !ph .len )
1552
+ return - EINVAL ;
1553
+
1554
+ if (ph .len > sizeof (struct ublk_params ))
1555
+ ph .len = sizeof (struct ublk_params );
1556
+
1557
+ ub = ublk_get_device_from_id (header -> dev_id );
1558
+ if (!ub )
1559
+ return - EINVAL ;
1560
+
1561
+ mutex_lock (& ub -> mutex );
1562
+ if (copy_to_user (argp , & ub -> params , ph .len ))
1563
+ ret = - EFAULT ;
1564
+ else
1565
+ ret = 0 ;
1566
+ mutex_unlock (& ub -> mutex );
1567
+
1568
+ ublk_put_device (ub );
1569
+ return ret ;
1570
+ }
1571
+
1572
+ static int ublk_ctrl_set_params (struct io_uring_cmd * cmd )
1573
+ {
1574
+ struct ublksrv_ctrl_cmd * header = (struct ublksrv_ctrl_cmd * )cmd -> cmd ;
1575
+ void __user * argp = (void __user * )(unsigned long )header -> addr ;
1576
+ struct ublk_params_header ph ;
1577
+ struct ublk_device * ub ;
1578
+ int ret = - EFAULT ;
1579
+
1580
+ if (header -> len <= sizeof (ph ) || !header -> addr )
1581
+ return - EINVAL ;
1582
+
1583
+ if (copy_from_user (& ph , argp , sizeof (ph )))
1584
+ return - EFAULT ;
1585
+
1586
+ if (ph .len > header -> len || !ph .len || !ph .types )
1587
+ return - EINVAL ;
1588
+
1589
+ if (ph .len > sizeof (struct ublk_params ))
1590
+ ph .len = sizeof (struct ublk_params );
1591
+
1592
+ ub = ublk_get_device_from_id (header -> dev_id );
1593
+ if (!ub )
1594
+ return - EINVAL ;
1595
+
1596
+ /* parameters can only be changed when device isn't live */
1597
+ mutex_lock (& ub -> mutex );
1598
+ if (ub -> dev_info .state == UBLK_S_DEV_LIVE ) {
1599
+ ret = - EACCES ;
1600
+ } else if (copy_from_user (& ub -> params , argp , ph .len )) {
1601
+ ret = - EFAULT ;
1602
+ } else {
1603
+ /* clear all we don't support yet */
1604
+ ub -> params .types &= UBLK_PARAM_TYPE_ALL ;
1605
+ ret = ublk_validate_params (ub );
1606
+ }
1607
+ mutex_unlock (& ub -> mutex );
1608
+ ublk_put_device (ub );
1609
+
1610
+ return ret ;
1611
+ }
1612
+
1450
1613
static int ublk_ctrl_uring_cmd (struct io_uring_cmd * cmd ,
1451
1614
unsigned int issue_flags )
1452
1615
{
@@ -1482,6 +1645,12 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
1482
1645
case UBLK_CMD_GET_QUEUE_AFFINITY :
1483
1646
ret = ublk_ctrl_get_queue_affinity (cmd );
1484
1647
break ;
1648
+ case UBLK_CMD_GET_PARAMS :
1649
+ ret = ublk_ctrl_get_params (cmd );
1650
+ break ;
1651
+ case UBLK_CMD_SET_PARAMS :
1652
+ ret = ublk_ctrl_set_params (cmd );
1653
+ break ;
1485
1654
default :
1486
1655
break ;
1487
1656
}
0 commit comments