24
24
/* The maximum number of sg elements that fit into a virtqueue */
25
25
#define VIRTIO_BLK_MAX_SG_ELEMS 32768
26
26
27
+ #ifdef CONFIG_ARCH_NO_SG_CHAIN
28
+ #define VIRTIO_BLK_INLINE_SG_CNT 0
29
+ #else
30
+ #define VIRTIO_BLK_INLINE_SG_CNT 2
31
+ #endif
32
+
33
+ static unsigned int num_request_queues ;
34
+ module_param (num_request_queues , uint , 0644 );
35
+ MODULE_PARM_DESC (num_request_queues ,
36
+ "Limit the number of request queues to use for blk device. "
37
+ "0 for no limit. "
38
+ "Values > nr_cpu_ids truncated to nr_cpu_ids." );
39
+
27
40
static int major ;
28
41
static DEFINE_IDA (vd_index_ida );
29
42
@@ -77,6 +90,7 @@ struct virtio_blk {
77
90
struct virtblk_req {
78
91
struct virtio_blk_outhdr out_hdr ;
79
92
u8 status ;
93
+ struct sg_table sg_table ;
80
94
struct scatterlist sg [];
81
95
};
82
96
@@ -162,12 +176,93 @@ static int virtblk_setup_discard_write_zeroes(struct request *req, bool unmap)
162
176
return 0 ;
163
177
}
164
178
165
- static inline void virtblk_request_done (struct request * req )
179
+ static void virtblk_unmap_data (struct request * req , struct virtblk_req * vbr )
166
180
{
167
- struct virtblk_req * vbr = blk_mq_rq_to_pdu (req );
181
+ if (blk_rq_nr_phys_segments (req ))
182
+ sg_free_table_chained (& vbr -> sg_table ,
183
+ VIRTIO_BLK_INLINE_SG_CNT );
184
+ }
185
+
186
+ static int virtblk_map_data (struct blk_mq_hw_ctx * hctx , struct request * req ,
187
+ struct virtblk_req * vbr )
188
+ {
189
+ int err ;
190
+
191
+ if (!blk_rq_nr_phys_segments (req ))
192
+ return 0 ;
193
+
194
+ vbr -> sg_table .sgl = vbr -> sg ;
195
+ err = sg_alloc_table_chained (& vbr -> sg_table ,
196
+ blk_rq_nr_phys_segments (req ),
197
+ vbr -> sg_table .sgl ,
198
+ VIRTIO_BLK_INLINE_SG_CNT );
199
+ if (unlikely (err ))
200
+ return - ENOMEM ;
168
201
202
+ return blk_rq_map_sg (hctx -> queue , req , vbr -> sg_table .sgl );
203
+ }
204
+
205
+ static void virtblk_cleanup_cmd (struct request * req )
206
+ {
169
207
if (req -> rq_flags & RQF_SPECIAL_PAYLOAD )
170
208
kfree (bvec_virt (& req -> special_vec ));
209
+ }
210
+
211
+ static blk_status_t virtblk_setup_cmd (struct virtio_device * vdev ,
212
+ struct request * req ,
213
+ struct virtblk_req * vbr )
214
+ {
215
+ bool unmap = false;
216
+ u32 type ;
217
+
218
+ vbr -> out_hdr .sector = 0 ;
219
+
220
+ switch (req_op (req )) {
221
+ case REQ_OP_READ :
222
+ type = VIRTIO_BLK_T_IN ;
223
+ vbr -> out_hdr .sector = cpu_to_virtio64 (vdev ,
224
+ blk_rq_pos (req ));
225
+ break ;
226
+ case REQ_OP_WRITE :
227
+ type = VIRTIO_BLK_T_OUT ;
228
+ vbr -> out_hdr .sector = cpu_to_virtio64 (vdev ,
229
+ blk_rq_pos (req ));
230
+ break ;
231
+ case REQ_OP_FLUSH :
232
+ type = VIRTIO_BLK_T_FLUSH ;
233
+ break ;
234
+ case REQ_OP_DISCARD :
235
+ type = VIRTIO_BLK_T_DISCARD ;
236
+ break ;
237
+ case REQ_OP_WRITE_ZEROES :
238
+ type = VIRTIO_BLK_T_WRITE_ZEROES ;
239
+ unmap = !(req -> cmd_flags & REQ_NOUNMAP );
240
+ break ;
241
+ case REQ_OP_DRV_IN :
242
+ type = VIRTIO_BLK_T_GET_ID ;
243
+ break ;
244
+ default :
245
+ WARN_ON_ONCE (1 );
246
+ return BLK_STS_IOERR ;
247
+ }
248
+
249
+ vbr -> out_hdr .type = cpu_to_virtio32 (vdev , type );
250
+ vbr -> out_hdr .ioprio = cpu_to_virtio32 (vdev , req_get_ioprio (req ));
251
+
252
+ if (type == VIRTIO_BLK_T_DISCARD || type == VIRTIO_BLK_T_WRITE_ZEROES ) {
253
+ if (virtblk_setup_discard_write_zeroes (req , unmap ))
254
+ return BLK_STS_RESOURCE ;
255
+ }
256
+
257
+ return 0 ;
258
+ }
259
+
/*
 * Completion handler (blk_mq_ops.complete): tear down the data mapping and
 * any command payload, then complete the request with the device status.
 */
static inline void virtblk_request_done(struct request *req)
{
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);

	virtblk_unmap_data(req, vbr);
	virtblk_cleanup_cmd(req);
	blk_mq_end_request(req, virtblk_result(vbr));
}
173
268
@@ -223,59 +318,26 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
223
318
unsigned long flags ;
224
319
unsigned int num ;
225
320
int qid = hctx -> queue_num ;
226
- int err ;
227
321
bool notify = false;
228
- bool unmap = false ;
229
- u32 type ;
322
+ blk_status_t status ;
323
+ int err ;
230
324
231
325
BUG_ON (req -> nr_phys_segments + 2 > vblk -> sg_elems );
232
326
233
- switch (req_op (req )) {
234
- case REQ_OP_READ :
235
- case REQ_OP_WRITE :
236
- type = 0 ;
237
- break ;
238
- case REQ_OP_FLUSH :
239
- type = VIRTIO_BLK_T_FLUSH ;
240
- break ;
241
- case REQ_OP_DISCARD :
242
- type = VIRTIO_BLK_T_DISCARD ;
243
- break ;
244
- case REQ_OP_WRITE_ZEROES :
245
- type = VIRTIO_BLK_T_WRITE_ZEROES ;
246
- unmap = !(req -> cmd_flags & REQ_NOUNMAP );
247
- break ;
248
- case REQ_OP_DRV_IN :
249
- type = VIRTIO_BLK_T_GET_ID ;
250
- break ;
251
- default :
252
- WARN_ON_ONCE (1 );
253
- return BLK_STS_IOERR ;
254
- }
255
-
256
- vbr -> out_hdr .type = cpu_to_virtio32 (vblk -> vdev , type );
257
- vbr -> out_hdr .sector = type ?
258
- 0 : cpu_to_virtio64 (vblk -> vdev , blk_rq_pos (req ));
259
- vbr -> out_hdr .ioprio = cpu_to_virtio32 (vblk -> vdev , req_get_ioprio (req ));
327
+ status = virtblk_setup_cmd (vblk -> vdev , req , vbr );
328
+ if (unlikely (status ))
329
+ return status ;
260
330
261
331
blk_mq_start_request (req );
262
332
263
- if (type == VIRTIO_BLK_T_DISCARD || type == VIRTIO_BLK_T_WRITE_ZEROES ) {
264
- err = virtblk_setup_discard_write_zeroes (req , unmap );
265
- if (err )
266
- return BLK_STS_RESOURCE ;
267
- }
268
-
269
- num = blk_rq_map_sg (hctx -> queue , req , vbr -> sg );
270
- if (num ) {
271
- if (rq_data_dir (req ) == WRITE )
272
- vbr -> out_hdr .type |= cpu_to_virtio32 (vblk -> vdev , VIRTIO_BLK_T_OUT );
273
- else
274
- vbr -> out_hdr .type |= cpu_to_virtio32 (vblk -> vdev , VIRTIO_BLK_T_IN );
333
+ num = virtblk_map_data (hctx , req , vbr );
334
/*
 * NOTE(review): the surrounding context (see the declaration hunk above)
 * shows `num` declared as `unsigned int`, but virtblk_map_data() returns a
 * negative int (-ENOMEM) on failure. If `num` really is unsigned, the
 * `num < 0` test below is always false and mapping failures are silently
 * passed on to virtblk_add_req() as a huge segment count. Confirm the
 * declaration is changed to plain `int num;` in this commit.
 */
+ if (unlikely (num < 0 )) {
335
+ virtblk_cleanup_cmd (req );
336
+ return BLK_STS_RESOURCE ;
275
337
+ }
276
338
277
339
spin_lock_irqsave (& vblk -> vqs [qid ].lock , flags );
278
- err = virtblk_add_req (vblk -> vqs [qid ].vq , vbr , vbr -> sg , num );
340
+ err = virtblk_add_req (vblk -> vqs [qid ].vq , vbr , vbr -> sg_table . sgl , num );
279
341
if (err ) {
280
342
virtqueue_kick (vblk -> vqs [qid ].vq );
281
343
/* Don't stop the queue if -ENOMEM: we may have failed to
@@ -284,6 +346,8 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
284
346
if (err == - ENOSPC )
285
347
blk_mq_stop_hw_queue (hctx );
286
348
spin_unlock_irqrestore (& vblk -> vqs [qid ].lock , flags );
349
+ virtblk_unmap_data (req , vbr );
350
+ virtblk_cleanup_cmd (req );
287
351
switch (err ) {
288
352
case - ENOSPC :
289
353
return BLK_STS_DEV_RESOURCE ;
@@ -497,8 +561,14 @@ static int init_vq(struct virtio_blk *vblk)
497
561
& num_vqs );
498
562
if (err )
499
563
num_vqs = 1 ;
564
+ if (!err && !num_vqs ) {
565
+ dev_err (& vdev -> dev , "MQ advertised but zero queues reported\n" );
566
+ return - EINVAL ;
567
+ }
500
568
501
- num_vqs = min_t (unsigned int , nr_cpu_ids , num_vqs );
569
+ num_vqs = min_t (unsigned int ,
570
+ min_not_zero (num_request_queues , nr_cpu_ids ),
571
+ num_vqs );
502
572
503
573
vblk -> vqs = kmalloc_array (num_vqs , sizeof (* vblk -> vqs ), GFP_KERNEL );
504
574
if (!vblk -> vqs )
@@ -624,7 +694,7 @@ cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
624
694
u8 writeback = virtblk_get_cache_mode (vblk -> vdev );
625
695
626
696
BUG_ON (writeback >= ARRAY_SIZE (virtblk_cache_types ));
627
- return snprintf (buf , 40 , "%s\n" , virtblk_cache_types [writeback ]);
697
+ return sysfs_emit (buf , "%s\n" , virtblk_cache_types [writeback ]);
628
698
}
629
699
630
700
static DEVICE_ATTR_RW (cache_type );
@@ -660,16 +730,6 @@ static const struct attribute_group *virtblk_attr_groups[] = {
660
730
NULL ,
661
731
};
662
732
663
- static int virtblk_init_request (struct blk_mq_tag_set * set , struct request * rq ,
664
- unsigned int hctx_idx , unsigned int numa_node )
665
- {
666
- struct virtio_blk * vblk = set -> driver_data ;
667
- struct virtblk_req * vbr = blk_mq_rq_to_pdu (rq );
668
-
669
- sg_init_table (vbr -> sg , vblk -> sg_elems );
670
- return 0 ;
671
- }
672
-
673
733
static int virtblk_map_queues (struct blk_mq_tag_set * set )
674
734
{
675
735
struct virtio_blk * vblk = set -> driver_data ;
@@ -682,7 +742,6 @@ static const struct blk_mq_ops virtio_mq_ops = {
682
742
.queue_rq = virtio_queue_rq ,
683
743
.commit_rqs = virtio_commit_rqs ,
684
744
.complete = virtblk_request_done ,
685
- .init_request = virtblk_init_request ,
686
745
.map_queues = virtblk_map_queues ,
687
746
};
688
747
@@ -762,7 +821,7 @@ static int virtblk_probe(struct virtio_device *vdev)
762
821
vblk -> tag_set .flags = BLK_MQ_F_SHOULD_MERGE ;
763
822
vblk -> tag_set .cmd_size =
764
823
sizeof (struct virtblk_req ) +
765
- sizeof (struct scatterlist ) * sg_elems ;
824
+ sizeof (struct scatterlist ) * VIRTIO_BLK_INLINE_SG_CNT ;
766
825
vblk -> tag_set .driver_data = vblk ;
767
826
vblk -> tag_set .nr_hw_queues = vblk -> num_vqs ;
768
827
@@ -990,6 +1049,7 @@ static struct virtio_driver virtio_blk = {
990
1049
.feature_table_size = ARRAY_SIZE (features ),
991
1050
.feature_table_legacy = features_legacy ,
992
1051
.feature_table_size_legacy = ARRAY_SIZE (features_legacy ),
1052
+ .suppress_used_validation = true,
993
1053
.driver .name = KBUILD_MODNAME ,
994
1054
.driver .owner = THIS_MODULE ,
995
1055
.id_table = id_table ,
0 commit comments