@@ -493,13 +493,15 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 	d.timeout_ms = READ_ONCE(cmd->timeout_ms);
 
 	if (d.data_len && (ioucmd->flags & IORING_URING_CMD_FIXED)) {
-		/* fixedbufs is only for non-vectored io */
-		if (vec)
-			return -EINVAL;
+		int ddir = nvme_is_write(&c) ? WRITE : READ;
 
-		ret = io_uring_cmd_import_fixed(d.addr, d.data_len,
-			nvme_is_write(&c) ? WRITE : READ, &iter, ioucmd,
-			issue_flags);
+		if (vec)
+			ret = io_uring_cmd_import_fixed_vec(ioucmd,
+					u64_to_user_ptr(d.addr), d.data_len,
+					ddir, &iter, issue_flags);
+		else
+			ret = io_uring_cmd_import_fixed(d.addr, d.data_len,
+					ddir, &iter, ioucmd, issue_flags);
 		if (ret < 0)
 			return ret;
 
@@ -521,7 +523,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 	if (d.data_len) {
 		ret = nvme_map_user_request(req, d.addr, d.data_len,
 				nvme_to_user_ptr(d.metadata), d.metadata_len,
-				map_iter, vec);
+				map_iter, vec ? NVME_IOCTL_VEC : 0);
 		if (ret)
 			goto out_free_req;
 	}
@@ -727,7 +729,7 @@ int nvme_ns_head_ioctl(struct block_device *bdev, blk_mode_t mode,
 
 	/*
 	 * Handle ioctls that apply to the controller instead of the namespace
-	 * seperately and drop the ns SRCU reference early. This avoids a
+	 * separately and drop the ns SRCU reference early. This avoids a
 	 * deadlock when deleting namespaces using the passthrough interface.
 	 */
 	if (is_ctrl_ioctl(cmd))
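
For reference, a minimal userspace sketch of the combination this change enables: a vectored NVMe passthrough read (NVME_URING_CMD_IO_VEC) over a registered buffer (IORING_URING_CMD_FIXED), which the old code rejected with -EINVAL. The device path, namespace ID, LBA size, and buffer layout below are illustrative assumptions, the expectation that the iovec entries must fall inside the buffer registered at sqe->buf_index is inferred from this patch rather than from documentation, and error handling is trimmed.

/*
 * Sketch only: vectored, fixed-buffer NVMe passthrough read via io_uring.
 * Device path, nsid, LBA math and lengths are placeholders.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>
#include <liburing.h>
#include <linux/nvme_ioctl.h>

int main(void)
{
	struct io_uring_params p = {
		/* NVMe passthrough requires big SQEs and big CQEs */
		.flags = IORING_SETUP_SQE128 | IORING_SETUP_CQE32,
	};
	struct io_uring ring;
	int fd = open("/dev/ng0n1", O_RDWR);	/* NVMe generic char device */

	if (fd < 0 || io_uring_queue_init_params(8, &ring, &p) < 0)
		return 1;

	/* One registered buffer; the iovecs below carve regions out of it. */
	void *buf;
	if (posix_memalign(&buf, 4096, 64 * 1024))
		return 1;
	struct iovec reg = { .iov_base = buf, .iov_len = 64 * 1024 };
	if (io_uring_register_buffers(&ring, &reg, 1) < 0)
		return 1;

	/*
	 * Assumption drawn from the patch: with IORING_URING_CMD_FIXED the
	 * iovec addresses are resolved against the registered buffer chosen
	 * by sqe->buf_index, so they must point inside that buffer.
	 */
	struct iovec vecs[2] = {
		{ .iov_base = buf,                 .iov_len = 8192 },
		{ .iov_base = (char *)buf + 16384, .iov_len = 8192 },
	};

	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
	memset(sqe, 0, 2 * sizeof(*sqe));		/* full 128-byte SQE */
	sqe->opcode = IORING_OP_URING_CMD;
	sqe->fd = fd;
	sqe->cmd_op = NVME_URING_CMD_IO_VEC;		/* vectored passthrough */
	sqe->uring_cmd_flags = IORING_URING_CMD_FIXED;	/* use registered buffer */
	sqe->buf_index = 0;				/* registered buffer slot */
	sqe->user_data = 1;

	struct nvme_uring_cmd *cmd = (struct nvme_uring_cmd *)sqe->cmd;
	cmd->opcode = 0x02;				/* NVMe read */
	cmd->nsid = 1;					/* placeholder namespace */
	cmd->addr = (__u64)(uintptr_t)vecs;		/* iovec array... */
	cmd->data_len = 2;				/* ...and its segment count */
	cmd->cdw10 = 0;					/* starting LBA, low 32 bits */
	cmd->cdw11 = 0;					/* starting LBA, high 32 bits */
	cmd->cdw12 = 16384 / 512 - 1;			/* 0-based LBA count, 512B LBAs assumed */

	io_uring_submit(&ring);

	struct io_uring_cqe *cqe;
	if (io_uring_wait_cqe(&ring, &cqe) == 0) {
		printf("passthrough completed: res=%d\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}

The ring setup with IORING_SETUP_SQE128 and IORING_SETUP_CQE32 is unrelated to this patch but necessary for NVMe passthrough in general, since the nvme_uring_cmd is carried in the extended SQE and the command result is returned in the extended CQE.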