@@ -83,9 +83,10 @@ static struct request *nvme_alloc_user_request(struct request_queue *q,
 	return req;
 }
 
-static int nvme_map_user_request(struct request *req, void __user *ubuffer,
+static int nvme_map_user_request(struct request *req, u64 ubuffer,
 		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
-		u32 meta_seed, void **metap, bool vec)
+		u32 meta_seed, void **metap, struct io_uring_cmd *ioucmd,
+		bool vec)
 {
 	struct request_queue *q = req->q;
 	struct nvme_ns *ns = q->queuedata;
@@ -94,8 +95,8 @@ static int nvme_map_user_request(struct request *req, void __user *ubuffer,
 	void *meta = NULL;
 	int ret;
 
-	ret = blk_rq_map_user_io(req, NULL, ubuffer, bufflen, GFP_KERNEL, vec,
-			0, 0, rq_data_dir(req));
+	ret = blk_rq_map_user_io(req, NULL, nvme_to_user_ptr(ubuffer), bufflen,
+			GFP_KERNEL, vec, 0, 0, rq_data_dir(req));
 
 	if (ret)
 		goto out;
@@ -124,7 +125,7 @@ static int nvme_map_user_request(struct request *req, void __user *ubuffer,
 }
 
 static int nvme_submit_user_cmd(struct request_queue *q,
-		struct nvme_command *cmd, void __user *ubuffer,
+		struct nvme_command *cmd, u64 ubuffer,
 		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
 		u32 meta_seed, u64 *result, unsigned timeout, bool vec)
 {
@@ -142,7 +143,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
 	req->timeout = timeout;
 	if (ubuffer && bufflen) {
 		ret = nvme_map_user_request(req, ubuffer, bufflen, meta_buffer,
-				meta_len, meta_seed, &meta, vec);
+				meta_len, meta_seed, &meta, NULL, vec);
 		if (ret)
 			return ret;
 	}
@@ -226,7 +227,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 	c.rw.appmask = cpu_to_le16(io.appmask);
 
 	return nvme_submit_user_cmd(ns->queue, &c,
-			nvme_to_user_ptr(io.addr), length,
+			io.addr, length,
 			metadata, meta_len, lower_32_bits(io.slba), NULL, 0,
 			false);
 }
@@ -280,7 +281,7 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 		timeout = msecs_to_jiffies(cmd.timeout_ms);
 
 	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
-			nvme_to_user_ptr(cmd.addr), cmd.data_len,
+			cmd.addr, cmd.data_len,
 			nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
 			0, &result, timeout, false);
 
@@ -326,7 +327,7 @@ static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 		timeout = msecs_to_jiffies(cmd.timeout_ms);
 
 	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
-			nvme_to_user_ptr(cmd.addr), cmd.data_len,
+			cmd.addr, cmd.data_len,
 			nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
 			0, &cmd.result, timeout, vec);
 
@@ -512,9 +513,9 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 	req->timeout = d.timeout_ms ? msecs_to_jiffies(d.timeout_ms) : 0;
 
 	if (d.addr && d.data_len) {
-		ret = nvme_map_user_request(req, nvme_to_user_ptr(d.addr),
+		ret = nvme_map_user_request(req, d.addr,
 				d.data_len, nvme_to_user_ptr(d.metadata),
-				d.metadata_len, 0, &meta, vec);
+				d.metadata_len, 0, &meta, ioucmd, vec);
 		if (ret)
 			return ret;
 	}
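
For context, the u64 address that callers now pass straight through is converted back to a user pointer inside nvme_map_user_request() by the driver's existing nvme_to_user_ptr() helper. A rough sketch of that helper as it appears in drivers/nvme/host/ioctl.c (shown here for reference only, not as part of the diff):

/* Sketch of the existing helper relied on by this change. */
#include <linux/compat.h>	/* in_compat_syscall(), compat_uptr_t */

/*
 * Convert the integer address carried in the ioctl/uring-cmd structure back
 * into a user pointer, truncating to 32 bits for compat callers so the
 * behaviour matches a native 32-bit kernel.
 */
static void __user *nvme_to_user_ptr(uintptr_t ptrval)
{
	if (in_compat_syscall())
		ptrval = (compat_uptr_t)ptrval;
	return (void __user *)ptrval;
}

Keeping the argument as a plain u64 up to the mapping step matches the uAPI structures, which already carry the buffer address as __u64 (struct nvme_user_io.addr, struct nvme_passthru_cmd.addr, and the uring-cmd payload). The new ioucmd parameter is not used within these hunks; presumably it lets nvme_map_user_request() distinguish io_uring-backed requests (NULL for the plain ioctl paths) in a follow-up change.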