@@ -349,6 +349,45 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
 	}
 }
 
+static struct nvmf_connect_data *nvmf_connect_data_prep(struct nvme_ctrl *ctrl,
+		u16 cntlid)
+{
+	struct nvmf_connect_data *data;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return NULL;
+
+	uuid_copy(&data->hostid, &ctrl->opts->host->id);
+	data->cntlid = cpu_to_le16(cntlid);
+	strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
+	strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
+
+	return data;
+}
+
+static void nvmf_connect_cmd_prep(struct nvme_ctrl *ctrl, u16 qid,
+		struct nvme_command *cmd)
+{
+	cmd->connect.opcode = nvme_fabrics_command;
+	cmd->connect.fctype = nvme_fabrics_type_connect;
+	cmd->connect.qid = cpu_to_le16(qid);
+
+	if (qid) {
+		cmd->connect.sqsize = cpu_to_le16(ctrl->sqsize);
+	} else {
+		cmd->connect.sqsize = cpu_to_le16(NVME_AQ_DEPTH - 1);
+
+		/*
+		 * set keep-alive timeout in seconds granularity (ms * 1000)
+		 */
+		cmd->connect.kato = cpu_to_le32(ctrl->kato * 1000);
+	}
+
+	if (ctrl->opts->disable_sqflow)
+		cmd->connect.cattr |= NVME_CONNECT_DISABLE_SQFLOW;
+}
+
 /**
  * nvmf_connect_admin_queue() - NVMe Fabrics Admin Queue "Connect"
  *	API function.
@@ -377,28 +416,12 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
 	int ret;
 	u32 result;
 
-	cmd.connect.opcode = nvme_fabrics_command;
-	cmd.connect.fctype = nvme_fabrics_type_connect;
-	cmd.connect.qid = 0;
-	cmd.connect.sqsize = cpu_to_le16(NVME_AQ_DEPTH - 1);
-
-	/*
-	 * Set keep-alive timeout in seconds granularity (ms * 1000)
-	 */
-	cmd.connect.kato = cpu_to_le32(ctrl->kato * 1000);
-
-	if (ctrl->opts->disable_sqflow)
-		cmd.connect.cattr |= NVME_CONNECT_DISABLE_SQFLOW;
+	nvmf_connect_cmd_prep(ctrl, 0, &cmd);
 
-	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	data = nvmf_connect_data_prep(ctrl, 0xffff);
 	if (!data)
 		return -ENOMEM;
 
-	uuid_copy(&data->hostid, &ctrl->opts->host->id);
-	data->cntlid = cpu_to_le16(0xffff);
-	strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
-	strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
-
 	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res,
 			data, sizeof(*data), NVME_QID_ANY, 1,
 			BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
@@ -468,23 +491,12 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
 	int ret;
 	u32 result;
 
-	cmd.connect.opcode = nvme_fabrics_command;
-	cmd.connect.fctype = nvme_fabrics_type_connect;
-	cmd.connect.qid = cpu_to_le16(qid);
-	cmd.connect.sqsize = cpu_to_le16(ctrl->sqsize);
-
-	if (ctrl->opts->disable_sqflow)
-		cmd.connect.cattr |= NVME_CONNECT_DISABLE_SQFLOW;
+	nvmf_connect_cmd_prep(ctrl, qid, &cmd);
 
-	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	data = nvmf_connect_data_prep(ctrl, ctrl->cntlid);
 	if (!data)
 		return -ENOMEM;
 
-	uuid_copy(&data->hostid, &ctrl->opts->host->id);
-	data->cntlid = cpu_to_le16(ctrl->cntlid);
-	strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
-	strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
-
 	ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res,
 			data, sizeof(*data), qid, 1,
 			BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
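
Since nvmf_connect_data_prep() hands back a kzalloc()'d buffer, ownership of the connect data stays with the caller, which is expected to free it after the synchronous submit; that cleanup lies outside these hunks. A minimal sketch of the assumed caller shape (the function name example_connect is hypothetical, and handling of the command result is elided):

static int example_connect(struct nvme_ctrl *ctrl, u16 qid)
{
	struct nvmf_connect_data *data;
	struct nvme_command cmd = { };
	union nvme_result res = { };
	int ret;

	nvmf_connect_cmd_prep(ctrl, qid, &cmd);		/* fill the connect SQE */

	data = nvmf_connect_data_prep(ctrl, ctrl->cntlid);
	if (!data)
		return -ENOMEM;				/* nothing allocated yet to free */

	ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res,
			data, sizeof(*data), qid, 1,
			BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
	/* ... status/result handling elided ... */

	kfree(data);					/* caller owns the kzalloc()'d payload */
	return ret;
}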