Skip to content

Commit d459b16

Browse files
committed
Merge tag 'nvme-6.17-2025-07-31' of git://git.infradead.org/nvme into block-6.17
Pull NVMe changes from Christoph: "- add support for getting the FDP feature in fabrics passthru path (Nitesh Shetty) - add capability to connect to an administrative controller (Kamaljit Singh) - fix a leak on sgl setup error (Keith Busch) - initialize discovery subsys after debugfs is initialized (Mohamed Khalfella) - fix various comment typos (Bjorn Helgaas) - remove unneeded semicolons (Jiapeng Chong)" * tag 'nvme-6.17-2025-07-31' of git://git.infradead.org/nvme: nvme: fix various comment typos nvme-auth: remove unneeded semicolon nvme-pci: fix leak on sgl setup error nvmet: initialize discovery subsys after debugfs is initialized nvme: add capability to connect to an administrative controller nvmet: add support for FDP in fabrics passthru path
2 parents 04225d1 + 367c240 commit d459b16

File tree

9 files changed

+36
-18
lines changed

9 files changed

+36
-18
lines changed

drivers/nvme/host/auth.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -742,7 +742,7 @@ static int nvme_auth_secure_concat(struct nvme_ctrl *ctrl,
742742
"%s: qid %d failed to generate digest, error %d\n",
743743
__func__, chap->qid, ret);
744744
goto out_free_psk;
745-
};
745+
}
746746
dev_dbg(ctrl->device, "%s: generated digest %s\n",
747747
__func__, digest);
748748
ret = nvme_auth_derive_tls_psk(chap->hash_id, psk, psk_len,
@@ -752,7 +752,7 @@ static int nvme_auth_secure_concat(struct nvme_ctrl *ctrl,
752752
"%s: qid %d failed to derive TLS psk, error %d\n",
753753
__func__, chap->qid, ret);
754754
goto out_free_digest;
755-
};
755+
}
756756

757757
tls_key = nvme_tls_psk_refresh(ctrl->opts->keyring,
758758
ctrl->opts->host->nqn,

drivers/nvme/host/core.c

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3158,6 +3158,11 @@ static inline bool nvme_discovery_ctrl(struct nvme_ctrl *ctrl)
31583158
return ctrl->opts && ctrl->opts->discovery_nqn;
31593159
}
31603160

3161+
static inline bool nvme_admin_ctrl(struct nvme_ctrl *ctrl)
3162+
{
3163+
return ctrl->cntrltype == NVME_CTRL_ADMIN;
3164+
}
3165+
31613166
static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
31623167
struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
31633168
{
@@ -3670,6 +3675,17 @@ int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl, bool was_suspended)
36703675
if (ret)
36713676
return ret;
36723677

3678+
if (nvme_admin_ctrl(ctrl)) {
3679+
/*
3680+
* An admin controller has one admin queue, but no I/O queues.
3681+
* Override queue_count so it only creates an admin queue.
3682+
*/
3683+
dev_dbg(ctrl->device,
3684+
"Subsystem %s is an administrative controller",
3685+
ctrl->subsys->subnqn);
3686+
ctrl->queue_count = 1;
3687+
}
3688+
36733689
ret = nvme_configure_apst(ctrl);
36743690
if (ret < 0)
36753691
return ret;

drivers/nvme/host/fc.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1363,7 +1363,7 @@ nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
13631363
* down, and the related FC-NVME Association ID and Connection IDs
13641364
* become invalid.
13651365
*
1366-
* The behavior of the fc-nvme initiator is such that it's
1366+
* The behavior of the fc-nvme initiator is such that its
13671367
* understanding of the association and connections will implicitly
13681368
* be torn down. The action is implicit as it may be due to a loss of
13691369
* connectivity with the fc-nvme target, so you may never get a
@@ -2777,7 +2777,7 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
27772777
* as WRITE ZEROES will return a non-zero rq payload_bytes yet
27782778
* there is no actual payload to be transferred.
27792779
* To get it right, key data transmission on there being 1 or
2780-
* more physical segments in the sg list. If there is no
2780+
* more physical segments in the sg list. If there are no
27812781
* physical segments, there is no payload.
27822782
*/
27832783
if (blk_rq_nr_phys_segments(rq)) {

drivers/nvme/host/pci.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -935,7 +935,7 @@ static blk_status_t nvme_pci_setup_data_sgl(struct request *req,
935935

936936
nvme_pci_sgl_set_seg(&iod->cmd.common.dptr.sgl, sgl_dma, mapped);
937937
if (unlikely(iter->status))
938-
nvme_free_sgls(req);
938+
nvme_unmap_data(req);
939939
return iter->status;
940940
}
941941

drivers/nvme/host/tcp.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2179,7 +2179,7 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
21792179

21802180
/*
21812181
* Only start IO queues for which we have allocated the tagset
2182-
* and limitted it to the available queues. On reconnects, the
2182+
* and limited it to the available queues. On reconnects, the
21832183
* queue number might have changed.
21842184
*/
21852185
nr_queues = min(ctrl->tagset->nr_hw_queues + 1, ctrl->queue_count);

drivers/nvme/target/core.c

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1960,24 +1960,24 @@ static int __init nvmet_init(void)
19601960
if (!nvmet_wq)
19611961
goto out_free_buffered_work_queue;
19621962

1963-
error = nvmet_init_discovery();
1963+
error = nvmet_init_debugfs();
19641964
if (error)
19651965
goto out_free_nvmet_work_queue;
19661966

1967-
error = nvmet_init_debugfs();
1967+
error = nvmet_init_discovery();
19681968
if (error)
1969-
goto out_exit_discovery;
1969+
goto out_exit_debugfs;
19701970

19711971
error = nvmet_init_configfs();
19721972
if (error)
1973-
goto out_exit_debugfs;
1973+
goto out_exit_discovery;
19741974

19751975
return 0;
19761976

1977-
out_exit_debugfs:
1978-
nvmet_exit_debugfs();
19791977
out_exit_discovery:
19801978
nvmet_exit_discovery();
1979+
out_exit_debugfs:
1980+
nvmet_exit_debugfs();
19811981
out_free_nvmet_work_queue:
19821982
destroy_workqueue(nvmet_wq);
19831983
out_free_buffered_work_queue:

drivers/nvme/target/fc.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -459,7 +459,7 @@ nvmet_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
459459
* down, and the related FC-NVME Association ID and Connection IDs
460460
* become invalid.
461461
*
462-
* The behavior of the fc-nvme target is such that it's
462+
* The behavior of the fc-nvme target is such that its
463463
* understanding of the association and connections will implicitly
464464
* be torn down. The action is implicit as it may be due to a loss of
465465
* connectivity with the fc-nvme host, so the target may never get a
@@ -2313,7 +2313,7 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
23132313
ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
23142314
if (ret) {
23152315
/*
2316-
* should be ok to set w/o lock as its in the thread of
2316+
* should be ok to set w/o lock as it's in the thread of
23172317
* execution (not an async timer routine) and doesn't
23182318
* contend with any clearing action
23192319
*/
@@ -2629,7 +2629,7 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
26292629
* and the api of the FC LLDD which may issue a hw command to send the
26302630
* response, but the LLDD may not get the hw completion for that command
26312631
* and upcall the nvmet_fc layer before a new command may be
2632-
* asynchronously received - its possible for a command to be received
2632+
* asynchronously received - it's possible for a command to be received
26332633
* before the LLDD and nvmet_fc have recycled the job structure. It gives
26342634
* the appearance of more commands received than fits in the sq.
26352635
* To alleviate this scenario, a temporary queue is maintained in the

drivers/nvme/target/passthru.c

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -533,6 +533,8 @@ u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
533533
case NVME_FEAT_HOST_ID:
534534
req->execute = nvmet_execute_get_features;
535535
return NVME_SC_SUCCESS;
536+
case NVME_FEAT_FDP:
537+
return nvmet_setup_passthru_command(req);
536538
default:
537539
return nvmet_passthru_get_set_features(req);
538540
}

drivers/nvme/target/rdma.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1731,7 +1731,7 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
17311731
* We registered an ib_client to handle device removal for queues,
17321732
* so we only need to handle the listening port cm_ids. In this case
17331733
* we nullify the priv to prevent double cm_id destruction and destroying
1734-
* the cm_id implicitely by returning a non-zero rc to the callout.
1734+
* the cm_id implicitly by returning a non-zero rc to the callout.
17351735
*/
17361736
static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
17371737
struct nvmet_rdma_queue *queue)
@@ -1742,7 +1742,7 @@ static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
17421742
/*
17431743
* This is a queue cm_id. we have registered
17441744
* an ib_client to handle queues removal
1745-
* so don't interfear and just return.
1745+
* so don't interfere and just return.
17461746
*/
17471747
return 0;
17481748
}
@@ -1760,7 +1760,7 @@ static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
17601760

17611761
/*
17621762
* We need to return 1 so that the core will destroy
1763-
* it's own ID. What a great API design..
1763+
* its own ID. What a great API design..
17641764
*/
17651765
return 1;
17661766
}

0 commit comments

Comments
 (0)