Commit dfdcbf1

Merge tag 'nvme-6.1-2022-09-28' of git://git.infradead.org/nvme into for-6.1/block

Pull NVMe updates from Christoph:

"nvme updates for Linux 6.1

 - handle effects after freeing the request (Keith Busch)
 - copy firmware_rev on each init (Keith Busch)
 - restrict management ioctls to admin (Keith Busch)
 - ensure subsystem reset is single threaded (Keith Busch)
 - report the actual number of tagset maps in nvme-pci (Keith Busch)
 - small fabrics authentication fixups (Christoph Hellwig)
 - add common code for tagset allocation and freeing (Christoph Hellwig)
 - stop using the request_queue in nvmet (Christoph Hellwig)
 - set min_align_mask before calculating max_hw_sectors (Rishabh Bhatnagar)
 - send a rediscover uevent when a persistent discovery controller
   reconnects (Sagi Grimberg)
 - misc nvmet-tcp fixes (Varun Prakash, zhenwei pi)"

* tag 'nvme-6.1-2022-09-28' of git://git.infradead.org/nvme: (31 commits)
  nvmet: don't look at the request_queue in nvmet_bdev_set_limits
  nvmet: don't look at the request_queue in nvmet_bdev_zone_mgmt_emulate_all
  nvme: remove nvme_ctrl_init_connect_q
  nvme-loop: use the tagset alloc/free helpers
  nvme-loop: store the generic nvme_ctrl in set->driver_data
  nvme-loop: initialize sqsize later
  nvme-fc: use the tagset alloc/free helpers
  nvme-fc: store the generic nvme_ctrl in set->driver_data
  nvme-fc: keep ctrl->sqsize in sync with opts->queue_size
  nvme-rdma: use the tagset alloc/free helpers
  nvme-rdma: store the generic nvme_ctrl in set->driver_data
  nvme-tcp: use the tagset alloc/free helpers
  nvme-tcp: store the generic nvme_ctrl in set->driver_data
  nvme-tcp: remove the unused queue_size member in nvme_tcp_queue
  nvme: add common helpers to allocate and free tagsets
  nvme-auth: add a MAINTAINERS entry
  nvmet: add helpers to set the result field for connect commands
  nvme: improve the NVME_CONNECT_AUTHREQ* definitions
  nvmet-auth: don't try to cancel a non-initialized work_struct
  nvmet-tcp: remove nvmet_tcp_finish_cmd
  ...
2 parents: c68f4f4 + 84fe64f

File tree

18 files changed, 356 insertions(+), 435 deletions(-)

MAINTAINERS

Lines changed: 9 additions & 0 deletions
@@ -14542,6 +14542,15 @@ F:	drivers/nvme/common/
 F:	include/linux/nvme*
 F:	include/uapi/linux/nvme_ioctl.h
 
+NVM EXPRESS FABRICS AUTHENTICATION
+M:	Hannes Reinecke <[email protected]>
+L:	linux-nvme@lists.infradead.org
+S:	Supported
+F:	drivers/nvme/host/auth.c
+F:	drivers/nvme/target/auth.c
+F:	drivers/nvme/target/fabrics-cmd-auth.c
+F:	include/linux/nvme-auth.h
+
 NVM EXPRESS FC TRANSPORT DRIVERS
 M:	James Smart <[email protected]>

drivers/nvme/host/core.c

Lines changed: 120 additions & 12 deletions
@@ -1111,8 +1111,8 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
         return effects;
 }
 
-static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
-                              struct nvme_command *cmd, int status)
+void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
+                       struct nvme_command *cmd, int status)
 {
         if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
                 nvme_unfreeze(ctrl);
@@ -1148,21 +1148,16 @@ static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
                 break;
         }
 }
+EXPORT_SYMBOL_NS_GPL(nvme_passthru_end, NVME_TARGET_PASSTHRU);
 
-int nvme_execute_passthru_rq(struct request *rq)
+int nvme_execute_passthru_rq(struct request *rq, u32 *effects)
 {
         struct nvme_command *cmd = nvme_req(rq)->cmd;
         struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl;
         struct nvme_ns *ns = rq->q->queuedata;
-        u32 effects;
-        int ret;
 
-        effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
-        ret = nvme_execute_rq(rq, false);
-        if (effects) /* nothing to be done for zero cmd effects */
-                nvme_passthru_end(ctrl, effects, cmd, ret);
-
-        return ret;
+        *effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
+        return nvme_execute_rq(rq, false);
 }
 EXPORT_SYMBOL_NS_GPL(nvme_execute_passthru_rq, NVME_TARGET_PASSTHRU);
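This split is what lets callers run the command-effects handling after the request itself has been freed (the "handle effects after freeing the request" fix in this tag). A minimal sketch of the resulting caller pattern; my_execute_passthru is a hypothetical wrapper, not code from this commit, and it assumes the nvme_command storage outlives the request, as it does in the real callers:

/*
 * Hypothetical caller sketch: capture ctrl and cmd up front, free the
 * request, then run the effects work last.
 */
static int my_execute_passthru(struct request *rq)
{
        struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl;
        struct nvme_command *cmd = nvme_req(rq)->cmd;
        u32 effects;
        int status;

        status = nvme_execute_passthru_rq(rq, &effects);
        blk_mq_free_request(rq);
        if (effects) /* nothing to be done for zero cmd effects */
                nvme_passthru_end(ctrl, effects, cmd, status);
        return status;
}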

@@ -2898,7 +2893,6 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
         nvme_init_subnqn(subsys, ctrl, id);
         memcpy(subsys->serial, id->sn, sizeof(subsys->serial));
         memcpy(subsys->model, id->mn, sizeof(subsys->model));
-        memcpy(subsys->firmware_rev, id->fr, sizeof(subsys->firmware_rev));
         subsys->vendor_id = le16_to_cpu(id->vid);
         subsys->cmic = id->cmic;
@@ -3117,6 +3111,8 @@ static int nvme_init_identify(struct nvme_ctrl *ctrl)
                         ctrl->quirks |= core_quirks[i].quirks;
                 }
         }
+        memcpy(ctrl->subsys->firmware_rev, id->fr,
+               sizeof(ctrl->subsys->firmware_rev));
 
         if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) {
                 dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n");
@@ -4800,6 +4796,108 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
 }
 EXPORT_SYMBOL_GPL(nvme_complete_async_event);
 
+int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+                const struct blk_mq_ops *ops, unsigned int flags,
+                unsigned int cmd_size)
+{
+        int ret;
+
+        memset(set, 0, sizeof(*set));
+        set->ops = ops;
+        set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
+        if (ctrl->ops->flags & NVME_F_FABRICS)
+                set->reserved_tags = NVMF_RESERVED_TAGS;
+        set->numa_node = ctrl->numa_node;
+        set->flags = flags;
+        set->cmd_size = cmd_size;
+        set->driver_data = ctrl;
+        set->nr_hw_queues = 1;
+        set->timeout = NVME_ADMIN_TIMEOUT;
+        ret = blk_mq_alloc_tag_set(set);
+        if (ret)
+                return ret;
+
+        ctrl->admin_q = blk_mq_init_queue(set);
+        if (IS_ERR(ctrl->admin_q)) {
+                ret = PTR_ERR(ctrl->admin_q);
+                goto out_free_tagset;
+        }
+
+        if (ctrl->ops->flags & NVME_F_FABRICS) {
+                ctrl->fabrics_q = blk_mq_init_queue(set);
+                if (IS_ERR(ctrl->fabrics_q)) {
+                        ret = PTR_ERR(ctrl->fabrics_q);
+                        goto out_cleanup_admin_q;
+                }
+        }
+
+        ctrl->admin_tagset = set;
+        return 0;
+
+out_cleanup_admin_q:
+        blk_mq_destroy_queue(ctrl->fabrics_q);
+out_free_tagset:
+        blk_mq_free_tag_set(ctrl->admin_tagset);
+        return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_alloc_admin_tag_set);
+
+void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl)
+{
+        blk_mq_destroy_queue(ctrl->admin_q);
+        if (ctrl->ops->flags & NVME_F_FABRICS)
+                blk_mq_destroy_queue(ctrl->fabrics_q);
+        blk_mq_free_tag_set(ctrl->admin_tagset);
+}
+EXPORT_SYMBOL_GPL(nvme_remove_admin_tag_set);
+
+int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+                const struct blk_mq_ops *ops, unsigned int flags,
+                unsigned int cmd_size)
+{
+        int ret;
+
+        memset(set, 0, sizeof(*set));
+        set->ops = ops;
+        set->queue_depth = ctrl->sqsize + 1;
+        set->reserved_tags = NVMF_RESERVED_TAGS;
+        set->numa_node = ctrl->numa_node;
+        set->flags = flags;
+        set->cmd_size = cmd_size,
+        set->driver_data = ctrl;
+        set->nr_hw_queues = ctrl->queue_count - 1;
+        set->timeout = NVME_IO_TIMEOUT;
+        if (ops->map_queues)
+                set->nr_maps = ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
+        ret = blk_mq_alloc_tag_set(set);
+        if (ret)
+                return ret;
+
+        if (ctrl->ops->flags & NVME_F_FABRICS) {
+                ctrl->connect_q = blk_mq_init_queue(set);
+                if (IS_ERR(ctrl->connect_q)) {
+                        ret = PTR_ERR(ctrl->connect_q);
+                        goto out_free_tag_set;
+                }
+        }
+
+        ctrl->tagset = set;
+        return 0;
+
+out_free_tag_set:
+        blk_mq_free_tag_set(set);
+        return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_alloc_io_tag_set);
+
+void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl)
+{
+        if (ctrl->ops->flags & NVME_F_FABRICS)
+                blk_mq_destroy_queue(ctrl->connect_q);
+        blk_mq_free_tag_set(ctrl->tagset);
+}
+EXPORT_SYMBOL_GPL(nvme_remove_io_tag_set);
+
 void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
 {
         nvme_mpath_stop(ctrl);
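
For reference, the intended caller side of the helpers added above, loosely modeled on the per-transport conversions merged in this tag (nvme-tcp, nvme-rdma, nvme-fc, nvme-loop). The ops structures, flag choices, request type, and function names below are hypothetical placeholders, not symbols from any real driver:

/*
 * Sketch of a fabrics transport using the common tagset helpers.
 * my_admin_mq_ops, my_io_mq_ops and struct my_transport_request are
 * made-up names; see the nvme-tcp and nvme-rdma conversions in this
 * tag for the real call sites.
 */
static struct blk_mq_tag_set my_admin_tag_set;
static struct blk_mq_tag_set my_io_tag_set;

static int my_transport_setup_tagsets(struct nvme_ctrl *ctrl)
{
        int ret;

        /* also creates ctrl->admin_q, plus ctrl->fabrics_q for fabrics */
        ret = nvme_alloc_admin_tag_set(ctrl, &my_admin_tag_set,
                        &my_admin_mq_ops, BLK_MQ_F_BLOCKING,
                        sizeof(struct my_transport_request));
        if (ret)
                return ret;

        /* also creates ctrl->connect_q for fabrics controllers */
        ret = nvme_alloc_io_tag_set(ctrl, &my_io_tag_set, &my_io_mq_ops,
                        BLK_MQ_F_SHOULD_MERGE,
                        sizeof(struct my_transport_request));
        if (ret)
                nvme_remove_admin_tag_set(ctrl);
        return ret;
}

static void my_transport_remove_tagsets(struct nvme_ctrl *ctrl)
{
        nvme_remove_io_tag_set(ctrl);
        nvme_remove_admin_tag_set(ctrl);
}

Both helpers store the generic nvme_ctrl in set->driver_data, which is what the per-transport "store the generic nvme_ctrl in set->driver_data" patches in this series build on.
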
@@ -4819,6 +4917,16 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl)
 
         nvme_enable_aen(ctrl);
 
+        /*
+         * persistent discovery controllers need to send indication to userspace
+         * to re-read the discovery log page to learn about possible changes
+         * that were missed. We identify persistent discovery controllers by
+         * checking that they started once before, hence are reconnecting back.
+         */
+        if (test_and_set_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags) &&
+            nvme_discovery_ctrl(ctrl))
+                nvme_change_uevent(ctrl, "NVME_EVENT=rediscover");
+
         if (ctrl->queue_count > 1) {
                 nvme_queue_scan(ctrl);
                 nvme_start_queues(ctrl);
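
The rediscover uevent emitted here is aimed at a userspace discovery agent; in practice nvme-cli's udev machinery consumes it and re-reads the discovery log page. Purely as an illustration of the contract, a minimal standalone libudev listener (build with -ludev; error handling omitted); nothing below ships with this commit:

#include <libudev.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        struct udev *udev = udev_new();
        struct udev_monitor *mon = udev_monitor_new_from_netlink(udev, "udev");
        struct pollfd pfd;

        /* nvme controllers send their uevents in the "nvme" subsystem */
        udev_monitor_filter_add_match_subsystem_devtype(mon, "nvme", NULL);
        udev_monitor_enable_receiving(mon);

        pfd.fd = udev_monitor_get_fd(mon);
        pfd.events = POLLIN;
        while (poll(&pfd, 1, -1) > 0) {
                struct udev_device *dev = udev_monitor_receive_device(mon);
                const char *ev;

                if (!dev)
                        continue;
                ev = udev_device_get_property_value(dev, "NVME_EVENT");
                if (ev && !strcmp(ev, "rediscover"))
                        printf("%s reconnected: re-read the discovery log page\n",
                               udev_device_get_sysname(dev));
                udev_device_unref(dev);
        }
        udev_monitor_unref(mon);
        udev_unref(udev);
        return 0;
}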
