Skip to content

Commit 5a47c20

Browse files
guixinliu1995 authored and keithbusch committed
nvmet: support reservation feature
This patch implements the reservation feature, including: 1. reservation register(register, unregister and replace). 2. reservation acquire(acquire, preempt, preempt and abort). 3. reservation release(release and clear). 4. reservation report. 5. set feature and get feature of reservation notify mask. 6. get log page of reservation event. Not supported: 1. persistent reservation through power loss. Test cases: Use nvme-cli and fio to test all implemented sub features: 1. use nvme resv-register to register host a registrant or unregister or replace a new key. 2. use nvme resv-acquire to set host to the holder, and use fio to send read and write io in all reservation type. And also test preempt and "preempt and abort". 3. use nvme resv-report to show all registrants and reservation status. 4. use nvme resv-release to release all registrants. 5. use nvme get-log to get events generated by the preceding operations. In addition, make reservation configurable, one can set ns to support reservation before enable ns. The default of resv_enable is false. Signed-off-by: Guixin Liu <[email protected]> Reviewed-by: Dmitry Bogdanov <[email protected]> Reviewed-by: Christoph Hellwig <[email protected]> Tested-by: Chaitanya Kulkarni <[email protected]> Reviewed-by: Chaitanya Kulkarni <[email protected]> Signed-off-by: Keith Busch <[email protected]>
1 parent 1900e1a commit 5a47c20

File tree

8 files changed

+1329
-13
lines changed

8 files changed

+1329
-13
lines changed

drivers/nvme/target/Makefile

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -10,7 +10,7 @@ obj-$(CONFIG_NVME_TARGET_FCLOOP) += nvme-fcloop.o
1010
obj-$(CONFIG_NVME_TARGET_TCP) += nvmet-tcp.o
1111

1212
nvmet-y += core.o configfs.o admin-cmd.o fabrics-cmd.o \
13-
discovery.o io-cmd-file.o io-cmd-bdev.o
13+
discovery.o io-cmd-file.o io-cmd-bdev.o pr.o
1414
nvmet-$(CONFIG_NVME_TARGET_DEBUGFS) += debugfs.o
1515
nvmet-$(CONFIG_NVME_TARGET_PASSTHRU) += passthru.o
1616
nvmet-$(CONFIG_BLK_DEV_ZONED) += zns.o

drivers/nvme/target/admin-cmd.c

Lines changed: 23 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -176,6 +176,10 @@ static void nvmet_get_cmd_effects_nvm(struct nvme_effects_log *log)
176176
log->iocs[nvme_cmd_read] =
177177
log->iocs[nvme_cmd_flush] =
178178
log->iocs[nvme_cmd_dsm] =
179+
log->iocs[nvme_cmd_resv_acquire] =
180+
log->iocs[nvme_cmd_resv_register] =
181+
log->iocs[nvme_cmd_resv_release] =
182+
log->iocs[nvme_cmd_resv_report] =
179183
cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
180184
log->iocs[nvme_cmd_write] =
181185
log->iocs[nvme_cmd_write_zeroes] =
@@ -340,6 +344,8 @@ static void nvmet_execute_get_log_page(struct nvmet_req *req)
340344
return nvmet_execute_get_log_cmd_effects_ns(req);
341345
case NVME_LOG_ANA:
342346
return nvmet_execute_get_log_page_ana(req);
347+
case NVME_LOG_RESERVATION:
348+
return nvmet_execute_get_log_page_resv(req);
343349
}
344350
pr_debug("unhandled lid %d on qid %d\n",
345351
req->cmd->get_log_page.lid, req->sq->qid);
@@ -433,7 +439,8 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
433439
id->nn = cpu_to_le32(NVMET_MAX_NAMESPACES);
434440
id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
435441
id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
436-
NVME_CTRL_ONCS_WRITE_ZEROES);
442+
NVME_CTRL_ONCS_WRITE_ZEROES |
443+
NVME_CTRL_ONCS_RESERVATIONS);
437444

438445
/* XXX: don't report vwc if the underlying device is write through */
439446
id->vwc = NVME_CTRL_VWC_PRESENT;
@@ -551,6 +558,15 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
551558
id->nmic = NVME_NS_NMIC_SHARED;
552559
id->anagrpid = cpu_to_le32(req->ns->anagrpid);
553560

561+
if (req->ns->pr.enable)
562+
id->rescap = NVME_PR_SUPPORT_WRITE_EXCLUSIVE |
563+
NVME_PR_SUPPORT_EXCLUSIVE_ACCESS |
564+
NVME_PR_SUPPORT_WRITE_EXCLUSIVE_REG_ONLY |
565+
NVME_PR_SUPPORT_EXCLUSIVE_ACCESS_REG_ONLY |
566+
NVME_PR_SUPPORT_WRITE_EXCLUSIVE_ALL_REGS |
567+
NVME_PR_SUPPORT_EXCLUSIVE_ACCESS_ALL_REGS |
568+
NVME_PR_SUPPORT_IEKEY_VER_1_3_DEF;
569+
554570
memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid));
555571

556572
id->lbaf[0].ds = req->ns->blksize_shift;
@@ -861,6 +877,9 @@ void nvmet_execute_set_features(struct nvmet_req *req)
861877
case NVME_FEAT_WRITE_PROTECT:
862878
status = nvmet_set_feat_write_protect(req);
863879
break;
880+
case NVME_FEAT_RESV_MASK:
881+
status = nvmet_set_feat_resv_notif_mask(req, cdw11);
882+
break;
864883
default:
865884
req->error_loc = offsetof(struct nvme_common_command, cdw10);
866885
status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
@@ -959,6 +978,9 @@ void nvmet_execute_get_features(struct nvmet_req *req)
959978
case NVME_FEAT_WRITE_PROTECT:
960979
status = nvmet_get_feat_write_protect(req);
961980
break;
981+
case NVME_FEAT_RESV_MASK:
982+
status = nvmet_get_feat_resv_notif_mask(req);
983+
break;
962984
default:
963985
req->error_loc =
964986
offsetof(struct nvme_common_command, cdw10);

drivers/nvme/target/configfs.c

Lines changed: 27 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -769,6 +769,32 @@ static ssize_t nvmet_ns_revalidate_size_store(struct config_item *item,
769769

770770
CONFIGFS_ATTR_WO(nvmet_ns_, revalidate_size);
771771

772+
static ssize_t nvmet_ns_resv_enable_show(struct config_item *item, char *page)
773+
{
774+
return sysfs_emit(page, "%d\n", to_nvmet_ns(item)->pr.enable);
775+
}
776+
777+
static ssize_t nvmet_ns_resv_enable_store(struct config_item *item,
778+
const char *page, size_t count)
779+
{
780+
struct nvmet_ns *ns = to_nvmet_ns(item);
781+
bool val;
782+
783+
if (kstrtobool(page, &val))
784+
return -EINVAL;
785+
786+
mutex_lock(&ns->subsys->lock);
787+
if (ns->enabled) {
788+
pr_err("the ns:%d is already enabled.\n", ns->nsid);
789+
mutex_unlock(&ns->subsys->lock);
790+
return -EINVAL;
791+
}
792+
ns->pr.enable = val;
793+
mutex_unlock(&ns->subsys->lock);
794+
return count;
795+
}
796+
CONFIGFS_ATTR(nvmet_ns_, resv_enable);
797+
772798
static struct configfs_attribute *nvmet_ns_attrs[] = {
773799
&nvmet_ns_attr_device_path,
774800
&nvmet_ns_attr_device_nguid,
@@ -777,6 +803,7 @@ static struct configfs_attribute *nvmet_ns_attrs[] = {
777803
&nvmet_ns_attr_enable,
778804
&nvmet_ns_attr_buffered_io,
779805
&nvmet_ns_attr_revalidate_size,
806+
&nvmet_ns_attr_resv_enable,
780807
#ifdef CONFIG_PCI_P2PDMA
781808
&nvmet_ns_attr_p2pmem,
782809
#endif

drivers/nvme/target/core.c

Lines changed: 56 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -611,6 +611,12 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
611611
if (ret)
612612
goto out_restore_subsys_maxnsid;
613613

614+
if (ns->pr.enable) {
615+
ret = nvmet_pr_init_ns(ns);
616+
if (ret)
617+
goto out_remove_from_subsys;
618+
}
619+
614620
subsys->nr_namespaces++;
615621

616622
nvmet_ns_changed(subsys, ns->nsid);
@@ -620,6 +626,8 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
620626
mutex_unlock(&subsys->lock);
621627
return ret;
622628

629+
out_remove_from_subsys:
630+
xa_erase(&subsys->namespaces, ns->nsid);
623631
out_restore_subsys_maxnsid:
624632
subsys->max_nsid = nvmet_max_nsid(subsys);
625633
percpu_ref_exit(&ns->ref);
@@ -663,6 +671,9 @@ void nvmet_ns_disable(struct nvmet_ns *ns)
663671
wait_for_completion(&ns->disable_done);
664672
percpu_ref_exit(&ns->ref);
665673

674+
if (ns->pr.enable)
675+
nvmet_pr_exit_ns(ns);
676+
666677
mutex_lock(&subsys->lock);
667678

668679
subsys->nr_namespaces--;
@@ -754,6 +765,7 @@ static void nvmet_set_error(struct nvmet_req *req, u16 status)
754765
static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
755766
{
756767
struct nvmet_ns *ns = req->ns;
768+
struct nvmet_pr_per_ctrl_ref *pc_ref = req->pc_ref;
757769

758770
if (!req->sq->sqhd_disabled)
759771
nvmet_update_sq_head(req);
@@ -766,6 +778,9 @@ static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
766778
trace_nvmet_req_complete(req);
767779

768780
req->ops->queue_response(req);
781+
782+
if (pc_ref)
783+
nvmet_pr_put_ns_pc_ref(pc_ref);
769784
if (ns)
770785
nvmet_put_namespace(ns);
771786
}
@@ -929,18 +944,39 @@ static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
929944
return ret;
930945
}
931946

947+
if (req->ns->pr.enable) {
948+
ret = nvmet_parse_pr_cmd(req);
949+
if (!ret)
950+
return ret;
951+
}
952+
932953
switch (req->ns->csi) {
933954
case NVME_CSI_NVM:
934955
if (req->ns->file)
935-
return nvmet_file_parse_io_cmd(req);
936-
return nvmet_bdev_parse_io_cmd(req);
956+
ret = nvmet_file_parse_io_cmd(req);
957+
else
958+
ret = nvmet_bdev_parse_io_cmd(req);
959+
break;
937960
case NVME_CSI_ZNS:
938961
if (IS_ENABLED(CONFIG_BLK_DEV_ZONED))
939-
return nvmet_bdev_zns_parse_io_cmd(req);
940-
return NVME_SC_INVALID_IO_CMD_SET;
962+
ret = nvmet_bdev_zns_parse_io_cmd(req);
963+
else
964+
ret = NVME_SC_INVALID_IO_CMD_SET;
965+
break;
941966
default:
942-
return NVME_SC_INVALID_IO_CMD_SET;
967+
ret = NVME_SC_INVALID_IO_CMD_SET;
943968
}
969+
if (ret)
970+
return ret;
971+
972+
if (req->ns->pr.enable) {
973+
ret = nvmet_pr_check_cmd_access(req);
974+
if (ret)
975+
return ret;
976+
977+
ret = nvmet_pr_get_ns_pc_ref(req);
978+
}
979+
return ret;
944980
}
945981

946982
bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
@@ -964,6 +1000,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
9641000
req->ns = NULL;
9651001
req->error_loc = NVMET_NO_ERROR_LOC;
9661002
req->error_slba = 0;
1003+
req->pc_ref = NULL;
9671004

9681005
/* no support for fused commands yet */
9691006
if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
@@ -1015,6 +1052,8 @@ EXPORT_SYMBOL_GPL(nvmet_req_init);
10151052
void nvmet_req_uninit(struct nvmet_req *req)
10161053
{
10171054
percpu_ref_put(&req->sq->ref);
1055+
if (req->pc_ref)
1056+
nvmet_pr_put_ns_pc_ref(req->pc_ref);
10181057
if (req->ns)
10191058
nvmet_put_namespace(req->ns);
10201059
}
@@ -1383,7 +1422,8 @@ static void nvmet_fatal_error_handler(struct work_struct *work)
13831422
}
13841423

13851424
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
1386-
struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
1425+
struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp,
1426+
uuid_t *hostid)
13871427
{
13881428
struct nvmet_subsys *subsys;
13891429
struct nvmet_ctrl *ctrl;
@@ -1462,6 +1502,8 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
14621502
}
14631503
ctrl->cntlid = ret;
14641504

1505+
uuid_copy(&ctrl->hostid, hostid);
1506+
14651507
/*
14661508
* Discovery controllers may use some arbitrary high value
14671509
* in order to cleanup stale discovery sessions
@@ -1478,6 +1520,9 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
14781520
nvmet_start_keep_alive_timer(ctrl);
14791521

14801522
mutex_lock(&subsys->lock);
1523+
ret = nvmet_ctrl_init_pr(ctrl);
1524+
if (ret)
1525+
goto init_pr_fail;
14811526
list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
14821527
nvmet_setup_p2p_ns_map(ctrl, req);
14831528
nvmet_debugfs_ctrl_setup(ctrl);
@@ -1486,6 +1531,10 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
14861531
*ctrlp = ctrl;
14871532
return 0;
14881533

1534+
init_pr_fail:
1535+
mutex_unlock(&subsys->lock);
1536+
nvmet_stop_keep_alive_timer(ctrl);
1537+
ida_free(&cntlid_ida, ctrl->cntlid);
14891538
out_free_sqs:
14901539
kfree(ctrl->sqs);
14911540
out_free_changed_ns_list:
@@ -1504,6 +1553,7 @@ static void nvmet_ctrl_free(struct kref *ref)
15041553
struct nvmet_subsys *subsys = ctrl->subsys;
15051554

15061555
mutex_lock(&subsys->lock);
1556+
nvmet_ctrl_destroy_pr(ctrl);
15071557
nvmet_release_p2p_ns_map(ctrl);
15081558
list_del(&ctrl->subsys_entry);
15091559
mutex_unlock(&subsys->lock);

drivers/nvme/target/fabrics-cmd.c

Lines changed: 1 addition & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -245,12 +245,10 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
245245
d->subsysnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
246246
d->hostnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
247247
status = nvmet_alloc_ctrl(d->subsysnqn, d->hostnqn, req,
248-
le32_to_cpu(c->kato), &ctrl);
248+
le32_to_cpu(c->kato), &ctrl, &d->hostid);
249249
if (status)
250250
goto out;
251251

252-
uuid_copy(&ctrl->hostid, &d->hostid);
253-
254252
dhchap_status = nvmet_setup_auth(ctrl);
255253
if (dhchap_status) {
256254
pr_err("Failed to setup authentication, dhchap status %u\n",

0 commit comments

Comments (0)