Skip to content

Commit 0f5be6a

Browse files
igaw authored and keithbusch committed
nvmet: update AEN list and array at one place
All async events are enqueued via nvmet_add_async_event(), which updates the ctrl->async_event_cmds[] array; additionally, a struct nvmet_async_event is added to the ctrl->async_events list. Under normal operation, nvmet_async_event_work() updates the ctrl->async_event_cmds[] array again and removes the corresponding struct nvmet_async_event from the list. However, nvmet_sq_destroy() could be called, which calls nvmet_async_events_free(), which only updates the ctrl->async_event_cmds[] array.

Add new functions nvmet_async_events_process() and nvmet_async_events_free() to process async events, updating both the array and the list. When we destroy the submission queue, after clearing the AENs present on the ctrl->async_events list, we also loop over ctrl->async_event_cmds[] for any requests posted by the host for which we don't have an AEN on the ctrl->async_events list, by calling nvmet_async_events_process() and nvmet_async_events_free().

Reviewed-by: Christoph Hellwig <[email protected]> Signed-off-by: Daniel Wagner <[email protected]> [[email protected] * Loop over and clear out outstanding requests * Update changelog ] Signed-off-by: Chaitanya Kulkarni <[email protected]> Signed-off-by: Keith Busch <[email protected]>
1 parent 1a3f540 commit 0f5be6a

File tree

1 file changed

+36
-27
lines changed

1 file changed

+36
-27
lines changed

drivers/nvme/target/core.c

Lines changed: 36 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -129,27 +129,8 @@ static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
129129
return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
130130
}
131131

132-
static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
133-
{
134-
struct nvmet_req *req;
135-
136-
while (1) {
137-
mutex_lock(&ctrl->lock);
138-
if (!ctrl->nr_async_event_cmds) {
139-
mutex_unlock(&ctrl->lock);
140-
return;
141-
}
142-
143-
req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
144-
mutex_unlock(&ctrl->lock);
145-
nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
146-
}
147-
}
148-
149-
static void nvmet_async_event_work(struct work_struct *work)
132+
static void nvmet_async_events_process(struct nvmet_ctrl *ctrl, u16 status)
150133
{
151-
struct nvmet_ctrl *ctrl =
152-
container_of(work, struct nvmet_ctrl, async_event_work);
153134
struct nvmet_async_event *aen;
154135
struct nvmet_req *req;
155136

@@ -159,20 +140,43 @@ static void nvmet_async_event_work(struct work_struct *work)
159140
struct nvmet_async_event, entry);
160141
if (!aen || !ctrl->nr_async_event_cmds) {
161142
mutex_unlock(&ctrl->lock);
162-
return;
143+
break;
163144
}
164145

165146
req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
166-
nvmet_set_result(req, nvmet_async_event_result(aen));
147+
if (status == 0)
148+
nvmet_set_result(req, nvmet_async_event_result(aen));
167149

168150
list_del(&aen->entry);
169151
kfree(aen);
170152

171153
mutex_unlock(&ctrl->lock);
172-
nvmet_req_complete(req, 0);
154+
nvmet_req_complete(req, status);
173155
}
174156
}
175157

158+
/*
 * Fail every AER command still posted in ctrl->async_event_cmds[] for
 * which no AEN was queued on the ctrl->async_events list.  Each request
 * is completed with an internal error and DNR set so the host does not
 * retry it.  Intended to run after nvmet_async_events_process() has
 * drained the matched (request, AEN) pairs.
 */
static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
{
	struct nvmet_req *req;

	mutex_lock(&ctrl->lock);
	while (ctrl->nr_async_event_cmds) {
		/* Pop the most recently posted outstanding AER command. */
		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		/*
		 * Drop ctrl->lock across the completion call.
		 * NOTE(review): presumably nvmet_req_complete() can recurse
		 * into paths that take ctrl->lock — confirm against callers.
		 */
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
		/* Re-acquire before re-reading nr_async_event_cmds. */
		mutex_lock(&ctrl->lock);
	}
	mutex_unlock(&ctrl->lock);
}
171+
172+
/*
 * Workqueue handler for ctrl->async_event_work: deliver queued AENs to
 * outstanding AER commands.  Passing status 0 makes
 * nvmet_async_events_process() complete each matched request with the
 * AEN result rather than failing it.
 */
static void nvmet_async_event_work(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
		container_of(work, struct nvmet_ctrl, async_event_work);

	nvmet_async_events_process(ctrl, 0);
}
179+
176180
void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
177181
u8 event_info, u8 log_page)
178182
{
@@ -753,19 +757,24 @@ static void nvmet_confirm_sq(struct percpu_ref *ref)
753757

754758
/*
 * Tear down a submission queue: complete any outstanding AER commands
 * (admin queue only), wait for all in-flight requests to drain via the
 * percpu ref, and drop the queue's controller reference.
 */
void nvmet_sq_destroy(struct nvmet_sq *sq)
{
	u16 status = NVME_SC_INTERNAL | NVME_SC_DNR;
	struct nvmet_ctrl *ctrl = sq->ctrl;

	/*
	 * If this is the admin queue, complete all AERs so that our
	 * queue doesn't have outstanding requests on it.
	 */
	if (ctrl && ctrl->sqs && ctrl->sqs[0] == sq) {
		/* Fail AER commands that have a matching queued AEN... */
		nvmet_async_events_process(ctrl, status);
		/* ...then fail any remaining AER commands with no AEN. */
		nvmet_async_events_free(ctrl);
	}
	/* Kill the ref and wait until every outstanding request finished. */
	percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
	wait_for_completion(&sq->confirm_done);
	wait_for_completion(&sq->free_done);
	percpu_ref_exit(&sq->ref);

	if (ctrl) {
		nvmet_ctrl_put(ctrl);
		sq->ctrl = NULL; /* allows reusing the queue later */
	}
}

0 commit comments

Comments
 (0)