Commit 0f5be6a4 authored by Daniel Wagner, committed by Keith Busch

nvmet: update AEN list and array at one place

All async events are enqueued via nvmet_add_async_event(), which
updates the ctrl->async_event_cmds[] array; additionally, a struct
nvmet_async_event is added to the ctrl->async_events list.

Under normal operation, nvmet_async_event_work() updates the
ctrl->async_event_cmds[] array again and removes the corresponding
struct nvmet_async_event from the list. However, nvmet_sq_destroy()
may also be called; it calls nvmet_async_events_free(), which only
updates the ctrl->async_event_cmds[] array.

Add the new functions nvmet_async_events_process() and
nvmet_async_events_free() to process async events and to update the
array and the list together.

When we destroy the submission queue, after clearing the AENs present
on the ctrl->async_events list, we also loop over
ctrl->async_event_cmds[] for any requests posted by the host for which
we don't have an AEN on the ctrl->async_events list, by calling
nvmet_async_events_process() and nvmet_async_events_free().
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Daniel Wagner <dwagner@suse.de>
[chaitanya.kulkarni@wdc.com
 * Loop over and clear out outstanding requests
 * Update changelog
]
Signed-off-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
Signed-off-by: Keith Busch <kbusch@kernel.org>
parent 1a3f540d
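
For context on the changelog above: the list and the array are filled from two separate paths in the target core. The sketch below paraphrases those two enqueue paths; it is not part of this patch, and details such as request validation are simplified, so treat the exact logic as approximate.

/*
 * Paraphrased sketch, not part of this patch: the target raises an event
 * by queueing a struct nvmet_async_event on ctrl->async_events and
 * kicking the work item that later pairs it with a parked AER command.
 */
void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
		u8 event_info, u8 log_page)
{
	struct nvmet_async_event *aen;

	aen = kmalloc(sizeof(*aen), GFP_KERNEL);
	if (!aen)
		return;

	aen->event_type = event_type;
	aen->event_info = event_info;
	aen->log_page = log_page;

	mutex_lock(&ctrl->lock);
	list_add_tail(&aen->entry, &ctrl->async_events);
	mutex_unlock(&ctrl->lock);

	schedule_work(&ctrl->async_event_work);
}

/*
 * Paraphrased sketch of the other side: the host posts an Asynchronous
 * Event Request command, which is parked in ctrl->async_event_cmds[]
 * until an event is available to complete it.
 */
static void nvmet_execute_async_event(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR);
		return;
	}
	ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
	mutex_unlock(&ctrl->lock);

	schedule_work(&ctrl->async_event_work);
}
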
drivers/nvme/target/core.c
@@ -129,27 +129,8 @@ static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
 	return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
 }
 
-static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
-{
-	struct nvmet_req *req;
-
-	while (1) {
-		mutex_lock(&ctrl->lock);
-		if (!ctrl->nr_async_event_cmds) {
-			mutex_unlock(&ctrl->lock);
-			return;
-		}
-
-		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
-		mutex_unlock(&ctrl->lock);
-		nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
-	}
-}
-
-static void nvmet_async_event_work(struct work_struct *work)
+static void nvmet_async_events_process(struct nvmet_ctrl *ctrl, u16 status)
 {
-	struct nvmet_ctrl *ctrl =
-		container_of(work, struct nvmet_ctrl, async_event_work);
 	struct nvmet_async_event *aen;
 	struct nvmet_req *req;
 
@@ -159,20 +140,43 @@ static void nvmet_async_event_work(struct work_struct *work)
 			struct nvmet_async_event, entry);
 		if (!aen || !ctrl->nr_async_event_cmds) {
 			mutex_unlock(&ctrl->lock);
-			return;
+			break;
 		}
 
 		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
-		nvmet_set_result(req, nvmet_async_event_result(aen));
+		if (status == 0)
+			nvmet_set_result(req, nvmet_async_event_result(aen));
 
 		list_del(&aen->entry);
 		kfree(aen);
 
 		mutex_unlock(&ctrl->lock);
-		nvmet_req_complete(req, 0);
+		nvmet_req_complete(req, status);
 	}
 }
 
+static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
+{
+	struct nvmet_req *req;
+
+	mutex_lock(&ctrl->lock);
+	while (ctrl->nr_async_event_cmds) {
+		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
+		mutex_unlock(&ctrl->lock);
+		nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
+		mutex_lock(&ctrl->lock);
+	}
+	mutex_unlock(&ctrl->lock);
+}
+
+static void nvmet_async_event_work(struct work_struct *work)
+{
+	struct nvmet_ctrl *ctrl =
+		container_of(work, struct nvmet_ctrl, async_event_work);
+
+	nvmet_async_events_process(ctrl, 0);
+}
+
 void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
 		u8 event_info, u8 log_page)
 {
@@ -753,19 +757,24 @@ static void nvmet_confirm_sq(struct percpu_ref *ref)
 
 void nvmet_sq_destroy(struct nvmet_sq *sq)
 {
+	u16 status = NVME_SC_INTERNAL | NVME_SC_DNR;
+	struct nvmet_ctrl *ctrl = sq->ctrl;
+
 	/*
 	 * If this is the admin queue, complete all AERs so that our
 	 * queue doesn't have outstanding requests on it.
 	 */
-	if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq)
-		nvmet_async_events_free(sq->ctrl);
+	if (ctrl && ctrl->sqs && ctrl->sqs[0] == sq) {
+		nvmet_async_events_process(ctrl, status);
+		nvmet_async_events_free(ctrl);
+	}
 	percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
 	wait_for_completion(&sq->confirm_done);
 	wait_for_completion(&sq->free_done);
 	percpu_ref_exit(&sq->ref);
 
-	if (sq->ctrl) {
-		nvmet_ctrl_put(sq->ctrl);
+	if (ctrl) {
+		nvmet_ctrl_put(ctrl);
 		sq->ctrl = NULL; /* allows reusing the queue later */
 	}
 }
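
As a quick check on the value built by nvmet_async_event_result() in the first hunk, consider a "Namespace Attribute Changed" notice. Using raw NVMe specification values rather than the kernel's enum names (event type Notice = 0x2, event info 0x0, Changed Namespace List log page = 0x04), the completion result dword works out as below; this is illustrative only and not part of the patch.

/*
 * Illustrative only: AEN completion dword for a Namespace Attribute
 * Changed notice, composed the same way as nvmet_async_event_result():
 * event_type | (event_info << 8) | (log_page << 16).
 */
u32 result = 0x02 | (0x00 << 8) | (0x04 << 16);	/* == 0x00040002 */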