Commit 2e45a495 authored by Jens Axboe

Merge tag 'nvme-6.4-2023-06-01' of git://git.infradead.org/nvme into block-6.4

Pull NVMe fixes from Keith:

"nvme fixes for Linux 6.4

 - Fixes for spurious Keep Alive timeouts (Uday)
 - Fix for command type check on passthrough actions (Min)
 - Fix for nvme command name for error logging (Christoph)"

* tag 'nvme-6.4-2023-06-01' of git://git.infradead.org/nvme:
  nvme: fix the name of Zone Append for verbose logging
  nvme: improve handling of long keep alives
  nvme: check IO start time when deciding to defer KA
  nvme: double KA polling frequency to avoid KATO with TBKAS on
  nvme: fix miss command type check
parents 47fe1c30 85630379
@@ -21,7 +21,7 @@ static const char * const nvme_ops[] = {
         [nvme_cmd_resv_release] = "Reservation Release",
         [nvme_cmd_zone_mgmt_send] = "Zone Management Send",
         [nvme_cmd_zone_mgmt_recv] = "Zone Management Receive",
-        [nvme_cmd_zone_append] = "Zone Management Append",
+        [nvme_cmd_zone_append] = "Zone Append",
 };

 static const char * const nvme_admin_ops[] = {
...
@@ -397,7 +397,16 @@ void nvme_complete_rq(struct request *req)
         trace_nvme_complete_rq(req);
         nvme_cleanup_cmd(req);

-        if (ctrl->kas)
+        /*
+         * Completions of long-running commands should not be able to
+         * defer sending of periodic keep alives, since the controller
+         * may have completed processing such commands a long time ago
+         * (arbitrarily close to command submission time).
+         * req->deadline - req->timeout is the command submission time
+         * in jiffies.
+         */
+        if (ctrl->kas &&
+            req->deadline - req->timeout >= ctrl->ka_last_check_time)
                 ctrl->comp_seen = true;

         switch (nvme_decide_disposition(req)) {
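The arithmetic in the new condition is worth spelling out: req->deadline is the jiffies value at which the request would time out, so subtracting req->timeout recovers the submission time, and a completion only counts as traffic if the command was submitted after the last keep-alive check. A minimal user-space sketch of the same check (the type and names here are illustrative, not the kernel's):

#include <stdbool.h>

/* Illustrative stand-in for struct request's timing fields. */
struct fake_req {
        unsigned long deadline;        /* submission time + timeout, in jiffies */
        unsigned long timeout;         /* per-command timeout, in jiffies */
};

/*
 * A completion defers the next keep alive only if the command was
 * submitted after the keep-alive state was last checked; commands
 * submitted earlier may have finished on the controller long ago.
 */
static bool defers_keep_alive(const struct fake_req *req,
                              unsigned long ka_last_check_time)
{
        unsigned long submitted = req->deadline - req->timeout;

        return submitted >= ka_last_check_time;
}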
@@ -1115,7 +1124,7 @@ u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
 }
 EXPORT_SYMBOL_NS_GPL(nvme_passthru_start, NVME_TARGET_PASSTHRU);

-void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
+void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects,
                        struct nvme_command *cmd, int status)
 {
         if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
@@ -1132,6 +1141,8 @@ void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
                 nvme_queue_scan(ctrl);
                 flush_work(&ctrl->scan_work);
         }
+        if (ns)
+                return;

         switch (cmd->common.opcode) {
         case nvme_admin_set_features:
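The new "if (ns) return;" guard exists because the admin and I/O command sets reuse the same numeric opcode values, so the admin-opcode switch above must only run when the passthrough command was not issued against a namespace. As an example of the overlap (values as defined in the NVMe specification and include/linux/nvme.h):

/* Same numeric opcode, different command sets: without the ns check,
 * an I/O Dataset Management command could be mistaken for an admin
 * Set Features command in nvme_passthru_end().
 */
enum nvme_admin_opcode_example { example_admin_set_features = 0x09 };
enum nvme_io_opcode_example    { example_cmd_dsm            = 0x09 };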
@@ -1161,9 +1172,25 @@ EXPORT_SYMBOL_NS_GPL(nvme_passthru_end, NVME_TARGET_PASSTHRU);
  * The host should send Keep Alive commands at half of the Keep Alive Timeout
  * accounting for transport roundtrip times [..].
  */
+static unsigned long nvme_keep_alive_work_period(struct nvme_ctrl *ctrl)
+{
+        unsigned long delay = ctrl->kato * HZ / 2;
+
+        /*
+         * When using Traffic Based Keep Alive, we need to run
+         * nvme_keep_alive_work at twice the normal frequency, as one
+         * command completion can postpone sending a keep alive command
+         * by up to twice the delay between runs.
+         */
+        if (ctrl->ctratt & NVME_CTRL_ATTR_TBKAS)
+                delay /= 2;
+        return delay;
+}
+
 static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl)
 {
-        queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ / 2);
+        queue_delayed_work(nvme_wq, &ctrl->ka_work,
+                           nvme_keep_alive_work_period(ctrl));
 }

 static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
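As a worked example of the period calculation: with KATO = 10 seconds, ctrl->kato * HZ / 2 schedules the worker every 5 seconds; with TBKAS the period halves again to 2.5 seconds, because a completion observed right after one run can push the actual keep-alive transmission out by up to two full periods. A user-space sketch of the same arithmetic (the HZ value is assumed for illustration):

#include <stdio.h>

#define HZ 1000        /* assumed tick rate, for illustration only */

/* Mirror of the period logic: half of KATO, halved again for TBKAS. */
static unsigned long ka_work_period(unsigned long kato_sec, int tbkas)
{
        unsigned long delay = kato_sec * HZ / 2;

        if (tbkas)
                delay /= 2;
        return delay;
}

int main(void)
{
        /* KATO = 10 s: 5000 jiffies plain, 2500 jiffies with TBKAS. */
        printf("plain=%lu tbkas=%lu\n",
               ka_work_period(10, 0), ka_work_period(10, 1));
        return 0;
}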
@@ -1172,6 +1199,20 @@ static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
         struct nvme_ctrl *ctrl = rq->end_io_data;
         unsigned long flags;
         bool startka = false;
+        unsigned long rtt = jiffies - (rq->deadline - rq->timeout);
+        unsigned long delay = nvme_keep_alive_work_period(ctrl);
+
+        /*
+         * Subtract off the keepalive RTT so nvme_keep_alive_work runs
+         * at the desired frequency.
+         */
+        if (rtt <= delay) {
+                delay -= rtt;
+        } else {
+                dev_warn(ctrl->device, "long keepalive RTT (%u ms)\n",
+                         jiffies_to_msecs(rtt));
+                delay = 0;
+        }

         blk_mq_free_request(rq);
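The RTT compensation can be read as: the next keep alive should go out one period after the previous one was sent, not one period after its completion arrived, so the observed round-trip time is subtracted from the delay, and a round trip longer than the whole period clamps the delay to zero. A minimal sketch of that clamping (function name is illustrative):

/* Shorten the next keep-alive delay by the observed round-trip time,
 * never going negative: an overlong RTT means "send again at once".
 */
static unsigned long next_ka_delay(unsigned long rtt, unsigned long period)
{
        return rtt <= period ? period - rtt : 0;
}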
@@ -1182,6 +1223,7 @@ static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
                 return RQ_END_IO_NONE;
         }

+        ctrl->ka_last_check_time = jiffies;
         ctrl->comp_seen = false;
         spin_lock_irqsave(&ctrl->lock, flags);
         if (ctrl->state == NVME_CTRL_LIVE ||
@@ -1189,7 +1231,7 @@ static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
                 startka = true;
         spin_unlock_irqrestore(&ctrl->lock, flags);
         if (startka)
-                nvme_queue_keep_alive_work(ctrl);
+                queue_delayed_work(nvme_wq, &ctrl->ka_work, delay);
         return RQ_END_IO_NONE;
 }
@@ -1200,6 +1242,8 @@ static void nvme_keep_alive_work(struct work_struct *work)
         bool comp_seen = ctrl->comp_seen;
         struct request *rq;

+        ctrl->ka_last_check_time = jiffies;
+
         if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) {
                 dev_dbg(ctrl->device,
                         "reschedule traffic based keep-alive timer\n");
...
@@ -254,7 +254,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
         blk_mq_free_request(req);

         if (effects)
-                nvme_passthru_end(ctrl, effects, cmd, ret);
+                nvme_passthru_end(ctrl, ns, effects, cmd, ret);

         return ret;
 }
...
@@ -328,6 +328,7 @@ struct nvme_ctrl {
         struct delayed_work ka_work;
         struct delayed_work failfast_work;
         struct nvme_command ka_cmd;
+        unsigned long ka_last_check_time;
         struct work_struct fw_act_work;
         unsigned long events;
@@ -1077,7 +1078,7 @@ u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
                          u8 opcode);
 u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode);
 int nvme_execute_rq(struct request *rq, bool at_head);
-void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
+void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects,
                        struct nvme_command *cmd, int status);
 struct nvme_ctrl *nvme_ctrl_from_file(struct file *file);
 struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid);
...
@@ -243,7 +243,7 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
         blk_mq_free_request(rq);

         if (effects)
-                nvme_passthru_end(ctrl, effects, req->cmd, status);
+                nvme_passthru_end(ctrl, ns, effects, req->cmd, status);
 }

 static enum rq_end_io_ret nvmet_passthru_req_done(struct request *rq,