Commit 2b749773 authored by Linus Torvalds

Merge tag 'block-6.4-2023-06-02' of git://git.kernel.dk/linux

Pull block fixes from Jens Axboe:
 "Just an NVMe pull request with (mostly) KATO fixes, a regression fix
  for zoned device revalidation, and a fix for an md raid5 regression"

* tag 'block-6.4-2023-06-02' of git://git.kernel.dk/linux:
  nvme: fix the name of Zone Append for verbose logging
  nvme: improve handling of long keep alives
  nvme: check IO start time when deciding to defer KA
  nvme: double KA polling frequency to avoid KATO with TBKAS on
  nvme: fix miss command type check
  block: fix revalidate performance regression
  md/raid5: fix miscalculation of 'end_sector' in raid5_read_one_chunk()
parents 26d14779 2e45a495
@@ -915,6 +915,7 @@ static bool disk_has_partitions(struct gendisk *disk)
 void disk_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
 {
         struct request_queue *q = disk->queue;
+        unsigned int old_model = q->limits.zoned;

         switch (model) {
         case BLK_ZONED_HM:
@@ -952,7 +953,7 @@ void disk_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
                  */
                 blk_queue_zone_write_granularity(q,
                                                 queue_logical_block_size(q));
-        } else {
+        } else if (old_model != BLK_ZONED_NONE) {
                 disk_clear_zone_settings(disk);
         }
 }
......
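The revalidate regression comes from the else branch above: before the fix, every call that set a disk to BLK_ZONED_NONE ran disk_clear_zone_settings(), even for disks that were never zoned, so ordinary revalidation paid for zone teardown on every pass. Caching the previous model lets that work be skipped. Below is a minimal userspace model of the new guard; the enum and helper names are simplified stand-ins for the kernel code, not its API.

#include <stdio.h>

enum blk_zoned_model { BLK_ZONED_NONE, BLK_ZONED_HA, BLK_ZONED_HM };

/* stand-in for the expensive teardown done by disk_clear_zone_settings() */
static void clear_zone_settings(void)
{
        puts("clearing zone settings (expensive)");
}

/*
 * Model of the fixed logic: remember the previous model and only tear
 * down zone state when actually leaving a zoned model.
 */
static void set_zoned(enum blk_zoned_model *cur, enum blk_zoned_model model)
{
        enum blk_zoned_model old_model = *cur;

        *cur = model;
        if (model == BLK_ZONED_NONE && old_model != BLK_ZONED_NONE)
                clear_zone_settings();
        /* zoned setup and the already-non-zoned case are elided */
}

int main(void)
{
        enum blk_zoned_model m = BLK_ZONED_NONE;

        set_zoned(&m, BLK_ZONED_NONE);  /* revalidating a regular disk: no teardown */
        set_zoned(&m, BLK_ZONED_HM);    /* becomes zoned */
        set_zoned(&m, BLK_ZONED_NONE);  /* leaves zoned: teardown runs once */
        return 0;
}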
@@ -5516,7 +5516,7 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
         sector = raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector, 0,
                                       &dd_idx, NULL);
-        end_sector = bio_end_sector(raid_bio);
+        end_sector = sector + bio_sectors(raid_bio);

         rcu_read_lock();
         if (r5c_big_stripe_cached(conf, sector))
......
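The raid5 miscalculation is a coordinate-space mixup: raid5_compute_sector() returns `sector` in the member device's address space, while bio_end_sector(raid_bio) is still in the array's logical address space, so the old end_sector mixed the two. The end of the request on the member device is simply the mapped start plus the bio length in sectors, i.e. sector + bio_sectors(raid_bio). A toy model showing that the two spaces differ; the striping math below is invented for illustration and is not md's real layout.

#include <stdio.h>

typedef unsigned long long sector_t;

/*
 * Toy mapping for a RAID5-like layout with a 1024-sector chunk striped
 * over 3 data drives. It only exists to show that device sectors and
 * array sectors live in different address spaces.
 */
static sector_t map_to_device_sector(sector_t array_sector)
{
        sector_t chunk = 1024;
        sector_t stripe = array_sector / (3 * chunk);
        sector_t offset = array_sector % chunk;

        return stripe * chunk + offset;
}

int main(void)
{
        sector_t bi_sector = 10000;     /* array-relative start of the bio */
        sector_t bio_sectors = 8;       /* bio length in sectors */

        sector_t dev_sector = map_to_device_sector(bi_sector);
        sector_t wrong_end = bi_sector + bio_sectors;   /* array space: wrong pairing */
        sector_t right_end = dev_sector + bio_sectors;  /* device space, like the fix */

        printf("device start %llu, wrong end %llu, correct end %llu\n",
               dev_sector, wrong_end, right_end);
        return 0;
}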
@@ -21,7 +21,7 @@ static const char * const nvme_ops[] = {
         [nvme_cmd_resv_release] = "Reservation Release",
         [nvme_cmd_zone_mgmt_send] = "Zone Management Send",
         [nvme_cmd_zone_mgmt_recv] = "Zone Management Receive",
-        [nvme_cmd_zone_append] = "Zone Management Append",
+        [nvme_cmd_zone_append] = "Zone Append",
 };

 static const char * const nvme_admin_ops[] = {
......
@@ -397,7 +397,16 @@ void nvme_complete_rq(struct request *req)
         trace_nvme_complete_rq(req);
         nvme_cleanup_cmd(req);

-        if (ctrl->kas)
+        /*
+         * Completions of long-running commands should not be able to
+         * defer sending of periodic keep alives, since the controller
+         * may have completed processing such commands a long time ago
+         * (arbitrarily close to command submission time).
+         * req->deadline - req->timeout is the command submission time
+         * in jiffies.
+         */
+        if (ctrl->kas &&
+            req->deadline - req->timeout >= ctrl->ka_last_check_time)
                 ctrl->comp_seen = true;

         switch (nvme_decide_disposition(req)) {
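The new comment carries the reasoning: with Traffic Based Keep Alive, any completion is taken as evidence of recent traffic, but completing a command that was submitted before the last keep-alive check says nothing about the interval since that check. blk-mq sets req->deadline to the start time plus the timeout, so deadline minus timeout recovers the submission time, and only commands started after ka_last_check_time may defer the keep-alive. A small userspace model of that decision, with plain unsigned longs standing in for jiffies and a made-up struct rather than struct request:

#include <stdbool.h>
#include <stdio.h>

/*
 * Model: a request records its deadline (start + timeout) and its
 * timeout, so start = deadline - timeout, as in blk-mq's bookkeeping.
 */
struct req {
        unsigned long deadline;
        unsigned long timeout;
};

static bool completion_defers_keep_alive(const struct req *rq,
                                         unsigned long ka_last_check_time)
{
        unsigned long start = rq->deadline - rq->timeout;

        /* only traffic started after the last KA check counts as fresh */
        return start >= ka_last_check_time;
}

int main(void)
{
        unsigned long ka_last_check_time = 1000;
        struct req fresh = { .deadline = 1200 + 30, .timeout = 30 }; /* started at 1200 */
        struct req stale = { .deadline = 900 + 30,  .timeout = 30 }; /* started at 900 */

        printf("fresh completion defers KA: %d\n",
               completion_defers_keep_alive(&fresh, ka_last_check_time));
        printf("stale completion defers KA: %d\n",
               completion_defers_keep_alive(&stale, ka_last_check_time));
        return 0;
}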
@@ -1115,7 +1124,7 @@ u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
 }
 EXPORT_SYMBOL_NS_GPL(nvme_passthru_start, NVME_TARGET_PASSTHRU);

-void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
+void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects,
                        struct nvme_command *cmd, int status)
 {
         if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
@@ -1132,6 +1141,8 @@ void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
                 nvme_queue_scan(ctrl);
                 flush_work(&ctrl->scan_work);
         }
+        if (ns)
+                return;

         switch (cmd->common.opcode) {
         case nvme_admin_set_features:
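The added early return when ns is non-NULL is the "fix miss command type check" change: the switch below interprets cmd->common.opcode as an admin opcode, but a passthrough command issued against a namespace carries an I/O opcode, and the two opcode spaces overlap numerically (for example, the NVM Dataset Management opcode and the admin Set Features opcode are both 0x09). Without the guard, such an I/O command could wrongly take the admin-only post-processing path. A tiny standalone illustration; the opcode values are written out locally as part of the example rather than taken from the kernel headers.

#include <stdio.h>

enum { EX_NVME_ADMIN_SET_FEATURES = 0x09 };     /* admin command set */
enum { EX_NVME_CMD_DSM = 0x09 };                /* NVM (I/O) command set, same value */

/*
 * Model of the fixed dispatch: only commands sent without a namespace,
 * i.e. admin commands, are interpreted with admin opcode meanings.
 */
static void passthru_end(int has_ns, unsigned char opcode)
{
        if (has_ns) {
                printf("I/O opcode 0x%02x: skip admin-only post-processing\n", opcode);
                return;
        }
        switch (opcode) {
        case EX_NVME_ADMIN_SET_FEATURES:
                printf("admin Set Features: refresh controller state\n");
                break;
        default:
                break;
        }
}

int main(void)
{
        passthru_end(0, EX_NVME_ADMIN_SET_FEATURES);    /* real admin command */
        passthru_end(1, EX_NVME_CMD_DSM);               /* I/O DSM, same opcode value */
        return 0;
}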
@@ -1161,9 +1172,25 @@ EXPORT_SYMBOL_NS_GPL(nvme_passthru_end, NVME_TARGET_PASSTHRU);
  * The host should send Keep Alive commands at half of the Keep Alive Timeout
  * accounting for transport roundtrip times [..].
  */
+static unsigned long nvme_keep_alive_work_period(struct nvme_ctrl *ctrl)
+{
+        unsigned long delay = ctrl->kato * HZ / 2;
+
+        /*
+         * When using Traffic Based Keep Alive, we need to run
+         * nvme_keep_alive_work at twice the normal frequency, as one
+         * command completion can postpone sending a keep alive command
+         * by up to twice the delay between runs.
+         */
+        if (ctrl->ctratt & NVME_CTRL_ATTR_TBKAS)
+                delay /= 2;
+        return delay;
+}
+
 static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl)
 {
-        queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ / 2);
+        queue_delayed_work(nvme_wq, &ctrl->ka_work,
+                           nvme_keep_alive_work_period(ctrl));
 }

 static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
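The comment above explains the frequency doubling; the arithmetic is worth spelling out. With TBKAS, a completion seen during one run of nvme_keep_alive_work suppresses the Keep Alive for that run, so in the worst case the controller sees no Keep Alive for almost two work periods. At the old period of KATO/2 that worst case approaches KATO itself, right at the controller's timeout; at KATO/4 it stays near KATO/2. A small model of the period computation, with HZ and the TBKAS flag as simplified stand-ins:

#include <stdbool.h>
#include <stdio.h>

#define EX_HZ 1000      /* jiffies per second in this model */

/* keep-alive work period in jiffies: KATO/2, or KATO/4 with TBKAS */
static unsigned long keep_alive_work_period(unsigned int kato_seconds, bool tbkas)
{
        unsigned long delay = kato_seconds * EX_HZ / 2;

        if (tbkas)
                delay /= 2;
        return delay;
}

int main(void)
{
        unsigned int kato = 8;  /* seconds */

        printf("period without TBKAS: %lu jiffies\n", keep_alive_work_period(kato, false));
        printf("period with TBKAS:    %lu jiffies\n", keep_alive_work_period(kato, true));
        /*
         * Worst case at KATO/2: a completion at the start of one period
         * defers the KA, and the next KA only goes out at the end of the
         * following period, roughly KATO after the previous one. With
         * KATO/4 the same pattern stays around KATO/2.
         */
        return 0;
}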
@@ -1172,6 +1199,20 @@ static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
         struct nvme_ctrl *ctrl = rq->end_io_data;
         unsigned long flags;
         bool startka = false;
+        unsigned long rtt = jiffies - (rq->deadline - rq->timeout);
+        unsigned long delay = nvme_keep_alive_work_period(ctrl);
+
+        /*
+         * Subtract off the keepalive RTT so nvme_keep_alive_work runs
+         * at the desired frequency.
+         */
+        if (rtt <= delay) {
+                delay -= rtt;
+        } else {
+                dev_warn(ctrl->device, "long keepalive RTT (%u ms)\n",
+                         jiffies_to_msecs(rtt));
+                delay = 0;
+        }

         blk_mq_free_request(rq);
@@ -1182,6 +1223,7 @@ static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
                 return RQ_END_IO_NONE;
         }

+        ctrl->ka_last_check_time = jiffies;
         ctrl->comp_seen = false;
         spin_lock_irqsave(&ctrl->lock, flags);
         if (ctrl->state == NVME_CTRL_LIVE ||
@@ -1189,7 +1231,7 @@ static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
                 startka = true;
         spin_unlock_irqrestore(&ctrl->lock, flags);
         if (startka)
-                nvme_queue_keep_alive_work(ctrl);
+                queue_delayed_work(nvme_wq, &ctrl->ka_work, delay);
         return RQ_END_IO_NONE;
 }
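In nvme_keep_alive_end_io the same deadline-minus-timeout trick yields the keep-alive command's own round-trip time, which is subtracted from the next work delay so the keep-alive cadence does not drift when the command itself is slow; an RTT longer than a whole period triggers a warning and an immediate re-run. A sketch of that adjustment, with plain integers standing in for jiffies:

#include <stdio.h>

/*
 * Given the keep-alive round-trip time and the nominal work period
 * (both in jiffies), compute the delay until the next keep-alive run.
 */
static unsigned long next_keep_alive_delay(unsigned long rtt, unsigned long period)
{
        if (rtt <= period)
                return period - rtt;

        /* RTT longer than a whole period: run the work again right away */
        fprintf(stderr, "long keepalive RTT (%lu)\n", rtt);
        return 0;
}

int main(void)
{
        unsigned long period = 2000;    /* e.g. KATO/4 in jiffies */

        printf("fast KA -> next delay %lu\n", next_keep_alive_delay(50, period));
        printf("slow KA -> next delay %lu\n", next_keep_alive_delay(2500, period));
        return 0;
}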
@@ -1200,6 +1242,8 @@ static void nvme_keep_alive_work(struct work_struct *work)
         bool comp_seen = ctrl->comp_seen;
         struct request *rq;

+        ctrl->ka_last_check_time = jiffies;
+
         if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) {
                 dev_dbg(ctrl->device,
                         "reschedule traffic based keep-alive timer\n");
......
@@ -254,7 +254,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
         blk_mq_free_request(req);

         if (effects)
-                nvme_passthru_end(ctrl, effects, cmd, ret);
+                nvme_passthru_end(ctrl, ns, effects, cmd, ret);

         return ret;
 }
......
@@ -328,6 +328,7 @@ struct nvme_ctrl {
         struct delayed_work ka_work;
         struct delayed_work failfast_work;
         struct nvme_command ka_cmd;
+        unsigned long ka_last_check_time;
         struct work_struct fw_act_work;
         unsigned long events;
@@ -1077,7 +1078,7 @@ u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
                          u8 opcode);
 u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode);
 int nvme_execute_rq(struct request *rq, bool at_head);
-void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
+void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects,
                        struct nvme_command *cmd, int status);
 struct nvme_ctrl *nvme_ctrl_from_file(struct file *file);
 struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid);
......
@@ -243,7 +243,7 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
         blk_mq_free_request(rq);

         if (effects)
-                nvme_passthru_end(ctrl, effects, req->cmd, status);
+                nvme_passthru_end(ctrl, ns, effects, req->cmd, status);
 }

 static enum rq_end_io_ret nvmet_passthru_req_done(struct request *rq,
......