Commit 9ebc0ecb authored by Linus Torvalds

Merge tag 'block-6.0-2022-09-09' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:

 - NVMe pull via Christoph:
      - fix a use-after-free in nvmet (Bart Van Assche)
      - fix a use-after-free when detecting digest errors
        (Sagi Grimberg)
      - fix a regression that causes sporadic TCP requests to time out
        (Sagi Grimberg)
      - fix two off-by-one errors in the nvmet ZNS support
        (Dennis Maisenbacher)
      - requeue AEN after firmware activation (Keith Busch)

 - Fix missing request flags in debugfs code (me)

 - Partition scan fix (Ming)

* tag 'block-6.0-2022-09-09' of git://git.kernel.dk/linux-block:
  block: add missing request flags to debugfs code
  nvme: requeue aen after firmware activation
  nvmet: fix mar and mor off-by-one errors
  nvme-tcp: fix regression that causes sporadic requests to time out
  nvme-tcp: fix UAF when detecting digest errors
  nvmet: fix a use-after-free
  block: don't add partitions if GD_SUPPRESS_PART_SCAN is set
parents d2b768c3 745ed372
block/blk-mq-debugfs.c
@@ -283,7 +283,9 @@ static const char *const rqf_name[] = {
 	RQF_NAME(SPECIAL_PAYLOAD),
 	RQF_NAME(ZONE_WRITE_LOCKED),
 	RQF_NAME(MQ_POLL_SLEPT),
+	RQF_NAME(TIMED_OUT),
 	RQF_NAME(ELV),
+	RQF_NAME(RESV),
 };
 #undef RQF_NAME
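For context, rqf_name[] is the table blk-mq's debugfs code uses to decode a request's RQF_* flag bits into names; any flag missing from the table is simply absent from the decoded output, which is what this fix addresses. The entries are built by a stringifier along these lines (quoted from memory, so treat the exact definition as approximate):

    #define RQF_NAME(name) [ilog2((__force u32)RQF_##name)] = #name

Adding TIMED_OUT and RESV brings the table back in sync with the RQF_* flags defined in include/linux/blk-mq.h.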
block/partitions/core.c
@@ -596,6 +596,9 @@ static int blk_add_partitions(struct gendisk *disk)
 	if (disk->flags & GENHD_FL_NO_PART)
 		return 0;
 
+	if (test_bit(GD_SUPPRESS_PART_SCAN, &disk->state))
+		return 0;
+
 	state = check_partition(disk);
 	if (!state)
 		return 0;
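GD_SUPPRESS_PART_SCAN is a gendisk state bit that lets a driver opt out of partition scanning; the added test restores that opt-out in blk_add_partitions(). A minimal usage sketch (the bit and disk->state are real; the surrounding driver context is hypothetical):

    /* Hypothetical driver code: keep the block layer from scanning
     * for partitions while this disk is being reconfigured. */
    set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
    /* ... reconfigure the backing device ... */
    clear_bit(GD_SUPPRESS_PART_SCAN, &disk->state);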
drivers/nvme/host/core.c
@@ -4703,6 +4703,8 @@ static void nvme_fw_act_work(struct work_struct *work)
 	nvme_start_queues(ctrl);
 	/* read FW slot information to clear the AER */
 	nvme_get_fw_slot_info(ctrl);
+
+	queue_work(nvme_wq, &ctrl->async_event_work);
 }
 
 static u32 nvme_aer_type(u32 result)
@@ -4715,9 +4717,10 @@ static u32 nvme_aer_subtype(u32 result)
 	return (result & 0xff00) >> 8;
 }
 
-static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
+static bool nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
 {
 	u32 aer_notice_type = nvme_aer_subtype(result);
+	bool requeue = true;
 
 	trace_nvme_async_event(ctrl, aer_notice_type);
 
@@ -4734,6 +4737,7 @@ static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
 		 */
 		if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) {
 			nvme_auth_stop(ctrl);
+			requeue = false;
 			queue_work(nvme_wq, &ctrl->fw_act_work);
 		}
 		break;
@@ -4750,6 +4754,7 @@ static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
 	default:
 		dev_warn(ctrl->device, "async event result %08x\n", result);
 	}
+	return requeue;
 }
 
 static void nvme_handle_aer_persistent_error(struct nvme_ctrl *ctrl)
@@ -4765,13 +4770,14 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
 	u32 result = le32_to_cpu(res->u32);
 	u32 aer_type = nvme_aer_type(result);
 	u32 aer_subtype = nvme_aer_subtype(result);
+	bool requeue = true;
 
 	if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
 		return;
 
 	switch (aer_type) {
 	case NVME_AER_NOTICE:
-		nvme_handle_aen_notice(ctrl, result);
+		requeue = nvme_handle_aen_notice(ctrl, result);
 		break;
 	case NVME_AER_ERROR:
 		/*
@@ -4792,7 +4798,9 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
 	default:
 		break;
 	}
-	queue_work(nvme_wq, &ctrl->async_event_work);
+
+	if (requeue)
+		queue_work(nvme_wq, &ctrl->async_event_work);
 }
 EXPORT_SYMBOL_GPL(nvme_complete_async_event);
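A short summary of the requeue handshake above, as comment-form pseudocode (a hedged reading of the diff, not an authoritative description):

    /*
     * nvme_complete_async_event()
     *   default case:  requeue == true  -> queue async_event_work now,
     *                  resubmitting the AER immediately.
     *   fw activation: requeue == false -> skip the immediate requeue;
     *                  nvme_fw_act_work() queues async_event_work itself
     *                  once activation has finished, so the AER is not
     *                  resubmitted while the controller is being reset.
     */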
drivers/nvme/host/tcp.c
@@ -121,7 +121,6 @@ struct nvme_tcp_queue {
 	struct mutex send_mutex;
 	struct llist_head req_list;
 	struct list_head send_list;
-	bool more_requests;
 
 	/* recv state */
 	void *pdu;
@@ -320,7 +319,7 @@ static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
 static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
 {
 	return !list_empty(&queue->send_list) ||
-		!llist_empty(&queue->req_list) || queue->more_requests;
+		!llist_empty(&queue->req_list);
 }
 
 static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
@@ -339,9 +338,7 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
 	 */
 	if (queue->io_cpu == raw_smp_processor_id() &&
 	    sync && empty && mutex_trylock(&queue->send_mutex)) {
-		queue->more_requests = !last;
 		nvme_tcp_send_all(queue);
-		queue->more_requests = false;
 		mutex_unlock(&queue->send_mutex);
 	}
 
@@ -1229,7 +1226,7 @@ static void nvme_tcp_io_work(struct work_struct *w)
 		else if (unlikely(result < 0))
 			return;
 
-		if (!pending)
+		if (!pending || !queue->rd_enabled)
 			return;
 
 	} while (!time_after(jiffies, deadline)); /* quota is exhausted */
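Two separate fixes land here. Removing more_requests closes a race: the flag was written only while holding send_mutex, but nvme_tcp_queue_more() also runs without that mutex (from io_work), so it could observe a transient value and make the wrong "more to send" decision, leaving a queued request stalled until it timed out; deriving the answer purely from the list/llist emptiness checks avoids the inconsistent state. The added !queue->rd_enabled test makes io_work stop its receive loop once a fatal error (such as a digest failure) has disabled reading on the queue, rather than continuing to process a stream that is already being torn down. A comment-form sketch of the removed race (hedged reconstruction):

    /*
     * CPU0 (nvme_tcp_queue_request,      CPU1 (io_work, no send_mutex):
     *       holds send_mutex):
     *   more_requests = !last;
     *                                      nvme_tcp_queue_more() reads
     *                                      more_requests -> stale answer
     *   more_requests = false;
     */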
drivers/nvme/target/core.c
@@ -735,6 +735,8 @@ static void nvmet_set_error(struct nvmet_req *req, u16 status)
 
 static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
 {
+	struct nvmet_ns *ns = req->ns;
+
 	if (!req->sq->sqhd_disabled)
 		nvmet_update_sq_head(req);
 	req->cqe->sq_id = cpu_to_le16(req->sq->qid);
@@ -745,9 +747,9 @@ static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
 
 	trace_nvmet_req_complete(req);
 
-	if (req->ns)
-		nvmet_put_namespace(req->ns);
 	req->ops->queue_response(req);
+	if (ns)
+		nvmet_put_namespace(ns);
 }
 
 void nvmet_req_complete(struct nvmet_req *req, u16 status)
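The reordering here is the use-after-free fix: req->ops->queue_response() may complete and free the request, so req (and with it req->ns) must not be touched afterwards, and the namespace reference must stay held until the response has actually been queued. Caching the pointer up front handles both, as this annotated reading of the core of the change shows:

    struct nvmet_ns *ns = req->ns;  /* copy while req is still valid */
    req->ops->queue_response(req);  /* req may be freed from here on */
    if (ns)
        nvmet_put_namespace(ns);    /* drop the reference via the cached pointer */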
drivers/nvme/target/zns.c
@@ -100,6 +100,7 @@ void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req)
 	struct nvme_id_ns_zns *id_zns;
 	u64 zsze;
 	u16 status;
+	u32 mar, mor;
 
 	if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
 		req->error_loc = offsetof(struct nvme_identify, nsid);
@@ -130,8 +131,20 @@ void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req)
 	zsze = (bdev_zone_sectors(req->ns->bdev) << 9) >>
 					req->ns->blksize_shift;
 	id_zns->lbafe[0].zsze = cpu_to_le64(zsze);
-	id_zns->mor = cpu_to_le32(bdev_max_open_zones(req->ns->bdev));
-	id_zns->mar = cpu_to_le32(bdev_max_active_zones(req->ns->bdev));
+
+	mor = bdev_max_open_zones(req->ns->bdev);
+	if (!mor)
+		mor = U32_MAX;
+	else
+		mor--;
+	id_zns->mor = cpu_to_le32(mor);
+
+	mar = bdev_max_active_zones(req->ns->bdev);
+	if (!mar)
+		mar = U32_MAX;
+	else
+		mar--;
+	id_zns->mar = cpu_to_le32(mar);
 
 done:
 	status = nvmet_copy_to_sgl(req, 0, id_zns, sizeof(*id_zns));
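The off-by-one here comes from a units mismatch: in the NVMe ZNS spec, MOR (Maximum Open Resources) and MAR (Maximum Active Resources) are 0-based fields, so a value of N means N + 1 zones and all-ones (0xffffffff) means no limit, while the block layer's bdev_max_open_zones()/bdev_max_active_zones() return plain counts with 0 meaning unlimited. The conversion, as a worked example with a hypothetical helper:

    /* Map a block-layer zone-resource count onto the 0-based NVMe
     * ZNS field; the helper name is illustrative only. */
    static u32 zone_limit_to_zns_field(u32 limit)
    {
        return limit ? limit - 1 : U32_MAX;  /* 0 means "no limit" */
    }

    /* e.g. 128 open zones allowed -> MOR = 127;
     *      no limit (0)           -> MOR = 0xffffffff */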