Commit edc00350 authored by Linus Torvalds

Merge tag 'block-6.2-2023-01-20' of git://git.kernel.dk/linux

Pull block fixes from Jens Axboe:
 "Various little tweaks all over the place:

   - NVMe pull request via Christoph:
       - fix controller shutdown regression in nvme-apple (Janne Grunau)
       - fix a polling on timeout regression in nvme-pci (Keith Busch)

   - Fix a bug in the read-side request allocation caching
     (Pavel)

   - pktcdvd was brought back after we switched to returning NULL on bio
     splits; make it consistent with the other drivers (me)

   - BFQ refcount fix (Yu)

   - Block cgroup policy activation fix (Yu)

   - Fix for an md regression introduced in the 6.2 cycle (Adrian)"

* tag 'block-6.2-2023-01-20' of git://git.kernel.dk/linux:
  nvme-pci: fix timeout request state check
  nvme-apple: only reset the controller when RTKit is running
  nvme-apple: reset controller during shutdown
  block: fix hctx checks for batch allocation
  block/rnbd-clt: fix wrong max ID in ida_alloc_max
  blk-cgroup: fix missing pd_online_fn() while activating policy
  pktcdvd: check for NULL return after calling bio_split_to_limits()
  block, bfq: switch 'bfqg->ref' to use atomic refcount apis
  md: fix incorrect declaration about claim_rdev in md_import_device
parents 9c38747f 955bc122
@@ -316,14 +316,12 @@ struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
 static void bfqg_get(struct bfq_group *bfqg)
 {
-        bfqg->ref++;
+        refcount_inc(&bfqg->ref);
 }
 
 static void bfqg_put(struct bfq_group *bfqg)
 {
-        bfqg->ref--;
-        if (bfqg->ref == 0)
+        if (refcount_dec_and_test(&bfqg->ref))
                 kfree(bfqg);
 }
@@ -530,7 +528,7 @@ static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, struct request_queue *q,
         }
 
         /* see comments in bfq_bic_update_cgroup for why refcounting */
-        bfqg_get(bfqg);
+        refcount_set(&bfqg->ref, 1);
         return &bfqg->pd;
 }
@@ -928,7 +928,7 @@ struct bfq_group {
         char blkg_path[128];
 
         /* reference counter (see comments in bfq_bic_update_cgroup) */
-        int ref;
+        refcount_t ref;
         /* Is bfq_group still online? */
         bool online;
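
The bfq hunks above convert bfq_group's plain int reference count, which needs external locking to stay consistent, to the kernel's refcount_t API, which is safe for concurrent get/put and saturates with a warning on underflow or overflow instead of silently wrapping. A minimal sketch of the pattern on a generic object (illustrative only, not bfq code; the obj_* names are made up):

#include <linux/refcount.h>
#include <linux/slab.h>

struct obj {
        refcount_t ref;
};

static struct obj *obj_alloc(void)
{
        struct obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

        if (o)
                refcount_set(&o->ref, 1);       /* holder starts with one reference */
        return o;
}

static void obj_get(struct obj *o)
{
        refcount_inc(&o->ref);                  /* warns if the count was already zero */
}

static void obj_put(struct obj *o)
{
        if (refcount_dec_and_test(&o->ref))     /* true only for the final put */
                kfree(o);
}
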
@@ -1455,6 +1455,10 @@ int blkcg_activate_policy(struct request_queue *q,
                 list_for_each_entry_reverse(blkg, &q->blkg_list, q_node)
                         pol->pd_init_fn(blkg->pd[pol->plid]);
 
+        if (pol->pd_online_fn)
+                list_for_each_entry_reverse(blkg, &q->blkg_list, q_node)
+                        pol->pd_online_fn(blkg->pd[pol->plid]);
+
         __set_bit(pol->plid, q->blkcg_pols);
         ret = 0;
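
The blk-cgroup hunk above mirrors the per-blkg callback sequence used when a blkg is created: previously, activating a policy on a queue that already had blkgs ran only pd_init_fn(), so a policy's pd_online_fn() was never called for those existing groups. Roughly, a policy that relies on the online callback wires it up like this (hypothetical sketch; the ex_* names are made up, callback signatures as of this cycle):

#include <linux/slab.h>
#include "blk-cgroup.h"         /* in-tree policies use the private header */

static struct blkg_policy_data *ex_pd_alloc(gfp_t gfp, struct request_queue *q,
                                            struct blkcg *blkcg)
{
        return kzalloc_node(sizeof(struct blkg_policy_data), gfp, q->node);
}

static void ex_pd_init(struct blkg_policy_data *pd) { }

/* With the fix, this is now also reached from blkcg_activate_policy()
 * for blkgs that existed before the policy was activated.
 */
static void ex_pd_online(struct blkg_policy_data *pd) { }

static void ex_pd_free(struct blkg_policy_data *pd)
{
        kfree(pd);
}

static struct blkcg_policy ex_policy = {
        .pd_alloc_fn    = ex_pd_alloc,
        .pd_init_fn     = ex_pd_init,
        .pd_online_fn   = ex_pd_online,
        .pd_free_fn     = ex_pd_free,
};

Such a policy would be registered with blkcg_policy_register(&ex_policy) and enabled per queue via blkcg_activate_policy().
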
@@ -2890,6 +2890,7 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
                 struct blk_plug *plug, struct bio **bio, unsigned int nsegs)
 {
         struct request *rq;
+        enum hctx_type type, hctx_type;
 
         if (!plug)
                 return NULL;
@@ -2902,7 +2903,10 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
                 return NULL;
         }
 
-        if (blk_mq_get_hctx_type((*bio)->bi_opf) != rq->mq_hctx->type)
+        type = blk_mq_get_hctx_type((*bio)->bi_opf);
+        hctx_type = rq->mq_hctx->type;
+        if (type != hctx_type &&
+            !(type == HCTX_TYPE_READ && hctx_type == HCTX_TYPE_DEFAULT))
                 return NULL;
         if (op_is_flush(rq->cmd_flags) != op_is_flush((*bio)->bi_opf))
                 return NULL;
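
The batch-allocation fix is about how plug-cached requests are matched against an incoming bio. blk_mq_get_hctx_type() classifies a bio as POLL, READ, or DEFAULT, but on devices configured without dedicated read queues, reads are actually serviced by the DEFAULT hardware context, so a cached request sitting on a DEFAULT hctx is still valid for a READ-classified bio. The old strict equality rejected that pairing and silently bypassed the request cache on such setups. The classification is roughly as follows (simplified sketch, not the exact kernel helper):

#include <linux/blk_types.h>
#include <linux/blk-mq.h>

/* Simplified view of how a bio's operation flags map to an hctx type;
 * whether a separate READ (or POLL) hardware context actually exists
 * depends on the driver's queue maps, and reads fall back to the
 * DEFAULT hctx when it does not.
 */
static enum hctx_type classify_bio(blk_opf_t opf)
{
        if (opf & REQ_POLLED)
                return HCTX_TYPE_POLL;
        if ((opf & REQ_OP_MASK) == REQ_OP_READ)
                return HCTX_TYPE_READ;
        return HCTX_TYPE_DEFAULT;
}
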
@@ -2400,6 +2400,8 @@ static void pkt_submit_bio(struct bio *bio)
         struct bio *split;
 
         bio = bio_split_to_limits(bio);
+        if (!bio)
+                return;
 
         pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
                 (unsigned long long)bio->bi_iter.bi_sector,
@@ -1440,7 +1440,7 @@ static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess,
                 goto out_alloc;
         }
 
-        ret = ida_alloc_max(&index_ida, 1 << (MINORBITS - RNBD_PART_BITS),
+        ret = ida_alloc_max(&index_ida, (1 << (MINORBITS - RNBD_PART_BITS)) - 1,
                             GFP_KERNEL);
         if (ret < 0) {
                 pr_err("Failed to initialize device '%s' from session %s, allocating idr failed, err: %d\n",
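
The rnbd change is an off-by-one fix: ida_alloc_max() treats its max argument as an inclusive upper bound, so passing 1 << (MINORBITS - RNBD_PART_BITS) allowed one index more than the minor-number space can hold; the valid indices are 0 through that power of two minus one. A small sketch of the inclusive-bound semantics (example_ida, alloc_index, and nr_ids are made-up names):

#include <linux/idr.h>
#include <linux/gfp.h>

static DEFINE_IDA(example_ida);

/* nr_ids is the number of usable indices, i.e. valid ids are 0 .. nr_ids - 1 */
static int alloc_index(unsigned int nr_ids)
{
        /* ida_alloc_max() may return any id up to and including 'max',
         * so the inclusive bound has to be nr_ids - 1.
         */
        return ida_alloc_max(&example_ida, nr_ids - 1, GFP_KERNEL);
}
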
@@ -3644,7 +3644,7 @@ EXPORT_SYMBOL_GPL(md_rdev_init);
  */
 static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
 {
-        static struct md_rdev *claim_rdev; /* just for claiming the bdev */
+        static struct md_rdev claim_rdev; /* just for claiming the bdev */
         struct md_rdev *rdev;
         sector_t size;
         int err;
@@ -3662,7 +3662,7 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
         rdev->bdev = blkdev_get_by_dev(newdev,
                         FMODE_READ | FMODE_WRITE | FMODE_EXCL,
-                        super_format == -2 ? claim_rdev : rdev);
+                        super_format == -2 ? &claim_rdev : rdev);
         if (IS_ERR(rdev->bdev)) {
                 pr_warn("md: could not open device unknown-block(%u,%u).\n",
                         MAJOR(newdev), MINOR(newdev));
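
The md fix concerns the holder cookie passed to blkdev_get_by_dev() for the exclusive (FMODE_EXCL) open: the earlier 6.2 change that introduced the static claim_rdev declared it as a pointer, so the value handed in as the holder was NULL rather than the address of a stable object. The holder is only an identity token, but it has to identify something; a sketch of the intended pattern (probe_device and claim_cookie are made-up names):

#include <linux/blkdev.h>
#include <linux/err.h>

static struct { int unused; } claim_cookie;     /* any stable, unique address works */

static int probe_device(dev_t devt)
{
        struct block_device *bdev;

        /* Exclusive opens that pass the same holder may share the device;
         * different holders conflict, so the cookie must be a real object,
         * not a NULL pointer.
         */
        bdev = blkdev_get_by_dev(devt, FMODE_READ | FMODE_WRITE | FMODE_EXCL,
                                 &claim_cookie);
        if (IS_ERR(bdev))
                return PTR_ERR(bdev);

        blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
        return 0;
}
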
@@ -829,7 +829,23 @@ static void apple_nvme_disable(struct apple_nvme *anv, bool shutdown)
                         apple_nvme_remove_cq(anv);
                 }
 
-                nvme_disable_ctrl(&anv->ctrl, shutdown);
+                /*
+                 * Always disable the NVMe controller after shutdown.
+                 * We need to do this to bring it back up later anyway, and we
+                 * can't do it while the firmware is not running (e.g. in the
+                 * resume reset path before RTKit is initialized), so for Apple
+                 * controllers it makes sense to unconditionally do it here.
+                 * Additionally, this sequence of events is reliable, while
+                 * others (like disabling after bringing back the firmware on
+                 * resume) seem to run into trouble under some circumstances.
+                 *
+                 * Both U-Boot and m1n1 also use this convention (i.e. an ANS
+                 * NVMe controller is handed off with firmware shut down, in an
+                 * NVMe disabled state, after a clean shutdown).
+                 */
+                if (shutdown)
+                        nvme_disable_ctrl(&anv->ctrl, shutdown);
+                nvme_disable_ctrl(&anv->ctrl, false);
         }
 
         WRITE_ONCE(anv->ioq.enabled, false);
@@ -985,11 +1001,11 @@ static void apple_nvme_reset_work(struct work_struct *work)
                 goto out;
         }
 
-        if (anv->ctrl.ctrl_config & NVME_CC_ENABLE)
-                apple_nvme_disable(anv, false);
-
         /* RTKit must be shut down cleanly for the (soft)-reset to work */
         if (apple_rtkit_is_running(anv->rtk)) {
+                /* reset the controller if it is enabled */
+                if (anv->ctrl.ctrl_config & NVME_CC_ENABLE)
+                        apple_nvme_disable(anv, false);
+
                 dev_dbg(anv->dev, "Trying to shut down RTKit before reset.");
                 ret = apple_rtkit_shutdown(anv->rtk);
                 if (ret)
@@ -1362,7 +1362,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
         else
                 nvme_poll_irqdisable(nvmeq);
 
-        if (blk_mq_request_completed(req)) {
+        if (blk_mq_rq_state(req) != MQ_RQ_IN_FLIGHT) {
                 dev_warn(dev->ctrl.device,
                          "I/O %d QID %d timeout, completion polled\n",
                          req->tag, nvmeq->qid);
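
The nvme-pci change tightens the check made after the timeout handler polls for completions: by the time the check runs, the polled completion may have already finished and returned the request to the idle state, in which case blk_mq_request_completed(), which only reports the transient complete state, returns false and the handler would go on to treat an already-finished command as still stuck. Checking that the request is simply no longer in flight covers both cases. For reference, the blk-mq request states involved (upstream values, descriptions paraphrased):

enum mq_rq_state {
        MQ_RQ_IDLE      = 0,    /* not owned by the driver (free, or completed and recycled) */
        MQ_RQ_IN_FLIGHT = 1,    /* dispatched to the driver, still outstanding */
        MQ_RQ_COMPLETE  = 2,    /* completion triggered but not yet finished */
};
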