Commit f036d67c authored by Linus Torvalds

Merge tag 'block-6.5-2023-07-21' of git://git.kernel.dk/linux

Pull block fixes from Jens Axboe:

 - Fix for loop regressions (Mauricio)

 - Fix a potential stall with batched wakeups in sbitmap (David)

 - Fix for stall with recursive plug flushes (Ross)

 - Skip accounting of empty requests for blk-iocost (Chengming)

 - Remove a dead field in struct blk_mq_hw_ctx (Chengming)

* tag 'block-6.5-2023-07-21' of git://git.kernel.dk/linux:
  loop: do not enforce max_loop hard limit by (new) default
  loop: deprecate autoloading callback loop_probe()
  sbitmap: fix batching wakeup
  blk-iocost: skip empty flush bio in iocost
  blk-mq: delete dead struct blk_mq_hw_ctx->queued field
  blk-mq: Fix stall due to recursive flush plug
parents bdd1d82e bb5faa99
block/blk-core.c
@@ -1144,7 +1144,6 @@ void __blk_flush_plug(struct blk_plug *plug, bool from_schedule)
 {
 	if (!list_empty(&plug->cb_list))
 		flush_plug_callbacks(plug, from_schedule);
-	if (!rq_list_empty(plug->mq_list))
-		blk_mq_flush_plug_list(plug, from_schedule);
+	blk_mq_flush_plug_list(plug, from_schedule);
 	/*
 	 * Unconditionally flush out cached requests, even if the unplug
block/blk-iocost.c
@@ -2516,6 +2516,10 @@ static void calc_vtime_cost_builtin(struct bio *bio, struct ioc_gq *iocg,
 	u64 seek_pages = 0;
 	u64 cost = 0;
 
+	/* Can't calculate cost for empty bio */
+	if (!bio->bi_iter.bi_size)
+		goto out;
+
 	switch (bio_op(bio)) {
 	case REQ_OP_READ:
 		coef_seqio	= ioc->params.lcoefs[LCOEF_RSEQIO];
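Note: the guard above matters because a bare flush (REQ_OP_FLUSH) carries no data, so bio->bi_iter.bi_size is 0 and the per-page/seek cost model has nothing to price. A minimal userspace sketch of the same early-exit pattern (bio_like and calc_cost are hypothetical stand-ins, not kernel API):

#include <stdint.h>
#include <stdio.h>

struct bio_like {
	uint32_t bi_size;	/* bytes carried by the bio; 0 for a bare flush */
};

static uint64_t calc_cost(const struct bio_like *bio)
{
	/* Can't compute a meaningful cost for an empty (flush) bio:
	 * it moves no pages and seeks no sectors, so charge nothing. */
	if (bio->bi_size == 0)
		return 0;

	/* ... per-page and seek cost would be computed here ... */
	return (uint64_t)bio->bi_size / 4096 + 1;
}

int main(void)
{
	struct bio_like flush = { .bi_size = 0 };

	printf("flush cost = %llu\n", (unsigned long long)calc_cost(&flush));
	return 0;
}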
block/blk-mq.c
@@ -2754,7 +2754,14 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 {
 	struct request *rq;
 
-	if (rq_list_empty(plug->mq_list))
+	/*
+	 * We may have been called recursively midway through handling
+	 * plug->mq_list via a schedule() in the driver's queue_rq() callback.
+	 * To avoid mq_list changing under our feet, clear rq_count early and
+	 * bail out specifically if rq_count is 0 rather than checking
+	 * whether the mq_list is empty.
+	 */
+	if (plug->rq_count == 0)
 		return;
 	plug->rq_count = 0;
 
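Note: the stall fixed here occurs when a driver's ->queue_rq() sleeps: schedule() flushes the current plug again, re-entering blk_mq_flush_plug_list() while the outer call is still walking mq_list. Clearing rq_count before touching the list makes the inner call return immediately. A minimal single-threaded sketch of that guard pattern (plug_like, submit_one and flush_plug are hypothetical stand-ins, not kernel code):

#include <stdio.h>

struct plug_like {
	int rq_count;		/* number of plugged requests */
	int pending[8];		/* simplified stand-in for plug->mq_list */
};

static void flush_plug(struct plug_like *plug);

static void submit_one(struct plug_like *plug, int rq)
{
	/* A driver's ->queue_rq() may block and schedule(), which flushes
	 * the plug again; simulate that recursive call here. */
	flush_plug(plug);
	printf("submitted rq %d\n", rq);
}

static void flush_plug(struct plug_like *plug)
{
	int i, n;

	/* Bail on rq_count, not on list emptiness: the recursive call
	 * sees rq_count == 0 and returns before touching the list. */
	if (plug->rq_count == 0)
		return;
	n = plug->rq_count;
	plug->rq_count = 0;

	for (i = 0; i < n; i++)
		submit_one(plug, plug->pending[i]);
}

int main(void)
{
	struct plug_like plug = { .rq_count = 3, .pending = { 1, 2, 3 } };

	flush_plug(&plug);	/* recursion returns immediately; no stall */
	return 0;
}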
drivers/block/loop.c
@@ -1775,14 +1775,43 @@ static const struct block_device_operations lo_fops = {
 /*
  * If max_loop is specified, create that many devices upfront.
  * This also becomes a hard limit. If max_loop is not specified,
+ * the default isn't a hard limit (as before commit 85c50197716c
+ * changed the default value from 0 for max_loop=0 reasons), just
  * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module
  * init time. Loop devices can be requested on-demand with the
  * /dev/loop-control interface, or be instantiated by accessing
  * a 'dead' device node.
  */
 static int max_loop = CONFIG_BLK_DEV_LOOP_MIN_COUNT;
-module_param(max_loop, int, 0444);
+#ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD
+static bool max_loop_specified;
+
+static int max_loop_param_set_int(const char *val,
+				  const struct kernel_param *kp)
+{
+	int ret;
+
+	ret = param_set_int(val, kp);
+	if (ret < 0)
+		return ret;
+
+	max_loop_specified = true;
+	return 0;
+}
+
+static const struct kernel_param_ops max_loop_param_ops = {
+	.set	= max_loop_param_set_int,
+	.get	= param_get_int,
+};
+
+module_param_cb(max_loop, &max_loop_param_ops, &max_loop, 0444);
 MODULE_PARM_DESC(max_loop, "Maximum number of loop devices");
+#else
+module_param(max_loop, int, 0444);
+MODULE_PARM_DESC(max_loop, "Initial number of loop devices");
+#endif
 
 module_param(max_part, int, 0444);
 MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device");
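Note: the effect of switching to module_param_cb() is that max_loop_specified becomes true only when the user passes max_loop explicitly, so the compiled-in default stops acting as a hard limit while an explicitly given value still does. A userspace sketch of the same remember-if-set pattern (parse_param is a hypothetical stand-in, not the kernel's param machinery):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static int max_loop = 8;		/* built-in default */
static bool max_loop_specified;		/* stays false unless the user sets it */

static int parse_param(const char *val)
{
	char *end;
	long v = strtol(val, &end, 0);

	if (*end != '\0')
		return -1;
	max_loop = (int)v;
	max_loop_specified = true;	/* only an explicit value is a hard limit */
	return 0;
}

int main(int argc, char **argv)
{
	if (argc > 1 && parse_param(argv[1]) < 0)
		return 1;
	printf("max_loop=%d, hard limit: %s\n", max_loop,
	       max_loop_specified ? "yes" : "no");
	return 0;
}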
@@ -2093,14 +2122,18 @@ static void loop_remove(struct loop_device *lo)
 	put_disk(lo->lo_disk);
 }
 
+#ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD
 static void loop_probe(dev_t dev)
 {
 	int idx = MINOR(dev) >> part_shift;
 
-	if (max_loop && idx >= max_loop)
+	if (max_loop_specified && max_loop && idx >= max_loop)
 		return;
 	loop_add(idx);
 }
+#else
+#define loop_probe NULL
+#endif /* !CONFIG_BLOCK_LEGACY_AUTOLOAD */
 
 static int loop_control_remove(int idx)
 {
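Note: with CONFIG_BLOCK_LEGACY_AUTOLOAD disabled, loop_probe becomes NULL, so the block layer has no probe hook to call and opening a nonexistent /dev/loopN no longer autoloads a device. A minimal sketch of that optional-callback dispatch (probe_fn, request_probe and loop_probe_sketch are hypothetical stand-ins for the block layer's internals):

#include <stdio.h>

typedef void (*probe_fn)(int dev);

static void loop_probe_sketch(int dev)
{
	printf("autoloading loop%d\n", dev);
}

/* With CONFIG_BLOCK_LEGACY_AUTOLOAD=n this would be NULL. */
static probe_fn registered_probe = loop_probe_sketch;

static void request_probe(int dev)
{
	if (registered_probe)	/* no hook registered: nothing happens */
		registered_probe(dev);
}

int main(void)
{
	request_probe(7);		/* autoloads */
	registered_probe = NULL;	/* legacy autoload compiled out */
	request_probe(8);		/* silently does nothing */
	return 0;
}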
@@ -2281,6 +2314,9 @@ module_exit(loop_exit);
 static int __init max_loop_setup(char *str)
 {
 	max_loop = simple_strtol(str, NULL, 0);
+#ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD
+	max_loop_specified = true;
+#endif
 	return 1;
 }
include/linux/blk-mq.h
@@ -397,8 +397,6 @@ struct blk_mq_hw_ctx {
 	 */
 	struct blk_mq_tags	*sched_tags;
 
-	/** @queued: Number of queued requests. */
-	unsigned long		queued;
 	/** @run: Number of dispatched requests. */
 	unsigned long		run;
lib/sbitmap.c
@@ -550,7 +550,7 @@ EXPORT_SYMBOL_GPL(sbitmap_queue_min_shallow_depth);
 
 static void __sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr)
 {
-	int i, wake_index;
+	int i, wake_index, woken;
 
 	if (!atomic_read(&sbq->ws_active))
 		return;
@@ -567,13 +567,12 @@ static void __sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr)
 		 */
 		wake_index = sbq_index_inc(wake_index);
 
-		/*
-		 * It is sufficient to wake up at least one waiter to
-		 * guarantee forward progress.
-		 */
-		if (waitqueue_active(&ws->wait) &&
-		    wake_up_nr(&ws->wait, nr))
-			break;
+		if (waitqueue_active(&ws->wait)) {
+			woken = wake_up_nr(&ws->wait, nr);
+			if (woken == nr)
+				break;
+			nr -= woken;
+		}
 	}
 
 	if (wake_index != atomic_read(&sbq->wake_index))
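Note: the old code stopped at the first wait queue where wake_up_nr() woke anyone, so a batch of nr completions could wake fewer than nr waiters and leave the rest sleeping with nothing left to wake them. The fix carries the shortfall to the next wait queue until the whole batch is paid out. A userspace sketch of that accounting (waitqueue and wake_some are simplified stand-ins, not the kernel's waitqueue API):

#include <stdio.h>

#define NUM_WQ 4

struct waitqueue { int waiters; };

/* Wake up to want waiters; return how many were actually woken. */
static int wake_some(struct waitqueue *wq, int want)
{
	int woken = want < wq->waiters ? want : wq->waiters;

	wq->waiters -= woken;
	return woken;
}

int main(void)
{
	struct waitqueue ws[NUM_WQ] = { {1}, {0}, {2}, {5} };
	int i, nr = 4;	/* a batch of 4 freed tags owes 4 wakeups */

	for (i = 0; i < NUM_WQ; i++) {
		if (ws[i].waiters == 0)
			continue;	/* like !waitqueue_active() */
		nr -= wake_some(&ws[i], nr);
		if (nr == 0)
			break;		/* wakeup debt fully paid */
	}
	printf("unpaid wakeups: %d\n", nr);	/* prints 0 */
	return 0;
}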