Commit 569c3a28 authored by Linus Torvalds

Merge tag 'block-6.2-2022-12-19' of git://git.kernel.dk/linux

Pull block fixes from Jens Axboe:

 - Various fixes for BFQ (Yu, Yuwei)

 - Fix for loop command line parsing (Isaac)

 - No need to specifically clear REQ_ALLOC_CACHE on IOPOLL downgrade
   anymore (me)

 - blk-iocost enum fix for newer gcc (Jiri)

 - UAF fix for queue release (Ming)

 - blk-iolatency error handling memory leak fix (Tejun)

* tag 'block-6.2-2022-12-19' of git://git.kernel.dk/linux:
  block: don't clear REQ_ALLOC_CACHE for non-polled requests
  block: fix use-after-free of q->q_usage_counter
  block, bfq: only do counting of pending-request for BFQ_GROUP_IOSCHED
  blk-iolatency: Fix memory leak on add_disk() failures
  loop: Fix the max_loop commandline argument treatment when it is set to 0
  block/blk-iocost (gcc13): keep large values in a new enum
  block, bfq: replace 0/1 with false/true in bic apis
  block, bfq: don't return bfqg from __bfq_bic_change_cgroup()
  block, bfq: fix possible uaf for 'bfqq->bic'
parents 5d4740fc 53eab8e7
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
@@ -724,19 +724,19 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
  * sure that the reference to cgroup is valid across the call (see
  * comments in bfq_bic_update_cgroup on this issue)
  */
-static void *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
-				     struct bfq_io_cq *bic,
-				     struct bfq_group *bfqg)
+static void __bfq_bic_change_cgroup(struct bfq_data *bfqd,
+				    struct bfq_io_cq *bic,
+				    struct bfq_group *bfqg)
 {
-	struct bfq_queue *async_bfqq = bic_to_bfqq(bic, 0);
-	struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, 1);
+	struct bfq_queue *async_bfqq = bic_to_bfqq(bic, false);
+	struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, true);
 	struct bfq_entity *entity;
 
 	if (async_bfqq) {
 		entity = &async_bfqq->entity;
 
 		if (entity->sched_data != &bfqg->sched_data) {
-			bic_set_bfqq(bic, NULL, 0);
+			bic_set_bfqq(bic, NULL, false);
 			bfq_release_process_ref(bfqd, async_bfqq);
 		}
 	}
@@ -772,12 +772,10 @@ static void *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
 				 */
 				bfq_put_cooperator(sync_bfqq);
 				bfq_release_process_ref(bfqd, sync_bfqq);
-				bic_set_bfqq(bic, NULL, 1);
+				bic_set_bfqq(bic, NULL, true);
 			}
 		}
 	}
-
-	return bfqg;
 }
 
 void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
@@ -386,6 +386,12 @@ static void bfq_put_stable_ref(struct bfq_queue *bfqq);
 
 void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, bool is_sync)
 {
+	struct bfq_queue *old_bfqq = bic->bfqq[is_sync];
+
+	/* Clear bic pointer if bfqq is detached from this bic */
+	if (old_bfqq && old_bfqq->bic == bic)
+		old_bfqq->bic = NULL;
+
 	/*
 	 * If bfqq != NULL, then a non-stable queue merge between
 	 * bic->bfqq and bfqq is happening here. This causes troubles
@@ -3108,7 +3114,7 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
 	/*
 	 * Merge queues (that is, let bic redirect its requests to new_bfqq)
 	 */
-	bic_set_bfqq(bic, new_bfqq, 1);
+	bic_set_bfqq(bic, new_bfqq, true);
 	bfq_mark_bfqq_coop(new_bfqq);
 	/*
 	 * new_bfqq now belongs to at least two bics (it is a shared queue):
@@ -5311,7 +5317,6 @@ static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync)
 		unsigned long flags;
 
 		spin_lock_irqsave(&bfqd->lock, flags);
-		bfqq->bic = NULL;
 		bfq_exit_bfqq(bfqd, bfqq);
 		bic_set_bfqq(bic, NULL, is_sync);
 		spin_unlock_irqrestore(&bfqd->lock, flags);
@@ -6557,7 +6562,7 @@ bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
 		return bfqq;
 	}
 
-	bic_set_bfqq(bic, NULL, 1);
+	bic_set_bfqq(bic, NULL, true);
 
 	bfq_put_cooperator(bfqq);
 
@@ -7058,7 +7063,9 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
 	bfqd->idle_slice_timer.function = bfq_idle_slice_timer;
 
 	bfqd->queue_weights_tree = RB_ROOT_CACHED;
+#ifdef CONFIG_BFQ_GROUP_IOSCHED
 	bfqd->num_groups_with_pending_reqs = 0;
+#endif
 
 	INIT_LIST_HEAD(&bfqd->active_list);
 	INIT_LIST_HEAD(&bfqd->idle_list);
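A note on the bic_set_bfqq() change above: the use-after-free arose because a detached bfq_queue could keep a stale ->bic back-pointer, which is also why the explicit `bfqq->bic = NULL` in bfq_exit_icq_bfqq() could be dropped in the same series. A stripped-down userspace sketch of the invariant being enforced, using hypothetical toy types (ctx/queue), not the kernel structures:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins for bfq_io_cq and bfq_queue: ctx->q[is_sync] points at a
 * queue, and an attached queue's ->owner points back at the ctx. */
struct queue;

struct ctx {
	struct queue *q[2];
};

struct queue {
	struct ctx *owner;
};

/* Mirrors the fixed bic_set_bfqq(): before installing a new queue, clear
 * the old queue's back-pointer if it still points at this ctx, so no path
 * can later dereference a stale ->owner. (The real function only clears;
 * attaching ->bic happens elsewhere in bfq.) */
static void ctx_set_queue(struct ctx *c, struct queue *q, bool is_sync)
{
	struct queue *old = c->q[is_sync];

	if (old && old->owner == c)
		old->owner = NULL;

	c->q[is_sync] = q;
	if (q)
		q->owner = c;	/* simplification for the demo */
}

int main(void)
{
	struct ctx c = { { NULL, NULL } };
	struct queue a = { NULL }, b = { NULL };

	ctx_set_queue(&c, &a, true);
	ctx_set_queue(&c, &b, true);	/* detaches a, attaches b */

	assert(a.owner == NULL);	/* stale back-pointer was cleared */
	printf("detached queue's owner cleared: %s\n",
	       a.owner ? "no" : "yes");
	return 0;
}

The point of the fix is that the back-pointer is cleared in the one helper every detach path already goes through, instead of relying on each caller to remember.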
diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
@@ -197,8 +197,10 @@ struct bfq_entity {
 	/* flag, set to request a weight, ioprio or ioprio_class change */
 	int prio_changed;
 
+#ifdef CONFIG_BFQ_GROUP_IOSCHED
 	/* flag, set if the entity is counted in groups_with_pending_reqs */
 	bool in_groups_with_pending_reqs;
+#endif
 
 	/* last child queue of entity created (for non-leaf entities) */
 	struct bfq_queue *last_bfqq_created;
@@ -491,6 +493,7 @@ struct bfq_data {
 	 */
 	struct rb_root_cached queue_weights_tree;
 
+#ifdef CONFIG_BFQ_GROUP_IOSCHED
 	/*
 	 * Number of groups with at least one process that
 	 * has at least one request waiting for completion. Note that
@@ -538,6 +541,7 @@ struct bfq_data {
 	 * with no request waiting for completion.
 	 */
 	unsigned int num_groups_with_pending_reqs;
+#endif
 
 	/*
 	 * Per-class (RT, BE, IDLE) number of bfq_queues containing
diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c
@@ -1612,28 +1612,28 @@ void bfq_requeue_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 
 void bfq_add_bfqq_in_groups_with_pending_reqs(struct bfq_queue *bfqq)
 {
+#ifdef CONFIG_BFQ_GROUP_IOSCHED
 	struct bfq_entity *entity = &bfqq->entity;
 
 	if (!entity->in_groups_with_pending_reqs) {
 		entity->in_groups_with_pending_reqs = true;
-#ifdef CONFIG_BFQ_GROUP_IOSCHED
 		if (!(bfqq_group(bfqq)->num_queues_with_pending_reqs++))
 			bfqq->bfqd->num_groups_with_pending_reqs++;
-#endif
 	}
+#endif
 }
 
 void bfq_del_bfqq_in_groups_with_pending_reqs(struct bfq_queue *bfqq)
 {
+#ifdef CONFIG_BFQ_GROUP_IOSCHED
 	struct bfq_entity *entity = &bfqq->entity;
 
 	if (entity->in_groups_with_pending_reqs) {
 		entity->in_groups_with_pending_reqs = false;
-#ifdef CONFIG_BFQ_GROUP_IOSCHED
 		if (!(--bfqq_group(bfqq)->num_queues_with_pending_reqs))
 			bfqq->bfqd->num_groups_with_pending_reqs--;
-#endif
 	}
+#endif
 }
 
 /*
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
@@ -33,6 +33,7 @@
 #include "blk-cgroup.h"
 #include "blk-ioprio.h"
 #include "blk-throttle.h"
+#include "blk-rq-qos.h"
 
 /*
  * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
@@ -1322,6 +1323,7 @@ int blkcg_init_disk(struct gendisk *disk)
 
 void blkcg_exit_disk(struct gendisk *disk)
 {
 	blkg_destroy_all(disk);
+	rq_qos_exit(disk->queue);
 	blk_throtl_exit(disk);
 }
diff --git a/block/blk-core.c b/block/blk-core.c
@@ -254,14 +254,15 @@ EXPORT_SYMBOL_GPL(blk_clear_pm_only);
 
 static void blk_free_queue_rcu(struct rcu_head *rcu_head)
 {
-	kmem_cache_free(blk_requestq_cachep,
-			container_of(rcu_head, struct request_queue, rcu_head));
+	struct request_queue *q = container_of(rcu_head,
+			struct request_queue, rcu_head);
+
+	percpu_ref_exit(&q->q_usage_counter);
+	kmem_cache_free(blk_requestq_cachep, q);
 }
 
 static void blk_free_queue(struct request_queue *q)
 {
-	percpu_ref_exit(&q->q_usage_counter);
-
 	if (q->poll_stat)
 		blk_stat_remove_callback(q, q->poll_cb);
 	blk_stat_free_callback(q->poll_cb);
diff --git a/block/blk-iocost.c b/block/blk-iocost.c
@@ -232,7 +232,9 @@ enum {
 
 	/* 1/64k is granular enough and can easily be handled w/ u32 */
 	WEIGHT_ONE = 1 << 16,
+};
 
+enum {
 	/*
 	 * As vtime is used to calculate the cost of each IO, it needs to
 	 * be fairly high precision. For example, it should be able to
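Context for the iocost hunk: gcc 13 adopts the C23 rule that every enumeration constant takes the enum's underlying type, so a single value too large for int (VTIME_PER_SEC is 1 << 37) widens every other constant declared in the same enum and trips %u-style format checks. A minimal standalone demo of the effect (hypothetical names, not kernel code):

#include <stdio.h>

/* One oversized enumerator widens the whole enum under gcc 13 (C23
 * semantics): every constant declared in "mixed" then has the 64-bit
 * underlying type. */
enum mixed {
	MIXED_SMALL = 1,
	MIXED_HUGE  = 1ULL << 37,	/* plays the role of VTIME_PER_SEC */
};

/* Small constants kept in their own enum stay int-sized. */
enum small_only {
	ALONE_SMALL = 1,
};

int main(void)
{
	/* With gcc 13 this prints 8 then 4; older gccs typically print 4
	 * for both, because the enumerator-type rule changed. */
	printf("sizeof(MIXED_SMALL) = %zu\n", sizeof(MIXED_SMALL));
	printf("sizeof(ALONE_SMALL) = %zu\n", sizeof(ALONE_SMALL));
	return 0;
}

Splitting the large vtime constants into their own anonymous enum, as the patch does, keeps WEIGHT_ONE and the other small constants int-sized on every compiler.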
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
@@ -1773,7 +1773,16 @@ static const struct block_device_operations lo_fops = {
 /*
  * And now the modules code and kernel interface.
  */
-static int max_loop;
+
+/*
+ * If max_loop is specified, create that many devices upfront.
+ * This also becomes a hard limit. If max_loop is not specified,
+ * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module
+ * init time. Loop devices can be requested on-demand with the
+ * /dev/loop-control interface, or be instantiated by accessing
+ * a 'dead' device node.
+ */
+static int max_loop = CONFIG_BLK_DEV_LOOP_MIN_COUNT;
 module_param(max_loop, int, 0444);
 MODULE_PARM_DESC(max_loop, "Maximum number of loop devices");
 module_param(max_part, int, 0444);
@@ -2181,7 +2190,7 @@ MODULE_ALIAS("devname:loop-control");
 static int __init loop_init(void)
 {
-	int i, nr;
+	int i;
 	int err;
 
 	part_shift = 0;
 
@@ -2209,19 +2218,6 @@ static int __init loop_init(void)
 		goto err_out;
 	}
 
-	/*
-	 * If max_loop is specified, create that many devices upfront.
-	 * This also becomes a hard limit. If max_loop is not specified,
-	 * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module
-	 * init time. Loop devices can be requested on-demand with the
-	 * /dev/loop-control interface, or be instantiated by accessing
-	 * a 'dead' device node.
-	 */
-	if (max_loop)
-		nr = max_loop;
-	else
-		nr = CONFIG_BLK_DEV_LOOP_MIN_COUNT;
-
 	err = misc_register(&loop_misc);
 	if (err < 0)
 		goto err_out;
@@ -2233,7 +2229,7 @@ static int __init loop_init(void)
 	}
 
 	/* pre-create number of devices given by config or max_loop */
-	for (i = 0; i < nr; i++)
+	for (i = 0; i < max_loop; i++)
 		loop_add(i);
 
 	printk(KERN_INFO "loop: module loaded\n");
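The loop fix is a classic sentinel-value bug: the old code used 0 to mean both "create no devices" and "parameter not given", so max_loop=0 still pre-created CONFIG_BLK_DEV_LOOP_MIN_COUNT devices. Moving the default into the initializer works because module_param() only overwrites the variable when the user actually passes a value. A small illustrative sketch of the two behaviors (stand-in constant, not driver code):

#include <stdio.h>

#define MIN_COUNT 8	/* stand-in for CONFIG_BLK_DEV_LOOP_MIN_COUNT */

int main(void)
{
	int requested = 0;	/* user explicitly passed max_loop=0 */

	/* Old scheme: 0 doubles as "unset", so an explicit 0 is lost and
	 * MIN_COUNT devices get created anyway. */
	int nr_old = requested ? requested : MIN_COUNT;

	/* New scheme: the variable starts out at MIN_COUNT and is only
	 * overwritten when the parameter is given, so 0 stays 0. */
	int nr_new = requested;

	printf("old logic pre-creates %d devices\n", nr_old);	/* 8 */
	printf("new logic pre-creates %d devices\n", nr_new);	/* 0 */
	return 0;
}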
diff --git a/include/linux/bio.h b/include/linux/bio.h
@@ -782,8 +782,7 @@ static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
 
 static inline void bio_clear_polled(struct bio *bio)
 {
-	/* can't support alloc cache if we turn off polling */
-	bio->bi_opf &= ~(REQ_POLLED | REQ_ALLOC_CACHE);
+	bio->bi_opf &= ~REQ_POLLED;
 }
 
 struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,