Commit 76d697d1 authored by Ming Lei, committed by Jens Axboe

blk-mq: fix hctx/ctx kobject use-after-free

The kobject memory shouldn't be freed before the kobject is released,
because the driver core can still access it freely up until its release.

This patch frees hctx in its release callback. The ctx structures all
share one per-cpu variable associated with the request queue, so they
are freed in q->mq_kobj's release handler instead.
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
(fix ctx kobjects)
Signed-off-by: Ming Lei <ming.lei@canonical.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 6222d172
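For readers less used to the driver-core rules, the invariant this patch restores is: memory that embeds a kobject may only be freed from its ktype's ->release() callback, because sysfs users can still hold references after the object has been deleted from sysfs. Freeing the backing memory earlier is exactly the use-after-free being fixed. A minimal sketch of the pattern, using hypothetical foo/foo_ktype names that are not part of this patch:

#include <linux/kobject.h>
#include <linux/slab.h>

struct foo {
	int value;
	struct kobject kobj;		/* embedded kobject; its refcount pins 'foo' */
};

/* Runs only once the last reference is dropped, so kfree() is safe here. */
static void foo_release(struct kobject *kobj)
{
	struct foo *f = container_of(kobj, struct foo, kobj);

	kfree(f);
}

static struct kobj_type foo_ktype = {
	.release = foo_release,
};

static struct foo *foo_create(struct kobject *parent)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;
	kobject_init(&f->kobj, &foo_ktype);
	if (kobject_add(&f->kobj, parent, "foo")) {
		kobject_put(&f->kobj);	/* foo_release() frees 'f' */
		return NULL;
	}
	return f;
}

static void foo_destroy(struct foo *f)
{
	kobject_del(&f->kobj);		/* remove from sysfs */
	kobject_put(&f->kobj);		/* the actual kfree() may happen later, in foo_release() */
}

kobject_put() in foo_destroy() only drops that function's reference; if a sysfs reader still holds one, foo_release() runs later, which is why the kfree() must live there and nowhere else.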
@@ -15,6 +15,26 @@
 
 static void blk_mq_sysfs_release(struct kobject *kobj)
 {
+	struct request_queue *q;
+
+	q = container_of(kobj, struct request_queue, mq_kobj);
+	free_percpu(q->queue_ctx);
+}
+
+static void blk_mq_ctx_release(struct kobject *kobj)
+{
+	struct blk_mq_ctx *ctx;
+
+	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
+	kobject_put(&ctx->queue->mq_kobj);
+}
+
+static void blk_mq_hctx_release(struct kobject *kobj)
+{
+	struct blk_mq_hw_ctx *hctx;
+
+	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
+	kfree(hctx);
 }
 
 struct blk_mq_ctx_sysfs_entry {
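A wrinkle specific to ctx: the per-cpu ctx structures are not allocated individually, they all live inside the per-cpu q->queue_ctx area. The three callbacks above therefore chain the lifetimes: releasing a ctx kobject only drops a reference on q->mq_kobj, and the shared per-cpu area is freed once mq_kobj itself sees its final put. A rough sketch of that ownership chain, with hypothetical owner/child names standing in for the queue and its ctxs:

#include <linux/kobject.h>
#include <linux/percpu.h>

struct owner {
	struct kobject kobj;		/* plays the role of q->mq_kobj */
	int __percpu *shared;		/* plays the role of q->queue_ctx */
};

struct child {
	struct kobject kobj;		/* plays the role of one ctx kobject */
	struct owner *owner;
};

/* Last reference on the owner is gone: only now may the shared area go away. */
static void owner_release(struct kobject *kobj)
{
	struct owner *o = container_of(kobj, struct owner, kobj);

	free_percpu(o->shared);
}

/* A child going away merely drops its pin on the owner. */
static void child_release(struct kobject *kobj)
{
	struct child *c = container_of(kobj, struct child, kobj);

	kobject_put(&c->owner->kobj);
}

static struct kobj_type owner_ktype = { .release = owner_release };
static struct kobj_type child_ktype = { .release = child_release };

Mirroring blk_mq_sysfs_release(), owner_release() frees only the shared per-cpu area: the kobject itself is embedded in a larger structure (here the request_queue) whose memory is managed elsewhere.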
@@ -318,13 +338,13 @@ static struct kobj_type blk_mq_ktype = {
 static struct kobj_type blk_mq_ctx_ktype = {
 	.sysfs_ops	= &blk_mq_sysfs_ops,
 	.default_attrs	= default_ctx_attrs,
-	.release	= blk_mq_sysfs_release,
+	.release	= blk_mq_ctx_release,
 };
 
 static struct kobj_type blk_mq_hw_ktype = {
 	.sysfs_ops	= &blk_mq_hw_sysfs_ops,
 	.default_attrs	= default_hw_ctx_attrs,
-	.release	= blk_mq_sysfs_release,
+	.release	= blk_mq_hctx_release,
 };
 
 static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
@@ -355,6 +375,7 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
 		return ret;
 
 	hctx_for_each_ctx(hctx, ctx, i) {
+		kobject_get(&q->mq_kobj);
 		ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
 		if (ret)
 			break;
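The reference that blk_mq_ctx_release() drops is taken here: each per-cpu ctx directory registered under an hctx first pins q->mq_kobj, so the shared queue_ctx area cannot be freed while any ctx kobject is still reachable. Continuing the hypothetical owner/child sketch above (and assuming c->kobj was already kobject_init()'d with child_ktype), the pairing reduces to something like this; the error handling is illustrative, the patch itself leaves unwinding to its caller:

/* Expose one per-cpu child under 'parent'. The owner reference taken here is
 * only returned in child_release(), once the child's own refcount hits zero. */
static int child_register(struct child *c, struct kobject *parent,
			  unsigned int cpu)
{
	int ret;

	kobject_get(&c->owner->kobj);		/* pin the shared per-cpu area */
	ret = kobject_add(&c->kobj, parent, "cpu%u", cpu);
	if (ret)
		kobject_put(&c->kobj);		/* child_release() drops the owner pin */
	return ret;
}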
@@ -1641,10 +1641,8 @@ static void blk_mq_free_hw_queues(struct request_queue *q,
 	struct blk_mq_hw_ctx *hctx;
 	unsigned int i;
 
-	queue_for_each_hw_ctx(q, hctx, i) {
+	queue_for_each_hw_ctx(q, hctx, i)
 		free_cpumask_var(hctx->cpumask);
-		kfree(hctx);
-	}
 }
 
 static int blk_mq_init_hctx(struct request_queue *q,
@@ -2002,11 +2000,9 @@ void blk_mq_free_queue(struct request_queue *q)
 
 	percpu_ref_exit(&q->mq_usage_counter);
 
-	free_percpu(q->queue_ctx);
 	kfree(q->queue_hw_ctx);
 	kfree(q->mq_map);
 
-	q->queue_ctx = NULL;
 	q->queue_hw_ctx = NULL;
 	q->mq_map = NULL;