Commit c4110804 authored by Christoph Hellwig, committed by Jens Axboe

kyber: avoid q->disk dereferences in trace points

q->disk becomes invalid after the gendisk is removed.  Work around this
by caching the dev_t for the tracepoints.  The real fix would be to
properly tear down the I/O schedulers with the gendisk, but that is
a much more invasive change.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20211012093301.GA27795@lst.de
Tested-by: Yi Zhang <yi.zhang@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent aec89dc5
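
For context, here is a minimal userspace C sketch of the pattern this commit applies (stub types and names are illustrative only, not the kernel API): copy the device number out of the disk while it is still attached, so later trace paths report the cached dev_t instead of chasing q->disk, which may already be gone.

/*
 * Illustrative userspace sketch (stub types, not kernel code): cache the
 * device number at setup time so later reporting never dereferences a
 * disk pointer that may have been torn down.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>                  /* dev_t */

struct gendisk_stub { dev_t devt; };

struct queue_stub {
        struct gendisk_stub *disk;      /* becomes invalid after disk removal */
};

struct sched_data_stub {
        struct queue_stub *q;
        dev_t dev;                      /* cached copy, stays valid */
};

/* Runs while the disk is guaranteed to exist (scheduler init). */
static struct sched_data_stub *sched_data_alloc(struct queue_stub *q)
{
        struct sched_data_stub *d = malloc(sizeof(*d));

        if (!d)
                return NULL;
        d->q = q;
        d->dev = q->disk->devt;         /* the one safe dereference */
        return d;
}

/* Trace-style helper: takes a plain dev_t, never touches q->disk. */
static void trace_throttled(dev_t dev, const char *domain)
{
        printf("dev %llu throttled in domain %s\n",
               (unsigned long long)dev, domain);
}

int main(void)
{
        struct gendisk_stub disk = { .devt = 2049 };    /* arbitrary example */
        struct queue_stub q = { .disk = &disk };
        struct sched_data_stub *d = sched_data_alloc(&q);

        if (!d)
                return 1;
        q.disk = NULL;                  /* disk removed: q->disk now invalid */
        trace_throttled(d->dev, "read");        /* still safe, uses the cache */
        free(d);
        return 0;
}

The diff below does the same thing with kqd->dev and disk_devt(), and changes the kyber tracepoints to take that dev_t directly.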
block/kyber-iosched.c

@@ -151,6 +151,7 @@ struct kyber_ctx_queue {
 
 struct kyber_queue_data {
         struct request_queue *q;
+        dev_t dev;
 
         /*
          * Each scheduling domain has a limited number of in-flight requests
@@ -257,7 +258,7 @@ static int calculate_percentile(struct kyber_queue_data *kqd,
         }
         memset(buckets, 0, sizeof(kqd->latency_buckets[sched_domain][type]));
 
-        trace_kyber_latency(kqd->q, kyber_domain_names[sched_domain],
+        trace_kyber_latency(kqd->dev, kyber_domain_names[sched_domain],
                             kyber_latency_type_names[type], percentile,
                             bucket + 1, 1 << KYBER_LATENCY_SHIFT, samples);
 
@@ -270,7 +271,7 @@ static void kyber_resize_domain(struct kyber_queue_data *kqd,
         depth = clamp(depth, 1U, kyber_depth[sched_domain]);
         if (depth != kqd->domain_tokens[sched_domain].sb.depth) {
                 sbitmap_queue_resize(&kqd->domain_tokens[sched_domain], depth);
-                trace_kyber_adjust(kqd->q, kyber_domain_names[sched_domain],
+                trace_kyber_adjust(kqd->dev, kyber_domain_names[sched_domain],
                                    depth);
         }
 }
@@ -366,6 +367,7 @@ static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
                 goto err;
 
         kqd->q = q;
+        kqd->dev = disk_devt(q->disk);
 
         kqd->cpu_latency = alloc_percpu_gfp(struct kyber_cpu_latency,
                                             GFP_KERNEL | __GFP_ZERO);
@@ -774,7 +776,7 @@ kyber_dispatch_cur_domain(struct kyber_queue_data *kqd,
                         list_del_init(&rq->queuelist);
                         return rq;
                 } else {
-                        trace_kyber_throttled(kqd->q,
+                        trace_kyber_throttled(kqd->dev,
                                               kyber_domain_names[khd->cur_domain]);
                 }
         } else if (sbitmap_any_bit_set(&khd->kcq_map[khd->cur_domain])) {
@@ -787,7 +789,7 @@ kyber_dispatch_cur_domain(struct kyber_queue_data *kqd,
                         list_del_init(&rq->queuelist);
                         return rq;
                 } else {
-                        trace_kyber_throttled(kqd->q,
+                        trace_kyber_throttled(kqd->dev,
                                               kyber_domain_names[khd->cur_domain]);
                 }
         }
......@@ -13,11 +13,11 @@
TRACE_EVENT(kyber_latency,
TP_PROTO(struct request_queue *q, const char *domain, const char *type,
TP_PROTO(dev_t dev, const char *domain, const char *type,
unsigned int percentile, unsigned int numerator,
unsigned int denominator, unsigned int samples),
TP_ARGS(q, domain, type, percentile, numerator, denominator, samples),
TP_ARGS(dev, domain, type, percentile, numerator, denominator, samples),
TP_STRUCT__entry(
__field( dev_t, dev )
......@@ -30,7 +30,7 @@ TRACE_EVENT(kyber_latency,
),
TP_fast_assign(
__entry->dev = disk_devt(q->disk);
__entry->dev = dev;
strlcpy(__entry->domain, domain, sizeof(__entry->domain));
strlcpy(__entry->type, type, sizeof(__entry->type));
__entry->percentile = percentile;
......@@ -47,10 +47,9 @@ TRACE_EVENT(kyber_latency,
TRACE_EVENT(kyber_adjust,
TP_PROTO(struct request_queue *q, const char *domain,
unsigned int depth),
TP_PROTO(dev_t dev, const char *domain, unsigned int depth),
TP_ARGS(q, domain, depth),
TP_ARGS(dev, domain, depth),
TP_STRUCT__entry(
__field( dev_t, dev )
......@@ -59,7 +58,7 @@ TRACE_EVENT(kyber_adjust,
),
TP_fast_assign(
__entry->dev = disk_devt(q->disk);
__entry->dev = dev;
strlcpy(__entry->domain, domain, sizeof(__entry->domain));
__entry->depth = depth;
),
......@@ -71,9 +70,9 @@ TRACE_EVENT(kyber_adjust,
TRACE_EVENT(kyber_throttled,
TP_PROTO(struct request_queue *q, const char *domain),
TP_PROTO(dev_t dev, const char *domain),
TP_ARGS(q, domain),
TP_ARGS(dev, domain),
TP_STRUCT__entry(
__field( dev_t, dev )
......@@ -81,7 +80,7 @@ TRACE_EVENT(kyber_throttled,
),
TP_fast_assign(
__entry->dev = disk_devt(q->disk);
__entry->dev = dev;
strlcpy(__entry->domain, domain, sizeof(__entry->domain));
),
......
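
As a side note, the cached value is just an encoded major/minor pair; a tiny standalone C example using the standard glibc helpers (unrelated to the files touched above) shows what a dev_t field like the __entry->dev above carries.

/* Standalone example: a dev_t encodes a major/minor device number pair,
 * which is what trace consumers ultimately see for the dev field. */
#include <stdio.h>
#include <sys/sysmacros.h>      /* makedev(), major(), minor() */
#include <sys/types.h>

int main(void)
{
        dev_t dev = makedev(8, 1);      /* e.g. 8:1, a typical SCSI partition */

        printf("dev %u:%u\n", major(dev), minor(dev));
        return 0;
}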