Commit b6e68ee8 authored by Jan Kara, committed by Jens Axboe

blk-mq: Improve performance of non-mq IO schedulers with multiple HW queues

Currently, when a non-mq aware IO scheduler (BFQ, mq-deadline) is used for
a queue with multiple HW queues, the performance is rather bad. The
problem is that these IO schedulers use queue-wide locking and their
dispatch function does not respect the hctx it is passed in and returns
any request it finds appropriate. Thus locality of request access is
broken and dispatch from multiple CPUs just contends on IO scheduler
locks. For these IO schedulers there's little point in dispatching from
multiple CPUs. Instead, always dispatch from a single CPU to limit
contention.
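
The patch below implements this by funneling queue runs through the hctx
mapped to the current CPU whenever the active elevator does not advertise
ELEVATOR_F_MQ_AWARE; other hctxs are only run when their ->dispatch list
holds requests that bypass the scheduler. Condensed from the patch (context
and the NULL assignment folded into the declaration, details omitted), the
gating in blk_mq_run_hw_queues() boils down to:

	struct blk_mq_hw_ctx *hctx, *sq_hctx = NULL;
	int i;

	/* Scheduler lacks ELEVATOR_F_MQ_AWARE: pick the hctx for this CPU. */
	if (blk_mq_has_sqsched(q))
		sq_hctx = blk_mq_get_sq_hctx(q);
	queue_for_each_hw_ctx(q, hctx, i) {
		if (blk_mq_hctx_stopped(hctx))
			continue;
		/* Run other hctxs only if they hold requests bypassing the scheduler. */
		if (!sq_hctx || sq_hctx == hctx ||
		    !list_empty_careful(&hctx->dispatch))
			blk_mq_run_hw_queue(hctx, async);
	}

The same check guards the delayed-run variant, blk_mq_delay_run_hw_queues().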

Below is a comparison of dbench runs on an XFS filesystem, where the storage
is a RAID card with 64 HW queues and a single rotating disk attached to it.
BFQ is used as the IO scheduler:

      clients           MQ                     SQ             MQ-Patched
Amean 1      39.12 (0.00%)       43.29 * -10.67%*       36.09 *   7.74%*
Amean 2     128.58 (0.00%)      101.30 *  21.22%*       96.14 *  25.23%*
Amean 4     577.42 (0.00%)      494.47 *  14.37%*      508.49 *  11.94%*
Amean 8     610.95 (0.00%)      363.86 *  40.44%*      362.12 *  40.73%*
Amean 16    391.78 (0.00%)      261.49 *  33.25%*      282.94 *  27.78%*
Amean 32    324.64 (0.00%)      267.71 *  17.54%*      233.00 *  28.23%*
Amean 64    295.04 (0.00%)      253.02 *  14.24%*      242.37 *  17.85%*
Amean 512 10281.61 (0.00%)    10211.16 *   0.69%*    10447.53 *  -1.61%*

Numbers are times, so lower is better. MQ is the stock 5.10-rc6 kernel. SQ is
the same kernel with megaraid_sas.host_tagset_enable=0 so that the card
advertises just a single HW queue. MQ-Patched is the kernel with this
patch applied.

You can see that multiple hardware queues heavily hurt performance in
combination with BFQ. The patch restores the performance.
Signed-off-by: Jan Kara <jack@suse.cz>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 5ac83c64
@@ -1646,6 +1646,42 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 }
 EXPORT_SYMBOL(blk_mq_run_hw_queue);
 
+/*
+ * Is the request queue handled by an IO scheduler that does not respect
+ * hardware queues when dispatching?
+ */
+static bool blk_mq_has_sqsched(struct request_queue *q)
+{
+	struct elevator_queue *e = q->elevator;
+
+	if (e && e->type->ops.dispatch_request &&
+	    !(e->type->elevator_features & ELEVATOR_F_MQ_AWARE))
+		return true;
+	return false;
+}
+
+/*
+ * Return prefered queue to dispatch from (if any) for non-mq aware IO
+ * scheduler.
+ */
+static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q)
+{
+	struct blk_mq_hw_ctx *hctx;
+
+	/*
+	 * If the IO scheduler does not respect hardware queues when
+	 * dispatching, we just don't bother with multiple HW queues and
+	 * dispatch from hctx for the current CPU since running multiple queues
+	 * just causes lock contention inside the scheduler and pointless cache
+	 * bouncing.
+	 */
+	hctx = blk_mq_map_queue_type(q, HCTX_TYPE_DEFAULT,
+				     raw_smp_processor_id());
+	if (!blk_mq_hctx_stopped(hctx))
+		return hctx;
+	return NULL;
+}
+
 /**
  * blk_mq_run_hw_queues - Run all hardware queues in a request queue.
  * @q: Pointer to the request queue to run.
@@ -1653,14 +1689,23 @@ EXPORT_SYMBOL(blk_mq_run_hw_queue);
  */
 void blk_mq_run_hw_queues(struct request_queue *q, bool async)
 {
-	struct blk_mq_hw_ctx *hctx;
+	struct blk_mq_hw_ctx *hctx, *sq_hctx;
 	int i;
 
+	sq_hctx = NULL;
+	if (blk_mq_has_sqsched(q))
+		sq_hctx = blk_mq_get_sq_hctx(q);
 	queue_for_each_hw_ctx(q, hctx, i) {
 		if (blk_mq_hctx_stopped(hctx))
 			continue;
-
-		blk_mq_run_hw_queue(hctx, async);
+		/*
+		 * Dispatch from this hctx either if there's no hctx preferred
+		 * by IO scheduler or if it has requests that bypass the
+		 * scheduler.
+		 */
+		if (!sq_hctx || sq_hctx == hctx ||
+		    !list_empty_careful(&hctx->dispatch))
+			blk_mq_run_hw_queue(hctx, async);
 	}
 }
 EXPORT_SYMBOL(blk_mq_run_hw_queues);
@@ -1672,14 +1717,23 @@ EXPORT_SYMBOL(blk_mq_run_hw_queues);
  */
 void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs)
 {
-	struct blk_mq_hw_ctx *hctx;
+	struct blk_mq_hw_ctx *hctx, *sq_hctx;
 	int i;
 
+	sq_hctx = NULL;
+	if (blk_mq_has_sqsched(q))
+		sq_hctx = blk_mq_get_sq_hctx(q);
 	queue_for_each_hw_ctx(q, hctx, i) {
 		if (blk_mq_hctx_stopped(hctx))
 			continue;
-
-		blk_mq_delay_run_hw_queue(hctx, msecs);
+		/*
+		 * Dispatch from this hctx either if there's no hctx preferred
+		 * by IO scheduler or if it has requests that bypass the
+		 * scheduler.
+		 */
+		if (!sq_hctx || sq_hctx == hctx ||
+		    !list_empty_careful(&hctx->dispatch))
+			blk_mq_delay_run_hw_queue(hctx, msecs);
 	}
 }
 EXPORT_SYMBOL(blk_mq_delay_run_hw_queues);
...
@@ -1029,6 +1029,7 @@ static struct elevator_type kyber_sched = {
 #endif
 	.elevator_attrs = kyber_sched_attrs,
 	.elevator_name = "kyber",
+	.elevator_features = ELEVATOR_F_MQ_AWARE,
 	.elevator_owner = THIS_MODULE,
 };
...
@@ -172,6 +172,8 @@ extern struct request *elv_rb_find(struct rb_root *, sector_t);
 /* Supports zoned block devices sequential write constraint */
 #define ELEVATOR_F_ZBD_SEQ_WRITE	(1U << 0)
+/* Supports scheduling on multiple hardware queues */
+#define ELEVATOR_F_MQ_AWARE		(1U << 1)
 
 #endif /* CONFIG_BLOCK */
 #endif
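
Any mq-aware elevator opts out of the single-queue dispatch path the same way
Kyber does above, by setting the feature bit in its struct elevator_type. As a
sketch only (the scheduler name "foo" and its ops are hypothetical, mirroring
the kyber_sched declaration patched above):

	static struct elevator_type foo_sched = {
		.ops = {
			/* ... dispatch_request and the other scheduler ops ... */
		},
		.elevator_name		= "foo",
		/* Declare that dispatch respects the hctx it is called for. */
		.elevator_features	= ELEVATOR_F_MQ_AWARE,
		.elevator_owner		= THIS_MODULE,
	};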