Commit d56b9b28 authored by Mike Snitzer

dm: remove request-based DM queue's lld_busy_fn hook

DM multipath is the only caller of blk_lld_busy() -- which calls a
queue's lld_busy_fn hook.  Request-based DM doesn't support stacking
multipath devices so there is no reason to register the lld_busy_fn hook
on a multipath device's queue using blk_queue_lld_busy().
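
For reference, blk_lld_busy() does nothing more than dispatch to the hook
registered on the queue; a simplified sketch of the block-layer helper
(paraphrased, not quoted verbatim from the upstream source) looks like this:

int blk_lld_busy(struct request_queue *q)
{
	/* Call the low-level driver's busy hook if one was registered
	 * via blk_queue_lld_busy(); otherwise report "not busy". */
	if (q->lld_busy_fn)
		return q->lld_busy_fn(q);

	return 0;
}

Since no request-based DM device can sit underneath a multipath device,
the hook registered by DM would never be reached through this path.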

As such, remove functions dm_lld_busy and dm_table_any_busy_target.
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent 52b09914
drivers/md/dm-table.c
@@ -1677,20 +1677,6 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits)
 	return r;
 }
 
-int dm_table_any_busy_target(struct dm_table *t)
-{
-	unsigned i;
-	struct dm_target *ti;
-
-	for (i = 0; i < t->num_targets; i++) {
-		ti = t->targets + i;
-		if (ti->type->busy && ti->type->busy(ti))
-			return 1;
-	}
-
-	return 0;
-}
-
 struct mapped_device *dm_table_get_md(struct dm_table *t)
 {
 	return t->md;
drivers/md/dm.c
@@ -2006,22 +2006,6 @@ static void dm_request_fn(struct request_queue *q)
 	dm_put_live_table(md, srcu_idx);
 }
 
-static int dm_lld_busy(struct request_queue *q)
-{
-	int r;
-	struct mapped_device *md = q->queuedata;
-	struct dm_table *map = dm_get_live_table_fast(md);
-
-	if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))
-		r = 1;
-	else
-		r = dm_table_any_busy_target(map);
-
-	dm_put_live_table_fast(md);
-
-	return r;
-}
-
 static int dm_any_congested(void *congested_data, int bdi_bits)
 {
 	int r = bdi_bits;
@@ -2545,7 +2529,6 @@ static int dm_init_request_based_queue(struct mapped_device *md)
 	dm_init_md_queue(md);
 	blk_queue_softirq_done(md->queue, dm_softirq_done);
 	blk_queue_prep_rq(md->queue, dm_prep_fn);
-	blk_queue_lld_busy(md->queue, dm_lld_busy);
 
 	/* Also initialize the request-based DM worker thread */
 	init_kthread_worker(&md->kworker);
drivers/md/dm.h
@@ -70,7 +70,6 @@ void dm_table_presuspend_undo_targets(struct dm_table *t);
 void dm_table_postsuspend_targets(struct dm_table *t);
 int dm_table_resume_targets(struct dm_table *t);
 int dm_table_any_congested(struct dm_table *t, int bdi_bits);
-int dm_table_any_busy_target(struct dm_table *t);
 unsigned dm_table_get_type(struct dm_table *t);
 struct target_type *dm_table_get_immutable_target_type(struct dm_table *t);
 bool dm_table_request_based(struct dm_table *t);