Commit 68e9e9fe authored by Tejun Heo, committed by Ben Hutchings

block: add blk_queue_dead()

commit 34f6055c upstream.

There are a number of QUEUE_FLAG_DEAD tests.  Add blk_queue_dead()
macro and use it.

This patch doesn't introduce any functional difference.
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
parent a7c5635a
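
For context, the new helper is just a named wrapper around the existing flag test, so every conversion in the diff below is mechanical. A minimal sketch of the before/after pattern at a call site (illustrative only, not part of the patch; assumes a struct request_queue *q as in the hunks that follow):

	/* before: open-coded flag test */
	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
		return NULL;

	/* after: the same test behind the blk_queue_dead() macro */
	if (unlikely(blk_queue_dead(q)))
		return NULL;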
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -607,7 +607,7 @@ EXPORT_SYMBOL(blk_init_allocated_queue);
 
 int blk_get_queue(struct request_queue *q)
 {
-	if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
+	if (likely(!blk_queue_dead(q))) {
 		kobject_get(&q->kobj);
 		return 0;
 	}
@@ -754,7 +754,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 	const bool is_sync = rw_is_sync(rw_flags) != 0;
 	int may_queue;
 
-	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
+	if (unlikely(blk_queue_dead(q)))
 		return NULL;
 
 	may_queue = elv_may_queue(q, rw_flags);
@@ -874,7 +874,7 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
 		struct io_context *ioc;
 		struct request_list *rl = &q->rq;
 
-		if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
+		if (unlikely(blk_queue_dead(q)))
 			return NULL;
 
 		prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -50,7 +50,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 {
 	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
 
-	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
+	if (unlikely(blk_queue_dead(q))) {
 		rq->errors = -ENXIO;
 		if (rq->end_io)
 			rq->end_io(rq, rq->errors);
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -425,7 +425,7 @@ queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
 	if (!entry->show)
 		return -EIO;
 	mutex_lock(&q->sysfs_lock);
-	if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
+	if (blk_queue_dead(q)) {
 		mutex_unlock(&q->sysfs_lock);
 		return -ENOENT;
 	}
@@ -447,7 +447,7 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
 
 	q = container_of(kobj, struct request_queue, kobj);
 	mutex_lock(&q->sysfs_lock);
-	if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
+	if (blk_queue_dead(q)) {
 		mutex_unlock(&q->sysfs_lock);
 		return -ENOENT;
 	}
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -310,7 +310,7 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
 	struct request_queue *q = td->queue;
 
 	/* no throttling for dead queue */
-	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
+	if (unlikely(blk_queue_dead(q)))
 		return NULL;
 
 	rcu_read_lock();
@@ -335,7 +335,7 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
 	spin_lock_irq(q->queue_lock);
 
 	/* Make sure @q is still alive */
-	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
+	if (unlikely(blk_queue_dead(q))) {
 		kfree(tg);
 		return NULL;
 	}
--- a/block/blk.h
+++ b/block/blk.h
@@ -85,7 +85,7 @@ static inline struct request *__elv_next_request(struct request_queue *q)
 			q->flush_queue_delayed = 1;
 			return NULL;
 		}
-		if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags) ||
+		if (unlikely(blk_queue_dead(q)) ||
 		    !q->elevator->ops->elevator_dispatch_fn(q, 0))
 			return NULL;
 	}
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -481,6 +481,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 #define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
+#define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
 #define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
 #define blk_queue_noxmerges(q)	\
 	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)