Commit c21e6beb authored by Jens Axboe

block: get rid of QUEUE_FLAG_REENTER

We are currently using this flag to check whether it's safe
to call into ->request_fn(). If it is set, we punt to kblockd.
But we get a lot of false positives and excessive punts to
kblockd, which hurts performance.

The only real abuser of this infrastructure is SCSI. So export
the async queue run and convert SCSI over to use that. There's
room for improvement in that SCSI need not always use the async
call, but this fixes our performance issue and they can fix that
up in due time.
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
parent 5f45c695
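
For orientation, here is a minimal sketch (not part of the commit) of the calling convention the change relies on: callers that may already be inside the dispatch path hand the queue run to kblockd with blk_run_queue_async(), while callers in a safe context keep running the queue synchronously with __blk_run_queue() under the queue lock, which after this change invokes ->request_fn() directly. The helper name my_driver_kick_queue() and the in_request_path flag are hypothetical, purely for illustration.

/*
 * Illustrative sketch only -- not from the commit. The helper name and
 * the in_request_path flag are made up for this example.
 */
#include <linux/blkdev.h>
#include <linux/spinlock.h>

static void my_driver_kick_queue(struct request_queue *q, bool in_request_path)
{
	if (in_request_path) {
		/*
		 * We may already be inside ->request_fn() or otherwise deep
		 * in the dispatch path, so don't recurse into it. This punts
		 * the queue run to kblockd via q->delay_work.
		 */
		blk_run_queue_async(q);
	} else {
		unsigned long flags;

		/*
		 * Safe context: run the queue synchronously. After this
		 * commit, __blk_run_queue() calls ->request_fn() directly
		 * with no QUEUE_FLAG_REENTER bookkeeping; the caller must
		 * hold the queue lock.
		 */
		spin_lock_irqsave(q->queue_lock, flags);
		__blk_run_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

This mirrors the SCSI conversion below: scsi_run_queue() and fc_bsg_goose_queue() run in contexts where recursion into the request function is possible, so they switch to the async variant.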
block/blk-core.c
@@ -303,15 +303,7 @@ void __blk_run_queue(struct request_queue *q)
 	if (unlikely(blk_queue_stopped(q)))
 		return;
 
-	/*
-	 * Only recurse once to avoid overrunning the stack, let the unplug
-	 * handling reinvoke the handler shortly if we already got there.
-	 */
-	if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
-		q->request_fn(q);
-		queue_flag_clear(QUEUE_FLAG_REENTER, q);
-	} else
-		queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
+	q->request_fn(q);
 }
 EXPORT_SYMBOL(__blk_run_queue);
@@ -328,6 +320,7 @@ void blk_run_queue_async(struct request_queue *q)
 	if (likely(!blk_queue_stopped(q)))
 		queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
 }
+EXPORT_SYMBOL(blk_run_queue_async);
 
 /**
  * blk_run_queue - run a single device queue
block/blk.h
@@ -22,7 +22,6 @@ void blk_rq_timed_out_timer(unsigned long data);
 void blk_delete_timer(struct request *);
 void blk_add_timer(struct request *);
 void __generic_unplug_device(struct request_queue *);
-void blk_run_queue_async(struct request_queue *q);
 
 /*
  * Internal atomic flags for request handling
drivers/scsi/scsi_lib.c
@@ -411,8 +411,6 @@ static void scsi_run_queue(struct request_queue *q)
 	list_splice_init(&shost->starved_list, &starved_list);
 
 	while (!list_empty(&starved_list)) {
-		int flagset;
-
 		/*
 		 * As long as shost is accepting commands and we have
 		 * starved queues, call blk_run_queue. scsi_request_fn
@@ -435,20 +433,7 @@ static void scsi_run_queue(struct request_queue *q)
 			continue;
 		}
 
-		spin_unlock(shost->host_lock);
-
-		spin_lock(sdev->request_queue->queue_lock);
-		flagset = test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) &&
-				!test_bit(QUEUE_FLAG_REENTER,
-					&sdev->request_queue->queue_flags);
-		if (flagset)
-			queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
-		__blk_run_queue(sdev->request_queue);
-		if (flagset)
-			queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
-		spin_unlock(sdev->request_queue->queue_lock);
-
-		spin_lock(shost->host_lock);
+		blk_run_queue_async(sdev->request_queue);
 	}
 	/* put any unprocessed entries back */
 	list_splice(&starved_list, &shost->starved_list);
drivers/scsi/scsi_transport_fc.c
@@ -3816,28 +3816,17 @@ fc_bsg_host_dispatch(struct request_queue *q, struct Scsi_Host *shost,
 static void
 fc_bsg_goose_queue(struct fc_rport *rport)
 {
-	int flagset;
-	unsigned long flags;
-
 	if (!rport->rqst_q)
 		return;
 
+	/*
+	 * This get/put dance makes no sense
+	 */
 	get_device(&rport->dev);
-
-	spin_lock_irqsave(rport->rqst_q->queue_lock, flags);
-	flagset = test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags) &&
-		!test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags);
-	if (flagset)
-		queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q);
-	__blk_run_queue(rport->rqst_q);
-	if (flagset)
-		queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q);
-	spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags);
-
+	blk_run_queue_async(rport->rqst_q);
 	put_device(&rport->dev);
 }
 
 /**
  * fc_bsg_rport_dispatch - process rport bsg requests and dispatch to LLDD
  * @q: rport request queue
include/linux/blkdev.h
@@ -388,20 +388,19 @@ struct request_queue
 #define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
 #define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
 #define QUEUE_FLAG_DEAD		5	/* queue being torn down */
-#define QUEUE_FLAG_REENTER	6	/* Re-entrancy avoidance */
-#define QUEUE_FLAG_ELVSWITCH	7	/* don't use elevator, just do FIFO */
-#define QUEUE_FLAG_BIDI		8	/* queue supports bidi requests */
-#define QUEUE_FLAG_NOMERGES	9	/* disable merge attempts */
-#define QUEUE_FLAG_SAME_COMP	10	/* force complete on same CPU */
-#define QUEUE_FLAG_FAIL_IO	11	/* fake timeout */
-#define QUEUE_FLAG_STACKABLE	12	/* supports request stacking */
-#define QUEUE_FLAG_NONROT	13	/* non-rotational device (SSD) */
+#define QUEUE_FLAG_ELVSWITCH	6	/* don't use elevator, just do FIFO */
+#define QUEUE_FLAG_BIDI		7	/* queue supports bidi requests */
+#define QUEUE_FLAG_NOMERGES	8	/* disable merge attempts */
+#define QUEUE_FLAG_SAME_COMP	9	/* force complete on same CPU */
+#define QUEUE_FLAG_FAIL_IO	10	/* fake timeout */
+#define QUEUE_FLAG_STACKABLE	11	/* supports request stacking */
+#define QUEUE_FLAG_NONROT	12	/* non-rotational device (SSD) */
 #define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
-#define QUEUE_FLAG_IO_STAT	15	/* do IO stats */
-#define QUEUE_FLAG_DISCARD	16	/* supports DISCARD */
-#define QUEUE_FLAG_NOXMERGES	17	/* No extended merges */
-#define QUEUE_FLAG_ADD_RANDOM	18	/* Contributes to random pool */
-#define QUEUE_FLAG_SECDISCARD	19	/* supports SECDISCARD */
+#define QUEUE_FLAG_IO_STAT	13	/* do IO stats */
+#define QUEUE_FLAG_DISCARD	14	/* supports DISCARD */
+#define QUEUE_FLAG_NOXMERGES	15	/* No extended merges */
+#define QUEUE_FLAG_ADD_RANDOM	16	/* Contributes to random pool */
+#define QUEUE_FLAG_SECDISCARD	17	/* supports SECDISCARD */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_STACKABLE)	|	\
@@ -699,6 +698,7 @@ extern void blk_sync_queue(struct request_queue *q);
 extern void __blk_stop_queue(struct request_queue *q);
 extern void __blk_run_queue(struct request_queue *q);
 extern void blk_run_queue(struct request_queue *);
+extern void blk_run_queue_async(struct request_queue *q);
 extern int blk_rq_map_user(struct request_queue *, struct request *,
			   struct rq_map_data *, void __user *, unsigned long,
			   gfp_t);