Commit 97961ef4 authored by Konrad Rzeszutek Wilk

xen/blkback: Move the plugging/unplugging to a higher level.

We used to do the plug/unplug around submit_bio. But that means
that within a stream of WRITE, WRITE, WRITE, ..., WRITE containing
one READ, the READ could stall the pipeline (as submit_bio could
trigger the queue's unplug function and stall/sync while doing the
READ). Instead we want to do the unplugging only once the whole
ring buffer (or as much of it as possible) has been processed.
This also eliminates doing a plug/unplug for each request.
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
parent 8b6bf747
@@ -276,6 +276,8 @@ int xen_blkif_schedule(void *arg)
 	printk(KERN_DEBUG "%s: started\n", current->comm);
 
 	while (!kthread_should_stop()) {
+		struct blk_plug plug;
+
 		if (try_to_freeze())
 			continue;
 		if (unlikely(vbd->size != vbd_sz(vbd)))
@@ -292,9 +294,13 @@ int xen_blkif_schedule(void *arg)
 		blkif->waiting_reqs = 0;
 		smp_mb(); /* clear flag *before* checking for work */
 
+		blk_start_plug(&plug);
+
 		if (do_block_io_op(blkif))
 			blkif->waiting_reqs = 1;
 
+		blk_finish_plug(&plug);
+
 		if (log_stats && time_after(jiffies, blkif->st_print))
 			print_stats(blkif);
 	}
@@ -547,7 +553,6 @@ static void dispatch_rw_block_io(struct blkif_st *blkif,
 	struct bio *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 	int i, nbio = 0;
 	int operation;
-	struct blk_plug plug;
 
 	switch (req->operation) {
 	case BLKIF_OP_READ:
@@ -660,15 +665,9 @@ static void dispatch_rw_block_io(struct blkif_st *blkif,
 	 */
 	atomic_set(&pending_req->pendcnt, nbio);
 
-	/* Get a reference count for the disk queue and start sending I/O */
-	blk_start_plug(&plug);
-
 	for (i = 0; i < nbio; i++)
 		submit_bio(operation, biolist[i]);
 
-	blk_finish_plug(&plug);
-	/* Let the I/Os go.. */
-
 	if (operation == READ)
 		blkif->st_rd_sect += preq.nr_sects;
 	else if (operation == WRITE || operation == WRITE_BARRIER)
...
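For context, here is a minimal sketch of the pattern the commit converges on: one on-stack plug held across an entire pass over the shared ring, so every bio submitted while processing that pass is batched and released together by blk_finish_plug(). The names do_block_io_op() and blkif_st are taken from the diff above; example_schedule() and the surrounding scaffolding are hypothetical stand-ins, simplified for illustration, and are not the verbatim driver code.

	/*
	 * Sketch only, not the actual xen-blkback code. Mirrors the shape of
	 * xen_blkif_schedule() after this commit: the plug lives on the
	 * kthread's stack and covers a whole pass over the ring.
	 */
	#include <linux/blkdev.h>
	#include <linux/freezer.h>
	#include <linux/kthread.h>

	struct blkif_st;                              /* driver-private; see the diff above */
	int do_block_io_op(struct blkif_st *blkif);   /* processes pending ring requests */

	static int example_schedule(void *arg)        /* hypothetical name */
	{
		struct blkif_st *blkif = arg;

		while (!kthread_should_stop()) {
			struct blk_plug plug;
			int more_to_do;

			if (try_to_freeze())
				continue;

			/* One plug per pass over the ring, not per request. */
			blk_start_plug(&plug);

			/*
			 * Every submit_bio() issued while processing the ring is
			 * queued on the on-stack plug list instead of unplugging
			 * the queue immediately.
			 */
			more_to_do = do_block_io_op(blkif);

			/* Hand the whole batch to the block layer at once. */
			blk_finish_plug(&plug);

			if (more_to_do)
				continue;	/* more requests arrived meanwhile */
		}
		return 0;
	}

The point of the move is visible here: a READ mixed into a run of WRITEs no longer forces an unplug in the middle of the batch, because nothing is flushed until blk_finish_plug() at the end of the pass, and the per-request plug/unplug in dispatch_rw_block_io() disappears entirely.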