Commit 01263a1f authored by Juergen Gross

xen/blkback: use lateeoi irq binding

In order to reduce the chance of the system becoming unresponsive due
to event storms triggered by a misbehaving blkfront, use the lateeoi
irq binding for blkback and unmask the event channel only after
processing all pending requests.
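
For illustration, a minimal sketch of the lateeoi pattern a Xen backend
follows (not part of this commit; the my_backend_* names are hypothetical,
while bind_interdomain_evtchn_to_irqhandler_lateeoi() and the deferred
xen_irq_lateeoi() call are the real Xen event-channel API):

#include <linux/interrupt.h>
#include <linux/wait.h>
#include <xen/events.h>

struct my_backend {
	unsigned int irq;		/* irq returned by the lateeoi binding */
	bool waiting_reqs;		/* set by the interrupt handler */
	wait_queue_head_t wq;		/* wakes the processing thread */
	/* ... ring state ... */
};

static irqreturn_t my_backend_interrupt(int irq, void *dev_id)
{
	struct my_backend *be = dev_id;

	/*
	 * Only note the event and kick the processing thread.  With the
	 * lateeoi binding the event channel stays masked when this
	 * handler returns, so an event storm from a misbehaving
	 * frontend cannot monopolize the CPU.
	 */
	be->waiting_reqs = true;
	wake_up(&be->wq);
	return IRQ_HANDLED;
}

static int my_backend_bind(struct my_backend *be, domid_t domid,
			   evtchn_port_t evtchn)
{
	int err;

	err = bind_interdomain_evtchn_to_irqhandler_lateeoi(domid, evtchn,
			my_backend_interrupt, 0, "my-backend", be);
	if (err < 0)
		return err;
	be->irq = err;
	return 0;
}

The channel is unmasked again only when the backend later calls
xen_irq_lateeoi(), i.e. after it has actually drained the pending requests.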

As the thread processing requests is also used to do purging work at
regular intervals, an EOI may be sent only after an event has actually
been received. If there was no pending I/O request, flag the EOI as
spurious.
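
Sketched as a loop (hypothetical helpers, mirroring the logic the diff below
adds to xen_blkif_schedule()): the thread remembers whether it was woken by
an event, clears XEN_EOI_FLAG_SPURIOUS once a real request is seen, and sends
the EOI only when the ring has been drained. Passing XEN_EOI_FLAG_SPURIOUS
lets the Xen events core delay the unmask for channels that keep firing
without delivering work:

/* Continues the sketch above; also needs <linux/kthread.h>. */
static int my_backend_thread(void *arg)
{
	struct my_backend *be = arg;
	unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;
	bool do_eoi;

	while (!kthread_should_stop()) {
		wait_for_event_or_purge_timeout(be);	/* hypothetical */

		/* Only consider an EOI if an event actually arrived. */
		do_eoi = be->waiting_reqs;
		be->waiting_reqs = false;
		smp_mb();	/* clear flag *before* checking for work */

		if (process_pending_requests(be) > 0)	/* hypothetical */
			/* We saw real requests: the event was not spurious. */
			eoi_flags &= ~XEN_EOI_FLAG_SPURIOUS;
		if (more_requests_queued(be))		/* hypothetical */
			be->waiting_reqs = true;

		/* EOI (and unmask) only once all pending requests are done. */
		if (do_eoi && !be->waiting_reqs) {
			xen_irq_lateeoi(be->irq, eoi_flags);
			/* Re-arm: assume spurious until the next request. */
			eoi_flags |= XEN_EOI_FLAG_SPURIOUS;
		}

		/* ... regular grant purging work happens here as well ... */
	}
	return 0;
}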

This is part of XSA-332.

Cc: stable@vger.kernel.org
Reported-by: Julien Grall <julien@xen.org>
Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Wei Liu <wl@xen.org>
parent 54c9de89
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -201,7 +201,7 @@ static inline void shrink_free_pagepool(struct xen_blkif_ring *ring, int num)
 
 #define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))
 
-static int do_block_io_op(struct xen_blkif_ring *ring);
+static int do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags);
 static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
 				struct blkif_request *req,
 				struct pending_req *pending_req);
@@ -612,6 +612,8 @@ int xen_blkif_schedule(void *arg)
 	struct xen_vbd *vbd = &blkif->vbd;
 	unsigned long timeout;
 	int ret;
+	bool do_eoi;
+	unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;
 
 	set_freezable();
 	while (!kthread_should_stop()) {
@@ -636,16 +638,23 @@ int xen_blkif_schedule(void *arg)
 		if (timeout == 0)
 			goto purge_gnt_list;
 
+		do_eoi = ring->waiting_reqs;
+
 		ring->waiting_reqs = 0;
 		smp_mb(); /* clear flag *before* checking for work */
 
-		ret = do_block_io_op(ring);
+		ret = do_block_io_op(ring, &eoi_flags);
 		if (ret > 0)
 			ring->waiting_reqs = 1;
 		if (ret == -EACCES)
 			wait_event_interruptible(ring->shutdown_wq,
 						 kthread_should_stop());
 
+		if (do_eoi && !ring->waiting_reqs) {
+			xen_irq_lateeoi(ring->irq, eoi_flags);
+			eoi_flags |= XEN_EOI_FLAG_SPURIOUS;
+		}
+
 purge_gnt_list:
 		if (blkif->vbd.feature_gnt_persistent &&
 		    time_after(jiffies, ring->next_lru)) {
@@ -1121,7 +1130,7 @@ static void end_block_io_op(struct bio *bio)
  * and transmute it to the block API to hand it over to the proper block disk.
  */
 static int
-__do_block_io_op(struct xen_blkif_ring *ring)
+__do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags)
 {
 	union blkif_back_rings *blk_rings = &ring->blk_rings;
 	struct blkif_request req;
@@ -1144,6 +1153,9 @@ __do_block_io_op(struct xen_blkif_ring *ring)
 		if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
 			break;
 
+		/* We've seen a request, so clear spurious eoi flag. */
+		*eoi_flags &= ~XEN_EOI_FLAG_SPURIOUS;
+
 		if (kthread_should_stop()) {
 			more_to_do = 1;
 			break;
@@ -1202,13 +1214,13 @@ __do_block_io_op(struct xen_blkif_ring *ring)
 }
 
 static int
-do_block_io_op(struct xen_blkif_ring *ring)
+do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags)
 {
 	union blkif_back_rings *blk_rings = &ring->blk_rings;
 	int more_to_do;
 
 	do {
-		more_to_do = __do_block_io_op(ring);
+		more_to_do = __do_block_io_op(ring, eoi_flags);
 		if (more_to_do)
 			break;
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -246,9 +246,8 @@ static int xen_blkif_map(struct xen_blkif_ring *ring, grant_ref_t *gref,
 	if (req_prod - rsp_prod > size)
 		goto fail;
 
-	err = bind_interdomain_evtchn_to_irqhandler(blkif->domid, evtchn,
-						    xen_blkif_be_int, 0,
-						    "blkif-backend", ring);
+	err = bind_interdomain_evtchn_to_irqhandler_lateeoi(blkif->domid,
+			evtchn, xen_blkif_be_int, 0, "blkif-backend", ring);
 	if (err < 0)
 		goto fail;
 	ring->irq = err;