Commit 3467811e authored by Steven Noonan, committed by Jens Axboe

xen-blkfront: make blkif_io_lock spinlock per-device

This patch moves the global blkif_io_lock to the per-device structure. The
spinlock seems to exist for two reasons: to disable IRQs in the interrupt
handlers for blkfront, and to protect the blkfront VBDs when a detachment is
requested.

Having a global blkif_io_lock doesn't make sense given the use case, and it
drastically hinders performance due to contention. All VBDs with pending IOs
have to take the lock in order to get work done, which serializes everything
pretty badly.
Signed-off-by: Steven Noonan <snoonan@amazon.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
parent dad5cf65
...@@ -82,6 +82,7 @@ static const struct block_device_operations xlvbd_block_fops; ...@@ -82,6 +82,7 @@ static const struct block_device_operations xlvbd_block_fops;
*/ */
struct blkfront_info struct blkfront_info
{ {
spinlock_t io_lock;
struct mutex mutex; struct mutex mutex;
struct xenbus_device *xbdev; struct xenbus_device *xbdev;
struct gendisk *gd; struct gendisk *gd;
...@@ -106,8 +107,6 @@ struct blkfront_info ...@@ -106,8 +107,6 @@ struct blkfront_info
int is_ready; int is_ready;
}; };
static DEFINE_SPINLOCK(blkif_io_lock);
static unsigned int nr_minors; static unsigned int nr_minors;
static unsigned long *minors; static unsigned long *minors;
static DEFINE_SPINLOCK(minor_lock); static DEFINE_SPINLOCK(minor_lock);
...@@ -418,7 +417,7 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size) ...@@ -418,7 +417,7 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
struct request_queue *rq; struct request_queue *rq;
struct blkfront_info *info = gd->private_data; struct blkfront_info *info = gd->private_data;
rq = blk_init_queue(do_blkif_request, &blkif_io_lock); rq = blk_init_queue(do_blkif_request, &info->io_lock);
if (rq == NULL) if (rq == NULL)
return -1; return -1;
...@@ -635,14 +634,14 @@ static void xlvbd_release_gendisk(struct blkfront_info *info) ...@@ -635,14 +634,14 @@ static void xlvbd_release_gendisk(struct blkfront_info *info)
if (info->rq == NULL) if (info->rq == NULL)
return; return;
spin_lock_irqsave(&blkif_io_lock, flags); spin_lock_irqsave(&info->io_lock, flags);
/* No more blkif_request(). */ /* No more blkif_request(). */
blk_stop_queue(info->rq); blk_stop_queue(info->rq);
/* No more gnttab callback work. */ /* No more gnttab callback work. */
gnttab_cancel_free_callback(&info->callback); gnttab_cancel_free_callback(&info->callback);
spin_unlock_irqrestore(&blkif_io_lock, flags); spin_unlock_irqrestore(&info->io_lock, flags);
/* Flush gnttab callback work. Must be done with no locks held. */ /* Flush gnttab callback work. Must be done with no locks held. */
flush_work_sync(&info->work); flush_work_sync(&info->work);
...@@ -674,16 +673,16 @@ static void blkif_restart_queue(struct work_struct *work) ...@@ -674,16 +673,16 @@ static void blkif_restart_queue(struct work_struct *work)
{ {
struct blkfront_info *info = container_of(work, struct blkfront_info, work); struct blkfront_info *info = container_of(work, struct blkfront_info, work);
spin_lock_irq(&blkif_io_lock); spin_lock_irq(&info->io_lock);
if (info->connected == BLKIF_STATE_CONNECTED) if (info->connected == BLKIF_STATE_CONNECTED)
kick_pending_request_queues(info); kick_pending_request_queues(info);
spin_unlock_irq(&blkif_io_lock); spin_unlock_irq(&info->io_lock);
} }
static void blkif_free(struct blkfront_info *info, int suspend) static void blkif_free(struct blkfront_info *info, int suspend)
{ {
/* Prevent new requests being issued until we fix things up. */ /* Prevent new requests being issued until we fix things up. */
spin_lock_irq(&blkif_io_lock); spin_lock_irq(&info->io_lock);
info->connected = suspend ? info->connected = suspend ?
BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED; BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
/* No more blkif_request(). */ /* No more blkif_request(). */
...@@ -691,7 +690,7 @@ static void blkif_free(struct blkfront_info *info, int suspend) ...@@ -691,7 +690,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
blk_stop_queue(info->rq); blk_stop_queue(info->rq);
/* No more gnttab callback work. */ /* No more gnttab callback work. */
gnttab_cancel_free_callback(&info->callback); gnttab_cancel_free_callback(&info->callback);
spin_unlock_irq(&blkif_io_lock); spin_unlock_irq(&info->io_lock);
/* Flush gnttab callback work. Must be done with no locks held. */ /* Flush gnttab callback work. Must be done with no locks held. */
flush_work_sync(&info->work); flush_work_sync(&info->work);
...@@ -727,10 +726,10 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) ...@@ -727,10 +726,10 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
struct blkfront_info *info = (struct blkfront_info *)dev_id; struct blkfront_info *info = (struct blkfront_info *)dev_id;
int error; int error;
spin_lock_irqsave(&blkif_io_lock, flags); spin_lock_irqsave(&info->io_lock, flags);
if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) { if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
spin_unlock_irqrestore(&blkif_io_lock, flags); spin_unlock_irqrestore(&info->io_lock, flags);
return IRQ_HANDLED; return IRQ_HANDLED;
} }
...@@ -815,7 +814,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) ...@@ -815,7 +814,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
kick_pending_request_queues(info); kick_pending_request_queues(info);
spin_unlock_irqrestore(&blkif_io_lock, flags); spin_unlock_irqrestore(&info->io_lock, flags);
return IRQ_HANDLED; return IRQ_HANDLED;
} }
...@@ -990,6 +989,7 @@ static int blkfront_probe(struct xenbus_device *dev, ...@@ -990,6 +989,7 @@ static int blkfront_probe(struct xenbus_device *dev,
} }
mutex_init(&info->mutex); mutex_init(&info->mutex);
spin_lock_init(&info->io_lock);
info->xbdev = dev; info->xbdev = dev;
info->vdevice = vdevice; info->vdevice = vdevice;
info->connected = BLKIF_STATE_DISCONNECTED; info->connected = BLKIF_STATE_DISCONNECTED;
...@@ -1067,7 +1067,7 @@ static int blkif_recover(struct blkfront_info *info) ...@@ -1067,7 +1067,7 @@ static int blkif_recover(struct blkfront_info *info)
xenbus_switch_state(info->xbdev, XenbusStateConnected); xenbus_switch_state(info->xbdev, XenbusStateConnected);
spin_lock_irq(&blkif_io_lock); spin_lock_irq(&info->io_lock);
/* Now safe for us to use the shared ring */ /* Now safe for us to use the shared ring */
info->connected = BLKIF_STATE_CONNECTED; info->connected = BLKIF_STATE_CONNECTED;
...@@ -1078,7 +1078,7 @@ static int blkif_recover(struct blkfront_info *info) ...@@ -1078,7 +1078,7 @@ static int blkif_recover(struct blkfront_info *info)
/* Kick any other new requests queued since we resumed */ /* Kick any other new requests queued since we resumed */
kick_pending_request_queues(info); kick_pending_request_queues(info);
spin_unlock_irq(&blkif_io_lock); spin_unlock_irq(&info->io_lock);
return 0; return 0;
} }
...@@ -1276,10 +1276,10 @@ static void blkfront_connect(struct blkfront_info *info) ...@@ -1276,10 +1276,10 @@ static void blkfront_connect(struct blkfront_info *info)
xenbus_switch_state(info->xbdev, XenbusStateConnected); xenbus_switch_state(info->xbdev, XenbusStateConnected);
/* Kick pending requests. */ /* Kick pending requests. */
spin_lock_irq(&blkif_io_lock); spin_lock_irq(&info->io_lock);
info->connected = BLKIF_STATE_CONNECTED; info->connected = BLKIF_STATE_CONNECTED;
kick_pending_request_queues(info); kick_pending_request_queues(info);
spin_unlock_irq(&blkif_io_lock); spin_unlock_irq(&info->io_lock);
add_disk(info->gd); add_disk(info->gd);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment