Commit b32d4582 authored by Nathan Huckleberry, committed by Mike Snitzer

dm bufio: Add DM_BUFIO_CLIENT_NO_SLEEP flag

Add an optional flag that ensures dm_bufio_client does not sleep
(primary focus is to service dm_bufio_get without sleeping). This
allows the dm-bufio cache to be queried from interrupt context.

To ensure that dm-bufio does not sleep, dm-bufio must use a spinlock
instead of a mutex. Additionally, to avoid deadlocks, special care
must be taken so that dm-bufio does not sleep while holding the
spinlock.

But again: the scope of this no_sleep is initially confined to
dm_bufio_get, so __alloc_buffer_wait_no_callback is _not_ changed to
avoid sleeping because __bufio_new avoids allocation for NF_GET.
Signed-off-by: Nathan Huckleberry <nhuck@google.com>
Signed-off-by: Mike Snitzer <snitzer@kernel.org>
parent 0fcb100d
...@@ -81,6 +81,8 @@ ...@@ -81,6 +81,8 @@
*/ */
struct dm_bufio_client { struct dm_bufio_client {
struct mutex lock; struct mutex lock;
spinlock_t spinlock;
unsigned long spinlock_flags;
struct list_head lru[LIST_SIZE]; struct list_head lru[LIST_SIZE];
unsigned long n_buffers[LIST_SIZE]; unsigned long n_buffers[LIST_SIZE];
...@@ -90,6 +92,7 @@ struct dm_bufio_client { ...@@ -90,6 +92,7 @@ struct dm_bufio_client {
s8 sectors_per_block_bits; s8 sectors_per_block_bits;
void (*alloc_callback)(struct dm_buffer *); void (*alloc_callback)(struct dm_buffer *);
void (*write_callback)(struct dm_buffer *); void (*write_callback)(struct dm_buffer *);
bool no_sleep;
struct kmem_cache *slab_buffer; struct kmem_cache *slab_buffer;
struct kmem_cache *slab_cache; struct kmem_cache *slab_cache;
...@@ -167,17 +170,26 @@ struct dm_buffer { ...@@ -167,17 +170,26 @@ struct dm_buffer {
static void dm_bufio_lock(struct dm_bufio_client *c) static void dm_bufio_lock(struct dm_bufio_client *c)
{ {
mutex_lock_nested(&c->lock, dm_bufio_in_request()); if (c->no_sleep)
spin_lock_irqsave_nested(&c->spinlock, c->spinlock_flags, dm_bufio_in_request());
else
mutex_lock_nested(&c->lock, dm_bufio_in_request());
} }
static int dm_bufio_trylock(struct dm_bufio_client *c) static int dm_bufio_trylock(struct dm_bufio_client *c)
{ {
return mutex_trylock(&c->lock); if (c->no_sleep)
return spin_trylock_irqsave(&c->spinlock, c->spinlock_flags);
else
return mutex_trylock(&c->lock);
} }
static void dm_bufio_unlock(struct dm_bufio_client *c) static void dm_bufio_unlock(struct dm_bufio_client *c)
{ {
mutex_unlock(&c->lock); if (c->no_sleep)
spin_unlock_irqrestore(&c->spinlock, c->spinlock_flags);
else
mutex_unlock(&c->lock);
} }
/*----------------------------------------------------------------*/ /*----------------------------------------------------------------*/
...@@ -1748,12 +1760,16 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign ...@@ -1748,12 +1760,16 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
c->alloc_callback = alloc_callback; c->alloc_callback = alloc_callback;
c->write_callback = write_callback; c->write_callback = write_callback;
if (flags & DM_BUFIO_CLIENT_NO_SLEEP)
c->no_sleep = true;
for (i = 0; i < LIST_SIZE; i++) { for (i = 0; i < LIST_SIZE; i++) {
INIT_LIST_HEAD(&c->lru[i]); INIT_LIST_HEAD(&c->lru[i]);
c->n_buffers[i] = 0; c->n_buffers[i] = 0;
} }
mutex_init(&c->lock); mutex_init(&c->lock);
spin_lock_init(&c->spinlock);
INIT_LIST_HEAD(&c->reserved_buffers); INIT_LIST_HEAD(&c->reserved_buffers);
c->need_reserved_buffers = reserved_buffers; c->need_reserved_buffers = reserved_buffers;
......
...@@ -17,6 +17,11 @@ ...@@ -17,6 +17,11 @@
struct dm_bufio_client; struct dm_bufio_client;
struct dm_buffer; struct dm_buffer;
/*
* Flags for dm_bufio_client_create
*/
#define DM_BUFIO_CLIENT_NO_SLEEP 0x1
/* /*
* Create a buffered IO cache on a given device * Create a buffered IO cache on a given device
*/ */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment