Commit a66cc28f authored by Mikulas Patocka, committed by Alasdair G Kergon

dm bufio: prefetch

This patch introduces a new function dm_bufio_prefetch. It prefetches
the specified range of blocks into the dm-bufio cache without waiting
for I/O completion.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
parent 67e2e2b2
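
As an editorial aside before the diff (not part of the commit), the sketch below shows how a device-mapper target might pair the new dm_bufio_prefetch() with the existing dm_bufio_read()/dm_bufio_release() calls. The client handle, the block range, and the name example_read_range() are assumptions made purely for illustration.

/*
 * Editorial sketch, not part of the commit: prefetch a whole range of
 * metadata blocks up front, then read them one by one.  The client "c"
 * is assumed to have been created with dm_bufio_client_create();
 * example_read_range() is a made-up name.
 */
#include <linux/err.h>
#include "dm-bufio.h"

static int example_read_range(struct dm_bufio_client *c,
			      sector_t first, unsigned count)
{
	sector_t block;

	/* Start asynchronous reads for the range; returns immediately. */
	dm_bufio_prefetch(c, first, count);

	for (block = first; block < first + count; block++) {
		struct dm_buffer *b;
		void *data;

		/* Sleeps only for blocks whose prefetch I/O is still pending. */
		data = dm_bufio_read(c, block, &b);
		if (IS_ERR(data))
			return PTR_ERR(data);

		/* ... inspect data ... */

		dm_bufio_release(b);
	}

	return 0;
}

Because dm_bufio_prefetch() only starts the reads, the later dm_bufio_read() calls block only for I/O that has not yet completed.
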
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -578,7 +578,7 @@ static void write_endio(struct bio *bio, int error)
 	struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);
 
 	b->write_error = error;
-	if (error) {
+	if (unlikely(error)) {
 		struct dm_bufio_client *c = b->c;
 		(void)cmpxchg(&c->async_write_error, 0, error);
 	}
@@ -697,13 +697,20 @@ static void __wait_for_free_buffer(struct dm_bufio_client *c)
 	dm_bufio_lock(c);
 }
 
+enum new_flag {
+	NF_FRESH = 0,
+	NF_READ = 1,
+	NF_GET = 2,
+	NF_PREFETCH = 3
+};
+
 /*
  * Allocate a new buffer. If the allocation is not possible, wait until
  * some other thread frees a buffer.
  *
  * May drop the lock and regain it.
  */
-static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c)
+static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
 {
 	struct dm_buffer *b;
 
@@ -726,6 +733,9 @@ static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client
 			return b;
 		}
 
+		if (nf == NF_PREFETCH)
+			return NULL;
+
 		if (!list_empty(&c->reserved_buffers)) {
 			b = list_entry(c->reserved_buffers.next,
 				       struct dm_buffer, lru_list);
@@ -743,9 +753,12 @@ static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client
 	}
 }
 
-static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c)
+static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
 {
-	struct dm_buffer *b = __alloc_buffer_wait_no_callback(c);
+	struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);
+
+	if (!b)
+		return NULL;
 
 	if (c->alloc_callback)
 		c->alloc_callback(b);
@@ -865,32 +878,23 @@ static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
  * Getting a buffer
  *--------------------------------------------------------------*/
 
-enum new_flag {
-	NF_FRESH = 0,
-	NF_READ = 1,
-	NF_GET = 2
-};
-
 static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
-				     enum new_flag nf, struct dm_buffer **bp,
-				     int *need_submit)
+				     enum new_flag nf, int *need_submit)
 {
 	struct dm_buffer *b, *new_b = NULL;
 
 	*need_submit = 0;
 
 	b = __find(c, block);
-	if (b) {
-		b->hold_count++;
-		__relink_lru(b, test_bit(B_DIRTY, &b->state) ||
-			     test_bit(B_WRITING, &b->state));
-		return b;
-	}
+	if (b)
+		goto found_buffer;
 
 	if (nf == NF_GET)
 		return NULL;
 
-	new_b = __alloc_buffer_wait(c);
+	new_b = __alloc_buffer_wait(c, nf);
+	if (!new_b)
+		return NULL;
 
 	/*
 	 * We've had a period where the mutex was unlocked, so need to
@@ -899,10 +903,7 @@ static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
 	b = __find(c, block);
 	if (b) {
 		__free_buffer_wake(new_b);
-		b->hold_count++;
-		__relink_lru(b, test_bit(B_DIRTY, &b->state) ||
-			     test_bit(B_WRITING, &b->state));
-		return b;
+		goto found_buffer;
 	}
 
 	__check_watermark(c);
@@ -922,6 +923,24 @@ static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
 	*need_submit = 1;
 
 	return b;
+
+found_buffer:
+	if (nf == NF_PREFETCH)
+		return NULL;
+	/*
+	 * Note: it is essential that we don't wait for the buffer to be
+	 * read if dm_bufio_get function is used. Both dm_bufio_get and
+	 * dm_bufio_prefetch can be used in the driver request routine.
+	 * If the user called both dm_bufio_prefetch and dm_bufio_get on
+	 * the same buffer, it would deadlock if we waited.
+	 */
+	if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state)))
+		return NULL;
+
+	b->hold_count++;
+	__relink_lru(b, test_bit(B_DIRTY, &b->state) ||
+		     test_bit(B_WRITING, &b->state));
+	return b;
 }
 
 /*
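
The comment added to __bufio_new() above captures the key constraint: dm_bufio_get() must never wait on B_READING, because a request routine may prefetch a block and then try to get it while that read is still in flight. A hedged sketch of that non-blocking pattern follows (not from the commit; example_map_block() and the -EWOULDBLOCK return convention are illustrative assumptions).

/*
 * Editorial sketch, not from the commit: a request routine that must
 * not sleep on buffer I/O.  dm_bufio_get() returns NULL both when the
 * block is absent and while a prefetch of it is still in flight, so
 * the caller kicks off (or re-issues) the prefetch and retries later.
 */
#include <linux/err.h>
#include <linux/errno.h>
#include "dm-bufio.h"

static int example_map_block(struct dm_bufio_client *c, sector_t block)
{
	struct dm_buffer *b;
	void *data;

	data = dm_bufio_get(c, block, &b);
	if (!data) {
		/* Start the read without waiting; caller retries later. */
		dm_bufio_prefetch(c, block, 1);
		return -EWOULDBLOCK;
	}
	if (IS_ERR(data))
		return PTR_ERR(data);

	/* ... use the cached block ... */

	dm_bufio_release(b);
	return 0;
}
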
@@ -956,10 +975,10 @@ static void *new_read(struct dm_bufio_client *c, sector_t block,
 	struct dm_buffer *b;
 
 	dm_bufio_lock(c);
-	b = __bufio_new(c, block, nf, bp, &need_submit);
+	b = __bufio_new(c, block, nf, &need_submit);
 	dm_bufio_unlock(c);
 
-	if (!b || IS_ERR(b))
+	if (!b)
 		return b;
 
 	if (need_submit)
@@ -1005,13 +1024,47 @@ void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
 }
 EXPORT_SYMBOL_GPL(dm_bufio_new);
 
+void dm_bufio_prefetch(struct dm_bufio_client *c,
+		       sector_t block, unsigned n_blocks)
+{
+	struct blk_plug plug;
+
+	blk_start_plug(&plug);
+	dm_bufio_lock(c);
+
+	for (; n_blocks--; block++) {
+		int need_submit;
+		struct dm_buffer *b;
+		b = __bufio_new(c, block, NF_PREFETCH, &need_submit);
+		if (unlikely(b != NULL)) {
+			dm_bufio_unlock(c);
+
+			if (need_submit)
+				submit_io(b, READ, b->block, read_endio);
+			dm_bufio_release(b);
+
+			dm_bufio_cond_resched();
+
+			if (!n_blocks)
+				goto flush_plug;
+			dm_bufio_lock(c);
+		}
+
+	}
+
+	dm_bufio_unlock(c);
+
+flush_plug:
+	blk_finish_plug(&plug);
+}
+EXPORT_SYMBOL_GPL(dm_bufio_prefetch);
+
 void dm_bufio_release(struct dm_buffer *b)
 {
 	struct dm_bufio_client *c = b->c;
 
 	dm_bufio_lock(c);
 
-	BUG_ON(test_bit(B_READING, &b->state));
 	BUG_ON(!b->hold_count);
 
 	b->hold_count--;
@@ -1024,6 +1077,7 @@ void dm_bufio_release(struct dm_buffer *b)
 	 * invalid buffer.
 	 */
 	if ((b->read_error || b->write_error) &&
+	    !test_bit(B_READING, &b->state) &&
 	    !test_bit(B_WRITING, &b->state) &&
 	    !test_bit(B_DIRTY, &b->state)) {
 		__unlink_buffer(b);
@@ -1041,6 +1095,8 @@ void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
 
 	dm_bufio_lock(c);
 
+	BUG_ON(test_bit(B_READING, &b->state));
+
 	if (!test_and_set_bit(B_DIRTY, &b->state))
 		__relink_lru(b, LIST_DIRTY);
 
--- a/drivers/md/dm-bufio.h
+++ b/drivers/md/dm-bufio.h
@@ -62,6 +62,14 @@ void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
 void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
 		   struct dm_buffer **bp);
 
+/*
+ * Prefetch the specified blocks to the cache.
+ * The function starts to read the blocks and returns without waiting for
+ * I/O to finish.
+ */
+void dm_bufio_prefetch(struct dm_bufio_client *c,
+		       sector_t block, unsigned n_blocks);
+
 /*
  * Release a reference obtained with dm_bufio_{read,get,new}. The data
  * pointer and dm_buffer pointer is no longer valid after this call.
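
As a usage note on the declaration above (again an editorial sketch, not from the commit), a caller that scans blocks sequentially could keep a small read-ahead window in front of its current position. EXAMPLE_RA_WINDOW, example_scan(), and the window size are arbitrary assumptions for illustration.

/*
 * Editorial sketch, not from the commit: top up a small read-ahead
 * window while walking blocks sequentially.
 */
#include <linux/err.h>
#include <linux/kernel.h>
#include "dm-bufio.h"

#define EXAMPLE_RA_WINDOW 8	/* arbitrary illustrative window size */

static int example_scan(struct dm_bufio_client *c, sector_t nr_blocks)
{
	sector_t block;

	for (block = 0; block < nr_blocks; block++) {
		struct dm_buffer *b;
		void *data;

		/* Refill the window every EXAMPLE_RA_WINDOW blocks. */
		if (!(block % EXAMPLE_RA_WINDOW))
			dm_bufio_prefetch(c, block,
					  min_t(sector_t, EXAMPLE_RA_WINDOW,
						nr_blocks - block));

		data = dm_bufio_read(c, block, &b);
		if (IS_ERR(data))
			return PTR_ERR(data);

		/* ... process the block ... */

		dm_bufio_release(b);
	}

	return 0;
}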