Commit ac4c3f34 authored by Joe Thornber, committed by Mike Snitzer

dm thin: sort the deferred cells

Sort the cells in logical block order before processing each cell in
process_thin_deferred_cells().  This significantly improves the on-disk
layout on rotational storage, thereby improving read performance.
Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent 23ca2bb6
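The patch below sorts each batch of deferred cells by the starting sector of the
cell's holder bio (bi_iter.bi_sector), so process_thin_deferred_cells() submits
I/O in ascending disk order. As a rough userspace analogue of the
comparator-over-a-pointer-array pattern (a sketch only: "struct cell" is a
hypothetical stand-in for dm_bio_prison_cell, and qsort() stands in for the
kernel's sort()):

/*
 * Userspace sketch only.  The array holds pointers to cells, so the
 * comparator dereferences twice: the same shape as cmp_cells() running
 * over pool->cell_sort_array in the patch below.
 */
#include <stdio.h>
#include <stdlib.h>

struct cell {					/* hypothetical stand-in */
	unsigned long long bi_sector;		/* holder bio's starting sector */
};

static int cmp_cells(const void *lhs, const void *rhs)
{
	const struct cell *l = *(const struct cell * const *) lhs;
	const struct cell *r = *(const struct cell * const *) rhs;

	if (l->bi_sector < r->bi_sector)
		return -1;
	if (l->bi_sector > r->bi_sector)
		return 1;
	return 0;
}

int main(void)
{
	struct cell a = { 4096 }, b = { 8 }, c = { 1024 };
	struct cell *batch[] = { &a, &b, &c };
	int i;

	qsort(batch, 3, sizeof(batch[0]), cmp_cells);

	for (i = 0; i < 3; i++)			/* prints 8, 1024, 4096 */
		printf("%llu\n", batch[i]->bi_sector);
	return 0;
}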
@@ -17,6 +17,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/sort.h>
 #include <linux/rbtree.h>
 
 #define DM_MSG_PREFIX "thin"
@@ -205,6 +206,8 @@ typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
 typedef void (*process_cell_fn)(struct thin_c *tc, struct dm_bio_prison_cell *cell);
 typedef void (*process_mapping_fn)(struct dm_thin_new_mapping *m);
 
+#define CELL_SORT_ARRAY_SIZE 8192
+
 struct pool {
 	struct list_head list;
 	struct dm_target *ti;	/* Only set if a pool target is bound */
@@ -252,6 +255,8 @@ struct pool {
 	process_mapping_fn process_prepared_mapping;
 	process_mapping_fn process_prepared_discard;
+
+	struct dm_bio_prison_cell *cell_sort_array[CELL_SORT_ARRAY_SIZE];
 };
 
 static enum pool_mode get_pool_mode(struct pool *pool);
@@ -1800,12 +1805,48 @@ static void process_thin_deferred_bios(struct thin_c *tc)
 	blk_finish_plug(&plug);
 }
 
+static int cmp_cells(const void *lhs, const void *rhs)
+{
+	struct dm_bio_prison_cell *lhs_cell = *((struct dm_bio_prison_cell **) lhs);
+	struct dm_bio_prison_cell *rhs_cell = *((struct dm_bio_prison_cell **) rhs);
+
+	BUG_ON(!lhs_cell->holder);
+	BUG_ON(!rhs_cell->holder);
+
+	if (lhs_cell->holder->bi_iter.bi_sector < rhs_cell->holder->bi_iter.bi_sector)
+		return -1;
+
+	if (lhs_cell->holder->bi_iter.bi_sector > rhs_cell->holder->bi_iter.bi_sector)
+		return 1;
+
+	return 0;
+}
+
+static unsigned sort_cells(struct pool *pool, struct list_head *cells)
+{
+	unsigned count = 0;
+	struct dm_bio_prison_cell *cell, *tmp;
+
+	list_for_each_entry_safe(cell, tmp, cells, user_list) {
+		if (count >= CELL_SORT_ARRAY_SIZE)
+			break;
+
+		pool->cell_sort_array[count++] = cell;
+		list_del(&cell->user_list);
+	}
+
+	sort(pool->cell_sort_array, count, sizeof(cell), cmp_cells, NULL);
+
+	return count;
+}
+
 static void process_thin_deferred_cells(struct thin_c *tc)
 {
 	struct pool *pool = tc->pool;
 	unsigned long flags;
 	struct list_head cells;
-	struct dm_bio_prison_cell *cell, *tmp;
+	struct dm_bio_prison_cell *cell;
+	unsigned i, j, count;
 
 	INIT_LIST_HEAD(&cells);
 
@@ -1816,27 +1857,34 @@ static void process_thin_deferred_cells(struct thin_c *tc)
 	if (list_empty(&cells))
 		return;
 
-	list_for_each_entry_safe(cell, tmp, &cells, user_list) {
-		BUG_ON(!cell->holder);
-
-		/*
-		 * If we've got no free new_mapping structs, and processing
-		 * this bio might require one, we pause until there are some
-		 * prepared mappings to process.
-		 */
-		if (ensure_next_mapping(pool)) {
-			spin_lock_irqsave(&tc->lock, flags);
-			list_add(&cell->user_list, &tc->deferred_cells);
-			list_splice(&cells, &tc->deferred_cells);
-			spin_unlock_irqrestore(&tc->lock, flags);
-			break;
-		}
-
-		if (cell->holder->bi_rw & REQ_DISCARD)
-			pool->process_discard_cell(tc, cell);
-		else
-			pool->process_cell(tc, cell);
-	}
+	do {
+		count = sort_cells(tc->pool, &cells);
+
+		for (i = 0; i < count; i++) {
+			cell = pool->cell_sort_array[i];
+			BUG_ON(!cell->holder);
+
+			/*
+			 * If we've got no free new_mapping structs, and processing
+			 * this bio might require one, we pause until there are some
+			 * prepared mappings to process.
+			 */
+			if (ensure_next_mapping(pool)) {
+				for (j = i; j < count; j++)
+					list_add(&pool->cell_sort_array[j]->user_list, &cells);
+
+				spin_lock_irqsave(&tc->lock, flags);
+				list_splice(&cells, &tc->deferred_cells);
+				spin_unlock_irqrestore(&tc->lock, flags);
+				return;
+			}
+
+			if (cell->holder->bi_rw & REQ_DISCARD)
+				pool->process_discard_cell(tc, cell);
+			else
+				pool->process_cell(tc, cell);
+		}
+	} while (!list_empty(&cells));
 }
 
 static void thin_get(struct thin_c *tc);
......
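Note that the sort is per batch: sort_cells() detaches at most
CELL_SORT_ARRAY_SIZE (8192) cells per pass, so cells are processed in ascending
sector order within each batch rather than across the whole deferred list. The
fixed array embedded in struct pool costs 8192 pointers (64 KiB on 64-bit
kernels) but avoids allocating on the I/O path. A minimal userspace sketch of
the do/while drain shape (hypothetical data; qsort() again in place of the
kernel's sort()):

/*
 * Userspace sketch of the drain loop's shape only: take up to BATCH
 * queued sectors per pass, sort the batch, process it in ascending
 * order, and repeat until the queue is empty.  The real function also
 * re-queues unprocessed cells and returns early when
 * ensure_next_mapping() fails.
 */
#include <stdio.h>
#include <stdlib.h>

#define BATCH 4		/* the kernel uses CELL_SORT_ARRAY_SIZE (8192) */

static int cmp_sectors(const void *lhs, const void *rhs)
{
	unsigned long long l = *(const unsigned long long *) lhs;
	unsigned long long r = *(const unsigned long long *) rhs;

	return (l > r) - (l < r);
}

int main(void)
{
	unsigned long long queued[] = { 900, 5, 42, 7000, 13, 64 };
	unsigned long long batch[BATCH];
	unsigned remaining = 6, off = 0, count, i;

	do {
		count = remaining < BATCH ? remaining : BATCH;
		for (i = 0; i < count; i++)
			batch[i] = queued[off + i];	/* detach one batch ... */
		qsort(batch, count, sizeof(batch[0]), cmp_sectors);	/* ... and sort it */

		for (i = 0; i < count; i++)
			printf("process sector %llu\n", batch[i]);	/* ascending within the batch */

		off += count;
		remaining -= count;
	} while (remaining);	/* mirrors: while (!list_empty(&cells)) */
	return 0;
}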