Commit d4bf0065 authored by Bob Liu, committed by Konrad Rzeszutek Wilk

xen/blkback: make pool of persistent grants and free pages per-queue

Make pool of persistent grants and free pages per-queue/ring instead of
per-device to get better scalability.

Test was done based on null_blk driver:
dom0: v4.2-rc8 16vcpus 10GB "modprobe null_blk"
domu: v4.2-rc8 16vcpus 10GB

[test]
rw=read
direct=1
ioengine=libaio
bs=4k
time_based
runtime=30
filename=/dev/xvdb
numjobs=16
iodepth=64
iodepth_batch=64
iodepth_batch_complete=64
group_reporting

Results:
iops1: After patch "xen/blkfront: make persistent grants per-queue".
iops2: After this patch.

Queues:			  1 	   4 	  	  8 	 	 16
Iops orig(k):		810 	1064 		780 		700
Iops1(k):		810     1230(~20%)	1024(~20%)	850(~20%)
Iops2(k):		810     1410(~35%)	1354(~75%)      1440(~100%)

With 4 queues after this commit we can get ~75% increase in IOPS, and
performance won't drop if increasing queue numbers.

Please find the respective chart in this link:
https://www.dropbox.com/s/agrcy2pbzbsvmwv/iops.png?dl=0

Signed-off-by: Bob Liu <bob.liu@oracle.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
parent d62d8600
...@@ -123,60 +123,60 @@ module_param(log_stats, int, 0644); ...@@ -123,60 +123,60 @@ module_param(log_stats, int, 0644);
/* Number of free pages to remove on each call to gnttab_free_pages */ /* Number of free pages to remove on each call to gnttab_free_pages */
#define NUM_BATCH_FREE_PAGES 10 #define NUM_BATCH_FREE_PAGES 10
static inline int get_free_page(struct xen_blkif *blkif, struct page **page) static inline int get_free_page(struct xen_blkif_ring *ring, struct page **page)
{ {
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&blkif->free_pages_lock, flags); spin_lock_irqsave(&ring->free_pages_lock, flags);
if (list_empty(&blkif->free_pages)) { if (list_empty(&ring->free_pages)) {
BUG_ON(blkif->free_pages_num != 0); BUG_ON(ring->free_pages_num != 0);
spin_unlock_irqrestore(&blkif->free_pages_lock, flags); spin_unlock_irqrestore(&ring->free_pages_lock, flags);
return gnttab_alloc_pages(1, page); return gnttab_alloc_pages(1, page);
} }
BUG_ON(blkif->free_pages_num == 0); BUG_ON(ring->free_pages_num == 0);
page[0] = list_first_entry(&blkif->free_pages, struct page, lru); page[0] = list_first_entry(&ring->free_pages, struct page, lru);
list_del(&page[0]->lru); list_del(&page[0]->lru);
blkif->free_pages_num--; ring->free_pages_num--;
spin_unlock_irqrestore(&blkif->free_pages_lock, flags); spin_unlock_irqrestore(&ring->free_pages_lock, flags);
return 0; return 0;
} }
static inline void put_free_pages(struct xen_blkif *blkif, struct page **page, static inline void put_free_pages(struct xen_blkif_ring *ring, struct page **page,
int num) int num)
{ {
unsigned long flags; unsigned long flags;
int i; int i;
spin_lock_irqsave(&blkif->free_pages_lock, flags); spin_lock_irqsave(&ring->free_pages_lock, flags);
for (i = 0; i < num; i++) for (i = 0; i < num; i++)
list_add(&page[i]->lru, &blkif->free_pages); list_add(&page[i]->lru, &ring->free_pages);
blkif->free_pages_num += num; ring->free_pages_num += num;
spin_unlock_irqrestore(&blkif->free_pages_lock, flags); spin_unlock_irqrestore(&ring->free_pages_lock, flags);
} }
static inline void shrink_free_pagepool(struct xen_blkif *blkif, int num) static inline void shrink_free_pagepool(struct xen_blkif_ring *ring, int num)
{ {
/* Remove requested pages in batches of NUM_BATCH_FREE_PAGES */ /* Remove requested pages in batches of NUM_BATCH_FREE_PAGES */
struct page *page[NUM_BATCH_FREE_PAGES]; struct page *page[NUM_BATCH_FREE_PAGES];
unsigned int num_pages = 0; unsigned int num_pages = 0;
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&blkif->free_pages_lock, flags); spin_lock_irqsave(&ring->free_pages_lock, flags);
while (blkif->free_pages_num > num) { while (ring->free_pages_num > num) {
BUG_ON(list_empty(&blkif->free_pages)); BUG_ON(list_empty(&ring->free_pages));
page[num_pages] = list_first_entry(&blkif->free_pages, page[num_pages] = list_first_entry(&ring->free_pages,
struct page, lru); struct page, lru);
list_del(&page[num_pages]->lru); list_del(&page[num_pages]->lru);
blkif->free_pages_num--; ring->free_pages_num--;
if (++num_pages == NUM_BATCH_FREE_PAGES) { if (++num_pages == NUM_BATCH_FREE_PAGES) {
spin_unlock_irqrestore(&blkif->free_pages_lock, flags); spin_unlock_irqrestore(&ring->free_pages_lock, flags);
gnttab_free_pages(num_pages, page); gnttab_free_pages(num_pages, page);
spin_lock_irqsave(&blkif->free_pages_lock, flags); spin_lock_irqsave(&ring->free_pages_lock, flags);
num_pages = 0; num_pages = 0;
} }
} }
spin_unlock_irqrestore(&blkif->free_pages_lock, flags); spin_unlock_irqrestore(&ring->free_pages_lock, flags);
if (num_pages != 0) if (num_pages != 0)
gnttab_free_pages(num_pages, page); gnttab_free_pages(num_pages, page);
} }
...@@ -199,23 +199,29 @@ static void make_response(struct xen_blkif_ring *ring, u64 id, ...@@ -199,23 +199,29 @@ static void make_response(struct xen_blkif_ring *ring, u64 id,
/*
 * We don't need locking around the persistent grant helpers
 * because blkback uses a single thread for each backend, so we
 * can be sure that these functions will never be called recursively.
 *
 * The only exception to that is put_persistent_gnt, which can be called
 * from interrupt context (by xen_blkbk_unmap), so we have to use atomic
 * bit operations to modify the flags of a persistent grant and to count
 * the number of used grants.
 */
static int add_persistent_gnt(struct xen_blkif *blkif, static int add_persistent_gnt(struct xen_blkif_ring *ring,
struct persistent_gnt *persistent_gnt) struct persistent_gnt *persistent_gnt)
{ {
struct rb_node **new = NULL, *parent = NULL; struct rb_node **new = NULL, *parent = NULL;
struct persistent_gnt *this; struct persistent_gnt *this;
struct xen_blkif *blkif = ring->blkif;
BUG_ON(!spin_is_locked(&blkif->pers_gnts_lock)); if (ring->persistent_gnt_c >= xen_blkif_max_pgrants) {
if (blkif->persistent_gnt_c >= xen_blkif_max_pgrants) {
if (!blkif->vbd.overflow_max_grants) if (!blkif->vbd.overflow_max_grants)
blkif->vbd.overflow_max_grants = 1; blkif->vbd.overflow_max_grants = 1;
return -EBUSY; return -EBUSY;
} }
/* Figure out where to put new node */ /* Figure out where to put new node */
new = &blkif->persistent_gnts.rb_node; new = &ring->persistent_gnts.rb_node;
while (*new) { while (*new) {
this = container_of(*new, struct persistent_gnt, node); this = container_of(*new, struct persistent_gnt, node);
...@@ -234,20 +240,19 @@ static int add_persistent_gnt(struct xen_blkif *blkif, ...@@ -234,20 +240,19 @@ static int add_persistent_gnt(struct xen_blkif *blkif,
set_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags); set_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
/* Add new node and rebalance tree. */ /* Add new node and rebalance tree. */
rb_link_node(&(persistent_gnt->node), parent, new); rb_link_node(&(persistent_gnt->node), parent, new);
rb_insert_color(&(persistent_gnt->node), &blkif->persistent_gnts); rb_insert_color(&(persistent_gnt->node), &ring->persistent_gnts);
blkif->persistent_gnt_c++; ring->persistent_gnt_c++;
atomic_inc(&blkif->persistent_gnt_in_use); atomic_inc(&ring->persistent_gnt_in_use);
return 0; return 0;
} }
static struct persistent_gnt *get_persistent_gnt(struct xen_blkif *blkif, static struct persistent_gnt *get_persistent_gnt(struct xen_blkif_ring *ring,
grant_ref_t gref) grant_ref_t gref)
{ {
struct persistent_gnt *data; struct persistent_gnt *data;
struct rb_node *node = NULL; struct rb_node *node = NULL;
BUG_ON(!spin_is_locked(&blkif->pers_gnts_lock)); node = ring->persistent_gnts.rb_node;
node = blkif->persistent_gnts.rb_node;
while (node) { while (node) {
data = container_of(node, struct persistent_gnt, node); data = container_of(node, struct persistent_gnt, node);
...@@ -261,25 +266,24 @@ static struct persistent_gnt *get_persistent_gnt(struct xen_blkif *blkif, ...@@ -261,25 +266,24 @@ static struct persistent_gnt *get_persistent_gnt(struct xen_blkif *blkif,
return NULL; return NULL;
} }
set_bit(PERSISTENT_GNT_ACTIVE, data->flags); set_bit(PERSISTENT_GNT_ACTIVE, data->flags);
atomic_inc(&blkif->persistent_gnt_in_use); atomic_inc(&ring->persistent_gnt_in_use);
return data; return data;
} }
} }
return NULL; return NULL;
} }
static void put_persistent_gnt(struct xen_blkif *blkif, static void put_persistent_gnt(struct xen_blkif_ring *ring,
struct persistent_gnt *persistent_gnt) struct persistent_gnt *persistent_gnt)
{ {
BUG_ON(!spin_is_locked(&blkif->pers_gnts_lock));
if(!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags)) if(!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
pr_alert_ratelimited("freeing a grant already unused\n"); pr_alert_ratelimited("freeing a grant already unused\n");
set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags); set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags); clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
atomic_dec(&blkif->persistent_gnt_in_use); atomic_dec(&ring->persistent_gnt_in_use);
} }
static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root, static void free_persistent_gnts(struct xen_blkif_ring *ring, struct rb_root *root,
unsigned int num) unsigned int num)
{ {
struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST]; struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
...@@ -293,7 +297,6 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root, ...@@ -293,7 +297,6 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
unmap_data.unmap_ops = unmap; unmap_data.unmap_ops = unmap;
unmap_data.kunmap_ops = NULL; unmap_data.kunmap_ops = NULL;
BUG_ON(!spin_is_locked(&blkif->pers_gnts_lock));
foreach_grant_safe(persistent_gnt, n, root, node) { foreach_grant_safe(persistent_gnt, n, root, node) {
BUG_ON(persistent_gnt->handle == BUG_ON(persistent_gnt->handle ==
BLKBACK_INVALID_HANDLE); BLKBACK_INVALID_HANDLE);
...@@ -311,7 +314,7 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root, ...@@ -311,7 +314,7 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
unmap_data.count = segs_to_unmap; unmap_data.count = segs_to_unmap;
BUG_ON(gnttab_unmap_refs_sync(&unmap_data)); BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
put_free_pages(blkif, pages, segs_to_unmap); put_free_pages(ring, pages, segs_to_unmap);
segs_to_unmap = 0; segs_to_unmap = 0;
} }
...@@ -328,17 +331,15 @@ void xen_blkbk_unmap_purged_grants(struct work_struct *work) ...@@ -328,17 +331,15 @@ void xen_blkbk_unmap_purged_grants(struct work_struct *work)
struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
struct persistent_gnt *persistent_gnt; struct persistent_gnt *persistent_gnt;
int segs_to_unmap = 0; int segs_to_unmap = 0;
struct xen_blkif *blkif = container_of(work, typeof(*blkif), persistent_purge_work); struct xen_blkif_ring *ring = container_of(work, typeof(*ring), persistent_purge_work);
struct gntab_unmap_queue_data unmap_data; struct gntab_unmap_queue_data unmap_data;
unsigned long flags;
unmap_data.pages = pages; unmap_data.pages = pages;
unmap_data.unmap_ops = unmap; unmap_data.unmap_ops = unmap;
unmap_data.kunmap_ops = NULL; unmap_data.kunmap_ops = NULL;
spin_lock_irqsave(&blkif->pers_gnts_lock, flags); while(!list_empty(&ring->persistent_purge_list)) {
while(!list_empty(&blkif->persistent_purge_list)) { persistent_gnt = list_first_entry(&ring->persistent_purge_list,
persistent_gnt = list_first_entry(&blkif->persistent_purge_list,
struct persistent_gnt, struct persistent_gnt,
remove_node); remove_node);
list_del(&persistent_gnt->remove_node); list_del(&persistent_gnt->remove_node);
...@@ -353,45 +354,42 @@ void xen_blkbk_unmap_purged_grants(struct work_struct *work) ...@@ -353,45 +354,42 @@ void xen_blkbk_unmap_purged_grants(struct work_struct *work)
if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) { if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
unmap_data.count = segs_to_unmap; unmap_data.count = segs_to_unmap;
BUG_ON(gnttab_unmap_refs_sync(&unmap_data)); BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
put_free_pages(blkif, pages, segs_to_unmap); put_free_pages(ring, pages, segs_to_unmap);
segs_to_unmap = 0; segs_to_unmap = 0;
} }
kfree(persistent_gnt); kfree(persistent_gnt);
} }
spin_unlock_irqrestore(&blkif->pers_gnts_lock, flags);
if (segs_to_unmap > 0) { if (segs_to_unmap > 0) {
unmap_data.count = segs_to_unmap; unmap_data.count = segs_to_unmap;
BUG_ON(gnttab_unmap_refs_sync(&unmap_data)); BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
put_free_pages(blkif, pages, segs_to_unmap); put_free_pages(ring, pages, segs_to_unmap);
} }
} }
static void purge_persistent_gnt(struct xen_blkif *blkif) static void purge_persistent_gnt(struct xen_blkif_ring *ring)
{ {
struct persistent_gnt *persistent_gnt; struct persistent_gnt *persistent_gnt;
struct rb_node *n; struct rb_node *n;
unsigned int num_clean, total; unsigned int num_clean, total;
bool scan_used = false, clean_used = false; bool scan_used = false, clean_used = false;
struct rb_root *root; struct rb_root *root;
unsigned long flags;
spin_lock_irqsave(&blkif->pers_gnts_lock, flags); if (ring->persistent_gnt_c < xen_blkif_max_pgrants ||
if (blkif->persistent_gnt_c < xen_blkif_max_pgrants || (ring->persistent_gnt_c == xen_blkif_max_pgrants &&
(blkif->persistent_gnt_c == xen_blkif_max_pgrants && !ring->blkif->vbd.overflow_max_grants)) {
!blkif->vbd.overflow_max_grants)) {
goto out; goto out;
} }
if (work_busy(&blkif->persistent_purge_work)) { if (work_busy(&ring->persistent_purge_work)) {
pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n"); pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n");
goto out; goto out;
} }
num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN; num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN;
num_clean = blkif->persistent_gnt_c - xen_blkif_max_pgrants + num_clean; num_clean = ring->persistent_gnt_c - xen_blkif_max_pgrants + num_clean;
num_clean = min(blkif->persistent_gnt_c, num_clean); num_clean = min(ring->persistent_gnt_c, num_clean);
if ((num_clean == 0) || if ((num_clean == 0) ||
(num_clean > (blkif->persistent_gnt_c - atomic_read(&blkif->persistent_gnt_in_use)))) (num_clean > (ring->persistent_gnt_c - atomic_read(&ring->persistent_gnt_in_use))))
goto out; goto out;
/* /*
...@@ -407,8 +405,8 @@ static void purge_persistent_gnt(struct xen_blkif *blkif) ...@@ -407,8 +405,8 @@ static void purge_persistent_gnt(struct xen_blkif *blkif)
pr_debug("Going to purge %u persistent grants\n", num_clean); pr_debug("Going to purge %u persistent grants\n", num_clean);
BUG_ON(!list_empty(&blkif->persistent_purge_list)); BUG_ON(!list_empty(&ring->persistent_purge_list));
root = &blkif->persistent_gnts; root = &ring->persistent_gnts;
purge_list: purge_list:
foreach_grant_safe(persistent_gnt, n, root, node) { foreach_grant_safe(persistent_gnt, n, root, node) {
BUG_ON(persistent_gnt->handle == BUG_ON(persistent_gnt->handle ==
...@@ -427,7 +425,7 @@ static void purge_persistent_gnt(struct xen_blkif *blkif) ...@@ -427,7 +425,7 @@ static void purge_persistent_gnt(struct xen_blkif *blkif)
rb_erase(&persistent_gnt->node, root); rb_erase(&persistent_gnt->node, root);
list_add(&persistent_gnt->remove_node, list_add(&persistent_gnt->remove_node,
&blkif->persistent_purge_list); &ring->persistent_purge_list);
if (--num_clean == 0) if (--num_clean == 0)
goto finished; goto finished;
} }
...@@ -448,18 +446,14 @@ static void purge_persistent_gnt(struct xen_blkif *blkif) ...@@ -448,18 +446,14 @@ static void purge_persistent_gnt(struct xen_blkif *blkif)
goto purge_list; goto purge_list;
} }
blkif->persistent_gnt_c -= (total - num_clean); ring->persistent_gnt_c -= (total - num_clean);
spin_unlock_irqrestore(&blkif->pers_gnts_lock, flags); ring->blkif->vbd.overflow_max_grants = 0;
blkif->vbd.overflow_max_grants = 0;
/* We can defer this work */ /* We can defer this work */
schedule_work(&blkif->persistent_purge_work); schedule_work(&ring->persistent_purge_work);
pr_debug("Purged %u/%u\n", (total - num_clean), total); pr_debug("Purged %u/%u\n", (total - num_clean), total);
return;
out: out:
spin_unlock_irqrestore(&blkif->pers_gnts_lock, flags);
return; return;
} }
...@@ -591,14 +585,16 @@ irqreturn_t xen_blkif_be_int(int irq, void *dev_id) ...@@ -591,14 +585,16 @@ irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
* SCHEDULER FUNCTIONS * SCHEDULER FUNCTIONS
*/ */
static void print_stats(struct xen_blkif *blkif) static void print_stats(struct xen_blkif_ring *ring)
{ {
struct xen_blkif *blkif = ring->blkif;
pr_info("(%s): oo %3llu | rd %4llu | wr %4llu | f %4llu" pr_info("(%s): oo %3llu | rd %4llu | wr %4llu | f %4llu"
" | ds %4llu | pg: %4u/%4d\n", " | ds %4llu | pg: %4u/%4d\n",
current->comm, blkif->st_oo_req, current->comm, blkif->st_oo_req,
blkif->st_rd_req, blkif->st_wr_req, blkif->st_rd_req, blkif->st_wr_req,
blkif->st_f_req, blkif->st_ds_req, blkif->st_f_req, blkif->st_ds_req,
blkif->persistent_gnt_c, ring->persistent_gnt_c,
xen_blkif_max_pgrants); xen_blkif_max_pgrants);
blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000); blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
blkif->st_rd_req = 0; blkif->st_rd_req = 0;
...@@ -651,23 +647,23 @@ int xen_blkif_schedule(void *arg) ...@@ -651,23 +647,23 @@ int xen_blkif_schedule(void *arg)
purge_gnt_list: purge_gnt_list:
if (blkif->vbd.feature_gnt_persistent && if (blkif->vbd.feature_gnt_persistent &&
time_after(jiffies, blkif->next_lru)) { time_after(jiffies, ring->next_lru)) {
purge_persistent_gnt(blkif); purge_persistent_gnt(ring);
blkif->next_lru = jiffies + msecs_to_jiffies(LRU_INTERVAL); ring->next_lru = jiffies + msecs_to_jiffies(LRU_INTERVAL);
} }
/* Shrink if we have more than xen_blkif_max_buffer_pages */ /* Shrink if we have more than xen_blkif_max_buffer_pages */
shrink_free_pagepool(blkif, xen_blkif_max_buffer_pages); shrink_free_pagepool(ring, xen_blkif_max_buffer_pages);
if (log_stats && time_after(jiffies, blkif->st_print)) if (log_stats && time_after(jiffies, ring->blkif->st_print))
print_stats(blkif); print_stats(ring);
} }
/* Drain pending purge work */ /* Drain pending purge work */
flush_work(&blkif->persistent_purge_work); flush_work(&ring->persistent_purge_work);
if (log_stats) if (log_stats)
print_stats(blkif); print_stats(ring);
ring->xenblkd = NULL; ring->xenblkd = NULL;
xen_blkif_put(blkif); xen_blkif_put(blkif);
...@@ -680,21 +676,16 @@ int xen_blkif_schedule(void *arg) ...@@ -680,21 +676,16 @@ int xen_blkif_schedule(void *arg)
*/ */
void xen_blkbk_free_caches(struct xen_blkif_ring *ring) void xen_blkbk_free_caches(struct xen_blkif_ring *ring)
{ {
struct xen_blkif *blkif = ring->blkif;
unsigned long flags;
/* Free all persistent grant pages */ /* Free all persistent grant pages */
spin_lock_irqsave(&blkif->pers_gnts_lock, flags); if (!RB_EMPTY_ROOT(&ring->persistent_gnts))
if (!RB_EMPTY_ROOT(&blkif->persistent_gnts)) free_persistent_gnts(ring, &ring->persistent_gnts,
free_persistent_gnts(blkif, &blkif->persistent_gnts, ring->persistent_gnt_c);
blkif->persistent_gnt_c);
BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts)); BUG_ON(!RB_EMPTY_ROOT(&ring->persistent_gnts));
blkif->persistent_gnt_c = 0; ring->persistent_gnt_c = 0;
spin_unlock_irqrestore(&blkif->pers_gnts_lock, flags);
/* Since we are shutting down remove all pages from the buffer */ /* Since we are shutting down remove all pages from the buffer */
shrink_free_pagepool(blkif, 0 /* All */); shrink_free_pagepool(ring, 0 /* All */);
} }
static unsigned int xen_blkbk_unmap_prepare( static unsigned int xen_blkbk_unmap_prepare(
...@@ -705,13 +696,10 @@ static unsigned int xen_blkbk_unmap_prepare( ...@@ -705,13 +696,10 @@ static unsigned int xen_blkbk_unmap_prepare(
struct page **unmap_pages) struct page **unmap_pages)
{ {
unsigned int i, invcount = 0; unsigned int i, invcount = 0;
unsigned long flags;
for (i = 0; i < num; i++) { for (i = 0; i < num; i++) {
if (pages[i]->persistent_gnt != NULL) { if (pages[i]->persistent_gnt != NULL) {
spin_lock_irqsave(&ring->blkif->pers_gnts_lock, flags); put_persistent_gnt(ring, pages[i]->persistent_gnt);
put_persistent_gnt(ring->blkif, pages[i]->persistent_gnt);
spin_unlock_irqrestore(&ring->blkif->pers_gnts_lock, flags);
continue; continue;
} }
if (pages[i]->handle == BLKBACK_INVALID_HANDLE) if (pages[i]->handle == BLKBACK_INVALID_HANDLE)
...@@ -736,7 +724,7 @@ static void xen_blkbk_unmap_and_respond_callback(int result, struct gntab_unmap_ ...@@ -736,7 +724,7 @@ static void xen_blkbk_unmap_and_respond_callback(int result, struct gntab_unmap_
but is this the best way to deal with this? */ but is this the best way to deal with this? */
BUG_ON(result); BUG_ON(result);
put_free_pages(blkif, data->pages, data->count); put_free_pages(ring, data->pages, data->count);
make_response(ring, pending_req->id, make_response(ring, pending_req->id,
pending_req->operation, pending_req->status); pending_req->operation, pending_req->status);
free_req(ring, pending_req); free_req(ring, pending_req);
...@@ -803,7 +791,7 @@ static void xen_blkbk_unmap(struct xen_blkif_ring *ring, ...@@ -803,7 +791,7 @@ static void xen_blkbk_unmap(struct xen_blkif_ring *ring,
if (invcount) { if (invcount) {
ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount); ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
BUG_ON(ret); BUG_ON(ret);
put_free_pages(ring->blkif, unmap_pages, invcount); put_free_pages(ring, unmap_pages, invcount);
} }
pages += batch; pages += batch;
num -= batch; num -= batch;
...@@ -824,7 +812,6 @@ static int xen_blkbk_map(struct xen_blkif_ring *ring, ...@@ -824,7 +812,6 @@ static int xen_blkbk_map(struct xen_blkif_ring *ring,
int last_map = 0, map_until = 0; int last_map = 0, map_until = 0;
int use_persistent_gnts; int use_persistent_gnts;
struct xen_blkif *blkif = ring->blkif; struct xen_blkif *blkif = ring->blkif;
unsigned long irq_flags;
use_persistent_gnts = (blkif->vbd.feature_gnt_persistent); use_persistent_gnts = (blkif->vbd.feature_gnt_persistent);
...@@ -838,11 +825,9 @@ static int xen_blkbk_map(struct xen_blkif_ring *ring, ...@@ -838,11 +825,9 @@ static int xen_blkbk_map(struct xen_blkif_ring *ring,
uint32_t flags; uint32_t flags;
if (use_persistent_gnts) { if (use_persistent_gnts) {
spin_lock_irqsave(&blkif->pers_gnts_lock, irq_flags);
persistent_gnt = get_persistent_gnt( persistent_gnt = get_persistent_gnt(
blkif, ring,
pages[i]->gref); pages[i]->gref);
spin_unlock_irqrestore(&blkif->pers_gnts_lock, irq_flags);
} }
if (persistent_gnt) { if (persistent_gnt) {
...@@ -853,7 +838,7 @@ static int xen_blkbk_map(struct xen_blkif_ring *ring, ...@@ -853,7 +838,7 @@ static int xen_blkbk_map(struct xen_blkif_ring *ring,
pages[i]->page = persistent_gnt->page; pages[i]->page = persistent_gnt->page;
pages[i]->persistent_gnt = persistent_gnt; pages[i]->persistent_gnt = persistent_gnt;
} else { } else {
if (get_free_page(blkif, &pages[i]->page)) if (get_free_page(ring, &pages[i]->page))
goto out_of_memory; goto out_of_memory;
addr = vaddr(pages[i]->page); addr = vaddr(pages[i]->page);
pages_to_gnt[segs_to_map] = pages[i]->page; pages_to_gnt[segs_to_map] = pages[i]->page;
...@@ -886,7 +871,7 @@ static int xen_blkbk_map(struct xen_blkif_ring *ring, ...@@ -886,7 +871,7 @@ static int xen_blkbk_map(struct xen_blkif_ring *ring,
BUG_ON(new_map_idx >= segs_to_map); BUG_ON(new_map_idx >= segs_to_map);
if (unlikely(map[new_map_idx].status != 0)) { if (unlikely(map[new_map_idx].status != 0)) {
pr_debug("invalid buffer -- could not remap it\n"); pr_debug("invalid buffer -- could not remap it\n");
put_free_pages(blkif, &pages[seg_idx]->page, 1); put_free_pages(ring, &pages[seg_idx]->page, 1);
pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE; pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
ret |= 1; ret |= 1;
goto next; goto next;
...@@ -896,7 +881,7 @@ static int xen_blkbk_map(struct xen_blkif_ring *ring, ...@@ -896,7 +881,7 @@ static int xen_blkbk_map(struct xen_blkif_ring *ring,
continue; continue;
} }
if (use_persistent_gnts && if (use_persistent_gnts &&
blkif->persistent_gnt_c < xen_blkif_max_pgrants) { ring->persistent_gnt_c < xen_blkif_max_pgrants) {
/* /*
* We are using persistent grants, the grant is * We are using persistent grants, the grant is
* not mapped but we might have room for it. * not mapped but we might have room for it.
...@@ -914,19 +899,16 @@ static int xen_blkbk_map(struct xen_blkif_ring *ring, ...@@ -914,19 +899,16 @@ static int xen_blkbk_map(struct xen_blkif_ring *ring,
persistent_gnt->gnt = map[new_map_idx].ref; persistent_gnt->gnt = map[new_map_idx].ref;
persistent_gnt->handle = map[new_map_idx].handle; persistent_gnt->handle = map[new_map_idx].handle;
persistent_gnt->page = pages[seg_idx]->page; persistent_gnt->page = pages[seg_idx]->page;
spin_lock_irqsave(&blkif->pers_gnts_lock, irq_flags); if (add_persistent_gnt(ring,
if (add_persistent_gnt(blkif,
persistent_gnt)) { persistent_gnt)) {
spin_unlock_irqrestore(&blkif->pers_gnts_lock, irq_flags);
kfree(persistent_gnt); kfree(persistent_gnt);
persistent_gnt = NULL; persistent_gnt = NULL;
goto next; goto next;
} }
pages[seg_idx]->persistent_gnt = persistent_gnt; pages[seg_idx]->persistent_gnt = persistent_gnt;
pr_debug("grant %u added to the tree of persistent grants, using %u/%u\n", pr_debug("grant %u added to the tree of persistent grants, using %u/%u\n",
persistent_gnt->gnt, blkif->persistent_gnt_c, persistent_gnt->gnt, ring->persistent_gnt_c,
xen_blkif_max_pgrants); xen_blkif_max_pgrants);
spin_unlock_irqrestore(&blkif->pers_gnts_lock, irq_flags);
goto next; goto next;
} }
if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) { if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) {
...@@ -950,7 +932,7 @@ static int xen_blkbk_map(struct xen_blkif_ring *ring, ...@@ -950,7 +932,7 @@ static int xen_blkbk_map(struct xen_blkif_ring *ring,
out_of_memory: out_of_memory:
pr_alert("%s: out of memory\n", __func__); pr_alert("%s: out of memory\n", __func__);
put_free_pages(blkif, pages_to_gnt, segs_to_map); put_free_pages(ring, pages_to_gnt, segs_to_map);
return -ENOMEM; return -ENOMEM;
} }
......
...@@ -291,6 +291,22 @@ struct xen_blkif_ring { ...@@ -291,6 +291,22 @@ struct xen_blkif_ring {
spinlock_t pending_free_lock; spinlock_t pending_free_lock;
wait_queue_head_t pending_free_wq; wait_queue_head_t pending_free_wq;
/* Tree to store persistent grants. */
spinlock_t pers_gnts_lock;
struct rb_root persistent_gnts;
unsigned int persistent_gnt_c;
atomic_t persistent_gnt_in_use;
unsigned long next_lru;
/* Used by the kworker that offload work from the persistent purge. */
struct list_head persistent_purge_list;
struct work_struct persistent_purge_work;
/* Buffer of free pages to map grant refs. */
spinlock_t free_pages_lock;
int free_pages_num;
struct list_head free_pages;
struct work_struct free_work; struct work_struct free_work;
/* Thread shutdown wait queue. */ /* Thread shutdown wait queue. */
wait_queue_head_t shutdown_wq; wait_queue_head_t shutdown_wq;
...@@ -312,22 +328,6 @@ struct xen_blkif { ...@@ -312,22 +328,6 @@ struct xen_blkif {
struct completion drain_complete; struct completion drain_complete;
atomic_t drain; atomic_t drain;
/* tree to store persistent grants */
spinlock_t pers_gnts_lock;
struct rb_root persistent_gnts;
unsigned int persistent_gnt_c;
atomic_t persistent_gnt_in_use;
unsigned long next_lru;
/* used by the kworker that offload work from the persistent purge */
struct list_head persistent_purge_list;
struct work_struct persistent_purge_work;
/* buffer of free pages to map grant refs */
spinlock_t free_pages_lock;
int free_pages_num;
struct list_head free_pages;
/* statistics */ /* statistics */
unsigned long st_print; unsigned long st_print;
unsigned long long st_rd_req; unsigned long long st_rd_req;
......
...@@ -150,6 +150,10 @@ static int xen_blkif_alloc_rings(struct xen_blkif *blkif) ...@@ -150,6 +150,10 @@ static int xen_blkif_alloc_rings(struct xen_blkif *blkif)
spin_lock_init(&ring->blk_ring_lock); spin_lock_init(&ring->blk_ring_lock);
init_waitqueue_head(&ring->wq); init_waitqueue_head(&ring->wq);
INIT_LIST_HEAD(&ring->pending_free); INIT_LIST_HEAD(&ring->pending_free);
INIT_LIST_HEAD(&ring->persistent_purge_list);
INIT_WORK(&ring->persistent_purge_work, xen_blkbk_unmap_purged_grants);
spin_lock_init(&ring->free_pages_lock);
INIT_LIST_HEAD(&ring->free_pages);
spin_lock_init(&ring->pending_free_lock); spin_lock_init(&ring->pending_free_lock);
init_waitqueue_head(&ring->pending_free_wq); init_waitqueue_head(&ring->pending_free_wq);
...@@ -175,11 +179,7 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid) ...@@ -175,11 +179,7 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid)
atomic_set(&blkif->refcnt, 1); atomic_set(&blkif->refcnt, 1);
init_completion(&blkif->drain_complete); init_completion(&blkif->drain_complete);
INIT_WORK(&blkif->free_work, xen_blkif_deferred_free); INIT_WORK(&blkif->free_work, xen_blkif_deferred_free);
spin_lock_init(&blkif->free_pages_lock);
INIT_LIST_HEAD(&blkif->free_pages);
INIT_LIST_HEAD(&blkif->persistent_purge_list);
blkif->st_print = jiffies; blkif->st_print = jiffies;
INIT_WORK(&blkif->persistent_purge_work, xen_blkbk_unmap_purged_grants);
return blkif; return blkif;
} }
...@@ -290,6 +290,12 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif) ...@@ -290,6 +290,12 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
i++; i++;
} }
BUG_ON(atomic_read(&ring->persistent_gnt_in_use) != 0);
BUG_ON(!list_empty(&ring->persistent_purge_list));
BUG_ON(!RB_EMPTY_ROOT(&ring->persistent_gnts));
BUG_ON(!list_empty(&ring->free_pages));
BUG_ON(ring->free_pages_num != 0);
BUG_ON(ring->persistent_gnt_c != 0);
WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages)); WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
} }
blkif->nr_ring_pages = 0; blkif->nr_ring_pages = 0;
...@@ -304,13 +310,6 @@ static void xen_blkif_free(struct xen_blkif *blkif) ...@@ -304,13 +310,6 @@ static void xen_blkif_free(struct xen_blkif *blkif)
xen_vbd_free(&blkif->vbd); xen_vbd_free(&blkif->vbd);
/* Make sure everything is drained before shutting down */ /* Make sure everything is drained before shutting down */
BUG_ON(blkif->persistent_gnt_c != 0);
BUG_ON(atomic_read(&blkif->persistent_gnt_in_use) != 0);
BUG_ON(blkif->free_pages_num != 0);
BUG_ON(!list_empty(&blkif->persistent_purge_list));
BUG_ON(!list_empty(&blkif->free_pages));
BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
kfree(blkif->rings); kfree(blkif->rings);
kmem_cache_free(xen_blkif_cachep, blkif); kmem_cache_free(xen_blkif_cachep, blkif);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment