Commit b53966ff authored by Linus Torvalds

Merge tag 'for-linus-5.10c-rc8-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen fixes from Juergen Gross:
 "A short series fixing a regression introduced in 5.9 for running as
  Xen dom0 on a system with NVMe backed storage"

* tag 'for-linus-5.10c-rc8-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen: don't use page->lru for ZONE_DEVICE memory
  xen: add helpers for caching grant mapping pages
parents b01deddb ee32f323
...@@ -132,73 +132,12 @@ module_param(log_stats, int, 0644); ...@@ -132,73 +132,12 @@ module_param(log_stats, int, 0644);
#define BLKBACK_INVALID_HANDLE (~0) #define BLKBACK_INVALID_HANDLE (~0)
/* Number of free pages to remove on each call to gnttab_free_pages */
#define NUM_BATCH_FREE_PAGES 10
static inline bool persistent_gnt_timeout(struct persistent_gnt *persistent_gnt) static inline bool persistent_gnt_timeout(struct persistent_gnt *persistent_gnt)
{ {
return pgrant_timeout && (jiffies - persistent_gnt->last_used >= return pgrant_timeout && (jiffies - persistent_gnt->last_used >=
HZ * pgrant_timeout); HZ * pgrant_timeout);
} }
/*
 * get_free_page - take one page from the per-ring free-page cache.
 * @ring: blkback ring whose cache is consumed.
 * @page: out parameter; page[0] receives the page on success.
 *
 * Falls back to gnttab_alloc_pages() when the cache is empty.
 * Returns 0 on success, otherwise the gnttab_alloc_pages() error value.
 */
static inline int get_free_page(struct xen_blkif_ring *ring, struct page **page)
{
unsigned long flags;
spin_lock_irqsave(&ring->free_pages_lock, flags);
if (list_empty(&ring->free_pages)) {
/* The counter must agree with the (empty) list. */
BUG_ON(ring->free_pages_num != 0);
/* Drop the lock before handing off to the allocator. */
spin_unlock_irqrestore(&ring->free_pages_lock, flags);
return gnttab_alloc_pages(1, page);
}
BUG_ON(ring->free_pages_num == 0);
/* Cached pages are threaded through their page->lru member. */
page[0] = list_first_entry(&ring->free_pages, struct page, lru);
list_del(&page[0]->lru);
ring->free_pages_num--;
spin_unlock_irqrestore(&ring->free_pages_lock, flags);
return 0;
}
/*
 * put_free_pages - return @num pages to the per-ring free-page cache.
 * @ring: blkback ring whose cache receives the pages.
 * @page: array of pages to give back.
 * @num:  number of entries in @page.
 *
 * Pages are only cached here, never freed; shrink_free_pagepool() trims
 * the pool. Uses an irq-safe lock, so callable from any context.
 */
static inline void put_free_pages(struct xen_blkif_ring *ring, struct page **page,
int num)
{
unsigned long flags;
int i;
spin_lock_irqsave(&ring->free_pages_lock, flags);
for (i = 0; i < num; i++)
list_add(&page[i]->lru, &ring->free_pages);
ring->free_pages_num += num;
spin_unlock_irqrestore(&ring->free_pages_lock, flags);
}
/*
 * shrink_free_pagepool - free cached pages until at most @num remain.
 * @ring: blkback ring whose cache is trimmed.
 * @num:  target number of pages to keep (0 empties the pool).
 */
static inline void shrink_free_pagepool(struct xen_blkif_ring *ring, int num)
{
/* Remove requested pages in batches of NUM_BATCH_FREE_PAGES */
struct page *page[NUM_BATCH_FREE_PAGES];
unsigned int num_pages = 0;
unsigned long flags;
spin_lock_irqsave(&ring->free_pages_lock, flags);
while (ring->free_pages_num > num) {
BUG_ON(list_empty(&ring->free_pages));
page[num_pages] = list_first_entry(&ring->free_pages,
struct page, lru);
list_del(&page[num_pages]->lru);
ring->free_pages_num--;
if (++num_pages == NUM_BATCH_FREE_PAGES) {
/*
 * Batch is full: release the lock while freeing so
 * it is never held across gnttab_free_pages().
 * Other CPUs may add/remove pages in the gap, hence
 * the loop condition is re-checked afterwards.
 */
spin_unlock_irqrestore(&ring->free_pages_lock, flags);
gnttab_free_pages(num_pages, page);
spin_lock_irqsave(&ring->free_pages_lock, flags);
num_pages = 0;
}
}
spin_unlock_irqrestore(&ring->free_pages_lock, flags);
/* Free any remaining partial batch, outside the lock. */
if (num_pages != 0)
gnttab_free_pages(num_pages, page);
}
#define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page))) #define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))
static int do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags); static int do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags);
...@@ -331,7 +270,8 @@ static void free_persistent_gnts(struct xen_blkif_ring *ring, struct rb_root *ro ...@@ -331,7 +270,8 @@ static void free_persistent_gnts(struct xen_blkif_ring *ring, struct rb_root *ro
unmap_data.count = segs_to_unmap; unmap_data.count = segs_to_unmap;
BUG_ON(gnttab_unmap_refs_sync(&unmap_data)); BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
put_free_pages(ring, pages, segs_to_unmap); gnttab_page_cache_put(&ring->free_pages, pages,
segs_to_unmap);
segs_to_unmap = 0; segs_to_unmap = 0;
} }
...@@ -371,7 +311,8 @@ void xen_blkbk_unmap_purged_grants(struct work_struct *work) ...@@ -371,7 +311,8 @@ void xen_blkbk_unmap_purged_grants(struct work_struct *work)
if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) { if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
unmap_data.count = segs_to_unmap; unmap_data.count = segs_to_unmap;
BUG_ON(gnttab_unmap_refs_sync(&unmap_data)); BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
put_free_pages(ring, pages, segs_to_unmap); gnttab_page_cache_put(&ring->free_pages, pages,
segs_to_unmap);
segs_to_unmap = 0; segs_to_unmap = 0;
} }
kfree(persistent_gnt); kfree(persistent_gnt);
...@@ -379,7 +320,7 @@ void xen_blkbk_unmap_purged_grants(struct work_struct *work) ...@@ -379,7 +320,7 @@ void xen_blkbk_unmap_purged_grants(struct work_struct *work)
if (segs_to_unmap > 0) { if (segs_to_unmap > 0) {
unmap_data.count = segs_to_unmap; unmap_data.count = segs_to_unmap;
BUG_ON(gnttab_unmap_refs_sync(&unmap_data)); BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
put_free_pages(ring, pages, segs_to_unmap); gnttab_page_cache_put(&ring->free_pages, pages, segs_to_unmap);
} }
} }
...@@ -664,9 +605,10 @@ int xen_blkif_schedule(void *arg) ...@@ -664,9 +605,10 @@ int xen_blkif_schedule(void *arg)
/* Shrink the free pages pool if it is too large. */ /* Shrink the free pages pool if it is too large. */
if (time_before(jiffies, blkif->buffer_squeeze_end)) if (time_before(jiffies, blkif->buffer_squeeze_end))
shrink_free_pagepool(ring, 0); gnttab_page_cache_shrink(&ring->free_pages, 0);
else else
shrink_free_pagepool(ring, max_buffer_pages); gnttab_page_cache_shrink(&ring->free_pages,
max_buffer_pages);
if (log_stats && time_after(jiffies, ring->st_print)) if (log_stats && time_after(jiffies, ring->st_print))
print_stats(ring); print_stats(ring);
...@@ -697,7 +639,7 @@ void xen_blkbk_free_caches(struct xen_blkif_ring *ring) ...@@ -697,7 +639,7 @@ void xen_blkbk_free_caches(struct xen_blkif_ring *ring)
ring->persistent_gnt_c = 0; ring->persistent_gnt_c = 0;
/* Since we are shutting down remove all pages from the buffer */ /* Since we are shutting down remove all pages from the buffer */
shrink_free_pagepool(ring, 0 /* All */); gnttab_page_cache_shrink(&ring->free_pages, 0 /* All */);
} }
static unsigned int xen_blkbk_unmap_prepare( static unsigned int xen_blkbk_unmap_prepare(
...@@ -736,7 +678,7 @@ static void xen_blkbk_unmap_and_respond_callback(int result, struct gntab_unmap_ ...@@ -736,7 +678,7 @@ static void xen_blkbk_unmap_and_respond_callback(int result, struct gntab_unmap_
but is this the best way to deal with this? */ but is this the best way to deal with this? */
BUG_ON(result); BUG_ON(result);
put_free_pages(ring, data->pages, data->count); gnttab_page_cache_put(&ring->free_pages, data->pages, data->count);
make_response(ring, pending_req->id, make_response(ring, pending_req->id,
pending_req->operation, pending_req->status); pending_req->operation, pending_req->status);
free_req(ring, pending_req); free_req(ring, pending_req);
...@@ -803,7 +745,8 @@ static void xen_blkbk_unmap(struct xen_blkif_ring *ring, ...@@ -803,7 +745,8 @@ static void xen_blkbk_unmap(struct xen_blkif_ring *ring,
if (invcount) { if (invcount) {
ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount); ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
BUG_ON(ret); BUG_ON(ret);
put_free_pages(ring, unmap_pages, invcount); gnttab_page_cache_put(&ring->free_pages, unmap_pages,
invcount);
} }
pages += batch; pages += batch;
num -= batch; num -= batch;
...@@ -850,7 +793,8 @@ static int xen_blkbk_map(struct xen_blkif_ring *ring, ...@@ -850,7 +793,8 @@ static int xen_blkbk_map(struct xen_blkif_ring *ring,
pages[i]->page = persistent_gnt->page; pages[i]->page = persistent_gnt->page;
pages[i]->persistent_gnt = persistent_gnt; pages[i]->persistent_gnt = persistent_gnt;
} else { } else {
if (get_free_page(ring, &pages[i]->page)) if (gnttab_page_cache_get(&ring->free_pages,
&pages[i]->page))
goto out_of_memory; goto out_of_memory;
addr = vaddr(pages[i]->page); addr = vaddr(pages[i]->page);
pages_to_gnt[segs_to_map] = pages[i]->page; pages_to_gnt[segs_to_map] = pages[i]->page;
...@@ -883,7 +827,8 @@ static int xen_blkbk_map(struct xen_blkif_ring *ring, ...@@ -883,7 +827,8 @@ static int xen_blkbk_map(struct xen_blkif_ring *ring,
BUG_ON(new_map_idx >= segs_to_map); BUG_ON(new_map_idx >= segs_to_map);
if (unlikely(map[new_map_idx].status != 0)) { if (unlikely(map[new_map_idx].status != 0)) {
pr_debug("invalid buffer -- could not remap it\n"); pr_debug("invalid buffer -- could not remap it\n");
put_free_pages(ring, &pages[seg_idx]->page, 1); gnttab_page_cache_put(&ring->free_pages,
&pages[seg_idx]->page, 1);
pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE; pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
ret |= 1; ret |= 1;
goto next; goto next;
...@@ -944,7 +889,7 @@ static int xen_blkbk_map(struct xen_blkif_ring *ring, ...@@ -944,7 +889,7 @@ static int xen_blkbk_map(struct xen_blkif_ring *ring,
out_of_memory: out_of_memory:
pr_alert("%s: out of memory\n", __func__); pr_alert("%s: out of memory\n", __func__);
put_free_pages(ring, pages_to_gnt, segs_to_map); gnttab_page_cache_put(&ring->free_pages, pages_to_gnt, segs_to_map);
for (i = last_map; i < num; i++) for (i = last_map; i < num; i++)
pages[i]->handle = BLKBACK_INVALID_HANDLE; pages[i]->handle = BLKBACK_INVALID_HANDLE;
return -ENOMEM; return -ENOMEM;
......
...@@ -288,9 +288,7 @@ struct xen_blkif_ring { ...@@ -288,9 +288,7 @@ struct xen_blkif_ring {
struct work_struct persistent_purge_work; struct work_struct persistent_purge_work;
/* Buffer of free pages to map grant refs. */ /* Buffer of free pages to map grant refs. */
spinlock_t free_pages_lock; struct gnttab_page_cache free_pages;
int free_pages_num;
struct list_head free_pages;
struct work_struct free_work; struct work_struct free_work;
/* Thread shutdown wait queue. */ /* Thread shutdown wait queue. */
......
...@@ -144,8 +144,7 @@ static int xen_blkif_alloc_rings(struct xen_blkif *blkif) ...@@ -144,8 +144,7 @@ static int xen_blkif_alloc_rings(struct xen_blkif *blkif)
INIT_LIST_HEAD(&ring->pending_free); INIT_LIST_HEAD(&ring->pending_free);
INIT_LIST_HEAD(&ring->persistent_purge_list); INIT_LIST_HEAD(&ring->persistent_purge_list);
INIT_WORK(&ring->persistent_purge_work, xen_blkbk_unmap_purged_grants); INIT_WORK(&ring->persistent_purge_work, xen_blkbk_unmap_purged_grants);
spin_lock_init(&ring->free_pages_lock); gnttab_page_cache_init(&ring->free_pages);
INIT_LIST_HEAD(&ring->free_pages);
spin_lock_init(&ring->pending_free_lock); spin_lock_init(&ring->pending_free_lock);
init_waitqueue_head(&ring->pending_free_wq); init_waitqueue_head(&ring->pending_free_wq);
...@@ -317,8 +316,7 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif) ...@@ -317,8 +316,7 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
BUG_ON(atomic_read(&ring->persistent_gnt_in_use) != 0); BUG_ON(atomic_read(&ring->persistent_gnt_in_use) != 0);
BUG_ON(!list_empty(&ring->persistent_purge_list)); BUG_ON(!list_empty(&ring->persistent_purge_list));
BUG_ON(!RB_EMPTY_ROOT(&ring->persistent_gnts)); BUG_ON(!RB_EMPTY_ROOT(&ring->persistent_gnts));
BUG_ON(!list_empty(&ring->free_pages)); BUG_ON(ring->free_pages.num_pages != 0);
BUG_ON(ring->free_pages_num != 0);
BUG_ON(ring->persistent_gnt_c != 0); BUG_ON(ring->persistent_gnt_c != 0);
WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages)); WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
ring->active = false; ring->active = false;
......
...@@ -813,6 +813,129 @@ int gnttab_alloc_pages(int nr_pages, struct page **pages) ...@@ -813,6 +813,129 @@ int gnttab_alloc_pages(int nr_pages, struct page **pages)
} }
EXPORT_SYMBOL_GPL(gnttab_alloc_pages); EXPORT_SYMBOL_GPL(gnttab_alloc_pages);
#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
/*
 * ZONE_DEVICE variant (CONFIG_XEN_UNPOPULATED_ALLOC): page->lru must not
 * be used, so the cache is a singly linked list threaded through
 * page->zone_device_data. A NULL head means the cache is empty.
 */
static inline void cache_init(struct gnttab_page_cache *cache)
{
cache->pages = NULL;
}
/* True when no pages are cached (NULL list head). */
static inline bool cache_empty(struct gnttab_page_cache *cache)
{
return !cache->pages;
}
/*
 * Unlink and return the head page. Caller must hold cache->lock and
 * guarantee the cache is not empty (see gnttab_page_cache_get()).
 */
static inline struct page *cache_deq(struct gnttab_page_cache *cache)
{
struct page *page;
page = cache->pages;
/* Next pointer lives in zone_device_data, not page->lru. */
cache->pages = page->zone_device_data;
return page;
}
/* Push @page onto the head of the cache. Caller must hold cache->lock. */
static inline void cache_enq(struct gnttab_page_cache *cache, struct page *page)
{
page->zone_device_data = cache->pages;
cache->pages = page;
}
#else
/*
 * Regular-memory variant: cached pages are linked through page->lru on a
 * conventional list_head.
 */
static inline void cache_init(struct gnttab_page_cache *cache)
{
INIT_LIST_HEAD(&cache->pages);
}
/* True when no pages are cached. */
static inline bool cache_empty(struct gnttab_page_cache *cache)
{
return list_empty(&cache->pages);
}
/*
 * Unlink and return the first cached page via page->lru. Caller must
 * hold cache->lock and guarantee the cache is not empty.
 */
static inline struct page *cache_deq(struct gnttab_page_cache *cache)
{
struct page *page;
page = list_first_entry(&cache->pages, struct page, lru);
list_del(&page->lru);
return page;
}
/* Add @page to the front of the cache list. Caller must hold cache->lock. */
static inline void cache_enq(struct gnttab_page_cache *cache, struct page *page)
{
list_add(&page->lru, &cache->pages);
}
#endif
/*
 * gnttab_page_cache_init - initialise an empty grant page cache.
 * @cache: cache to set up (lock, list head/pointer, counter).
 */
void gnttab_page_cache_init(struct gnttab_page_cache *cache)
{
spin_lock_init(&cache->lock);
cache_init(cache);
cache->num_pages = 0;
}
EXPORT_SYMBOL_GPL(gnttab_page_cache_init);
/*
 * gnttab_page_cache_get - take one page from the cache.
 * @cache: cache to take the page from.
 * @page:  out parameter; page[0] receives the page on success.
 *
 * Falls back to gnttab_alloc_pages() when the cache is empty.
 * Returns 0 on success, otherwise the gnttab_alloc_pages() error value.
 * Uses an irq-safe lock, so callable from any context.
 */
int gnttab_page_cache_get(struct gnttab_page_cache *cache, struct page **page)
{
unsigned long flags;
spin_lock_irqsave(&cache->lock, flags);
if (cache_empty(cache)) {
/* Drop the lock before handing off to the allocator. */
spin_unlock_irqrestore(&cache->lock, flags);
return gnttab_alloc_pages(1, page);
}
page[0] = cache_deq(cache);
cache->num_pages--;
spin_unlock_irqrestore(&cache->lock, flags);
return 0;
}
EXPORT_SYMBOL_GPL(gnttab_page_cache_get);
/*
 * gnttab_page_cache_put - return @num pages to the cache.
 * @cache: cache receiving the pages.
 * @page:  array of pages to give back.
 * @num:   number of entries in @page.
 *
 * Pages are only cached, never freed here; gnttab_page_cache_shrink()
 * trims the cache. Uses an irq-safe lock, so callable from any context.
 */
void gnttab_page_cache_put(struct gnttab_page_cache *cache, struct page **page,
unsigned int num)
{
unsigned long flags;
unsigned int i;
spin_lock_irqsave(&cache->lock, flags);
for (i = 0; i < num; i++)
cache_enq(cache, page[i]);
cache->num_pages += num;
spin_unlock_irqrestore(&cache->lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_page_cache_put);
/*
 * gnttab_page_cache_shrink - free cached pages until at most @num remain.
 * @cache: cache to trim.
 * @num:   target number of pages to keep (0 empties the cache).
 *
 * Pages are freed in batches of ARRAY_SIZE(page) so the lock is never
 * held across gnttab_free_pages().
 */
void gnttab_page_cache_shrink(struct gnttab_page_cache *cache, unsigned int num)
{
struct page *page[10];
unsigned int i = 0;
unsigned long flags;
spin_lock_irqsave(&cache->lock, flags);
while (cache->num_pages > num) {
page[i] = cache_deq(cache);
cache->num_pages--;
if (++i == ARRAY_SIZE(page)) {
/*
 * Batch full: free outside the lock. Other CPUs may
 * modify the cache in the gap, hence the loop
 * condition is re-checked after relocking.
 */
spin_unlock_irqrestore(&cache->lock, flags);
gnttab_free_pages(i, page);
i = 0;
spin_lock_irqsave(&cache->lock, flags);
}
}
spin_unlock_irqrestore(&cache->lock, flags);
/* Free any remaining partial batch, outside the lock. */
if (i != 0)
gnttab_free_pages(i, page);
}
EXPORT_SYMBOL_GPL(gnttab_page_cache_shrink);
void gnttab_pages_clear_private(int nr_pages, struct page **pages) void gnttab_pages_clear_private(int nr_pages, struct page **pages)
{ {
int i; int i;
......
...@@ -12,7 +12,7 @@ ...@@ -12,7 +12,7 @@
#include <xen/xen.h> #include <xen/xen.h>
static DEFINE_MUTEX(list_lock); static DEFINE_MUTEX(list_lock);
static LIST_HEAD(page_list); static struct page *page_list;
static unsigned int list_count; static unsigned int list_count;
static int fill_list(unsigned int nr_pages) static int fill_list(unsigned int nr_pages)
...@@ -84,7 +84,8 @@ static int fill_list(unsigned int nr_pages) ...@@ -84,7 +84,8 @@ static int fill_list(unsigned int nr_pages)
struct page *pg = virt_to_page(vaddr + PAGE_SIZE * i); struct page *pg = virt_to_page(vaddr + PAGE_SIZE * i);
BUG_ON(!virt_addr_valid(vaddr + PAGE_SIZE * i)); BUG_ON(!virt_addr_valid(vaddr + PAGE_SIZE * i));
list_add(&pg->lru, &page_list); pg->zone_device_data = page_list;
page_list = pg;
list_count++; list_count++;
} }
...@@ -118,12 +119,10 @@ int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages) ...@@ -118,12 +119,10 @@ int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages)
} }
for (i = 0; i < nr_pages; i++) { for (i = 0; i < nr_pages; i++) {
struct page *pg = list_first_entry_or_null(&page_list, struct page *pg = page_list;
struct page,
lru);
BUG_ON(!pg); BUG_ON(!pg);
list_del(&pg->lru); page_list = pg->zone_device_data;
list_count--; list_count--;
pages[i] = pg; pages[i] = pg;
...@@ -134,7 +133,8 @@ int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages) ...@@ -134,7 +133,8 @@ int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages)
unsigned int j; unsigned int j;
for (j = 0; j <= i; j++) { for (j = 0; j <= i; j++) {
list_add(&pages[j]->lru, &page_list); pages[j]->zone_device_data = page_list;
page_list = pages[j];
list_count++; list_count++;
} }
goto out; goto out;
...@@ -160,7 +160,8 @@ void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages) ...@@ -160,7 +160,8 @@ void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages)
mutex_lock(&list_lock); mutex_lock(&list_lock);
for (i = 0; i < nr_pages; i++) { for (i = 0; i < nr_pages; i++) {
list_add(&pages[i]->lru, &page_list); pages[i]->zone_device_data = page_list;
page_list = pages[i];
list_count++; list_count++;
} }
mutex_unlock(&list_lock); mutex_unlock(&list_lock);
...@@ -189,7 +190,8 @@ static int __init init(void) ...@@ -189,7 +190,8 @@ static int __init init(void)
struct page *pg = struct page *pg =
pfn_to_page(xen_extra_mem[i].start_pfn + j); pfn_to_page(xen_extra_mem[i].start_pfn + j);
list_add(&pg->lru, &page_list); pg->zone_device_data = page_list;
page_list = pg;
list_count++; list_count++;
} }
} }
......
...@@ -99,6 +99,8 @@ struct vscsibk_info { ...@@ -99,6 +99,8 @@ struct vscsibk_info {
struct list_head v2p_entry_lists; struct list_head v2p_entry_lists;
wait_queue_head_t waiting_to_free; wait_queue_head_t waiting_to_free;
struct gnttab_page_cache free_pages;
}; };
/* theoretical maximum of grants for one request */ /* theoretical maximum of grants for one request */
...@@ -188,10 +190,6 @@ module_param_named(max_buffer_pages, scsiback_max_buffer_pages, int, 0644); ...@@ -188,10 +190,6 @@ module_param_named(max_buffer_pages, scsiback_max_buffer_pages, int, 0644);
MODULE_PARM_DESC(max_buffer_pages, MODULE_PARM_DESC(max_buffer_pages,
"Maximum number of free pages to keep in backend buffer"); "Maximum number of free pages to keep in backend buffer");
static DEFINE_SPINLOCK(free_pages_lock);
static int free_pages_num;
static LIST_HEAD(scsiback_free_pages);
/* Global spinlock to protect scsiback TPG list */ /* Global spinlock to protect scsiback TPG list */
static DEFINE_MUTEX(scsiback_mutex); static DEFINE_MUTEX(scsiback_mutex);
static LIST_HEAD(scsiback_list); static LIST_HEAD(scsiback_list);
...@@ -207,41 +205,6 @@ static void scsiback_put(struct vscsibk_info *info) ...@@ -207,41 +205,6 @@ static void scsiback_put(struct vscsibk_info *info)
wake_up(&info->waiting_to_free); wake_up(&info->waiting_to_free);
} }
/*
 * put_free_pages - return @num pages to the global scsiback page pool,
 * freeing any excess above scsiback_max_buffer_pages immediately.
 * @page: array of pages to give back.
 * @num:  number of entries in @page.
 *
 * NOTE(review): free_pages_num is read here before the lock is taken, so
 * the scsiback_max_buffer_pages limit is best-effort under concurrency —
 * presumably intentional; confirm before relying on a strict cap.
 */
static void put_free_pages(struct page **page, int num)
{
unsigned long flags;
int i = free_pages_num + num, n = num;
if (num == 0)
return;
if (i > scsiback_max_buffer_pages) {
/* Pool would overflow: free the tail of @page straight away. */
n = min(num, i - scsiback_max_buffer_pages);
gnttab_free_pages(n, page + num - n);
/* n now counts the pages actually being cached. */
n = num - n;
}
spin_lock_irqsave(&free_pages_lock, flags);
for (i = 0; i < n; i++)
list_add(&page[i]->lru, &scsiback_free_pages);
free_pages_num += n;
spin_unlock_irqrestore(&free_pages_lock, flags);
}
/*
 * get_free_page - take one page from the global scsiback page pool.
 * @page: out parameter; page[0] receives the page on success.
 *
 * Falls back to gnttab_alloc_pages() when the pool is empty.
 * Returns 0 on success, otherwise the gnttab_alloc_pages() error value.
 */
static int get_free_page(struct page **page)
{
unsigned long flags;
spin_lock_irqsave(&free_pages_lock, flags);
if (list_empty(&scsiback_free_pages)) {
/* Drop the lock before handing off to the allocator. */
spin_unlock_irqrestore(&free_pages_lock, flags);
return gnttab_alloc_pages(1, page);
}
/* Pooled pages are threaded through their page->lru member. */
page[0] = list_first_entry(&scsiback_free_pages, struct page, lru);
list_del(&page[0]->lru);
free_pages_num--;
spin_unlock_irqrestore(&free_pages_lock, flags);
return 0;
}
static unsigned long vaddr_page(struct page *page) static unsigned long vaddr_page(struct page *page)
{ {
unsigned long pfn = page_to_pfn(page); unsigned long pfn = page_to_pfn(page);
...@@ -302,7 +265,8 @@ static void scsiback_fast_flush_area(struct vscsibk_pend *req) ...@@ -302,7 +265,8 @@ static void scsiback_fast_flush_area(struct vscsibk_pend *req)
BUG_ON(err); BUG_ON(err);
} }
put_free_pages(req->pages, req->n_grants); gnttab_page_cache_put(&req->info->free_pages, req->pages,
req->n_grants);
req->n_grants = 0; req->n_grants = 0;
} }
...@@ -445,8 +409,8 @@ static int scsiback_gnttab_data_map_list(struct vscsibk_pend *pending_req, ...@@ -445,8 +409,8 @@ static int scsiback_gnttab_data_map_list(struct vscsibk_pend *pending_req,
struct vscsibk_info *info = pending_req->info; struct vscsibk_info *info = pending_req->info;
for (i = 0; i < cnt; i++) { for (i = 0; i < cnt; i++) {
if (get_free_page(pg + mapcount)) { if (gnttab_page_cache_get(&info->free_pages, pg + mapcount)) {
put_free_pages(pg, mapcount); gnttab_page_cache_put(&info->free_pages, pg, mapcount);
pr_err("no grant page\n"); pr_err("no grant page\n");
return -ENOMEM; return -ENOMEM;
} }
...@@ -796,6 +760,8 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info, ...@@ -796,6 +760,8 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info,
cond_resched(); cond_resched();
} }
gnttab_page_cache_shrink(&info->free_pages, scsiback_max_buffer_pages);
RING_FINAL_CHECK_FOR_REQUESTS(&info->ring, more_to_do); RING_FINAL_CHECK_FOR_REQUESTS(&info->ring, more_to_do);
return more_to_do; return more_to_do;
} }
...@@ -1233,6 +1199,8 @@ static int scsiback_remove(struct xenbus_device *dev) ...@@ -1233,6 +1199,8 @@ static int scsiback_remove(struct xenbus_device *dev)
scsiback_release_translation_entry(info); scsiback_release_translation_entry(info);
gnttab_page_cache_shrink(&info->free_pages, 0);
dev_set_drvdata(&dev->dev, NULL); dev_set_drvdata(&dev->dev, NULL);
return 0; return 0;
...@@ -1263,6 +1231,7 @@ static int scsiback_probe(struct xenbus_device *dev, ...@@ -1263,6 +1231,7 @@ static int scsiback_probe(struct xenbus_device *dev,
info->irq = 0; info->irq = 0;
INIT_LIST_HEAD(&info->v2p_entry_lists); INIT_LIST_HEAD(&info->v2p_entry_lists);
spin_lock_init(&info->v2p_lock); spin_lock_init(&info->v2p_lock);
gnttab_page_cache_init(&info->free_pages);
err = xenbus_printf(XBT_NIL, dev->nodename, "feature-sg-grant", "%u", err = xenbus_printf(XBT_NIL, dev->nodename, "feature-sg-grant", "%u",
SG_ALL); SG_ALL);
...@@ -1879,13 +1848,6 @@ static int __init scsiback_init(void) ...@@ -1879,13 +1848,6 @@ static int __init scsiback_init(void)
static void __exit scsiback_exit(void) static void __exit scsiback_exit(void)
{ {
struct page *page;
while (free_pages_num) {
if (get_free_page(&page))
BUG();
gnttab_free_pages(1, &page);
}
target_unregister_template(&scsiback_ops); target_unregister_template(&scsiback_ops);
xenbus_unregister_driver(&scsiback_driver); xenbus_unregister_driver(&scsiback_driver);
} }
......
...@@ -198,6 +198,23 @@ void gnttab_free_auto_xlat_frames(void); ...@@ -198,6 +198,23 @@ void gnttab_free_auto_xlat_frames(void);
int gnttab_alloc_pages(int nr_pages, struct page **pages); int gnttab_alloc_pages(int nr_pages, struct page **pages);
void gnttab_free_pages(int nr_pages, struct page **pages); void gnttab_free_pages(int nr_pages, struct page **pages);
/*
 * Cache of free, grant-mappable pages, protected by an irq-safe lock.
 * With CONFIG_XEN_UNPOPULATED_ALLOC the cached pages may be ZONE_DEVICE
 * memory, whose page->lru must not be used, so the list is then singly
 * linked through page->zone_device_data instead of a list_head.
 */
struct gnttab_page_cache {
spinlock_t lock;
#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
struct page *pages;	/* Singly linked via page->zone_device_data. */
#else
struct list_head pages;	/* Linked via page->lru. */
#endif
unsigned int num_pages;	/* Number of pages currently cached. */
};
/* Initialise an empty cache. */
void gnttab_page_cache_init(struct gnttab_page_cache *cache);
/* Get one page (allocating on empty); returns 0 or an allocation error. */
int gnttab_page_cache_get(struct gnttab_page_cache *cache, struct page **page);
/* Return @num pages to the cache. */
void gnttab_page_cache_put(struct gnttab_page_cache *cache, struct page **page,
unsigned int num);
/* Free cached pages until at most @num remain. */
void gnttab_page_cache_shrink(struct gnttab_page_cache *cache,
unsigned int num);
#ifdef CONFIG_XEN_GRANT_DMA_ALLOC #ifdef CONFIG_XEN_GRANT_DMA_ALLOC
struct gnttab_dma_alloc_args { struct gnttab_dma_alloc_args {
/* Device for which DMA memory will be/was allocated. */ /* Device for which DMA memory will be/was allocated. */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment