Commit ee32f323 authored by Juergen Gross

xen: don't use page->lru for ZONE_DEVICE memory

Commit 9e2369c0 ("xen: add helpers to allocate unpopulated
memory") introduced usage of ZONE_DEVICE memory for foreign memory
mappings.

Unfortunately this collides with using page->lru for Xen backend
private page caches.

Fix that by using page->zone_device_data instead.

Cc: <stable@vger.kernel.org> # 5.9
Fixes: 9e2369c0 ("xen: add helpers to allocate unpopulated memory")
Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovksy@oracle.com>
Reviewed-by: Jason Andryuk <jandryuk@gmail.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
parent ca33479c
......@@ -813,10 +813,63 @@ int gnttab_alloc_pages(int nr_pages, struct page **pages)
}
EXPORT_SYMBOL_GPL(gnttab_alloc_pages);
#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
/*
 * Initialize an empty page cache.  ZONE_DEVICE variant: the cache is a
 * singly linked free list threaded through page->zone_device_data
 * (page->lru cannot be used for ZONE_DEVICE pages, see commit message).
 */
static inline void cache_init(struct gnttab_page_cache *cache)
{
cache->pages = NULL;
}
static inline bool cache_empty(struct gnttab_page_cache *cache)
{
return !cache->pages;
}
/*
 * Pop and return the page at the head of the cache.  The caller must
 * have checked that the cache is not empty.  The free list is linked
 * via page->zone_device_data.
 */
static inline struct page *cache_deq(struct gnttab_page_cache *cache)
{
	struct page *head = cache->pages;

	cache->pages = head->zone_device_data;

	return head;
}
/* Push @page onto the head of the cache's free list. */
static inline void cache_enq(struct gnttab_page_cache *cache, struct page *page)
{
	struct page *old_head = cache->pages;

	page->zone_device_data = old_head;
	cache->pages = page;
}
#else
/*
 * Initialize an empty page cache.  Non-ZONE_DEVICE variant: the cache is
 * a list_head chain using page->lru.
 */
static inline void cache_init(struct gnttab_page_cache *cache)
{
INIT_LIST_HEAD(&cache->pages);
}
/* Report whether the cache currently holds no pages. */
static inline bool cache_empty(struct gnttab_page_cache *cache)
{
return list_empty(&cache->pages);
}
/*
 * Pop and return the first page of the cache via its lru node.  The
 * caller must have checked that the cache is not empty.
 */
static inline struct page *cache_deq(struct gnttab_page_cache *cache)
{
	struct page *head = list_first_entry(&cache->pages, struct page, lru);

	list_del(&head->lru);

	return head;
}
/* Push @page onto the front of the cache using its lru list node. */
static inline void cache_enq(struct gnttab_page_cache *cache, struct page *page)
{
list_add(&page->lru, &cache->pages);
}
#endif
/*
 * Initialize a grant table page cache.
 *
 * cache->pages is a struct page * free list when
 * CONFIG_XEN_UNPOPULATED_ALLOC is set and a list_head otherwise, so the
 * config-specific cache_init() must be used; calling
 * INIT_LIST_HEAD(&cache->pages) here would corrupt the pointer variant.
 */
void gnttab_page_cache_init(struct gnttab_page_cache *cache)
{
	spin_lock_init(&cache->lock);
	cache_init(cache);
	cache->num_pages = 0;
}
EXPORT_SYMBOL_GPL(gnttab_page_cache_init);
......@@ -827,13 +880,12 @@ int gnttab_page_cache_get(struct gnttab_page_cache *cache, struct page **page)
spin_lock_irqsave(&cache->lock, flags);
if (list_empty(&cache->pages)) {
if (cache_empty(cache)) {
spin_unlock_irqrestore(&cache->lock, flags);
return gnttab_alloc_pages(1, page);
}
page[0] = list_first_entry(&cache->pages, struct page, lru);
list_del(&page[0]->lru);
page[0] = cache_deq(cache);
cache->num_pages--;
spin_unlock_irqrestore(&cache->lock, flags);
......@@ -851,7 +903,7 @@ void gnttab_page_cache_put(struct gnttab_page_cache *cache, struct page **page,
spin_lock_irqsave(&cache->lock, flags);
for (i = 0; i < num; i++)
list_add(&page[i]->lru, &cache->pages);
cache_enq(cache, page[i]);
cache->num_pages += num;
spin_unlock_irqrestore(&cache->lock, flags);
......@@ -867,8 +919,7 @@ void gnttab_page_cache_shrink(struct gnttab_page_cache *cache, unsigned int num)
spin_lock_irqsave(&cache->lock, flags);
while (cache->num_pages > num) {
page[i] = list_first_entry(&cache->pages, struct page, lru);
list_del(&page[i]->lru);
page[i] = cache_deq(cache);
cache->num_pages--;
if (++i == ARRAY_SIZE(page)) {
spin_unlock_irqrestore(&cache->lock, flags);
......
......@@ -12,7 +12,7 @@
#include <xen/xen.h>
static DEFINE_MUTEX(list_lock);
/*
 * Free list of unpopulated (ZONE_DEVICE) pages, singly linked through
 * page->zone_device_data; protected by list_lock.  The old
 * LIST_HEAD(page_list) definition was a conflicting leftover and is
 * removed: page->lru must not be used for ZONE_DEVICE pages.
 */
static struct page *page_list;
/* Number of pages currently on page_list; protected by list_lock. */
static unsigned int list_count;
static int fill_list(unsigned int nr_pages)
......@@ -84,7 +84,8 @@ static int fill_list(unsigned int nr_pages)
struct page *pg = virt_to_page(vaddr + PAGE_SIZE * i);
BUG_ON(!virt_addr_valid(vaddr + PAGE_SIZE * i));
list_add(&pg->lru, &page_list);
pg->zone_device_data = page_list;
page_list = pg;
list_count++;
}
......@@ -118,12 +119,10 @@ int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages)
}
for (i = 0; i < nr_pages; i++) {
struct page *pg = list_first_entry_or_null(&page_list,
struct page,
lru);
struct page *pg = page_list;
BUG_ON(!pg);
list_del(&pg->lru);
page_list = pg->zone_device_data;
list_count--;
pages[i] = pg;
......@@ -134,7 +133,8 @@ int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages)
unsigned int j;
for (j = 0; j <= i; j++) {
list_add(&pages[j]->lru, &page_list);
pages[j]->zone_device_data = page_list;
page_list = pages[j];
list_count++;
}
goto out;
......@@ -160,7 +160,8 @@ void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages)
mutex_lock(&list_lock);
for (i = 0; i < nr_pages; i++) {
list_add(&pages[i]->lru, &page_list);
pages[i]->zone_device_data = page_list;
page_list = pages[i];
list_count++;
}
mutex_unlock(&list_lock);
......@@ -189,7 +190,8 @@ static int __init init(void)
struct page *pg =
pfn_to_page(xen_extra_mem[i].start_pfn + j);
list_add(&pg->lru, &page_list);
pg->zone_device_data = page_list;
page_list = pg;
list_count++;
}
}
......
......@@ -200,7 +200,11 @@ void gnttab_free_pages(int nr_pages, struct page **pages);
/* Cache of free pages kept by Xen grant table backends. */
struct gnttab_page_cache {
/* Protects the free list and num_pages. */
spinlock_t lock;
#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
/* Singly linked free list threaded through page->zone_device_data. */
struct page *pages;
#else
/* Free list linked through page->lru. */
struct list_head pages;
#endif
/* Number of pages currently held in the cache. */
unsigned int num_pages;
};
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment