Commit ff4b156f authored by David Vrabel

xen/grant-table: add helpers for allocating pages

Add gnttab_alloc_pages() and gnttab_free_pages() to allocate/free pages
suitable for grant mapping.
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Reviewed-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
parent 0ae65f49
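A minimal usage sketch of the new API, assuming a hypothetical backend driver (MY_NR_PAGES, my_backend_init() and my_backend_exit() are illustrative names, not part of this commit):

#include <linux/mm.h>
#include <xen/grant_table.h>

#define MY_NR_PAGES 16          /* hypothetical pool size */

static struct page *my_pages[MY_NR_PAGES];

static int my_backend_init(void)
{
        /* Pages come from the balloon, so they are safe targets for
         * mapping foreign grant frames into.  Returns 0 on success or
         * a negative errno such as -ENOMEM. */
        return gnttab_alloc_pages(MY_NR_PAGES, my_pages);
}

static void my_backend_exit(void)
{
        /* Any grant mappings into these pages must be unmapped first. */
        gnttab_free_pages(MY_NR_PAGES, my_pages);
}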
@@ -100,7 +100,7 @@ module_param(log_stats, int, 0644);
 #define BLKBACK_INVALID_HANDLE (~0)
 
-/* Number of free pages to remove on each call to free_xenballooned_pages */
+/* Number of free pages to remove on each call to gnttab_free_pages */
 #define NUM_BATCH_FREE_PAGES 10
 
 static inline int get_free_page(struct xen_blkif *blkif, struct page **page)
@@ -111,7 +111,7 @@ static inline int get_free_page(struct xen_blkif *blkif, struct page **page)
         if (list_empty(&blkif->free_pages)) {
                 BUG_ON(blkif->free_pages_num != 0);
                 spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
-                return alloc_xenballooned_pages(1, page, false);
+                return gnttab_alloc_pages(1, page);
         }
         BUG_ON(blkif->free_pages_num == 0);
         page[0] = list_first_entry(&blkif->free_pages, struct page, lru);
@@ -151,14 +151,14 @@ static inline void shrink_free_pagepool(struct xen_blkif *blkif, int num)
                 blkif->free_pages_num--;
                 if (++num_pages == NUM_BATCH_FREE_PAGES) {
                         spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
-                        free_xenballooned_pages(num_pages, page);
+                        gnttab_free_pages(num_pages, page);
                         spin_lock_irqsave(&blkif->free_pages_lock, flags);
                         num_pages = 0;
                 }
         }
         spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
         if (num_pages != 0)
-                free_xenballooned_pages(num_pages, page);
+                gnttab_free_pages(num_pages, page);
 }
 
 #define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))
......
@@ -483,9 +483,8 @@ int xenvif_init_queue(struct xenvif_queue *queue)
          * better enable it. The long term solution would be to use just a
          * bunch of valid page descriptors, without dependency on ballooning
          */
-        err = alloc_xenballooned_pages(MAX_PENDING_REQS,
-                                       queue->mmap_pages,
-                                       false);
+        err = gnttab_alloc_pages(MAX_PENDING_REQS,
+                                 queue->mmap_pages);
         if (err) {
                 netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
                 return -ENOMEM;
@@ -662,7 +661,7 @@ void xenvif_disconnect(struct xenvif *vif)
  */
 void xenvif_deinit_queue(struct xenvif_queue *queue)
 {
-        free_xenballooned_pages(MAX_PENDING_REQS, queue->mmap_pages);
+        gnttab_free_pages(MAX_PENDING_REQS, queue->mmap_pages);
 }
 
 void xenvif_free(struct xenvif *vif)
......
@@ -119,7 +119,7 @@ static void gntdev_free_map(struct grant_map *map)
                 return;
 
         if (map->pages)
-                free_xenballooned_pages(map->count, map->pages);
+                gnttab_free_pages(map->count, map->pages);
         kfree(map->pages);
         kfree(map->grants);
         kfree(map->map_ops);
@@ -152,7 +152,7 @@ static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
             NULL == add->pages)
                 goto err;
 
-        if (alloc_xenballooned_pages(count, add->pages, false /* lowmem */))
+        if (gnttab_alloc_pages(count, add->pages))
                 goto err;
 
         for (i = 0; i < count; i++) {
......
@@ -50,6 +50,7 @@
 #include <xen/interface/memory.h>
 #include <xen/hvc-console.h>
 #include <xen/swiotlb-xen.h>
+#include <xen/balloon.h>
 #include <asm/xen/hypercall.h>
 #include <asm/xen/interface.h>
@@ -671,6 +672,34 @@ void gnttab_free_auto_xlat_frames(void)
 }
 EXPORT_SYMBOL_GPL(gnttab_free_auto_xlat_frames);
 
+/**
+ * gnttab_alloc_pages - alloc pages suitable for grant mapping into
+ * @nr_pages: number of pages to alloc
+ * @pages: returns the pages
+ */
+int gnttab_alloc_pages(int nr_pages, struct page **pages)
+{
+        int ret;
+
+        ret = alloc_xenballooned_pages(nr_pages, pages, false);
+        if (ret < 0)
+                return ret;
+
+        return 0;
+}
+EXPORT_SYMBOL(gnttab_alloc_pages);
+
+/**
+ * gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
+ * @nr_pages: number of pages to free
+ * @pages: the pages
+ */
+void gnttab_free_pages(int nr_pages, struct page **pages)
+{
+        free_xenballooned_pages(nr_pages, pages);
+}
+EXPORT_SYMBOL(gnttab_free_pages);
+
 /* Handling of paged out grant targets (GNTST_eagain) */
 #define MAX_DELAY 256
 static inline void
......
@@ -227,7 +227,7 @@ static void put_free_pages(struct page **page, int num)
                 return;
         if (i > scsiback_max_buffer_pages) {
                 n = min(num, i - scsiback_max_buffer_pages);
-                free_xenballooned_pages(n, page + num - n);
+                gnttab_free_pages(n, page + num - n);
                 n = num - n;
         }
         spin_lock_irqsave(&free_pages_lock, flags);
@@ -244,7 +244,7 @@ static int get_free_page(struct page **page)
         spin_lock_irqsave(&free_pages_lock, flags);
         if (list_empty(&scsiback_free_pages)) {
                 spin_unlock_irqrestore(&free_pages_lock, flags);
-                return alloc_xenballooned_pages(1, page, false);
+                return gnttab_alloc_pages(1, page);
         }
         page[0] = list_first_entry(&scsiback_free_pages, struct page, lru);
         list_del(&page[0]->lru);
@@ -2106,7 +2106,7 @@ static void __exit scsiback_exit(void)
         while (free_pages_num) {
                 if (get_free_page(&page))
                         BUG();
-                free_xenballooned_pages(1, &page);
+                gnttab_free_pages(1, &page);
         }
 
         scsiback_deregister_configfs();
         xenbus_unregister_driver(&scsiback_driver);
......
@@ -163,6 +163,9 @@ void gnttab_free_auto_xlat_frames(void);
 
 #define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr))
 
+int gnttab_alloc_pages(int nr_pages, struct page **pages);
+void gnttab_free_pages(int nr_pages, struct page **pages);
+
 int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
                     struct gnttab_map_grant_ref *kmap_ops,
                     struct page **pages, unsigned int count);
......
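The blkback and scsiback hunks above share the same pattern in front of these helpers: a spinlock-protected pool of already-ballooned pages, refilled one page at a time through gnttab_alloc_pages() when it runs dry. A condensed sketch of that pattern (pool_get_page(), pool_put_page(), and the pool itself are hypothetical names, simplified from the hunks above):

#include <linux/list.h>
#include <linux/spinlock.h>
#include <xen/grant_table.h>

static LIST_HEAD(pool);                 /* free pages, linked via page->lru */
static DEFINE_SPINLOCK(pool_lock);

/* Take one page from the pool, falling back to gnttab_alloc_pages(). */
static int pool_get_page(struct page **page)
{
        unsigned long flags;

        spin_lock_irqsave(&pool_lock, flags);
        if (list_empty(&pool)) {
                spin_unlock_irqrestore(&pool_lock, flags);
                return gnttab_alloc_pages(1, page);
        }
        page[0] = list_first_entry(&pool, struct page, lru);
        list_del(&page[0]->lru);
        spin_unlock_irqrestore(&pool_lock, flags);
        return 0;
}

/* Return a page to the pool for later reuse; a shrinker or exit path
 * would hand pooled pages back with gnttab_free_pages(). */
static void pool_put_page(struct page *page)
{
        unsigned long flags;

        spin_lock_irqsave(&pool_lock, flags);
        list_add(&page->lru, &pool);
        spin_unlock_irqrestore(&pool_lock, flags);
}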