Commit 0f14599c authored by Matthew Wilcox (Oracle), committed by Linus Torvalds

vmalloc: convert to XArray

The radix tree of vmap blocks is simpler to express as an XArray.  Reduces
both the text and data sizes of the object file and eliminates a user of
the radix tree preload API.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Link: http://lkml.kernel.org/r/20200603171448.5894-1-willy@infradead.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent c89ab04f
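
For readers unfamiliar with the XArray API, a minimal sketch of the pattern this commit adopts is below. It is illustrative only: my_blocks, struct my_block and the my_block_* helpers are hypothetical names, not the kernel's. xa_insert() allocates any internal nodes with the caller's gfp_mask and takes the XArray's embedded spinlock, so both radix_tree_preload() and the external vmap_block_tree_lock become unnecessary; xa_erase() removes an entry and returns the old value; xa_load() handles the RCU read lock internally.

/*
 * Illustrative sketch of the XArray calls used in this conversion.
 * "my_blocks" / "struct my_block" are made-up names for demonstration.
 */
#include <linux/xarray.h>
#include <linux/bug.h>

struct my_block { unsigned long start; };

static DEFINE_XARRAY(my_blocks);	/* embeds its own spinlock */

static int my_block_register(unsigned long idx, struct my_block *blk,
			     gfp_t gfp_mask)
{
	/*
	 * xa_insert() allocates internal nodes with gfp_mask, takes the
	 * XArray lock itself, and fails with -EBUSY if idx is already
	 * occupied, so no preload step or external spinlock is needed.
	 */
	return xa_insert(&my_blocks, idx, blk, gfp_mask);
}

static void my_block_unregister(unsigned long idx, struct my_block *blk)
{
	/* xa_erase() returns the entry that was removed (or NULL). */
	WARN_ON(xa_erase(&my_blocks, idx) != blk);
}

static struct my_block *my_block_find(unsigned long idx)
{
	/* xa_load() takes the RCU read lock internally. */
	return xa_load(&my_blocks, idx);
}

Because allocation failure is now reported by xa_insert() itself, the error handling that used to sit around radix_tree_preload() moves to the insertion site, as the diff below shows.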
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -25,7 +25,7 @@
 #include <linux/list.h>
 #include <linux/notifier.h>
 #include <linux/rbtree.h>
-#include <linux/radix-tree.h>
+#include <linux/xarray.h>
 #include <linux/rcupdate.h>
 #include <linux/pfn.h>
 #include <linux/kmemleak.h>
@@ -1514,12 +1514,11 @@ struct vmap_block {
 static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
 
 /*
- * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block
+ * XArray of vmap blocks, indexed by address, to quickly find a vmap block
  * in the free path. Could get rid of this if we change the API to return a
  * "cookie" from alloc, to be passed to free. But no big deal yet.
  */
-static DEFINE_SPINLOCK(vmap_block_tree_lock);
-static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);
+static DEFINE_XARRAY(vmap_blocks);
 
 /*
  * We should probably have a fallback mechanism to allocate virtual memory
@@ -1576,13 +1575,6 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
 		return ERR_CAST(va);
 	}
 
-	err = radix_tree_preload(gfp_mask);
-	if (unlikely(err)) {
-		kfree(vb);
-		free_vmap_area(va);
-		return ERR_PTR(err);
-	}
-
 	vaddr = vmap_block_vaddr(va->va_start, 0);
 	spin_lock_init(&vb->lock);
 	vb->va = va;
@@ -1595,11 +1587,12 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
 	INIT_LIST_HEAD(&vb->free_list);
 
 	vb_idx = addr_to_vb_idx(va->va_start);
-	spin_lock(&vmap_block_tree_lock);
-	err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
-	spin_unlock(&vmap_block_tree_lock);
-	BUG_ON(err);
-	radix_tree_preload_end();
+	err = xa_insert(&vmap_blocks, vb_idx, vb, gfp_mask);
+	if (err) {
+		kfree(vb);
+		free_vmap_area(va);
+		return ERR_PTR(err);
+	}
 
 	vbq = &get_cpu_var(vmap_block_queue);
 	spin_lock(&vbq->lock);
@@ -1613,12 +1606,8 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
 static void free_vmap_block(struct vmap_block *vb)
 {
 	struct vmap_block *tmp;
-	unsigned long vb_idx;
 
-	vb_idx = addr_to_vb_idx(vb->va->va_start);
-	spin_lock(&vmap_block_tree_lock);
-	tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
-	spin_unlock(&vmap_block_tree_lock);
+	tmp = xa_erase(&vmap_blocks, addr_to_vb_idx(vb->va->va_start));
 	BUG_ON(tmp != vb);
 
 	free_vmap_area_noflush(vb->va);
@@ -1724,7 +1713,6 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
 static void vb_free(unsigned long addr, unsigned long size)
 {
 	unsigned long offset;
-	unsigned long vb_idx;
 	unsigned int order;
 	struct vmap_block *vb;
 
@@ -1734,14 +1722,8 @@ static void vb_free(unsigned long addr, unsigned long size)
 	flush_cache_vunmap(addr, addr + size);
 
 	order = get_order(size);
-
 	offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT;
-
-	vb_idx = addr_to_vb_idx(addr);
-	rcu_read_lock();
-	vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
-	rcu_read_unlock();
-	BUG_ON(!vb);
+	vb = xa_load(&vmap_blocks, addr_to_vb_idx(addr));
 
 	unmap_kernel_range_noflush(addr, size);
...