Commit edd89818 authored by Uladzislau Rezki (Sony), committed by Andrew Morton

mm: vmalloc: avoid calling __find_vmap_area() twice in __vunmap()

Currently the __vunmap() path calls __find_vmap_area() twice: once on
entry to check that the area exists, and again inside remove_vm_area(),
which performs a fresh search for the VA.

In order to improve it from a performance point of view, we split
remove_vm_area() into two new parts:
  - find_unlink_vmap_area() that searches for the VA and unlinks it
    from the tree;
  - __remove_vm_area() that removes it without searching.

There is no functional change for remove_vm_area(), whereas
vm_remove_mappings(), where the second search used to happen, switches
to the __remove_vm_area() variant, which takes the already detached VA
as a parameter and therefore has no need to find it again.
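
To make the new shape concrete, remove_vm_area() becomes a thin
composition of the two helpers (this matches the hunk in the diff
below; the comments are added here purely for illustration):

	struct vm_struct *remove_vm_area(const void *addr)
	{
		might_sleep();

		/* One pass: search and unlink under vmap_area_lock,
		 * then tear the mapping down without a second lookup. */
		return __remove_vm_area(
			find_unlink_vmap_area((unsigned long) addr));
	}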

Performance-wise, I use test_vmalloc.sh with 32 threads doing
alloc/free on a 64-CPU x86_64 box:

perf without this patch:
-   31.41%     0.50%  vmalloc_test/10  [kernel.vmlinux]    [k] __vunmap
   - 30.92% __vunmap
      - 17.67% _raw_spin_lock
           native_queued_spin_lock_slowpath
      - 12.33% remove_vm_area
         - 11.79% free_vmap_area_noflush
            - 11.18% _raw_spin_lock
                 native_queued_spin_lock_slowpath
        0.76% free_unref_page

perf with this patch:
-   11.35%     0.13%  vmalloc_test/14  [kernel.vmlinux]    [k] __vunmap
   - 11.23% __vunmap
      - 8.28% find_unlink_vmap_area
         - 7.95% _raw_spin_lock
              7.44% native_queued_spin_lock_slowpath
      - 1.93% free_vmap_area_noflush
         - 0.56% _raw_spin_lock
              0.53% native_queued_spin_lock_slowpath
        0.60% __vunmap_range_noflush

__vunmap() consumes around 20% fewer CPU cycles in this test.

Also, switch vm_unmap_ram() from find_vmap_area() to
find_unlink_vmap_area() to avoid taking vmap_area_lock twice: once to
find the area and a second time to unlink it from the tree.
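
For reference, the helper keeps the search and the unlink inside a
single critical section (annotated sketch of the new function from the
diff below):

	static struct vmap_area *find_unlink_vmap_area(unsigned long addr)
	{
		struct vmap_area *va;

		/* Take vmap_area_lock once for both operations, instead
		 * of locking in find_vmap_area() and then again in
		 * free_vmap_area_noflush() to unlink the VA. */
		spin_lock(&vmap_area_lock);
		va = __find_vmap_area(addr, &vmap_area_root);
		if (va)
			unlink_va(va, &vmap_area_root);
		spin_unlock(&vmap_area_lock);

		return va;
	}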

[urezki@gmail.com: switch to find_unlink_vmap_area() in vm_unmap_ram()]
  Link: https://lkml.kernel.org/r/20221222190022.134380-2-urezki@gmail.com
Link: https://lkml.kernel.org/r/20221222190022.134380-1-urezki@gmail.com
Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Reported-by: Roman Gushchin <roman.gushchin@linux.dev>
Reviewed-by: Lorenzo Stoakes <lstoakes@gmail.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Oleksiy Avramchenko <oleksiy.avramchenko@sony.com>
Cc: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent b5054174

--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1815,9 +1815,9 @@ static void drain_vmap_area_work(struct work_struct *work)
 }
 
 /*
- * Free a vmap area, caller ensuring that the area has been unmapped
- * and flush_cache_vunmap had been called for the correct range
- * previously.
+ * Free a vmap area, caller ensuring that the area has been unmapped,
+ * unlinked and flush_cache_vunmap had been called for the correct
+ * range previously.
  */
 static void free_vmap_area_noflush(struct vmap_area *va)
 {
@@ -1825,9 +1825,8 @@ static void free_vmap_area_noflush(struct vmap_area *va)
 	unsigned long va_start = va->va_start;
 	unsigned long nr_lazy;
 
-	spin_lock(&vmap_area_lock);
-	unlink_va(va, &vmap_area_root);
-	spin_unlock(&vmap_area_lock);
+	if (WARN_ON_ONCE(!list_empty(&va->list)))
+		return;
 
 	nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >>
 		PAGE_SHIFT, &vmap_lazy_nr);
@@ -1871,6 +1870,19 @@ struct vmap_area *find_vmap_area(unsigned long addr)
 	return va;
 }
 
+static struct vmap_area *find_unlink_vmap_area(unsigned long addr)
+{
+	struct vmap_area *va;
+
+	spin_lock(&vmap_area_lock);
+	va = __find_vmap_area(addr, &vmap_area_root);
+	if (va)
+		unlink_va(va, &vmap_area_root);
+	spin_unlock(&vmap_area_lock);
+
+	return va;
+}
+
 /*** Per cpu kva allocator ***/
 
 /*
@@ -2015,6 +2027,10 @@ static void free_vmap_block(struct vmap_block *vb)
 	tmp = xa_erase(&vmap_blocks, addr_to_vb_idx(vb->va->va_start));
 	BUG_ON(tmp != vb);
 
+	spin_lock(&vmap_area_lock);
+	unlink_va(vb->va, &vmap_area_root);
+	spin_unlock(&vmap_area_lock);
+
 	free_vmap_area_noflush(vb->va);
 	kfree_rcu(vb, rcu_head);
 }
@@ -2236,7 +2252,7 @@ void vm_unmap_ram(const void *mem, unsigned int count)
 		return;
 	}
 
-	va = find_vmap_area(addr);
+	va = find_unlink_vmap_area(addr);
 	BUG_ON(!va);
 	debug_check_no_locks_freed((void *)va->va_start,
 				   (va->va_end - va->va_start));
@@ -2591,6 +2607,20 @@ struct vm_struct *find_vm_area(const void *addr)
 	return va->vm;
 }
 
+static struct vm_struct *__remove_vm_area(struct vmap_area *va)
+{
+	struct vm_struct *vm;
+
+	if (!va || !va->vm)
+		return NULL;
+
+	vm = va->vm;
+	kasan_free_module_shadow(vm);
+	free_unmap_vmap_area(va);
+
+	return vm;
+}
+
 /**
  * remove_vm_area - find and remove a continuous kernel virtual area
  * @addr:	    base address
@@ -2603,26 +2633,10 @@ struct vm_struct *find_vm_area(const void *addr)
  */
 struct vm_struct *remove_vm_area(const void *addr)
 {
-	struct vmap_area *va;
-
 	might_sleep();
 
-	spin_lock(&vmap_area_lock);
-	va = __find_vmap_area((unsigned long)addr, &vmap_area_root);
-	if (va && va->vm) {
-		struct vm_struct *vm = va->vm;
-
-		va->vm = NULL;
-		spin_unlock(&vmap_area_lock);
-
-		kasan_free_module_shadow(vm);
-		free_unmap_vmap_area(va);
-
-		return vm;
-	}
-
-	spin_unlock(&vmap_area_lock);
-	return NULL;
+	return __remove_vm_area(
+		find_unlink_vmap_area((unsigned long) addr));
 }
 
 static inline void set_area_direct_map(const struct vm_struct *area,
@@ -2636,16 +2650,17 @@ static inline void set_area_direct_map(const struct vm_struct *area,
 			set_direct_map(area->pages[i]);
 }
 
-/* Handle removing and resetting vm mappings related to the vm_struct. */
-static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
+/* Handle removing and resetting vm mappings related to the VA's vm_struct. */
+static void va_remove_mappings(struct vmap_area *va, int deallocate_pages)
 {
+	struct vm_struct *area = va->vm;
 	unsigned long start = ULONG_MAX, end = 0;
 	unsigned int page_order = vm_area_page_order(area);
 	int flush_reset = area->flags & VM_FLUSH_RESET_PERMS;
 	int flush_dmap = 0;
 	int i;
 
-	remove_vm_area(area->addr);
+	__remove_vm_area(va);
 
 	/* If this is not VM_FLUSH_RESET_PERMS memory, no need for the below. */
 	if (!flush_reset)
@@ -2690,6 +2705,7 @@ static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
 static void __vunmap(const void *addr, int deallocate_pages)
 {
 	struct vm_struct *area;
+	struct vmap_area *va;
 
 	if (!addr)
 		return;
@@ -2698,19 +2714,20 @@ static void __vunmap(const void *addr, int deallocate_pages)
 			addr))
 		return;
 
-	area = find_vm_area(addr);
-	if (unlikely(!area)) {
+	va = find_unlink_vmap_area((unsigned long)addr);
+	if (unlikely(!va)) {
 		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
 				addr);
 		return;
 	}
 
+	area = va->vm;
 	debug_check_no_locks_freed(area->addr, get_vm_area_size(area));
 	debug_check_no_obj_freed(area->addr, get_vm_area_size(area));
 	kasan_poison_vmalloc(area->addr, get_vm_area_size(area));
 
-	vm_remove_mappings(area, deallocate_pages);
+	va_remove_mappings(va, deallocate_pages);
 
 	if (deallocate_pages) {
 		int i;