Commit 01e2e839 authored by Christoph Hellwig, committed by Andrew Morton

mm: remove __vfree_deferred

Fold __vfree_deferred into vfree_atomic, and have vfree dispatch to
vfree_atomic early when it is called from interrupt context, so that the
extra low-level helper can be avoided.
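
For reference, a condensed sketch of the resulting control flow (lifted from
the diff below, with the kmemleak_free(), BUG_ON(in_nmi()) and might_sleep()
calls elided):

void vfree_atomic(const void *addr)
{
	struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);

	/* Defer the free to a workqueue via the lockless per-cpu llist. */
	if (addr && llist_add((struct llist_node *)addr, &p->list))
		schedule_work(&p->wq);
}

void vfree(const void *addr)
{
	/* From interrupt context, take the deferred path and return early. */
	if (unlikely(in_interrupt())) {
		vfree_atomic(addr);
		return;
	}

	if (addr)
		__vunmap(addr, 1);
}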

Link: https://lkml.kernel.org/r/20230121071051.1143058-4-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Konovalov <andreyknvl@gmail.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent f41f036b
mm/vmalloc.c
@@ -2754,20 +2754,6 @@ static void __vunmap(const void *addr, int deallocate_pages)
 	kfree(area);
 }
 
-static inline void __vfree_deferred(const void *addr)
-{
-	/*
-	 * Use raw_cpu_ptr() because this can be called from preemptible
-	 * context. Preemption is absolutely fine here, because the llist_add()
-	 * implementation is lockless, so it works even if we are adding to
-	 * another cpu's list. schedule_work() should be fine with this too.
-	 */
-	struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
-
-	if (llist_add((struct llist_node *)addr, &p->list))
-		schedule_work(&p->wq);
-}
-
 /**
  * vfree_atomic - release memory allocated by vmalloc()
  * @addr: memory base address
@@ -2777,13 +2763,19 @@ static inline void __vfree_deferred(const void *addr)
  */
 void vfree_atomic(const void *addr)
 {
-	BUG_ON(in_nmi());
+	struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
 
+	BUG_ON(in_nmi());
 	kmemleak_free(addr);
 
-	if (!addr)
-		return;
-	__vfree_deferred(addr);
+	/*
+	 * Use raw_cpu_ptr() because this can be called from preemptible
+	 * context. Preemption is absolutely fine here, because the llist_add()
+	 * implementation is lockless, so it works even if we are adding to
+	 * another cpu's list. schedule_work() should be fine with this too.
+	 */
+	if (addr && llist_add((struct llist_node *)addr, &p->list))
+		schedule_work(&p->wq);
 }
 
 /**
@@ -2805,17 +2797,16 @@ void vfree_atomic(const void *addr)
  */
 void vfree(const void *addr)
 {
-	BUG_ON(in_nmi());
+	if (unlikely(in_interrupt())) {
+		vfree_atomic(addr);
+		return;
+	}
 
+	BUG_ON(in_nmi());
 	kmemleak_free(addr);
+	might_sleep();
 
-	might_sleep_if(!in_interrupt());
-
-	if (!addr)
-		return;
-	if (unlikely(in_interrupt()))
-		__vfree_deferred(addr);
-	else
+	if (addr)
 		__vunmap(addr, 1);
 }
 EXPORT_SYMBOL(vfree);