Commit a3967244 authored by Daniel Vetter, committed by Andrew Morton (akpm)

mm/slab: delete cache_alloc_debugcheck_before()

It only does a might_sleep_if(GFP_RECLAIM) check, which is already covered
by the might_alloc() in slab_pre_alloc_hook().  And all callers of
cache_alloc_debugcheck_before() call that beforehand already.
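For context, might_alloc() covers the same condition (and adds lockdep's
fs_reclaim annotations on top). Roughly, from include/linux/sched/mm.h
around the time of this series:

/* might_alloc(): sleep check plus fs_reclaim lockdep annotations */
static inline void might_alloc(gfp_t gfp_mask)
{
	fs_reclaim_acquire(gfp_mask);
	fs_reclaim_release(gfp_mask);

	might_sleep_if(gfpflags_allow_blocking(gfp_mask));
}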

Link: https://lkml.kernel.org/r/20220605152539.3196045-2-daniel.vetter@ffwll.ch
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 446ec838
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2958,12 +2958,6 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
 	return ac->entry[--ac->avail];
 }
 
-static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
-						 gfp_t flags)
-{
-	might_sleep_if(gfpflags_allow_blocking(flags));
-}
-
 #if DEBUG
 static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 				gfp_t flags, void *objp, unsigned long caller)
@@ -3205,7 +3199,6 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, size_t orig_
 	if (unlikely(ptr))
 		goto out_hooks;
 
-	cache_alloc_debugcheck_before(cachep, flags);
 	local_irq_save(save_flags);
 	if (nodeid == NUMA_NO_NODE)
@@ -3290,7 +3283,6 @@ slab_alloc(struct kmem_cache *cachep, struct list_lru *lru, gfp_t flags,
 	if (unlikely(objp))
 		goto out;
 
-	cache_alloc_debugcheck_before(cachep, flags);
 	local_irq_save(save_flags);
 	objp = __do_cache_alloc(cachep, flags);
 	local_irq_restore(save_flags);
@@ -3527,8 +3519,6 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 	if (!s)
 		return 0;
 
-	cache_alloc_debugcheck_before(s, flags);
-
 	local_irq_disable();
 	for (i = 0; i < size; i++) {
 		void *objp = kfence_alloc(s, s->object_size, flags) ?: __do_cache_alloc(s, flags);
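Why the removal is safe at every call site: each deleted line sits on a
path that has already gone through slab_pre_alloc_hook(), which calls
might_alloc() first. An abridged sketch of that hook from mm/slab.h
(the failslab and memcg handling is elided here):

/*
 * Abridged sketch of slab_pre_alloc_hook(); unrelated details elided.
 * Every caller touched above runs this hook before reaching the point
 * where cache_alloc_debugcheck_before() used to sit, so might_alloc()
 * has already performed the same might_sleep_if() check.
 */
static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     struct list_lru *lru,
						     struct obj_cgroup **objcgp,
						     size_t size, gfp_t flags)
{
	flags &= gfp_allowed_mask;

	might_alloc(flags);	/* subsumes the deleted check */

	/* ... should_failslab() and memcg pre-alloc handling elided ... */

	return s;
}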