Commit d949a815 authored by Peter Collingbourne, committed by Andrew Morton

mm: make minimum slab alignment a runtime property

When CONFIG_KASAN_HW_TAGS is enabled we currently increase the minimum
slab alignment to 16.  This happens even if MTE is not supported in
hardware or disabled via kasan=off, which creates an unnecessary memory
overhead in those cases.  Eliminate this overhead by making the minimum
slab alignment a runtime property and only aligning to 16 if KASAN is
enabled at runtime.

On a DragonBoard 845c (non-MTE hardware) with a kernel built with
CONFIG_KASAN_HW_TAGS, waiting for quiescence after a full Android boot I
see the following Slab measurements in /proc/meminfo (median of 3
reboots):

Before: 169020 kB
After:  167304 kB

[akpm@linux-foundation.org: make slab alignment type `unsigned int' to avoid casting]
Link: https://linux-review.googlesource.com/id/I752e725179b43b144153f4b6f584ceb646473ead
Link: https://lkml.kernel.org/r/20220427195820.1716975-2-pcc@google.com
Signed-off-by: Peter Collingbourne <pcc@google.com>
Reviewed-by: Andrey Konovalov <andreyknvl@gmail.com>
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Acked-by: David Rientjes <rientjes@google.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Eric W. Biederman <ebiederm@xmission.com>
Cc: Kees Cook <keescook@chromium.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 534aa1dc
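
Before the diff itself, here is a minimal, self-contained userspace sketch of the runtime-hook pattern the patch introduces: a default arch_slab_minalign() returns the compile-time ARCH_SLAB_MINALIGN, and an arm64-style override raises it to MTE_GRANULE_SIZE only when hardware tag-based KASAN is actually enabled. The kasan_hw_tags_enabled() stub and the simplified calculate_alignment() below are illustrative stand-ins, not kernel code.

/*
 * Illustrative sketch only -- not kernel code. The stubs below stand in
 * for the real kasan_hw_tags_enabled() and slab alignment logic.
 */
#include <stdbool.h>
#include <stdio.h>

#define ARCH_SLAB_MINALIGN	__alignof__(unsigned long long)
#define MTE_GRANULE_SIZE	16U

/* Stand-in for the runtime KASAN check; false on non-MTE hardware. */
static bool kasan_hw_tags_enabled(void)
{
	return false;
}

/*
 * arm64-style override: demand 16-byte slab alignment only when hardware
 * tag-based KASAN is live, otherwise fall back to the compile-time minimum.
 */
static inline unsigned int arch_slab_minalign(void)
{
	return kasan_hw_tags_enabled() ? MTE_GRANULE_SIZE :
					 (unsigned int)ARCH_SLAB_MINALIGN;
}

/* Simplified version of the slab core's alignment calculation. */
static unsigned int calculate_alignment(unsigned int align)
{
	if (align < arch_slab_minalign())
		align = arch_slab_minalign();
	return align;
}

int main(void)
{
	/* With KASAN off at runtime this prints 8 on typical 64-bit targets. */
	printf("minimum slab alignment: %u\n", calculate_alignment(1));
	return 0;
}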
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -6,6 +6,7 @@
 #define __ASM_CACHE_H
 
 #include <asm/cputype.h>
+#include <asm/mte-def.h>
 
 #define CTR_L1IP_SHIFT		14
 #define CTR_L1IP_MASK		3
@@ -49,16 +50,22 @@
  */
 #define ARCH_DMA_MINALIGN	(128)
 
+#ifndef __ASSEMBLY__
+
+#include <linux/bitops.h>
+#include <linux/kasan-enabled.h>
+
 #ifdef CONFIG_KASAN_SW_TAGS
 #define ARCH_SLAB_MINALIGN	(1ULL << KASAN_SHADOW_SCALE_SHIFT)
 #elif defined(CONFIG_KASAN_HW_TAGS)
-#define ARCH_SLAB_MINALIGN	MTE_GRANULE_SIZE
+static inline unsigned int arch_slab_minalign(void)
+{
+	return kasan_hw_tags_enabled() ? MTE_GRANULE_SIZE :
+					 __alignof__(unsigned long long);
+}
+#define arch_slab_minalign() arch_slab_minalign()
 #endif
 
-#ifndef __ASSEMBLY__
-
-#include <linux/bitops.h>
-
 #define ICACHEF_ALIASING	0
 #define ICACHEF_VPIPT		1
 extern unsigned long __icache_flags;
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -209,6 +209,18 @@ void kmem_dump_obj(void *object);
 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
 #endif
 
+/*
+ * Arches can define this function if they want to decide the minimum slab
+ * alignment at runtime. The value returned by the function must be a power
+ * of two and >= ARCH_SLAB_MINALIGN.
+ */
+#ifndef arch_slab_minalign
+static inline unsigned int arch_slab_minalign(void)
+{
+	return ARCH_SLAB_MINALIGN;
+}
+#endif
+
 /*
  * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned
  * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3009,10 +3009,9 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 	objp += obj_offset(cachep);
 	if (cachep->ctor && cachep->flags & SLAB_POISON)
 		cachep->ctor(objp);
-	if (ARCH_SLAB_MINALIGN &&
-	    ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) {
-		pr_err("0x%px: not aligned to ARCH_SLAB_MINALIGN=%d\n",
-		       objp, (int)ARCH_SLAB_MINALIGN);
+	if ((unsigned long)objp & (arch_slab_minalign() - 1)) {
+		pr_err("0x%px: not aligned to arch_slab_minalign()=%u\n", objp,
+		       arch_slab_minalign());
 	}
 	return objp;
 }
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -154,8 +154,7 @@ static unsigned int calculate_alignment(slab_flags_t flags,
 		align = max(align, ralign);
 	}
 
-	if (align < ARCH_SLAB_MINALIGN)
-		align = ARCH_SLAB_MINALIGN;
+	align = max(align, arch_slab_minalign());
 
 	return ALIGN(align, sizeof(void *));
 }
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -478,9 +478,11 @@ static __always_inline void *
 __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
 {
 	unsigned int *m;
-	int minalign = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+	unsigned int minalign;
 	void *ret;
 
+	minalign = max_t(unsigned int, ARCH_KMALLOC_MINALIGN,
+			 arch_slab_minalign());
 	gfp &= gfp_allowed_mask;
 
 	might_alloc(gfp);
@@ -493,7 +495,7 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
 		 * kmalloc()'d objects.
 		 */
 		if (is_power_of_2(size))
-			align = max(minalign, (int) size);
+			align = max_t(unsigned int, minalign, size);
 
 		if (!size)
 			return ZERO_SIZE_PTR;
@@ -555,8 +557,11 @@ void kfree(const void *block)
 
 	sp = virt_to_folio(block);
 	if (folio_test_slab(sp)) {
-		int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+		unsigned int align = max_t(unsigned int,
+					   ARCH_KMALLOC_MINALIGN,
+					   arch_slab_minalign());
 		unsigned int *m = (unsigned int *)(block - align);
+
 		slob_free(m, *m + align);
 	} else {
 		unsigned int order = folio_order(sp);
@@ -573,7 +578,7 @@ EXPORT_SYMBOL(kfree);
 size_t __ksize(const void *block)
 {
 	struct folio *folio;
-	int align;
+	unsigned int align;
 	unsigned int *m;
 
 	BUG_ON(!block);
@@ -584,7 +589,8 @@ size_t __ksize(const void *block)
 	if (unlikely(!folio_test_slab(folio)))
 		return folio_size(folio);
 
-	align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+	align = max_t(unsigned int, ARCH_KMALLOC_MINALIGN,
+		      arch_slab_minalign());
 	m = (unsigned int *)(block - align);
 	return SLOB_UNITS(*m) * SLOB_UNIT;
 }