Commit dc19745a authored by Vlastimil Babka

Merge branch 'slub-tiny-v1r6' into slab/for-next

Merge my series [1] to deprecate the SLOB allocator.
- Renames CONFIG_SLOB to CONFIG_SLOB_DEPRECATED with a deprecation notice.
- The recommended replacement is CONFIG_SLUB, optionally with the new
  CONFIG_SLUB_TINY tweaks for systems with 16MB or less RAM.
- Use cases that stopped working with CONFIG_SLUB_TINY instead of SLOB
  should be reported to linux-mm@kvack.org and the slab maintainers;
  otherwise SLOB will be removed in a few cycles.

[1] https://lore.kernel.org/all/20221121171202.22080-1-vbabka@suse.cz/
parents 61766652 149b6fa2
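
Every defconfig hunk below applies the same substitution; as a summary sketch (the surrounding options vary per board):

    # before (SLOB)
    CONFIG_SLOB=y
    # after (SLUB tuned for minimal footprint)
    CONFIG_SLUB=y
    CONFIG_SLUB_TINY=y
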
@@ -14,7 +14,8 @@ CONFIG_ARCH_EDB7211=y
 CONFIG_ARCH_P720T=y
 CONFIG_AEABI=y
 # CONFIG_COREDUMP is not set
-CONFIG_SLOB=y
+CONFIG_SLUB=y
+CONFIG_SLUB_TINY=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
...
@@ -13,7 +13,8 @@ CONFIG_CMDLINE="noinitrd root=/dev/mtdblock2 rootfstype=jffs2 fbcon=rotate:1"
 CONFIG_FPE_NWFPE=y
 CONFIG_PM=y
 # CONFIG_SWAP is not set
-CONFIG_SLOB=y
+CONFIG_SLUB=y
+CONFIG_SLUB_TINY=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
...
@@ -25,7 +25,8 @@ CONFIG_ARM_CLPS711X_CPUIDLE=y
 CONFIG_JUMP_LABEL=y
 CONFIG_PARTITION_ADVANCED=y
 # CONFIG_COREDUMP is not set
-CONFIG_SLOB=y
+CONFIG_SLUB=y
+CONFIG_SLUB_TINY=y
 CONFIG_MTD=y
 CONFIG_MTD_CMDLINE_PARTS=y
 CONFIG_MTD_BLOCK=y
...
@@ -42,7 +42,8 @@ CONFIG_MODULE_FORCE_UNLOAD=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_BINFMT_MISC=y
 # CONFIG_SWAP is not set
-CONFIG_SLOB=y
+CONFIG_SLUB=y
+CONFIG_SLUB_TINY=y
 # CONFIG_VM_EVENT_COUNTERS is not set
 CONFIG_NET=y
 CONFIG_PACKET=y
...
@@ -49,7 +49,8 @@ CONFIG_PARTITION_ADVANCED=y
 CONFIG_LDM_PARTITION=y
 CONFIG_CMDLINE_PARTITION=y
 CONFIG_BINFMT_MISC=y
-CONFIG_SLOB=y
+CONFIG_SLUB=y
+CONFIG_SLUB_TINY=y
 # CONFIG_COMPACTION is not set
 CONFIG_NET=y
 CONFIG_PACKET=y
...
@@ -19,7 +19,8 @@ CONFIG_FPE_NWFPE=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_SWAP is not set
-CONFIG_SLOB=y
+CONFIG_SLUB=y
+CONFIG_SLUB_TINY=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
...
@@ -26,7 +26,8 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
 # CONFIG_BLOCK is not set
-CONFIG_SLOB=y
+CONFIG_SLUB=y
+CONFIG_SLUB_TINY=y
 # CONFIG_COMPAT_BRK is not set
 # CONFIG_VM_EVENT_COUNTERS is not set
 CONFIG_NET=y
...
@@ -10,7 +10,8 @@ CONFIG_EXPERT=y
 # CONFIG_AIO is not set
 # CONFIG_VM_EVENT_COUNTERS is not set
 # CONFIG_COMPAT_BRK is not set
-CONFIG_SLOB=y
+CONFIG_SLUB=y
+CONFIG_SLUB_TINY=y
 CONFIG_MODULES=y
 # CONFIG_BLOCK is not set
 CONFIG_OPENRISC_BUILTIN_DTB="or1ksim"
...
@@ -16,7 +16,8 @@ CONFIG_EXPERT=y
 # CONFIG_AIO is not set
 # CONFIG_VM_EVENT_COUNTERS is not set
 # CONFIG_COMPAT_BRK is not set
-CONFIG_SLOB=y
+CONFIG_SLUB=y
+CONFIG_SLUB_TINY=y
 CONFIG_MODULES=y
 # CONFIG_BLOCK is not set
 CONFIG_OPENRISC_BUILTIN_DTB="simple_smp"
...
@@ -25,7 +25,8 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_EMBEDDED=y
 # CONFIG_VM_EVENT_COUNTERS is not set
 # CONFIG_COMPAT_BRK is not set
-CONFIG_SLOB=y
+CONFIG_SLUB=y
+CONFIG_SLUB_TINY=y
 # CONFIG_MMU is not set
 CONFIG_SOC_CANAAN=y
 CONFIG_NONPORTABLE=y
...
@@ -17,7 +17,8 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_EMBEDDED=y
 # CONFIG_VM_EVENT_COUNTERS is not set
 # CONFIG_COMPAT_BRK is not set
-CONFIG_SLOB=y
+CONFIG_SLUB=y
+CONFIG_SLUB_TINY=y
 # CONFIG_MMU is not set
 CONFIG_SOC_CANAAN=y
 CONFIG_NONPORTABLE=y
...
@@ -22,7 +22,8 @@ CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_VM_EVENT_COUNTERS is not set
 # CONFIG_COMPAT_BRK is not set
-CONFIG_SLOB=y
+CONFIG_SLUB=y
+CONFIG_SLUB_TINY=y
 # CONFIG_MMU is not set
 CONFIG_SOC_VIRT=y
 CONFIG_NONPORTABLE=y
...
@@ -10,7 +10,8 @@ CONFIG_USER_NS=y
 CONFIG_PID_NS=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_AIO is not set
-CONFIG_SLOB=y
+CONFIG_SLUB=y
+CONFIG_SLUB_TINY=y
 CONFIG_PROFILING=y
 CONFIG_MODULES=y
 # CONFIG_BLK_DEV_BSG is not set
...
@@ -11,7 +11,8 @@ CONFIG_USER_NS=y
 CONFIG_PID_NS=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_KALLSYMS_ALL=y
-CONFIG_SLOB=y
+CONFIG_SLUB=y
+CONFIG_SLUB_TINY=y
 CONFIG_PROFILING=y
 CONFIG_MODULES=y
 # CONFIG_BLK_DEV_BSG is not set
...
@@ -21,7 +21,8 @@ CONFIG_BLK_DEV_INITRD=y
 CONFIG_KALLSYMS_ALL=y
 # CONFIG_ELF_CORE is not set
 # CONFIG_COMPAT_BRK is not set
-CONFIG_SLOB=y
+CONFIG_SLUB=y
+CONFIG_SLUB_TINY=y
 CONFIG_PROFILING=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
...
@@ -9,7 +9,8 @@ CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_FUTEX is not set
 # CONFIG_EPOLL is not set
 # CONFIG_SHMEM is not set
-CONFIG_SLOB=y
+CONFIG_SLUB=y
+CONFIG_SLUB_TINY=y
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_CPU_SUBTYPE_SH7706=y
 CONFIG_MEMORY_START=0x0c000000
...
@@ -20,7 +20,8 @@ CONFIG_USER_NS=y
 CONFIG_PID_NS=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_KALLSYMS_ALL=y
-CONFIG_SLOB=y
+CONFIG_SLUB=y
+CONFIG_SLUB_TINY=y
 CONFIG_PROFILING=y
 CONFIG_KPROBES=y
 CONFIG_MODULES=y
...
@@ -129,7 +129,11 @@
 
 /* The following flags affect the page allocator grouping pages by mobility */
 /* Objects are reclaimable */
+#ifndef CONFIG_SLUB_TINY
 #define SLAB_RECLAIM_ACCOUNT	((slab_flags_t __force)0x00020000U)
+#else
+#define SLAB_RECLAIM_ACCOUNT	((slab_flags_t __force)0)
+#endif
 #define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
 
 /*
@@ -336,12 +340,17 @@ enum kmalloc_cache_type {
 #endif
 #ifndef CONFIG_MEMCG_KMEM
 	KMALLOC_CGROUP = KMALLOC_NORMAL,
-#else
-	KMALLOC_CGROUP,
 #endif
+#ifdef CONFIG_SLUB_TINY
+	KMALLOC_RECLAIM = KMALLOC_NORMAL,
+#else
 	KMALLOC_RECLAIM,
+#endif
 #ifdef CONFIG_ZONE_DMA
 	KMALLOC_DMA,
 #endif
+#ifdef CONFIG_MEMCG_KMEM
+	KMALLOC_CGROUP,
+#endif
 	NR_KMALLOC_TYPES
 };
...
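
For illustration, a sketch (not literal preprocessor output) of how the enum above resolves with CONFIG_SLUB_TINY=y, CONFIG_ZONE_DMA=y and CONFIG_MEMCG_KMEM=y; moving KMALLOC_CGROUP behind KMALLOC_DMA keeps the enumerator values unique once KMALLOC_RECLAIM aliases KMALLOC_NORMAL:

    enum kmalloc_cache_type {
    	KMALLOC_NORMAL = 0,
    	KMALLOC_RECLAIM = KMALLOC_NORMAL,	/* reclaim caches fold into normal */
    	KMALLOC_DMA,				/* = 1 */
    	KMALLOC_CGROUP,				/* = 2 */
    	NR_KMALLOC_TYPES			/* = 3 */
    };
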
@@ -80,8 +80,10 @@ struct kmem_cache {
 	unsigned int *random_seq;
 #endif
 
+#ifdef CONFIG_HARDENED_USERCOPY
 	unsigned int useroffset;	/* Usercopy region offset */
 	unsigned int usersize;		/* Usercopy region size */
+#endif
 
 	struct kmem_cache_node *node[MAX_NUMNODES];
 };
...
@@ -41,6 +41,7 @@ enum stat_item {
 	CPU_PARTIAL_DRAIN,	/* Drain cpu partial to node partial */
 	NR_SLUB_STAT_ITEMS };
 
+#ifndef CONFIG_SLUB_TINY
 /*
  * When changing the layout, make sure freelist and tid are still compatible
  * with this_cpu_cmpxchg_double() alignment requirements.
@@ -57,6 +58,7 @@ struct kmem_cache_cpu {
 	unsigned stat[NR_SLUB_STAT_ITEMS];
 #endif
 };
+#endif /* CONFIG_SLUB_TINY */
 
 #ifdef CONFIG_SLUB_CPU_PARTIAL
 #define slub_percpu_partial(c)		((c)->partial)
@@ -88,7 +90,9 @@ struct kmem_cache_order_objects {
  * Slab cache management.
  */
 struct kmem_cache {
+#ifndef CONFIG_SLUB_TINY
 	struct kmem_cache_cpu __percpu *cpu_slab;
+#endif
 	/* Used for retrieving partial slabs, etc. */
 	slab_flags_t flags;
 	unsigned long min_partial;
@@ -136,13 +140,15 @@ struct kmem_cache {
 	struct kasan_cache kasan_info;
 #endif
 
+#ifdef CONFIG_HARDENED_USERCOPY
 	unsigned int useroffset;	/* Usercopy region offset */
 	unsigned int usersize;		/* Usercopy region size */
+#endif
 
 	struct kmem_cache_node *node[MAX_NUMNODES];
 };
 
-#ifdef CONFIG_SYSFS
+#if defined(CONFIG_SYSFS) && !defined(CONFIG_SLUB_TINY)
 #define SLAB_SUPPORTS_SYSFS
 void sysfs_slab_unlink(struct kmem_cache *);
 void sysfs_slab_release(struct kmem_cache *);
...
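
With CONFIG_SLUB_TINY the whole per-CPU slab machinery above is compiled out, so any accessor that touches cpu_slab must sit under the same guard. A minimal sketch of the pattern, using a hypothetical helper name:

    #ifndef CONFIG_SLUB_TINY
    /* hypothetical accessor: only meaningful when per-CPU slabs exist */
    static inline struct kmem_cache_cpu *get_cpu_slab(struct kmem_cache *s)
    {
    	return raw_cpu_ptr(s->cpu_slab);
    }
    #endif /* CONFIG_SLUB_TINY */
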
@@ -7,5 +7,6 @@ CONFIG_KERNEL_XZ=y
 # CONFIG_KERNEL_LZO is not set
 # CONFIG_KERNEL_LZ4 is not set
 # CONFIG_SLAB is not set
-# CONFIG_SLUB is not set
-CONFIG_SLOB=y
+# CONFIG_SLOB_DEPRECATED is not set
+CONFIG_SLUB=y
+CONFIG_SLUB_TINY=y
@@ -37,7 +37,7 @@ menuconfig KASAN
 	  (HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS)) && \
 	 CC_HAS_WORKING_NOSANITIZE_ADDRESS) || \
 	HAVE_ARCH_KASAN_HW_TAGS
-	depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB)
+	depends on (SLUB && SYSFS && !SLUB_TINY) || (SLAB && !DEBUG_SLAB)
 	select STACKDEPOT_ALWAYS_INIT
 	help
 	  Enables KASAN (Kernel Address Sanitizer) - a dynamic memory safety
...
@@ -219,17 +219,43 @@ config SLUB
 	   and has enhanced diagnostics. SLUB is the default choice for
 	   a slab allocator.
 
-config SLOB
+config SLOB_DEPRECATED
 	depends on EXPERT
-	bool "SLOB (Simple Allocator)"
+	bool "SLOB (Simple Allocator - DEPRECATED)"
 	depends on !PREEMPT_RT
 	help
+	   Deprecated and scheduled for removal in a few cycles. SLUB
+	   recommended as replacement. CONFIG_SLUB_TINY can be considered
+	   on systems with 16MB or less RAM.
+
+	   If you need SLOB to stay, please contact linux-mm@kvack.org and
+	   people listed in the SLAB ALLOCATOR section of MAINTAINERS file,
+	   with your use case.
+
 	   SLOB replaces the stock allocator with a drastically simpler
 	   allocator. SLOB is generally more space efficient but
 	   does not perform as well on large systems.
 
 endchoice
 
+config SLOB
+	bool
+	default y
+	depends on SLOB_DEPRECATED
+
+config SLUB_TINY
+	bool "Configure SLUB for minimal memory footprint"
+	depends on SLUB && EXPERT
+	select SLAB_MERGE_DEFAULT
+	help
+	   Configures the SLUB allocator in a way to achieve minimal memory
+	   footprint, sacrificing scalability, debugging and other features.
+	   This is intended only for the smallest system that had used the
+	   SLOB allocator and is not recommended for systems with more than
+	   16MB RAM.
+
+	   If unsure, say N.
+
 config SLAB_MERGE_DEFAULT
 	bool "Allow slab caches to be merged"
 	default y
@@ -247,7 +273,7 @@ config SLAB_MERGE_DEFAULT
 
 config SLAB_FREELIST_RANDOM
 	bool "Randomize slab freelist"
-	depends on SLAB || SLUB
+	depends on SLAB || (SLUB && !SLUB_TINY)
 	help
 	  Randomizes the freelist order used on creating new pages. This
 	  security feature reduces the predictability of the kernel slab
@@ -255,7 +281,7 @@ config SLAB_FREELIST_RANDOM
 
 config SLAB_FREELIST_HARDENED
 	bool "Harden slab freelist metadata"
-	depends on SLAB || SLUB
+	depends on SLAB || (SLUB && !SLUB_TINY)
 	help
 	  Many kernel heap attacks try to target slab cache metadata and
 	  other infrastructure. This options makes minor performance
@@ -267,7 +293,7 @@ config SLAB_FREELIST_HARDENED
 config SLUB_STATS
 	default n
 	bool "Enable SLUB performance statistics"
-	depends on SLUB && SYSFS
+	depends on SLUB && SYSFS && !SLUB_TINY
 	help
 	  SLUB statistics are useful to debug SLUBs allocation behavior in
 	  order find ways to optimize the allocator. This should never be
@@ -279,7 +305,7 @@ config SLUB_STATS
 
 config SLUB_CPU_PARTIAL
 	default y
-	depends on SLUB && SMP
+	depends on SLUB && SMP && !SLUB_TINY
 	bool "SLUB per cpu partial cache"
 	help
 	  Per cpu partial caches accelerate objects allocation and freeing
...
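
To try the replacement on a config that currently selects SLOB, one possible workflow (assuming a tree with this series applied; scripts/config is the in-tree config helper):

    scripts/config --disable SLOB_DEPRECATED --enable SLUB --enable SLUB_TINY
    make olddefconfig
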
@@ -56,7 +56,7 @@ config DEBUG_SLAB
 config SLUB_DEBUG
 	default y
 	bool "Enable SLUB debugging support" if EXPERT
-	depends on SLUB && SYSFS
+	depends on SLUB && SYSFS && !SLUB_TINY
 	select STACKDEPOT if STACKTRACE_SUPPORT
 	help
 	  SLUB has extensive debug support features. Disabling these can
...
@@ -217,8 +217,6 @@ struct kmem_cache {
 	unsigned int size;	/* The aligned/padded/added on size */
 	unsigned int align;	/* Alignment as calculated */
 	slab_flags_t flags;	/* Active flags on the slab */
-	unsigned int useroffset;/* Usercopy region offset */
-	unsigned int usersize;	/* Usercopy region size */
 	const char *name;	/* Slab name for sysfs */
 	int refcount;		/* Use counter */
 	void (*ctor)(void *);	/* Called on object slot creation */
...
@@ -143,8 +143,10 @@ int slab_unmergeable(struct kmem_cache *s)
 	if (s->ctor)
 		return 1;
 
+#ifdef CONFIG_HARDENED_USERCOPY
 	if (s->usersize)
 		return 1;
+#endif
 
 	/*
 	 * We may have set a slab to be unmergeable during bootstrap.
@@ -223,8 +225,10 @@ static struct kmem_cache *create_cache(const char *name,
 	s->size = s->object_size = object_size;
 	s->align = align;
 	s->ctor = ctor;
+#ifdef CONFIG_HARDENED_USERCOPY
 	s->useroffset = useroffset;
 	s->usersize = usersize;
+#endif
 
 	err = __kmem_cache_create(s, flags);
 	if (err)
@@ -317,7 +321,8 @@ kmem_cache_create_usercopy(const char *name,
 
 	flags &= CACHE_CREATE_MASK;
 
 	/* Fail closed on bad usersize of useroffset values. */
-	if (WARN_ON(!usersize && useroffset) ||
+	if (!IS_ENABLED(CONFIG_HARDENED_USERCOPY) ||
+	    WARN_ON(!usersize && useroffset) ||
 	    WARN_ON(size < usersize || size - usersize < useroffset))
 		usersize = useroffset = 0;
@@ -595,8 +600,8 @@ void kmem_dump_obj(void *object)
 		ptroffset = ((char *)object - (char *)kp.kp_objp) - kp.kp_data_offset;
 		pr_cont(" pointer offset %lu", ptroffset);
 	}
-	if (kp.kp_slab_cache && kp.kp_slab_cache->usersize)
-		pr_cont(" size %u", kp.kp_slab_cache->usersize);
+	if (kp.kp_slab_cache && kp.kp_slab_cache->object_size)
+		pr_cont(" size %u", kp.kp_slab_cache->object_size);
 	if (kp.kp_ret)
 		pr_cont(" allocated at %pS\n", kp.kp_ret);
 	else
@@ -640,8 +645,10 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name,
 		align = max(align, size);
 	s->align = calculate_alignment(flags, align, size);
 
+#ifdef CONFIG_HARDENED_USERCOPY
 	s->useroffset = useroffset;
 	s->usersize = usersize;
+#endif
 
 	err = __kmem_cache_create(s, flags);
@@ -766,10 +773,16 @@ EXPORT_SYMBOL(kmalloc_size_roundup);
 #define KMALLOC_CGROUP_NAME(sz)
 #endif
 
+#ifndef CONFIG_SLUB_TINY
+#define KMALLOC_RCL_NAME(sz)	.name[KMALLOC_RECLAIM] = "kmalloc-rcl-" #sz,
+#else
+#define KMALLOC_RCL_NAME(sz)
+#endif
+
 #define INIT_KMALLOC_INFO(__size, __short_size)			\
 {								\
 	.name[KMALLOC_NORMAL]  = "kmalloc-" #__short_size,	\
-	.name[KMALLOC_RECLAIM] = "kmalloc-rcl-" #__short_size,	\
+	KMALLOC_RCL_NAME(__short_size)				\
 	KMALLOC_CGROUP_NAME(__short_size)			\
 	KMALLOC_DMA_NAME(__short_size)				\
 	.size = __size,						\
@@ -855,7 +868,7 @@ void __init setup_kmalloc_cache_index_table(void)
 static void __init
 new_kmalloc_cache(int idx, enum kmalloc_cache_type type, slab_flags_t flags)
 {
-	if (type == KMALLOC_RECLAIM) {
+	if ((KMALLOC_RECLAIM != KMALLOC_NORMAL) && (type == KMALLOC_RECLAIM)) {
 		flags |= SLAB_RECLAIM_ACCOUNT;
 	} else if (IS_ENABLED(CONFIG_MEMCG_KMEM) && (type == KMALLOC_CGROUP)) {
 		if (mem_cgroup_kmem_disabled()) {
...
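
The rewritten condition in new_kmalloc_cache() works because both operands are compile-time constants: under CONFIG_SLUB_TINY, KMALLOC_RECLAIM == KMALLOC_NORMAL, so the SLAB_RECLAIM_ACCOUNT branch folds away as dead code. A standalone sketch of the same pattern, with hypothetical names:

    enum { TYPE_NORMAL = 0, TYPE_RECLAIM = TYPE_NORMAL };	/* aliased, as under SLUB_TINY */

    static int wants_reclaim_account(int type)
    {
    	/* (TYPE_RECLAIM != TYPE_NORMAL) is constant-false here, so the
    	 * compiler folds the whole condition to 0 and drops the branch. */
    	return (TYPE_RECLAIM != TYPE_NORMAL) && (type == TYPE_RECLAIM);
    }
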