Commit 736b378b authored by Linus Torvalds

Merge tag 'slab-for-6.4' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab

Pull slab updates from Vlastimil Babka:
 "The main change is naturally the SLOB removal. Since its deprecation
  in 6.2 I've seen no complaints so hopefully SLUB_(TINY) works well for
  everyone and we can proceed.

  Besides the code cleanup, the main immediate benefit will be allowing
  kfree() family of functions to work on kmem_cache_alloc() objects,
  which was incompatible with SLOB. This includes kfree_rcu() which had
  no kmem_cache_free_rcu() counterpart yet and now it shouldn't be
  necessary anymore.

  Besides that, there are several small code and comment improvements
  from Thomas, Thorsten and Vernon"
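
A minimal sketch of what this change permits (the struct, cache name and
function below are hypothetical, used purely for illustration):

  #include <linux/errno.h>
  #include <linux/slab.h>

  /* Hypothetical object and cache, not part of the series. */
  struct foo {
          int value;
  };

  static struct kmem_cache *foo_cache;

  static int foo_demo(void)
  {
          struct foo *f;

          foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0, 0, NULL);
          if (!foo_cache)
                  return -ENOMEM;

          f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
          if (f) {
                  /*
                   * kmem_cache_free(foo_cache, f) still works, but kfree()
                   * on a kmem_cache_alloc() object is now documented as
                   * allowed too.
                   */
                  kfree(f);
          }

          kmem_cache_destroy(foo_cache);
          return 0;
  }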

* tag 'slab-for-6.4' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab:
  mm/slab: document kfree() as allowed for kmem_cache_alloc() objects
  mm/slob: remove slob.c
  mm/slab: remove CONFIG_SLOB code from slab common code
  mm, pagemap: remove SLOB and SLQB from comments and documentation
  mm, page_flags: remove PG_slob_free
  mm/slob: remove CONFIG_SLOB
  mm/slub: fix help comment of SLUB_DEBUG
  mm: slub: make kobj_type structure constant
  slab: Adjust comment after refactoring of gfp.h
parents 11704531 ed4cdfbe
......@@ -91,9 +91,9 @@ Short descriptions to the page flags
The page is being locked for exclusive access, e.g. by undergoing read/write
IO.
7 - SLAB
The page is managed by the SLAB/SLOB/SLUB/SLQB kernel memory allocator.
When compound page is used, SLUB/SLQB will only set this flag on the head
page; SLOB will not flag it at all.
The page is managed by the SLAB/SLUB kernel memory allocator.
When compound page is used, either will only set this flag on the head
page.
10 - BUDDY
A free memory block managed by the buddy system allocator.
The buddy system organizes free memory in blocks of various orders.
......
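To make the flag layout above concrete, here is a hedged userspace sketch (not
part of the patch) that reads /proc/kpageflags, which exposes one 64-bit flags
word per page frame, and tests the SLAB (bit 7) and BUDDY (bit 10) bits for a
given PFN:

  #include <stdio.h>
  #include <stdint.h>
  #include <stdlib.h>

  /* Usage: ./kpf <pfn>  (reading /proc/kpageflags needs root) */
  int main(int argc, char **argv)
  {
          unsigned long long pfn = argc > 1 ? strtoull(argv[1], NULL, 0) : 0;
          uint64_t flags;
          FILE *f = fopen("/proc/kpageflags", "rb");

          if (!f)
                  return 1;
          if (fseek(f, (long)(pfn * sizeof(flags)), SEEK_SET) == 0 &&
              fread(&flags, sizeof(flags), 1, f) == 1)
                  printf("pfn %llu: SLAB=%llu BUDDY=%llu\n", pfn,
                         (unsigned long long)(flags >> 7 & 1),
                         (unsigned long long)(flags >> 10 & 1));
          fclose(f);
          return 0;
  }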
......@@ -170,7 +170,16 @@ should be used if a part of the cache might be copied to the userspace.
After the cache is created kmem_cache_alloc() and its convenience
wrappers can allocate memory from that cache.
When the allocated memory is no longer needed it must be freed. You can
use kvfree() for the memory allocated with `kmalloc`, `vmalloc` and
`kvmalloc`. The slab caches should be freed with kmem_cache_free(). And
don't forget to destroy the cache with kmem_cache_destroy().
When the allocated memory is no longer needed it must be freed.
Objects allocated by `kmalloc` can be freed by `kfree` or `kvfree`. Objects
allocated by `kmem_cache_alloc` can be freed with `kmem_cache_free`, `kfree`
or `kvfree`, where the latter two might be more convenient thanks to not
needing the kmem_cache pointer.
The same rules apply to _bulk and _rcu flavors of freeing functions.
Memory allocated by `vmalloc` can be freed with `vfree` or `kvfree`.
Memory allocated by `kvmalloc` can be freed with `kvfree`.
Caches created by `kmem_cache_create` should be freed with
`kmem_cache_destroy` only after freeing all the allocated objects first.
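A rough illustration of these pairings (the cache argument and sizes are
arbitrary examples; all of the free functions shown accept NULL):

  #include <linux/slab.h>
  #include <linux/vmalloc.h>

  /* Which free function pairs with which allocator. */
  static void free_pairings_sketch(struct kmem_cache *cache)
  {
          void *a = kmalloc(64, GFP_KERNEL);
          void *b = kvmalloc(8192, GFP_KERNEL);
          void *c = vmalloc(8192);
          void *d = kmem_cache_alloc(cache, GFP_KERNEL);

          kfree(a);       /* kvfree(a) would also be correct */
          kvfree(b);      /* kvmalloc() memory must go through kvfree() */
          vfree(c);       /* or kvfree(c) */
          kfree(d);       /* kmem_cache_free(cache, d) or kvfree(d) work too */

          /*
           * kmem_cache_destroy(cache) may only be called by the cache owner
           * once every object allocated from it, like 'd', has been freed.
           */
  }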
......@@ -125,7 +125,7 @@ u64 stable_page_flags(struct page *page)
/*
* pseudo flags for the well known (anonymous) memory mapped pages
*
* Note that page->_mapcount is overloaded in SLOB/SLUB/SLQB, so the
* Note that page->_mapcount is overloaded in SLAB, so the
* simple test in page_mapped() is not enough.
*/
if (!PageSlab(page) && page_mapped(page))
......@@ -165,9 +165,8 @@ u64 stable_page_flags(struct page *page)
/*
* Caveats on high order pages: page->_refcount will only be set
* -1 on the head page; SLUB/SLQB do the same for PG_slab;
* SLOB won't set PG_slab at all on compound pages.
* Caveats on high order pages: PG_buddy and PG_slab will only be set
* on the head page.
*/
if (PageBuddy(page))
u |= 1 << KPF_BUDDY;
......@@ -185,7 +184,7 @@ u64 stable_page_flags(struct page *page)
u |= kpf_copy_bit(k, KPF_LOCKED, PG_locked);
u |= kpf_copy_bit(k, KPF_SLAB, PG_slab);
if (PageTail(page) && PageSlab(compound_head(page)))
if (PageTail(page) && PageSlab(page))
u |= 1 << KPF_SLAB;
u |= kpf_copy_bit(k, KPF_ERROR, PG_error);
......
......@@ -174,9 +174,6 @@ enum pageflags {
/* Remapped by swiotlb-xen. */
PG_xen_remapped = PG_owner_priv_1,
/* SLOB */
PG_slob_free = PG_private,
#ifdef CONFIG_MEMORY_FAILURE
/*
* Compound pages. Stored in first tail page's flags.
......@@ -483,7 +480,6 @@ PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
PAGEFLAG(Workingset, workingset, PF_HEAD)
TESTCLEARFLAG(Workingset, workingset, PF_HEAD)
__PAGEFLAG(Slab, slab, PF_NO_TAIL)
__PAGEFLAG(SlobFree, slob_free, PF_NO_TAIL)
PAGEFLAG(Checked, checked, PF_NO_COMPOUND) /* Used by some filesystems */
/* Xen */
......
......@@ -976,8 +976,10 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
* either fall back to use of call_rcu() or rearrange the structure to
* position the rcu_head structure into the first 4096 bytes.
*
* Note that the allowable offset might decrease in the future, for example,
* to allow something like kmem_cache_free_rcu().
* The object to be freed can be allocated either by kmalloc() or
* kmem_cache_alloc().
*
* Note that the allowable offset might decrease in the future.
*
* The BUILD_BUG_ON check must not involve any function calls, hence the
* checks are done in macros here.
......
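A hedged sketch of the constraint described in that comment (struct and field
names invented for illustration); the rcu_head only has to live within the
first 4096 bytes of the object:

  #include <linux/rcupdate.h>
  #include <linux/slab.h>

  /* Hypothetical RCU-freed object; 'rcu' sits well below the 4096-byte limit. */
  struct demo_node {
          struct rcu_head rcu;
          int payload;
  };

  static void demo_node_release(struct demo_node *n)
  {
          /*
           * Works whether 'n' came from kmalloc() or kmem_cache_alloc();
           * offsetof(struct demo_node, rcu) is checked at build time.
           */
          kfree_rcu(n, rcu);
  }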
......@@ -298,19 +298,6 @@ static inline unsigned int arch_slab_minalign(void)
#endif
#endif
#ifdef CONFIG_SLOB
/*
* SLOB passes all requests larger than one page to the page allocator.
* No kmalloc array is necessary since objects of different sizes can
* be allocated from the same page.
*/
#define KMALLOC_SHIFT_HIGH PAGE_SHIFT
#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 3
#endif
#endif
/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
......@@ -366,7 +353,6 @@ enum kmalloc_cache_type {
NR_KMALLOC_TYPES
};
#ifndef CONFIG_SLOB
extern struct kmem_cache *
kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];
......@@ -458,7 +444,6 @@ static __always_inline unsigned int __kmalloc_index(size_t size,
}
static_assert(PAGE_SHIFT <= 20);
#define kmalloc_index(s) __kmalloc_index(s, true)
#endif /* !CONFIG_SLOB */
void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1);
......@@ -487,10 +472,6 @@ void kmem_cache_free(struct kmem_cache *s, void *objp);
void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p);
int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, void **p);
/*
* Caller must not use kfree_bulk() on memory not originally allocated
* by kmalloc(), because the SLOB allocator cannot handle this.
*/
static __always_inline void kfree_bulk(size_t size, void **p)
{
kmem_cache_free_bulk(NULL, size, p);
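For reference, a small sketch of the bulk path (array length and allocation
size are arbitrary; only successfully allocated pointers are handed back):

  #include <linux/kernel.h>
  #include <linux/slab.h>

  /* Bulk-free a batch of kmalloc() objects through kfree_bulk(). */
  static void bulk_free_sketch(void)
  {
          void *objs[4];
          size_t n = 0;

          while (n < ARRAY_SIZE(objs)) {
                  objs[n] = kmalloc(128, GFP_KERNEL);
                  if (!objs[n])
                          break;
                  n++;
          }

          if (n)
                  kfree_bulk(n, objs); /* == kmem_cache_free_bulk(NULL, n, objs) */
  }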
......@@ -526,7 +507,7 @@ void *kmalloc_large_node(size_t size, gfp_t flags, int node) __assume_page_align
* to be at least to the size.
*
* The @flags argument may be one of the GFP flags defined at
* include/linux/gfp.h and described at
* include/linux/gfp_types.h and described at
* :ref:`Documentation/core-api/mm-api.rst <mm-api-gfp-flags>`
*
* The recommended usage of the @flags is described at
......@@ -567,7 +548,6 @@ void *kmalloc_large_node(size_t size, gfp_t flags, int node) __assume_page_align
* Try really hard to succeed the allocation but fail
* eventually.
*/
#ifndef CONFIG_SLOB
static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
{
if (__builtin_constant_p(size) && size) {
......@@ -583,17 +563,7 @@ static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
}
return __kmalloc(size, flags);
}
#else
static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
{
if (__builtin_constant_p(size) && size > KMALLOC_MAX_CACHE_SIZE)
return kmalloc_large(size, flags);
return __kmalloc(size, flags);
}
#endif
#ifndef CONFIG_SLOB
static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
{
if (__builtin_constant_p(size) && size) {
......@@ -609,15 +579,6 @@ static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t fla
}
return __kmalloc_node(size, flags, node);
}
#else
static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
{
if (__builtin_constant_p(size) && size > KMALLOC_MAX_CACHE_SIZE)
return kmalloc_large_node(size, flags, node);
return __kmalloc_node(size, flags, node);
}
#endif
/**
* kmalloc_array - allocate memory for an array.
......
......@@ -945,7 +945,7 @@ config MEMCG
config MEMCG_KMEM
bool
depends on MEMCG && !SLOB
depends on MEMCG
default y
config BLK_CGROUP
......
......@@ -7,6 +7,5 @@ CONFIG_KERNEL_XZ=y
# CONFIG_KERNEL_LZO is not set
# CONFIG_KERNEL_LZ4 is not set
# CONFIG_SLAB is not set
# CONFIG_SLOB_DEPRECATED is not set
CONFIG_SLUB=y
CONFIG_SLUB_TINY=y
......@@ -238,30 +238,8 @@ config SLUB
and has enhanced diagnostics. SLUB is the default choice for
a slab allocator.
config SLOB_DEPRECATED
depends on EXPERT
bool "SLOB (Simple Allocator - DEPRECATED)"
depends on !PREEMPT_RT
help
Deprecated and scheduled for removal in a few cycles. SLUB
recommended as replacement. CONFIG_SLUB_TINY can be considered
on systems with 16MB or less RAM.
If you need SLOB to stay, please contact linux-mm@kvack.org and
people listed in the SLAB ALLOCATOR section of MAINTAINERS file,
with your use case.
SLOB replaces the stock allocator with a drastically simpler
allocator. SLOB is generally more space efficient but
does not perform as well on large systems.
endchoice
config SLOB
bool
default y
depends on SLOB_DEPRECATED
config SLUB_TINY
bool "Configure SLUB for minimal memory footprint"
depends on SLUB && EXPERT
......
......@@ -60,9 +60,9 @@ config SLUB_DEBUG
select STACKDEPOT if STACKTRACE_SUPPORT
help
SLUB has extensive debug support features. Disabling these can
result in significant savings in code size. This also disables
SLUB sysfs support. /sys/slab will not exist and there will be
no support for cache validation etc.
result in significant savings in code size. While /sys/kernel/slab
will still exist (with SYSFS enabled), it will not provide e.g. cache
validation.
config SLUB_DEBUG_ON
bool "SLUB debugging on by default"
......
......@@ -22,7 +22,6 @@ KCSAN_INSTRUMENT_BARRIERS := y
# flaky coverage that is not a function of syscall inputs. E.g. slab is out of
# free pages, or a task is migrated between nodes.
KCOV_INSTRUMENT_slab_common.o := n
KCOV_INSTRUMENT_slob.o := n
KCOV_INSTRUMENT_slab.o := n
KCOV_INSTRUMENT_slub.o := n
KCOV_INSTRUMENT_page_alloc.o := n
......@@ -81,7 +80,6 @@ obj-$(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP) += hugetlb_vmemmap.o
obj-$(CONFIG_NUMA) += mempolicy.o
obj-$(CONFIG_SPARSEMEM) += sparse.o
obj-$(CONFIG_SPARSEMEM_VMEMMAP) += sparse-vmemmap.o
obj-$(CONFIG_SLOB) += slob.o
obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o
obj-$(CONFIG_KSM) += ksm.o
obj-$(CONFIG_PAGE_POISONING) += page_poison.o
......
......@@ -51,14 +51,6 @@ struct slab {
};
unsigned int __unused;
#elif defined(CONFIG_SLOB)
struct list_head slab_list;
void *__unused_1;
void *freelist; /* first free block */
long units;
unsigned int __unused_2;
#else
#error "Unexpected slab allocator configured"
#endif
......@@ -72,11 +64,7 @@ struct slab {
#define SLAB_MATCH(pg, sl) \
static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
SLAB_MATCH(flags, __page_flags);
#ifndef CONFIG_SLOB
SLAB_MATCH(compound_head, slab_cache); /* Ensure bit 0 is clear */
#else
SLAB_MATCH(compound_head, slab_list); /* Ensure bit 0 is clear */
#endif
SLAB_MATCH(_refcount, __page_refcount);
#ifdef CONFIG_MEMCG
SLAB_MATCH(memcg_data, memcg_data);
......@@ -200,31 +188,6 @@ static inline size_t slab_size(const struct slab *slab)
return PAGE_SIZE << slab_order(slab);
}
#ifdef CONFIG_SLOB
/*
* Common fields provided in kmem_cache by all slab allocators
* This struct is either used directly by the allocator (SLOB)
* or the allocator must include definitions for all fields
* provided in kmem_cache_common in their definition of kmem_cache.
*
* Once we can do anonymous structs (C11 standard) we could put a
* anonymous struct definition in these allocators so that the
* separate allocations in the kmem_cache structure of SLAB and
* SLUB is no longer needed.
*/
struct kmem_cache {
unsigned int object_size;/* The original size of the object */
unsigned int size; /* The aligned/padded/added on size */
unsigned int align; /* Alignment as calculated */
slab_flags_t flags; /* Active flags on the slab */
const char *name; /* Slab name for sysfs */
int refcount; /* Use counter */
void (*ctor)(void *); /* Called on object slot creation */
struct list_head list; /* List of all slab caches on the system */
};
#endif /* CONFIG_SLOB */
#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif
......@@ -274,7 +237,6 @@ extern const struct kmalloc_info_struct {
unsigned int size;
} kmalloc_info[];
#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);
......@@ -286,7 +248,6 @@ void *__kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags,
int node, size_t orig_size,
unsigned long caller);
void __kmem_cache_free(struct kmem_cache *s, void *x, unsigned long caller);
#endif
gfp_t kmalloc_fix_flags(gfp_t flags);
......@@ -303,33 +264,16 @@ extern void create_boot_cache(struct kmem_cache *, const char *name,
int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
slab_flags_t flags, const char *name, void (*ctor)(void *));
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
slab_flags_t flags, void (*ctor)(void *));
slab_flags_t kmem_cache_flags(unsigned int object_size,
slab_flags_t flags, const char *name);
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
slab_flags_t flags, void (*ctor)(void *))
{ return NULL; }
static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
slab_flags_t flags, const char *name)
{
return flags;
}
#endif
static inline bool is_kmalloc_cache(struct kmem_cache *s)
{
#ifndef CONFIG_SLOB
return (s->flags & SLAB_KMALLOC);
#else
return false;
#endif
}
/* Legal flag mask for kmem_cache_create(), for various configurations */
......@@ -634,7 +578,6 @@ static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
}
#endif /* CONFIG_MEMCG_KMEM */
#ifndef CONFIG_SLOB
static inline struct kmem_cache *virt_to_cache(const void *obj)
{
struct slab *slab;
......@@ -684,8 +627,6 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
void free_large_kmalloc(struct folio *folio, void *object);
#endif /* CONFIG_SLOB */
size_t __ksize(const void *objp);
static inline size_t slab_ksize(const struct kmem_cache *s)
......@@ -777,7 +718,6 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s,
memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
}
#ifndef CONFIG_SLOB
/*
* The slab lists for all objects.
*/
......@@ -824,7 +764,6 @@ static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
for (__node = 0; __node < nr_node_ids; __node++) \
if ((__n = get_node(__s, __node)))
#endif
#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
......
......@@ -625,7 +625,6 @@ void kmem_dump_obj(void *object)
EXPORT_SYMBOL_GPL(kmem_dump_obj);
#endif
#ifndef CONFIG_SLOB
/* Create a cache during boot when no slab services are available yet */
void __init create_boot_cache(struct kmem_cache *s, const char *name,
unsigned int size, slab_flags_t flags,
......@@ -990,12 +989,9 @@ EXPORT_SYMBOL(__kmalloc_node_track_caller);
/**
* kfree - free previously allocated memory
* @object: pointer returned by kmalloc.
* @object: pointer returned by kmalloc() or kmem_cache_alloc()
*
* If @object is NULL, no operation is performed.
*
* Don't free memory not originally allocated by kmalloc()
* or you will run into trouble.
*/
void kfree(const void *object)
{
......@@ -1079,7 +1075,6 @@ void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
return ret;
}
EXPORT_SYMBOL(kmalloc_node_trace);
#endif /* !CONFIG_SLOB */
gfp_t kmalloc_fix_flags(gfp_t flags)
{
......
......@@ -6059,7 +6059,7 @@ static const struct sysfs_ops slab_sysfs_ops = {
.store = slab_attr_store,
};
static struct kobj_type slab_ktype = {
static const struct kobj_type slab_ktype = {
.sysfs_ops = &slab_sysfs_ops,
.release = kmem_cache_release,
};
......
......@@ -85,7 +85,6 @@
*/
#define KPF_ANON_EXCLUSIVE 47
#define KPF_READAHEAD 48
#define KPF_SLOB_FREE 49
#define KPF_SLUB_FROZEN 50
#define KPF_SLUB_DEBUG 51
#define KPF_FILE 61
......@@ -141,7 +140,6 @@ static const char * const page_flag_names[] = {
[KPF_ANON_EXCLUSIVE] = "d:anon_exclusive",
[KPF_READAHEAD] = "I:readahead",
[KPF_SLOB_FREE] = "P:slob_free",
[KPF_SLUB_FROZEN] = "A:slub_frozen",
[KPF_SLUB_DEBUG] = "E:slub_debug",
......@@ -478,10 +476,8 @@ static uint64_t expand_overloaded_flags(uint64_t flags, uint64_t pme)
if ((flags & BIT(ANON)) && (flags & BIT(MAPPEDTODISK)))
flags ^= BIT(MAPPEDTODISK) | BIT(ANON_EXCLUSIVE);
/* SLOB/SLUB overload several page flags */
/* SLUB overloads several page flags */
if (flags & BIT(SLAB)) {
if (flags & BIT(PRIVATE))
flags ^= BIT(PRIVATE) | BIT(SLOB_FREE);
if (flags & BIT(ACTIVE))
flags ^= BIT(ACTIVE) | BIT(SLUB_FROZEN);
if (flags & BIT(ERROR))
......