slub.c 177 KB
Newer Older
1
// SPDX-License-Identifier: GPL-2.0
Christoph Lameter's avatar
Christoph Lameter committed
2 3 4 5
/*
 * SLUB: A slab allocator that limits cache line use instead of queuing
 * objects in per cpu and per node lists.
 *
6
 * The allocator synchronizes using per slab locks or atomic operations
7
 * and only uses a centralized lock to manage a pool of partial slabs.
Christoph Lameter's avatar
Christoph Lameter committed
8
 *
Christoph Lameter's avatar
Christoph Lameter committed
9
 * (C) 2007 SGI, Christoph Lameter
10
 * (C) 2011 Linux Foundation, Christoph Lameter
Christoph Lameter's avatar
Christoph Lameter committed
11 12 13
 */

#include <linux/mm.h>
14
#include <linux/swap.h> /* mm_account_reclaimed_pages() */
Christoph Lameter's avatar
Christoph Lameter committed
15 16 17
#include <linux/module.h>
#include <linux/bit_spinlock.h>
#include <linux/interrupt.h>
Andrew Morton's avatar
Andrew Morton committed
18
#include <linux/swab.h>
Christoph Lameter's avatar
Christoph Lameter committed
19 20
#include <linux/bitops.h>
#include <linux/slab.h>
21
#include "slab.h"
22
#include <linux/proc_fs.h>
Christoph Lameter's avatar
Christoph Lameter committed
23
#include <linux/seq_file.h>
24
#include <linux/kasan.h>
25
#include <linux/kmsan.h>
Christoph Lameter's avatar
Christoph Lameter committed
26 27 28 29
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/mempolicy.h>
#include <linux/ctype.h>
30
#include <linux/stackdepot.h>
31
#include <linux/debugobjects.h>
Christoph Lameter's avatar
Christoph Lameter committed
32
#include <linux/kallsyms.h>
33
#include <linux/kfence.h>
34
#include <linux/memory.h>
Roman Zippel's avatar
Roman Zippel committed
35
#include <linux/math64.h>
Akinobu Mita's avatar
Akinobu Mita committed
36
#include <linux/fault-inject.h>
37
#include <linux/kmemleak.h>
38
#include <linux/stacktrace.h>
39
#include <linux/prefetch.h>
40
#include <linux/memcontrol.h>
41
#include <linux/random.h>
42
#include <kunit/test.h>
43
#include <kunit/test-bug.h>
44
#include <linux/sort.h>
Christoph Lameter's avatar
Christoph Lameter committed
45

46
#include <linux/debugfs.h>
47 48
#include <trace/events/kmem.h>

49 50
#include "internal.h"

Christoph Lameter's avatar
Christoph Lameter committed
51 52
/*
 * Lock order:
53
 *   1. slab_mutex (Global Mutex)
54 55
 *   2. node->list_lock (Spinlock)
 *   3. kmem_cache->cpu_slab->lock (Local lock)
56
 *   4. slab_lock(slab) (Only on some arches)
57
 *   5. object_map_lock (Only for debugging)
Christoph Lameter's avatar
Christoph Lameter committed
58
 *
59
 *   slab_mutex
60
 *
61
 *   The role of the slab_mutex is to protect the list of all the slabs
62
 *   and to synchronize major metadata changes to slab cache structures.
63 64 65 66 67 68
 *   Also synchronizes memory hotplug callbacks.
 *
 *   slab_lock
 *
 *   The slab_lock is a wrapper around the page lock, thus it is a bit
 *   spinlock.
69
 *
70 71 72
 *   The slab_lock is only used on arches that do not have the ability
 *   to do a cmpxchg_double. It only protects:
 *
73 74 75 76
 *	A. slab->freelist	-> List of free objects in a slab
 *	B. slab->inuse		-> Number of objects in use
 *	C. slab->objects	-> Number of objects in slab
 *	D. slab->frozen		-> frozen state
77
 *
78 79
 *   Frozen slabs
 *
80 81 82
 *   If a slab is frozen then it is exempt from list management. It is
 *   the cpu slab which is actively allocated from by the processor that
 *   froze it and it is not on any list. The processor that froze the
83
 *   slab is the one who can perform list operations on the slab. Other
84 85
 *   processors may put objects onto the freelist but the processor that
 *   froze the slab is the only one that can retrieve the objects from the
86
 *   slab's freelist.
Christoph Lameter's avatar
Christoph Lameter committed
87
 *
88 89 90 91 92 93 94 95 96 97 98 99 100 101
 *   CPU partial slabs
 *
 *   The partially empty slabs cached on the CPU partial list are used
 *   for performance reasons, which speeds up the allocation process.
 *   These slabs are not frozen, but are also exempt from list management,
 *   by clearing the PG_workingset flag when moving out of the node
 *   partial list. Please see __slab_free() for more details.
 *
 *   To sum up, the current scheme is:
 *   - node partial slab: PG_Workingset && !frozen
 *   - cpu partial slab: !PG_Workingset && !frozen
 *   - cpu slab: !PG_Workingset && frozen
 *   - full slab: !PG_Workingset && !frozen
 *
102 103
 *   list_lock
 *
Christoph Lameter's avatar
Christoph Lameter committed
104 105 106 107 108 109 110 111 112 113 114
 *   The list_lock protects the partial and full list on each node and
 *   the partial slab counter. If taken then no new slabs may be added or
 *   removed from the lists nor make the number of partial slabs be modified.
 *   (Note that the total number of slabs is an atomic value that may be
 *   modified without taking the list lock).
 *
 *   The list_lock is a centralized lock and thus we avoid taking it as
 *   much as possible. As long as SLUB does not have to handle partial
 *   slabs, operations can continue without any centralized lock. F.e.
 *   allocating a long series of objects that fill up slabs does not require
 *   the list lock.
115
 *
116 117 118
 *   For debug caches, all allocations are forced to go through a list_lock
 *   protected region to serialize against concurrent validation.
 *
119 120 121 122 123 124
 *   cpu_slab->lock local lock
 *
 *   This locks protect slowpath manipulation of all kmem_cache_cpu fields
 *   except the stat counters. This is a percpu structure manipulated only by
 *   the local cpu, so the lock protects against being preempted or interrupted
 *   by an irq. Fast path operations rely on lockless operations instead.
125 126 127 128 129
 *
 *   On PREEMPT_RT, the local lock neither disables interrupts nor preemption
 *   which means the lockless fastpath cannot be used as it might interfere with
 *   an in-progress slow path operations. In this case the local lock is always
 *   taken but it still utilizes the freelist for the common operations.
130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149
 *
 *   lockless fastpaths
 *
 *   The fast path allocation (slab_alloc_node()) and freeing (do_slab_free())
 *   are fully lockless when satisfied from the percpu slab (and when
 *   cmpxchg_double is possible to use, otherwise slab_lock is taken).
 *   They also don't disable preemption or migration or irqs. They rely on
 *   the transaction id (tid) field to detect being preempted or moved to
 *   another cpu.
 *
 *   irq, preemption, migration considerations
 *
 *   Interrupts are disabled as part of list_lock or local_lock operations, or
 *   around the slab_lock operation, in order to make the slab allocator safe
 *   to use in the context of an irq.
 *
 *   In addition, preemption (or migration on PREEMPT_RT) is disabled in the
 *   allocation slowpath, bulk allocation, and put_cpu_partial(), so that the
 *   local cpu doesn't change in the process and e.g. the kmem_cache_cpu pointer
 *   doesn't have to be revalidated in each section protected by the local lock.
Christoph Lameter's avatar
Christoph Lameter committed
150 151 152 153
 *
 * SLUB assigns one slab for allocation to each processor.
 * Allocations only occur from these slabs called cpu slabs.
 *
Christoph Lameter's avatar
Christoph Lameter committed
154 155
 * Slabs with free elements are kept on a partial list and during regular
 * operations no list for full slabs is used. If an object in a full slab is
Christoph Lameter's avatar
Christoph Lameter committed
156
 * freed then the slab will show up again on the partial lists.
Christoph Lameter's avatar
Christoph Lameter committed
157 158
 * We track full slabs for debugging purposes though because otherwise we
 * cannot scan all objects.
Christoph Lameter's avatar
Christoph Lameter committed
159 160 161 162 163
 *
 * Slabs are freed when they become empty. Teardown and setup is
 * minimal so we rely on the page allocators per cpu caches for
 * fast frees and allocs.
 *
164
 * slab->frozen		The slab is frozen and exempt from list processing.
165 166 167 168 169 170 171 172 173 174 175
 * 			This means that the slab is dedicated to a purpose
 * 			such as satisfying allocations for a specific
 * 			processor. Objects may be freed in the slab while
 * 			it is frozen but slab_free will then skip the usual
 * 			list operations. It is up to the processor holding
 * 			the slab to integrate the slab into the slab lists
 * 			when the slab is no longer needed.
 *
 * 			One use of this flag is to mark slabs that are
 * 			used for allocations. Then such a slab becomes a cpu
 * 			slab. The cpu slab may be equipped with an additional
176
 * 			freelist that allows lockless access to
177 178
 * 			free objects in addition to the regular freelist
 * 			that requires the slab lock.
Christoph Lameter's avatar
Christoph Lameter committed
179
 *
Yu Zhao's avatar
Yu Zhao committed
180
 * SLAB_DEBUG_FLAGS	Slab requires special handling due to debug
Christoph Lameter's avatar
Christoph Lameter committed
181
 * 			options set. This moves	slab handling out of
182
 * 			the fast path and disables lockless freelists.
Christoph Lameter's avatar
Christoph Lameter committed
183 184
 */

185 186 187 188 189
/*
 * We could simply use migrate_disable()/enable() but as long as it's a
 * function call even on !PREEMPT_RT, use inline preempt_disable() there.
 */
#ifndef CONFIG_PREEMPT_RT
190 191 192
#define slub_get_cpu_ptr(var)		get_cpu_ptr(var)
#define slub_put_cpu_ptr(var)		put_cpu_ptr(var)
#define USE_LOCKLESS_FAST_PATH()	(true)
193 194 195 196 197 198 199 200 201 202 203
#else
#define slub_get_cpu_ptr(var)		\
({					\
	migrate_disable();		\
	this_cpu_ptr(var);		\
})
#define slub_put_cpu_ptr(var)		\
do {					\
	(void)(var);			\
	migrate_enable();		\
} while (0)
204
#define USE_LOCKLESS_FAST_PATH()	(false)
205 206
#endif

207 208 209 210 211 212
#ifndef CONFIG_SLUB_TINY
#define __fastpath_inline __always_inline
#else
#define __fastpath_inline
#endif

213 214 215 216 217 218
#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DEFINE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DEFINE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
219
#endif		/* CONFIG_SLUB_DEBUG */
220

221 222 223 224
/* Structure holding parameters for get_partial() call chain */
struct partial_context {
	gfp_t flags;
	unsigned int orig_size;
225
	void *object;
226 227
};

228 229 230
static inline bool kmem_cache_debug(struct kmem_cache *s)
{
	return kmem_cache_debug_flags(s, SLAB_DEBUG_FLAGS);
231
}
232

233 234 235 236 237 238
static inline bool slub_debug_orig_size(struct kmem_cache *s)
{
	return (kmem_cache_debug_flags(s, SLAB_STORE_USER) &&
			(s->flags & SLAB_KMALLOC));
}

239
void *fixup_red_left(struct kmem_cache *s, void *p)
240
{
241
	if (kmem_cache_debug_flags(s, SLAB_RED_ZONE))
242 243 244 245 246
		p += s->red_left_pad;

	return p;
}

247 248 249 250 251 252 253 254 255
/*
 * Per-cpu partial slab lists are only used when CONFIG_SLUB_CPU_PARTIAL is
 * enabled and the cache is not a debug cache (debug caches bypass the
 * cpu partial handling).
 */
static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_CPU_PARTIAL
	return !kmem_cache_debug(s);
#else
	return false;
#endif
}

Christoph Lameter's avatar
Christoph Lameter committed
256 257 258 259 260 261 262 263
/*
 * Issues still to be resolved:
 *
 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
 *
 * - Variable sizing of the per node arrays
 */

264 265 266
/* Enable to log cmpxchg failures */
#undef SLUB_DEBUG_CMPXCHG

267
#ifndef CONFIG_SLUB_TINY
268
/*
269
 * Minimum number of partial slabs. These will be left on the partial
270 271
 * lists even if they are empty. kmem_cache_shrink may reclaim them.
 */
272
#define MIN_PARTIAL 5
Christoph Lameter's avatar
Christoph Lameter committed
273

274 275 276
/*
 * Maximum number of desirable partial slabs.
 * The existence of more partial slabs makes kmem_cache_shrink
277
 * sort the partial list by the number of objects in use.
278 279
 */
#define MAX_PARTIAL 10
280 281 282 283
#else
#define MIN_PARTIAL 0
#define MAX_PARTIAL 0
#endif
284

285
#define DEBUG_DEFAULT_FLAGS (SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE | \
Christoph Lameter's avatar
Christoph Lameter committed
286
				SLAB_POISON | SLAB_STORE_USER)
Christoph Lameter's avatar
Christoph Lameter committed
287

288 289 290 291 292 293 294 295
/*
 * These debug flags cannot use CMPXCHG because there might be consistency
 * issues when checking or reading debug information
 */
#define SLAB_NO_CMPXCHG (SLAB_CONSISTENCY_CHECKS | SLAB_STORE_USER | \
				SLAB_TRACE)


296
/*
297
 * Debugging flags that require metadata to be stored in the slab.  These get
298
 * disabled when slab_debug=O is used and a cache's min order increases with
299
 * metadata.
300
 */
301
#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
302

303 304
#define OO_SHIFT	16
#define OO_MASK		((1 << OO_SHIFT) - 1)
305
#define MAX_OBJS_PER_PAGE	32767 /* since slab.objects is u15 */
306

Christoph Lameter's avatar
Christoph Lameter committed
307
/* Internal SLUB flags */
308
/* Poison object */
309
#define __OBJECT_POISON		__SLAB_FLAG_BIT(_SLAB_OBJECT_POISON)
310
/* Use cmpxchg_double */
311 312

#ifdef system_has_freelist_aba
313
#define __CMPXCHG_DOUBLE	__SLAB_FLAG_BIT(_SLAB_CMPXCHG_DOUBLE)
314
#else
315
#define __CMPXCHG_DOUBLE	__SLAB_FLAG_UNUSED
316
#endif
Christoph Lameter's avatar
Christoph Lameter committed
317

318 319 320
/*
 * Tracking user of a slab.
 */
321
#define TRACK_ADDRS_COUNT 16
322
struct track {
323
	unsigned long addr;	/* Called from address */
324 325
#ifdef CONFIG_STACKDEPOT
	depot_stack_handle_t handle;
326
#endif
327 328 329 330 331 332 333
	int cpu;		/* Was running on cpu */
	int pid;		/* Pid context */
	unsigned long when;	/* When did the operation occur */
};

enum track_item { TRACK_ALLOC, TRACK_FREE };

334
#ifdef SLAB_SUPPORTS_SYSFS
Christoph Lameter's avatar
Christoph Lameter committed
335 336 337
static int sysfs_slab_add(struct kmem_cache *);
static int sysfs_slab_alias(struct kmem_cache *, const char *);
#else
338 339 340
static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
							{ return 0; }
Christoph Lameter's avatar
Christoph Lameter committed
341 342
#endif

343 344 345 346 347 348
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
static void debugfs_slab_add(struct kmem_cache *);
#else
static inline void debugfs_slab_add(struct kmem_cache *s) { }
#endif

349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393
/*
 * Event counters, kept per cpu in kmem_cache_cpu::stat when
 * CONFIG_SLUB_STATS is enabled (see stat()/stat_add() below).
 */
enum stat_item {
	ALLOC_FASTPATH,		/* Allocation from cpu slab */
	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
	FREE_FASTPATH,		/* Free to cpu slab */
	FREE_SLOWPATH,		/* Freeing not to cpu slab */
	FREE_FROZEN,		/* Freeing to frozen slab */
	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from node partial list */
	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
	ALLOC_NODE_MISMATCH,	/* Switching cpu slab */
	FREE_SLAB,		/* Slab freed to the page allocator */
	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
	DEACTIVATE_BYPASS,	/* Implicit deactivation */
	ORDER_FALLBACK,		/* Number of times fallback was necessary */
	CMPXCHG_DOUBLE_CPU_FAIL,/* Failures of this_cpu_cmpxchg_double */
	CMPXCHG_DOUBLE_FAIL,	/* Failures of slab freelist update */
	CPU_PARTIAL_ALLOC,	/* Used cpu partial on alloc */
	CPU_PARTIAL_FREE,	/* Refill cpu partial on free */
	CPU_PARTIAL_NODE,	/* Refill cpu partial from node partial */
	CPU_PARTIAL_DRAIN,	/* Drain cpu partial to node partial */
	NR_SLUB_STAT_ITEMS
};

#ifndef CONFIG_SLUB_TINY
/*
 * When changing the layout, make sure freelist and tid are still compatible
 * with this_cpu_cmpxchg_double() alignment requirements.
 */
struct kmem_cache_cpu {
	union {
		struct {
			void **freelist;	/* Pointer to next available object */
			unsigned long tid;	/* Globally unique transaction id */
		};
		freelist_aba_t freelist_tid;
	};
	struct slab *slab;	/* The slab from which we are allocating */
#ifdef CONFIG_SLUB_CPU_PARTIAL
394
	struct slab *partial;	/* Partially allocated slabs */
395 396 397 398 399 400 401 402
#endif
	local_lock_t lock;	/* Protects the fields above */
#ifdef CONFIG_SLUB_STATS
	unsigned int stat[NR_SLUB_STAT_ITEMS];
#endif
};
#endif /* CONFIG_SLUB_TINY */

403
static inline void stat(const struct kmem_cache *s, enum stat_item si)
404 405
{
#ifdef CONFIG_SLUB_STATS
406 407 408 409 410
	/*
	 * The rmw is racy on a preemptible kernel but this is acceptable, so
	 * avoid this_cpu_add()'s irq-disable overhead.
	 */
	raw_cpu_inc(s->cpu_slab->stat[si]);
411 412 413
#endif
}

414 415 416 417 418 419 420 421
/*
 * Add @v to the per-cpu statistics counter @si. As with stat(), the
 * read-modify-write via raw_cpu_add() is racy on a preemptible kernel,
 * which is tolerated to avoid this_cpu_add()'s irq-disable overhead.
 */
static inline
void stat_add(const struct kmem_cache *s, enum stat_item si, int v)
{
#ifdef CONFIG_SLUB_STATS
	raw_cpu_add(s->cpu_slab->stat[si], v);
#endif
}

422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;		/* Protects the lists and nr_partial */
	unsigned long nr_partial;	/* Number of slabs on the partial list */
	struct list_head partial;	/* Partially allocated slabs */
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;		/* Total slabs; atomic, see lock order
					 * comment at the top of this file */
	atomic_long_t total_objects;	/* Total objects in those slabs */
	struct list_head full;		/* Fully allocated slabs (debug only) */
#endif
};

/* Return the per-node structure for @node; may be NULL for offline nodes. */
static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))

449 450 451 452 453 454 455 456
/*
 * Tracks for which NUMA nodes we have kmem_cache_nodes allocated.
 * Corresponds to node_state[N_NORMAL_MEMORY], but can temporarily
 * differ during memory hotplug/hotremove operations.
 * Protected by slab_mutex.
 */
static nodemask_t slab_nodes;

457
#ifndef CONFIG_SLUB_TINY
458 459 460 461
/*
 * Workqueue used for flush_cpu_slab().
 */
static struct workqueue_struct *flushwq;
462
#endif
463

Christoph Lameter's avatar
Christoph Lameter committed
464 465 466 467
/********************************************************************
 * 			Core slab cache functions
 *******************************************************************/

468 469 470 471 472 473
/*
 * freeptr_t represents a SLUB freelist pointer, which might be encoded
 * and not dereferenceable if CONFIG_SLAB_FREELIST_HARDENED is enabled.
 */
typedef struct { unsigned long v; } freeptr_t;

474 475 476 477 478
/*
 * Returns freelist pointer (ptr). With hardening, this is obfuscated
 * with an XOR of the address where the pointer is held and a per-cache
 * random number.
 */
479 480
static inline freeptr_t freelist_ptr_encode(const struct kmem_cache *s,
					    void *ptr, unsigned long ptr_addr)
481
{
482 483
	unsigned long encoded;

484
#ifdef CONFIG_SLAB_FREELIST_HARDENED
485
	encoded = (unsigned long)ptr ^ s->random ^ swab(ptr_addr);
486
#else
487
	encoded = (unsigned long)ptr;
488
#endif
489
	return (freeptr_t){.v = encoded};
490 491 492 493 494 495 496 497
}

static inline void *freelist_ptr_decode(const struct kmem_cache *s,
					freeptr_t ptr, unsigned long ptr_addr)
{
	void *decoded;

#ifdef CONFIG_SLAB_FREELIST_HARDENED
498
	decoded = (void *)(ptr.v ^ s->random ^ swab(ptr_addr));
499
#else
500
	decoded = (void *)ptr.v;
501
#endif
502
	return decoded;
503 504
}

505 506
static inline void *get_freepointer(struct kmem_cache *s, void *object)
{
507 508 509
	unsigned long ptr_addr;
	freeptr_t p;

510
	object = kasan_reset_tag(object);
511 512 513
	ptr_addr = (unsigned long)object + s->offset;
	p = *(freeptr_t *)(ptr_addr);
	return freelist_ptr_decode(s, p, ptr_addr);
514 515
}

516
#ifndef CONFIG_SLUB_TINY
517 518
static void prefetch_freepointer(const struct kmem_cache *s, void *object)
{
519
	prefetchw(object + s->offset);
520
}
521
#endif
522

523 524 525 526 527 528 529 530 531 532 533
/*
 * When running under KMSAN, get_freepointer_safe() may return an uninitialized
 * pointer value in the case the current thread loses the race for the next
 * memory chunk in the freelist. In that case this_cpu_cmpxchg_double() in
 * slab_alloc_node() will fail, so the uninitialized value won't be used, but
 * KMSAN will still check all arguments of cmpxchg because of imperfect
 * handling of inline assembly.
 * To work around this problem, we apply __no_kmsan_checks to ensure that
 * get_freepointer_safe() returns initialized memory.
 */
__no_kmsan_checks
534 535
static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
{
536
	unsigned long freepointer_addr;
537
	freeptr_t p;
538

539
	if (!debug_pagealloc_enabled_static())
540 541
		return get_freepointer(s, object);

542
	object = kasan_reset_tag(object);
543
	freepointer_addr = (unsigned long)object + s->offset;
544 545
	copy_from_kernel_nofault(&p, (freeptr_t *)freepointer_addr, sizeof(p));
	return freelist_ptr_decode(s, p, freepointer_addr);
546 547
}

548 549
static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
{
550 551
	unsigned long freeptr_addr = (unsigned long)object + s->offset;

552 553 554 555
#ifdef CONFIG_SLAB_FREELIST_HARDENED
	BUG_ON(object == fp); /* naive detection of double free or corruption */
#endif

556
	freeptr_addr = (unsigned long)kasan_reset_tag((void *)freeptr_addr);
557
	*(freeptr_t *)freeptr_addr = freelist_ptr_encode(s, fp, freeptr_addr);
558 559 560
}

/* Loop over all objects in a slab */
561
#define for_each_object(__p, __s, __addr, __objects) \
562 563 564
	for (__p = fixup_red_left(__s, __addr); \
		__p < (__addr) + (__objects) * (__s)->size; \
		__p += (__s)->size)
565

566
static inline unsigned int order_objects(unsigned int order, unsigned int size)
567
{
568
	return ((unsigned int)PAGE_SIZE << order) / size;
569 570
}

571
static inline struct kmem_cache_order_objects oo_make(unsigned int order,
572
		unsigned int size)
573 574
{
	struct kmem_cache_order_objects x = {
575
		(order << OO_SHIFT) + order_objects(order, size)
576 577 578 579 580
	};

	return x;
}

581
static inline unsigned int oo_order(struct kmem_cache_order_objects x)
582
{
583
	return x.x >> OO_SHIFT;
584 585
}

586
static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
587
{
588
	return x.x & OO_MASK;
589 590
}

591 592 593
#ifdef CONFIG_SLUB_CPU_PARTIAL
static void slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects)
{
594
	unsigned int nr_slabs;
595 596 597 598 599

	s->cpu_partial = nr_objects;

	/*
	 * We take the number of objects but actually limit the number of
600 601
	 * slabs on the per cpu partial list, in order to limit excessive
	 * growth of the list. For simplicity we assume that the slabs will
602 603
	 * be half-full.
	 */
604 605
	nr_slabs = DIV_ROUND_UP(nr_objects * 2, oo_objects(s->oo));
	s->cpu_partial_slabs = nr_slabs;
606 607 608 609 610 611 612 613
}
#else
static inline void
slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects)
{
}
#endif /* CONFIG_SLUB_CPU_PARTIAL */

614 615 616
/*
 * Per slab locking using the pagelock
 */
617
static __always_inline void slab_lock(struct slab *slab)
618
{
619 620
	struct page *page = slab_page(slab);

621
	VM_BUG_ON_PAGE(PageTail(page), page);
622 623 624
	bit_spin_lock(PG_locked, &page->flags);
}

625
static __always_inline void slab_unlock(struct slab *slab)
626
{
627 628
	struct page *page = slab_page(slab);

629
	VM_BUG_ON_PAGE(PageTail(page), page);
630
	bit_spin_unlock(PG_locked, &page->flags);
631 632
}

633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666
/*
 * Lockless update of a slab's freelist and counters words using a
 * double-word cmpxchg. Returns true on success. Always returns false
 * when the architecture does not provide system_has_freelist_aba, so
 * callers must fall back to __update_freelist_slow() in that case.
 */
static inline bool
__update_freelist_fast(struct slab *slab,
		      void *freelist_old, unsigned long counters_old,
		      void *freelist_new, unsigned long counters_new)
{
#ifdef system_has_freelist_aba
	freelist_aba_t old = { .freelist = freelist_old, .counter = counters_old };
	freelist_aba_t new = { .freelist = freelist_new, .counter = counters_new };

	return try_cmpxchg_freelist(&slab->freelist_counter.full, &old.full, new.full);
#else
	return false;
#endif
}

/*
 * Fallback freelist/counters update protected by the per-slab bit
 * spinlock. Both words are compared against the expected old values and
 * replaced only if they still match, emulating the double-word cmpxchg
 * of __update_freelist_fast(). Returns true if the swap happened.
 */
static inline bool
__update_freelist_slow(struct slab *slab,
		      void *freelist_old, unsigned long counters_old,
		      void *freelist_new, unsigned long counters_new)
{
	bool ret = false;

	slab_lock(slab);
	if (slab->freelist == freelist_old &&
	    slab->counters == counters_old) {
		slab->freelist = freelist_new;
		slab->counters = counters_new;
		ret = true;
	}
	slab_unlock(slab);

	return ret;
}

667 668
/*
 * Interrupts must be disabled (for the fallback code to work right), typically
669 670 671 672
 * by an _irqsave() lock variant. On PREEMPT_RT the preempt_disable(), which is
 * part of bit_spin_lock(), is sufficient because the policy is not to allow any
 * allocation/ free operation in hardirq context. Therefore nothing can
 * interrupt the operation.
673
 */
674
static inline bool __slab_update_freelist(struct kmem_cache *s, struct slab *slab,
675 676 677 678
		void *freelist_old, unsigned long counters_old,
		void *freelist_new, unsigned long counters_new,
		const char *n)
{
679 680
	bool ret;

681
	if (USE_LOCKLESS_FAST_PATH())
682
		lockdep_assert_irqs_disabled();
683

684
	if (s->flags & __CMPXCHG_DOUBLE) {
685 686 687 688 689
		ret = __update_freelist_fast(slab, freelist_old, counters_old,
				            freelist_new, counters_new);
	} else {
		ret = __update_freelist_slow(slab, freelist_old, counters_old,
				            freelist_new, counters_new);
690
	}
691 692
	if (likely(ret))
		return true;
693 694 695 696 697

	cpu_relax();
	stat(s, CMPXCHG_DOUBLE_FAIL);

#ifdef SLUB_DEBUG_CMPXCHG
698
	pr_info("%s %s: cmpxchg double redo ", n, s->name);
699 700
#endif

701
	return false;
702 703
}

704
static inline bool slab_update_freelist(struct kmem_cache *s, struct slab *slab,
705 706 707 708
		void *freelist_old, unsigned long counters_old,
		void *freelist_new, unsigned long counters_new,
		const char *n)
{
709 710
	bool ret;

711
	if (s->flags & __CMPXCHG_DOUBLE) {
712 713 714
		ret = __update_freelist_fast(slab, freelist_old, counters_old,
				            freelist_new, counters_new);
	} else {
715 716 717
		unsigned long flags;

		local_irq_save(flags);
718 719
		ret = __update_freelist_slow(slab, freelist_old, counters_old,
				            freelist_new, counters_new);
720
		local_irq_restore(flags);
721
	}
722 723
	if (likely(ret))
		return true;
724 725 726 727 728

	cpu_relax();
	stat(s, CMPXCHG_DOUBLE_FAIL);

#ifdef SLUB_DEBUG_CMPXCHG
729
	pr_info("%s %s: cmpxchg double redo ", n, s->name);
730 731
#endif

732
	return false;
733 734
}

735
#ifdef CONFIG_SLUB_DEBUG
736
static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
737
static DEFINE_SPINLOCK(object_map_lock);
738

739
static void __fill_map(unsigned long *obj_map, struct kmem_cache *s,
740
		       struct slab *slab)
741
{
742
	void *addr = slab_address(slab);
743 744
	void *p;

745
	bitmap_zero(obj_map, slab->objects);
746

747
	for (p = slab->freelist; p; p = get_freepointer(s, p))
748 749 750
		set_bit(__obj_to_index(s, addr, p), obj_map);
}

751 752 753 754 755
#if IS_ENABLED(CONFIG_KUNIT)
static bool slab_add_kunit_errors(void)
{
	struct kunit_resource *resource;

756
	if (!kunit_get_current_test())
757 758 759 760 761 762 763 764 765 766 767 768 769 770
		return false;

	resource = kunit_find_named_resource(current->kunit_test, "slab_errors");
	if (!resource)
		return false;

	(*(int *)resource->data)++;
	kunit_put_resource(resource);
	return true;
}
#else
static inline bool slab_add_kunit_errors(void) { return false; }
#endif

771
static inline unsigned int size_from_object(struct kmem_cache *s)
772 773 774 775 776 777 778 779 780 781 782 783 784 785 786
{
	if (s->flags & SLAB_RED_ZONE)
		return s->size - s->red_left_pad;

	return s->size;
}

/* Inverse of fixup_red_left(): step back over the left red zone, if any. */
static inline void *restore_red_left(struct kmem_cache *s, void *p)
{
	return (s->flags & SLAB_RED_ZONE) ? p - s->red_left_pad : p;
}

787 788 789
/*
 * Debug settings:
 */
790
#if defined(CONFIG_SLUB_DEBUG_ON)
791
static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS;
792
#else
793
static slab_flags_t slub_debug;
794
#endif
795

796
static char *slub_debug_string;
797
static int disable_higher_order_debug;
798

799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814
/*
 * slub is about to manipulate internal object metadata.  This memory lies
 * outside the range of the allocated object, so accessing it would normally
 * be reported by kasan as a bounds error.  metadata_access_enable() is used
 * to tell kasan that these accesses are OK.
 */
static inline void metadata_access_enable(void)
{
	kasan_disable_current();
}

/* Re-enable kasan checking once the metadata manipulation is done. */
static inline void metadata_access_disable(void)
{
	kasan_enable_current();
}

Christoph Lameter's avatar
Christoph Lameter committed
815 816 817
/*
 * Object debugging
 */
818 819 820

/* Verify that a pointer has an address that is valid within a slab page */
static inline int check_valid_pointer(struct kmem_cache *s,
821
				struct slab *slab, void *object)
822 823 824 825 826 827
{
	void *base;

	if (!object)
		return 1;

828
	base = slab_address(slab);
829
	object = kasan_reset_tag(object);
830
	object = restore_red_left(s, object);
831
	if (object < base || object >= base + slab->objects * s->size ||
832 833 834 835 836 837 838
		(object - base) % s->size) {
		return 0;
	}

	return 1;
}

839 840
static void print_section(char *level, char *text, u8 *addr,
			  unsigned int length)
Christoph Lameter's avatar
Christoph Lameter committed
841
{
842
	metadata_access_enable();
843 844
	print_hex_dump(level, text, DUMP_PREFIX_ADDRESS,
			16, 1, kasan_reset_tag((void *)addr), length, 1);
845
	metadata_access_disable();
Christoph Lameter's avatar
Christoph Lameter committed
846 847
}

848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867
/*
 * See comment in calculate_sizes().
 *
 * True when the freelist pointer is stored outside the object proper,
 * i.e. at offset s->inuse or beyond.
 */
static inline bool freeptr_outside_object(struct kmem_cache *s)
{
	return s->offset >= s->inuse;
}

/*
 * Offset of the end of the info block: inuse, plus room for the free
 * pointer when it is stored outside the object.
 */
static inline unsigned int get_info_end(struct kmem_cache *s)
{
	unsigned int end = s->inuse;

	if (freeptr_outside_object(s))
		end += sizeof(void *);

	return end;
}

Christoph Lameter's avatar
Christoph Lameter committed
868 869 870 871 872
static struct track *get_track(struct kmem_cache *s, void *object,
	enum track_item alloc)
{
	struct track *p;

873
	p = object + get_info_end(s);
Christoph Lameter's avatar
Christoph Lameter committed
874

875
	return kasan_reset_tag(p + alloc);
Christoph Lameter's avatar
Christoph Lameter committed
876 877
}

878
#ifdef CONFIG_STACKDEPOT
879 880 881
static noinline depot_stack_handle_t set_track_prepare(void)
{
	depot_stack_handle_t handle;
882
	unsigned long entries[TRACK_ADDRS_COUNT];
883
	unsigned int nr_entries;
884

885
	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3);
886 887 888 889 890 891 892 893 894
	handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);

	return handle;
}
#else
static inline depot_stack_handle_t set_track_prepare(void)
{
	return 0;
}
895
#endif
896

897 898 899 900 901 902 903 904 905
static void set_track_update(struct kmem_cache *s, void *object,
			     enum track_item alloc, unsigned long addr,
			     depot_stack_handle_t handle)
{
	struct track *p = get_track(s, object, alloc);

#ifdef CONFIG_STACKDEPOT
	p->handle = handle;
#endif
906 907 908 909
	p->addr = addr;
	p->cpu = smp_processor_id();
	p->pid = current->pid;
	p->when = jiffies;
Christoph Lameter's avatar
Christoph Lameter committed
910 911
}

912 913 914 915 916 917 918 919
static __always_inline void set_track(struct kmem_cache *s, void *object,
				      enum track_item alloc, unsigned long addr)
{
	depot_stack_handle_t handle = set_track_prepare();

	set_track_update(s, object, alloc, addr, handle);
}

Christoph Lameter's avatar
Christoph Lameter committed
920 921
static void init_tracking(struct kmem_cache *s, void *object)
{
922 923
	struct track *p;

924 925 926
	if (!(s->flags & SLAB_STORE_USER))
		return;

927 928
	p = get_track(s, object, TRACK_ALLOC);
	memset(p, 0, 2*sizeof(struct track));
Christoph Lameter's avatar
Christoph Lameter committed
929 930
}

931
static void print_track(const char *s, struct track *t, unsigned long pr_time)
Christoph Lameter's avatar
Christoph Lameter committed
932
{
933 934
	depot_stack_handle_t handle __maybe_unused;

Christoph Lameter's avatar
Christoph Lameter committed
935 936 937
	if (!t->addr)
		return;

938
	pr_err("%s in %pS age=%lu cpu=%u pid=%d\n",
939
	       s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid);
940 941 942 943 944 945
#ifdef CONFIG_STACKDEPOT
	handle = READ_ONCE(t->handle);
	if (handle)
		stack_depot_print(handle);
	else
		pr_err("object allocation/free stack trace missing\n");
946
#endif
947 948
}

949
void print_tracking(struct kmem_cache *s, void *object)
950
{
951
	unsigned long pr_time = jiffies;
952 953 954
	if (!(s->flags & SLAB_STORE_USER))
		return;

955 956
	print_track("Allocated", get_track(s, object, TRACK_ALLOC), pr_time);
	print_track("Freed", get_track(s, object, TRACK_FREE), pr_time);
957 958
}

959
static void print_slab_info(const struct slab *slab)
960
{
961
	struct folio *folio = (struct folio *)slab_folio(slab);
962

963 964 965
	pr_err("Slab 0x%p objects=%u used=%u fp=0x%p flags=%pGp\n",
	       slab, slab->objects, slab->inuse, slab->freelist,
	       folio_flags(folio, 0));
966 967
}

968 969 970 971 972 973 974 975 976 977
/*
 * kmalloc caches has fixed sizes (mostly power of 2), and kmalloc() API
 * family will round up the real request size to these fixed ones, so
 * there could be an extra area than what is requested. Save the original
 * request size in the meta data area, for better debug and sanity check.
 */
static inline void set_orig_size(struct kmem_cache *s,
				void *object, unsigned int orig_size)
{
	void *p = kasan_reset_tag(object);
978
	unsigned int kasan_meta_size;
979 980 981 982

	if (!slub_debug_orig_size(s))
		return;

983
	/*
984 985 986 987
	 * KASAN can save its free meta data inside of the object at offset 0.
	 * If this meta data size is larger than 'orig_size', it will overlap
	 * the data redzone in [orig_size+1, object_size]. Thus, we adjust
	 * 'orig_size' to be as at least as big as KASAN's meta data.
988
	 */
989 990 991
	kasan_meta_size = kasan_metadata_size(s, true);
	if (kasan_meta_size > orig_size)
		orig_size = kasan_meta_size;
992

993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011
	p += get_info_end(s);
	p += sizeof(struct track) * 2;

	*(unsigned int *)p = orig_size;
}

/*
 * Read back the original kmalloc request size saved by set_orig_size(),
 * or s->object_size when the cache does not record it.
 */
static inline unsigned int get_orig_size(struct kmem_cache *s, void *object)
{
	void *meta;

	if (!slub_debug_orig_size(s))
		return s->object_size;

	/* The value lives right after the two track records. */
	meta = kasan_reset_tag(object) + get_info_end(s)
		+ sizeof(struct track) * 2;

	return *(unsigned int *)meta;
}

1012 1013 1014 1015 1016
void skip_orig_size_check(struct kmem_cache *s, const void *object)
{
	set_orig_size(s, (void *)object, s->object_size);
}

1017 1018
static void slab_bug(struct kmem_cache *s, char *fmt, ...)
{
1019
	struct va_format vaf;
1020 1021 1022
	va_list args;

	va_start(args, fmt);
1023 1024
	vaf.fmt = fmt;
	vaf.va = &args;
1025
	pr_err("=============================================================================\n");
1026
	pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf);
1027
	pr_err("-----------------------------------------------------------------------------\n\n");
1028
	va_end(args);
Christoph Lameter's avatar
Christoph Lameter committed
1029 1030
}

1031
__printf(2, 3)
1032 1033
static void slab_fix(struct kmem_cache *s, char *fmt, ...)
{
1034
	struct va_format vaf;
1035 1036
	va_list args;

1037 1038 1039
	if (slab_add_kunit_errors())
		return;

1040
	va_start(args, fmt);
1041 1042 1043
	vaf.fmt = fmt;
	vaf.va = &args;
	pr_err("FIX %s: %pV\n", s->name, &vaf);
1044 1045 1046
	va_end(args);
}

1047
static void print_trailer(struct kmem_cache *s, struct slab *slab, u8 *p)
Christoph Lameter's avatar
Christoph Lameter committed
1048 1049
{
	unsigned int off;	/* Offset of last byte */
1050
	u8 *addr = slab_address(slab);
1051 1052 1053

	print_tracking(s, p);

1054
	print_slab_info(slab);
1055

1056
	pr_err("Object 0x%p @offset=%tu fp=0x%p\n\n",
1057
	       p, p - addr, get_freepointer(s, p));
1058

1059
	if (s->flags & SLAB_RED_ZONE)
1060
		print_section(KERN_ERR, "Redzone  ", p - s->red_left_pad,
1061
			      s->red_left_pad);
1062
	else if (p > addr + 16)
1063
		print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
Christoph Lameter's avatar
Christoph Lameter committed
1064

1065
	print_section(KERN_ERR,         "Object   ", p,
1066
		      min_t(unsigned int, s->object_size, PAGE_SIZE));
Christoph Lameter's avatar
Christoph Lameter committed
1067
	if (s->flags & SLAB_RED_ZONE)
1068
		print_section(KERN_ERR, "Redzone  ", p + s->object_size,
1069
			s->inuse - s->object_size);
Christoph Lameter's avatar
Christoph Lameter committed
1070

1071
	off = get_info_end(s);
Christoph Lameter's avatar
Christoph Lameter committed
1072

1073
	if (s->flags & SLAB_STORE_USER)
Christoph Lameter's avatar
Christoph Lameter committed
1074 1075
		off += 2 * sizeof(struct track);

1076 1077 1078
	if (slub_debug_orig_size(s))
		off += sizeof(unsigned int);

1079
	off += kasan_metadata_size(s, false);
1080

1081
	if (off != size_from_object(s))
Christoph Lameter's avatar
Christoph Lameter committed
1082
		/* Beginning of the filler is the free pointer */
1083
		print_section(KERN_ERR, "Padding  ", p + off,
1084
			      size_from_object(s) - off);
1085 1086

	dump_stack();
Christoph Lameter's avatar
Christoph Lameter committed
1087 1088
}

1089
static void object_err(struct kmem_cache *s, struct slab *slab,
Christoph Lameter's avatar
Christoph Lameter committed
1090 1091
			u8 *object, char *reason)
{
1092 1093 1094
	if (slab_add_kunit_errors())
		return;

1095
	slab_bug(s, "%s", reason);
1096
	print_trailer(s, slab, object);
1097
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
Christoph Lameter's avatar
Christoph Lameter committed
1098 1099
}

1100
static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab,
1101 1102 1103
			       void **freelist, void *nextfree)
{
	if ((s->flags & SLAB_CONSISTENCY_CHECKS) &&
1104 1105
	    !check_valid_pointer(s, slab, nextfree) && freelist) {
		object_err(s, slab, *freelist, "Freechain corrupt");
1106 1107 1108 1109 1110 1111 1112 1113
		*freelist = NULL;
		slab_fix(s, "Isolate corrupted freechain");
		return true;
	}

	return false;
}

1114
static __printf(3, 4) void slab_err(struct kmem_cache *s, struct slab *slab,
1115
			const char *fmt, ...)
Christoph Lameter's avatar
Christoph Lameter committed
1116 1117 1118 1119
{
	va_list args;
	char buf[100];

1120 1121 1122
	if (slab_add_kunit_errors())
		return;

1123 1124
	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
Christoph Lameter's avatar
Christoph Lameter committed
1125
	va_end(args);
1126
	slab_bug(s, "%s", buf);
1127
	print_slab_info(slab);
Christoph Lameter's avatar
Christoph Lameter committed
1128
	dump_stack();
1129
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
Christoph Lameter's avatar
Christoph Lameter committed
1130 1131
}

1132
static void init_object(struct kmem_cache *s, void *object, u8 val)
Christoph Lameter's avatar
Christoph Lameter committed
1133
{
1134
	u8 *p = kasan_reset_tag(object);
1135
	unsigned int poison_size = s->object_size;
Christoph Lameter's avatar
Christoph Lameter committed
1136

1137
	if (s->flags & SLAB_RED_ZONE) {
1138 1139
		memset(p - s->red_left_pad, val, s->red_left_pad);

1140 1141 1142 1143 1144 1145 1146 1147 1148 1149
		if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
			/*
			 * Redzone the extra allocated space by kmalloc than
			 * requested, and the poison size will be limited to
			 * the original request size accordingly.
			 */
			poison_size = get_orig_size(s, object);
		}
	}

Christoph Lameter's avatar
Christoph Lameter committed
1150
	if (s->flags & __OBJECT_POISON) {
1151 1152
		memset(p, POISON_FREE, poison_size - 1);
		p[poison_size - 1] = POISON_END;
Christoph Lameter's avatar
Christoph Lameter committed
1153 1154 1155
	}

	if (s->flags & SLAB_RED_ZONE)
1156
		memset(p + poison_size, val, s->inuse - poison_size);
Christoph Lameter's avatar
Christoph Lameter committed
1157 1158
}

1159 1160 1161
static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
						void *from, void *to)
{
1162
	slab_fix(s, "Restoring %s 0x%p-0x%p=0x%x", message, from, to - 1, data);
1163 1164 1165
	memset(from, data, to - from);
}

1166
static int check_bytes_and_report(struct kmem_cache *s, struct slab *slab,
1167
			u8 *object, char *what,
1168
			u8 *start, unsigned int value, unsigned int bytes)
1169 1170 1171
{
	u8 *fault;
	u8 *end;
1172
	u8 *addr = slab_address(slab);
1173

1174
	metadata_access_enable();
1175
	fault = memchr_inv(kasan_reset_tag(start), value, bytes);
1176
	metadata_access_disable();
1177 1178 1179 1180 1181 1182 1183
	if (!fault)
		return 1;

	end = start + bytes;
	while (end > fault && end[-1] == value)
		end--;

1184 1185 1186
	if (slab_add_kunit_errors())
		goto skip_bug_print;

1187
	slab_bug(s, "%s overwritten", what);
1188
	pr_err("0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n",
1189 1190
					fault, end - 1, fault - addr,
					fault[0], value);
1191
	print_trailer(s, slab, object);
1192
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
1193

1194
skip_bug_print:
1195 1196
	restore_bytes(s, what, value, fault, end);
	return 0;
Christoph Lameter's avatar
Christoph Lameter committed
1197 1198 1199 1200 1201 1202 1203 1204
}

/*
 * Object layout:
 *
 * object address
 * 	Bytes of the object to be managed.
 * 	If the freepointer may overlay the object then the free
 *	pointer is at the middle of the object.
 *
 * 	Poisoning uses 0x6b (POISON_FREE) and the last byte is
 * 	0xa5 (POISON_END)
 *
 * object + s->object_size
 * 	Padding to reach word boundary. This is also used for Redzoning.
 * 	Padding is extended by another word if Redzoning is enabled and
 * 	object_size == inuse.
 *
 * 	We fill with 0xbb (RED_INACTIVE) for inactive objects and with
 * 	0xcc (RED_ACTIVE) for objects in use.
 *
 * object + s->inuse
 * 	Meta data starts here.
 *
 * 	A. Free pointer (if we cannot overwrite object on free)
 * 	B. Tracking data for SLAB_STORE_USER
 *	C. Original request size for kmalloc object (SLAB_STORE_USER enabled)
 *	D. Padding to reach required alignment boundary or at minimum
 * 		one word if debugging is on to be able to detect writes
 * 		before the word boundary.
 *
 *	Padding is done using 0x5a (POISON_INUSE)
 *
 * object + s->size
 * 	Nothing is used beyond s->size.
 *
 * If slabcaches are merged then the object_size and inuse boundaries are mostly
 * ignored. And therefore no slab options that rely on these boundaries
 * may be used with merged slabcaches.
 */

1238
static int check_pad_bytes(struct kmem_cache *s, struct slab *slab, u8 *p)
Christoph Lameter's avatar
Christoph Lameter committed
1239
{
1240
	unsigned long off = get_info_end(s);	/* The end of info */
Christoph Lameter's avatar
Christoph Lameter committed
1241

1242
	if (s->flags & SLAB_STORE_USER) {
Christoph Lameter's avatar
Christoph Lameter committed
1243 1244 1245
		/* We also have user information there */
		off += 2 * sizeof(struct track);

1246 1247 1248 1249
		if (s->flags & SLAB_KMALLOC)
			off += sizeof(unsigned int);
	}

1250
	off += kasan_metadata_size(s, false);
1251

1252
	if (size_from_object(s) == off)
Christoph Lameter's avatar
Christoph Lameter committed
1253 1254
		return 1;

1255
	return check_bytes_and_report(s, slab, p, "Object padding",
1256
			p + off, POISON_INUSE, size_from_object(s) - off);
Christoph Lameter's avatar
Christoph Lameter committed
1257 1258
}

1259
/* Check the pad bytes at the end of a slab page */
1260
static void slab_pad_check(struct kmem_cache *s, struct slab *slab)
Christoph Lameter's avatar
Christoph Lameter committed
1261
{
1262 1263 1264
	u8 *start;
	u8 *fault;
	u8 *end;
1265
	u8 *pad;
1266 1267
	int length;
	int remainder;
Christoph Lameter's avatar
Christoph Lameter committed
1268 1269

	if (!(s->flags & SLAB_POISON))
1270
		return;
Christoph Lameter's avatar
Christoph Lameter committed
1271

1272 1273
	start = slab_address(slab);
	length = slab_size(slab);
1274 1275
	end = start + length;
	remainder = length % s->size;
Christoph Lameter's avatar
Christoph Lameter committed
1276
	if (!remainder)
1277
		return;
Christoph Lameter's avatar
Christoph Lameter committed
1278

1279
	pad = end - remainder;
1280
	metadata_access_enable();
1281
	fault = memchr_inv(kasan_reset_tag(pad), POISON_INUSE, remainder);
1282
	metadata_access_disable();
1283
	if (!fault)
1284
		return;
1285 1286 1287
	while (end > fault && end[-1] == POISON_INUSE)
		end--;

1288
	slab_err(s, slab, "Padding overwritten. 0x%p-0x%p @offset=%tu",
1289
			fault, end - 1, fault - start);
1290
	print_section(KERN_ERR, "Padding ", pad, remainder);
1291

1292
	restore_bytes(s, "slab padding", POISON_INUSE, fault, end);
Christoph Lameter's avatar
Christoph Lameter committed
1293 1294
}

1295
static int check_object(struct kmem_cache *s, struct slab *slab,
1296
					void *object, u8 val)
Christoph Lameter's avatar
Christoph Lameter committed
1297 1298
{
	u8 *p = object;
1299
	u8 *endobject = object + s->object_size;
1300
	unsigned int orig_size, kasan_meta_size;
Christoph Lameter's avatar
Christoph Lameter committed
1301 1302

	if (s->flags & SLAB_RED_ZONE) {
1303
		if (!check_bytes_and_report(s, slab, object, "Left Redzone",
1304 1305 1306
			object - s->red_left_pad, val, s->red_left_pad))
			return 0;

1307
		if (!check_bytes_and_report(s, slab, object, "Right Redzone",
1308
			endobject, val, s->inuse - s->object_size))
Christoph Lameter's avatar
Christoph Lameter committed
1309
			return 0;
1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320

		if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
			orig_size = get_orig_size(s, object);

			if (s->object_size > orig_size  &&
				!check_bytes_and_report(s, slab, object,
					"kmalloc Redzone", p + orig_size,
					val, s->object_size - orig_size)) {
				return 0;
			}
		}
Christoph Lameter's avatar
Christoph Lameter committed
1321
	} else {
1322
		if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
1323
			check_bytes_and_report(s, slab, p, "Alignment padding",
1324 1325
				endobject, POISON_INUSE,
				s->inuse - s->object_size);
Ingo Molnar's avatar
Ingo Molnar committed
1326
		}
Christoph Lameter's avatar
Christoph Lameter committed
1327 1328 1329
	}

	if (s->flags & SLAB_POISON) {
1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346
		if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON)) {
			/*
			 * KASAN can save its free meta data inside of the
			 * object at offset 0. Thus, skip checking the part of
			 * the redzone that overlaps with the meta data.
			 */
			kasan_meta_size = kasan_metadata_size(s, true);
			if (kasan_meta_size < s->object_size - 1 &&
			    !check_bytes_and_report(s, slab, p, "Poison",
					p + kasan_meta_size, POISON_FREE,
					s->object_size - kasan_meta_size - 1))
				return 0;
			if (kasan_meta_size < s->object_size &&
			    !check_bytes_and_report(s, slab, p, "End Poison",
					p + s->object_size - 1, POISON_END, 1))
				return 0;
		}
Christoph Lameter's avatar
Christoph Lameter committed
1347 1348 1349
		/*
		 * check_pad_bytes cleans up on its own.
		 */
1350
		check_pad_bytes(s, slab, p);
Christoph Lameter's avatar
Christoph Lameter committed
1351 1352
	}

1353
	if (!freeptr_outside_object(s) && val == SLUB_RED_ACTIVE)
Christoph Lameter's avatar
Christoph Lameter committed
1354 1355 1356 1357 1358 1359 1360
		/*
		 * Object and freepointer overlap. Cannot check
		 * freepointer while object is allocated.
		 */
		return 1;

	/* Check free pointer validity */
1361 1362
	if (!check_valid_pointer(s, slab, get_freepointer(s, p))) {
		object_err(s, slab, p, "Freepointer corrupt");
Christoph Lameter's avatar
Christoph Lameter committed
1363
		/*
1364
		 * No choice but to zap it and thus lose the remainder
Christoph Lameter's avatar
Christoph Lameter committed
1365
		 * of the free objects in this slab. May cause
Christoph Lameter's avatar
Christoph Lameter committed
1366
		 * another error because the object count is now wrong.
Christoph Lameter's avatar
Christoph Lameter committed
1367
		 */
1368
		set_freepointer(s, p, NULL);
Christoph Lameter's avatar
Christoph Lameter committed
1369 1370 1371 1372 1373
		return 0;
	}
	return 1;
}

1374
static int check_slab(struct kmem_cache *s, struct slab *slab)
Christoph Lameter's avatar
Christoph Lameter committed
1375
{
1376 1377
	int maxobj;

1378 1379
	if (!folio_test_slab(slab_folio(slab))) {
		slab_err(s, slab, "Not a valid slab page");
Christoph Lameter's avatar
Christoph Lameter committed
1380 1381
		return 0;
	}
1382

1383 1384 1385 1386
	maxobj = order_objects(slab_order(slab), s->size);
	if (slab->objects > maxobj) {
		slab_err(s, slab, "objects %u > max %u",
			slab->objects, maxobj);
1387 1388
		return 0;
	}
1389 1390 1391
	if (slab->inuse > slab->objects) {
		slab_err(s, slab, "inuse %u > max %u",
			slab->inuse, slab->objects);
Christoph Lameter's avatar
Christoph Lameter committed
1392 1393 1394
		return 0;
	}
	/* Slab_pad_check fixes things up after itself */
1395
	slab_pad_check(s, slab);
Christoph Lameter's avatar
Christoph Lameter committed
1396 1397 1398 1399
	return 1;
}

/*
1400
 * Determine if a certain object in a slab is on the freelist. Must hold the
Christoph Lameter's avatar
Christoph Lameter committed
1401
 * slab lock to guarantee that the chains are in a consistent state.
Christoph Lameter's avatar
Christoph Lameter committed
1402
 */
1403
static int on_freelist(struct kmem_cache *s, struct slab *slab, void *search)
Christoph Lameter's avatar
Christoph Lameter committed
1404 1405
{
	int nr = 0;
1406
	void *fp;
Christoph Lameter's avatar
Christoph Lameter committed
1407
	void *object = NULL;
1408
	int max_objects;
Christoph Lameter's avatar
Christoph Lameter committed
1409

1410 1411
	fp = slab->freelist;
	while (fp && nr <= slab->objects) {
Christoph Lameter's avatar
Christoph Lameter committed
1412 1413
		if (fp == search)
			return 1;
1414
		if (!check_valid_pointer(s, slab, fp)) {
Christoph Lameter's avatar
Christoph Lameter committed
1415
			if (object) {
1416
				object_err(s, slab, object,
Christoph Lameter's avatar
Christoph Lameter committed
1417
					"Freechain corrupt");
1418
				set_freepointer(s, object, NULL);
Christoph Lameter's avatar
Christoph Lameter committed
1419
			} else {
1420 1421 1422
				slab_err(s, slab, "Freepointer corrupt");
				slab->freelist = NULL;
				slab->inuse = slab->objects;
1423
				slab_fix(s, "Freelist cleared");
Christoph Lameter's avatar
Christoph Lameter committed
1424 1425 1426 1427 1428 1429 1430 1431 1432
				return 0;
			}
			break;
		}
		object = fp;
		fp = get_freepointer(s, object);
		nr++;
	}

1433
	max_objects = order_objects(slab_order(slab), s->size);
1434 1435
	if (max_objects > MAX_OBJS_PER_PAGE)
		max_objects = MAX_OBJS_PER_PAGE;
1436

1437 1438 1439 1440
	if (slab->objects != max_objects) {
		slab_err(s, slab, "Wrong number of objects. Found %d but should be %d",
			 slab->objects, max_objects);
		slab->objects = max_objects;
1441
		slab_fix(s, "Number of objects adjusted");
1442
	}
1443 1444 1445 1446
	if (slab->inuse != slab->objects - nr) {
		slab_err(s, slab, "Wrong object count. Counter is %d but counted were %d",
			 slab->inuse, slab->objects - nr);
		slab->inuse = slab->objects - nr;
1447
		slab_fix(s, "Object count adjusted");
Christoph Lameter's avatar
Christoph Lameter committed
1448 1449 1450 1451
	}
	return search == NULL;
}

1452
static void trace(struct kmem_cache *s, struct slab *slab, void *object,
1453
								int alloc)
1454 1455
{
	if (s->flags & SLAB_TRACE) {
1456
		pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
1457 1458
			s->name,
			alloc ? "alloc" : "free",
1459 1460
			object, slab->inuse,
			slab->freelist);
1461 1462

		if (!alloc)
1463
			print_section(KERN_INFO, "Object ", (void *)object,
1464
					s->object_size);
1465 1466 1467 1468 1469

		dump_stack();
	}
}

1470
/*
Christoph Lameter's avatar
Christoph Lameter committed
1471
 * Tracking of fully allocated slabs for debugging purposes.
1472
 */
1473
static void add_full(struct kmem_cache *s,
1474
	struct kmem_cache_node *n, struct slab *slab)
1475
{
1476 1477 1478
	if (!(s->flags & SLAB_STORE_USER))
		return;

1479
	lockdep_assert_held(&n->list_lock);
1480
	list_add(&slab->slab_list, &n->full);
1481 1482
}

1483
static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct slab *slab)
1484 1485 1486 1487
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

1488
	lockdep_assert_held(&n->list_lock);
1489
	list_del(&slab->slab_list);
1490 1491
}

1492 1493 1494 1495 1496
static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
{
	return atomic_long_read(&n->nr_slabs);
}

1497
static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
1498 1499 1500
{
	struct kmem_cache_node *n = get_node(s, node);

1501 1502
	atomic_long_inc(&n->nr_slabs);
	atomic_long_add(objects, &n->total_objects);
1503
}
1504
static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
1505 1506 1507 1508
{
	struct kmem_cache_node *n = get_node(s, node);

	atomic_long_dec(&n->nr_slabs);
1509
	atomic_long_sub(objects, &n->total_objects);
1510 1511 1512
}

/* Object debug checks for alloc/free paths */
1513
static void setup_object_debug(struct kmem_cache *s, void *object)
1514
{
1515
	if (!kmem_cache_debug_flags(s, SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON))
1516 1517
		return;

1518
	init_object(s, object, SLUB_RED_INACTIVE);
1519 1520 1521
	init_tracking(s, object);
}

1522
static
1523
void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr)
1524
{
1525
	if (!kmem_cache_debug_flags(s, SLAB_POISON))
1526 1527 1528
		return;

	metadata_access_enable();
1529
	memset(kasan_reset_tag(addr), POISON_INUSE, slab_size(slab));
1530 1531 1532
	metadata_access_disable();
}

1533
static inline int alloc_consistency_checks(struct kmem_cache *s,
1534
					struct slab *slab, void *object)
Christoph Lameter's avatar
Christoph Lameter committed
1535
{
1536
	if (!check_slab(s, slab))
1537
		return 0;
Christoph Lameter's avatar
Christoph Lameter committed
1538

1539 1540
	if (!check_valid_pointer(s, slab, object)) {
		object_err(s, slab, object, "Freelist Pointer check fails");
1541
		return 0;
Christoph Lameter's avatar
Christoph Lameter committed
1542 1543
	}

1544
	if (!check_object(s, slab, object, SLUB_RED_INACTIVE))
1545 1546 1547 1548 1549
		return 0;

	return 1;
}

1550
static noinline bool alloc_debug_processing(struct kmem_cache *s,
1551
			struct slab *slab, void *object, int orig_size)
1552 1553
{
	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1554
		if (!alloc_consistency_checks(s, slab, object))
1555 1556
			goto bad;
	}
Christoph Lameter's avatar
Christoph Lameter committed
1557

1558
	/* Success. Perform special debug activities for allocs */
1559
	trace(s, slab, object, 1);
1560
	set_orig_size(s, object, orig_size);
1561
	init_object(s, object, SLUB_RED_ACTIVE);
1562
	return true;
1563

Christoph Lameter's avatar
Christoph Lameter committed
1564
bad:
1565
	if (folio_test_slab(slab_folio(slab))) {
Christoph Lameter's avatar
Christoph Lameter committed
1566 1567 1568
		/*
		 * If this is a slab page then lets do the best we can
		 * to avoid issues in the future. Marking all objects
Christoph Lameter's avatar
Christoph Lameter committed
1569
		 * as used avoids touching the remaining objects.
Christoph Lameter's avatar
Christoph Lameter committed
1570
		 */
1571
		slab_fix(s, "Marking all objects used");
1572 1573
		slab->inuse = slab->objects;
		slab->freelist = NULL;
Christoph Lameter's avatar
Christoph Lameter committed
1574
	}
1575
	return false;
Christoph Lameter's avatar
Christoph Lameter committed
1576 1577
}

1578
static inline int free_consistency_checks(struct kmem_cache *s,
1579
		struct slab *slab, void *object, unsigned long addr)
Christoph Lameter's avatar
Christoph Lameter committed
1580
{
1581 1582
	if (!check_valid_pointer(s, slab, object)) {
		slab_err(s, slab, "Invalid object pointer 0x%p", object);
1583
		return 0;
Christoph Lameter's avatar
Christoph Lameter committed
1584 1585
	}

1586 1587
	if (on_freelist(s, slab, object)) {
		object_err(s, slab, object, "Object already free");
1588
		return 0;
Christoph Lameter's avatar
Christoph Lameter committed
1589 1590
	}

1591
	if (!check_object(s, slab, object, SLUB_RED_ACTIVE))
1592
		return 0;
Christoph Lameter's avatar
Christoph Lameter committed
1593

1594 1595 1596
	if (unlikely(s != slab->slab_cache)) {
		if (!folio_test_slab(slab_folio(slab))) {
			slab_err(s, slab, "Attempt to free object(0x%p) outside of slab",
Joe Perches's avatar
Joe Perches committed
1597
				 object);
1598
		} else if (!slab->slab_cache) {
1599 1600
			pr_err("SLUB <none>: no slab for object 0x%p.\n",
			       object);
1601
			dump_stack();
1602
		} else
1603
			object_err(s, slab, object,
1604
					"page slab pointer corrupt.");
1605 1606 1607 1608 1609
		return 0;
	}
	return 1;
}

1610
/*
1611
 * Parse a block of slab_debug options. Blocks are delimited by ';'
1612 1613 1614 1615 1616 1617 1618 1619 1620 1621
 *
 * @str:    start of block
 * @flags:  returns parsed flags, or DEBUG_DEFAULT_FLAGS if none specified
 * @slabs:  return start of list of slabs, or NULL when there's no list
 * @init:   assume this is initial parsing and not per-kmem-create parsing
 *
 * returns the start of next block if there's any, or NULL
 */
static char *
parse_slub_debug_flags(char *str, slab_flags_t *flags, char **slabs, bool init)
1622
{
1623
	bool higher_order_disable = false;
1624

1625 1626 1627 1628 1629
	/* Skip any completely empty blocks */
	while (*str && *str == ';')
		str++;

	if (*str == ',') {
1630 1631 1632 1633
		/*
		 * No options but restriction on slabs. This means full
		 * debugging for slabs matching a pattern.
		 */
1634
		*flags = DEBUG_DEFAULT_FLAGS;
1635
		goto check_slabs;
1636 1637
	}
	*flags = 0;
1638

1639 1640
	/* Determine which debug features should be switched on */
	for (; *str && *str != ',' && *str != ';'; str++) {
1641
		switch (tolower(*str)) {
1642 1643 1644
		case '-':
			*flags = 0;
			break;
1645
		case 'f':
1646
			*flags |= SLAB_CONSISTENCY_CHECKS;
1647 1648
			break;
		case 'z':
1649
			*flags |= SLAB_RED_ZONE;
1650 1651
			break;
		case 'p':
1652
			*flags |= SLAB_POISON;
1653 1654
			break;
		case 'u':
1655
			*flags |= SLAB_STORE_USER;
1656 1657
			break;
		case 't':
1658
			*flags |= SLAB_TRACE;
1659
			break;
1660
		case 'a':
1661
			*flags |= SLAB_FAILSLAB;
1662
			break;
1663 1664 1665 1666 1667
		case 'o':
			/*
			 * Avoid enabling debugging on caches if its minimum
			 * order would increase as a result.
			 */
1668
			higher_order_disable = true;
1669
			break;
1670
		default:
1671
			if (init)
1672
				pr_err("slab_debug option '%c' unknown. skipped\n", *str);
1673
		}
1674
	}
1675
check_slabs:
1676
	if (*str == ',')
1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700
		*slabs = ++str;
	else
		*slabs = NULL;

	/* Skip over the slab list */
	while (*str && *str != ';')
		str++;

	/* Skip any completely empty blocks */
	while (*str && *str == ';')
		str++;

	if (init && higher_order_disable)
		disable_higher_order_debug = 1;

	if (*str)
		return str;
	else
		return NULL;
}

/*
 * Parse the slab_debug= boot parameter: a ';'-separated list of blocks,
 * each enabling debug flags globally or only for a list of named slabs.
 */
static int __init setup_slub_debug(char *str)
{
	slab_flags_t flags;
	slab_flags_t global_flags;
	char *saved_str;
	char *slab_list;
	bool global_slub_debug_changed = false;
	bool slab_list_specified = false;

	global_flags = DEBUG_DEFAULT_FLAGS;
	if (*str++ != '=' || !*str)
		/*
		 * No options specified. Switch on full debugging.
		 */
		goto out;

	saved_str = str;
	while (str) {
		str = parse_slub_debug_flags(str, &flags, &slab_list, true);

		if (!slab_list) {
			global_flags = flags;
			global_slub_debug_changed = true;
		} else {
			slab_list_specified = true;
			if (flags & SLAB_STORE_USER)
				stack_depot_request_early_init();
		}
	}

	/*
	 * For backwards compatibility, a single list of flags with list of
	 * slabs means debugging is only changed for those slabs, so the global
	 * slab_debug should be unchanged (0 or DEBUG_DEFAULT_FLAGS, depending
	 * on CONFIG_SLUB_DEBUG_ON). We can extended that to multiple lists as
	 * long as there is no option specifying flags without a slab list.
	 */
	if (slab_list_specified) {
		if (!global_slub_debug_changed)
			global_flags = slub_debug;
		slub_debug_string = saved_str;
	}
out:
	slub_debug = global_flags;
	if (slub_debug & SLAB_STORE_USER)
		stack_depot_request_early_init();
	if (slub_debug != 0 || slub_debug_string)
		static_branch_enable(&slub_debug_enabled);
	else
		static_branch_disable(&slub_debug_enabled);
	if ((static_branch_unlikely(&init_on_alloc) ||
	     static_branch_unlikely(&init_on_free)) &&
	    (slub_debug & SLAB_POISON))
		pr_info("mem auto-init: SLAB_POISON will take precedence over init_on_alloc/init_on_free\n");
	return 1;
}

/* "slab_debug" is the canonical parameter name; "slub_debug" is a legacy alias. */
__setup("slab_debug", setup_slub_debug);
__setup_param("slub_debug", slub_debug, setup_slub_debug, 0);

1758 1759 1760 1761 1762 1763 1764
/*
 * kmem_cache_flags - apply debugging options to the cache
 * @flags:		flags to set
 * @name:		name of the cache
 *
 * Debug option(s) are applied to @flags. In addition to the debug
 * option(s), if a slab name (or multiple) is specified i.e.
1765
 * slab_debug=<Debug-Options>,<slab name1>,<slab name2> ...
1766 1767
 * then only the select slabs will receive the debug option(s).
 */
1768
slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name)
1769
{
1770 1771
	char *iter;
	size_t len;
1772 1773
	char *next_block;
	slab_flags_t block_flags;
1774 1775
	slab_flags_t slub_debug_local = slub_debug;

1776 1777 1778
	if (flags & SLAB_NO_USER_FLAGS)
		return flags;

1779 1780 1781 1782 1783 1784 1785
	/*
	 * If the slab cache is for debugging (e.g. kmemleak) then
	 * don't store user (stack trace) information by default,
	 * but let the user enable it via the command line below.
	 */
	if (flags & SLAB_NOLEAKTRACE)
		slub_debug_local &= ~SLAB_STORE_USER;
1786 1787

	len = strlen(name);
1788 1789 1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807
	next_block = slub_debug_string;
	/* Go through all blocks of debug options, see if any matches our slab's name */
	while (next_block) {
		next_block = parse_slub_debug_flags(next_block, &block_flags, &iter, false);
		if (!iter)
			continue;
		/* Found a block that has a slab list, search it */
		while (*iter) {
			char *end, *glob;
			size_t cmplen;

			end = strchrnul(iter, ',');
			if (next_block && next_block < end)
				end = next_block - 1;

			glob = strnchr(iter, end - iter, '*');
			if (glob)
				cmplen = glob - iter;
			else
				cmplen = max_t(size_t, len, (end - iter));
1808

1809 1810 1811 1812
			if (!strncmp(name, iter, cmplen)) {
				flags |= block_flags;
				return flags;
			}
1813

1814 1815 1816
			if (!*end || *end == ';')
				break;
			iter = end + 1;
1817 1818
		}
	}
1819

1820
	return flags | slub_debug_local;
1821
}
1822
#else /* !CONFIG_SLUB_DEBUG */

/*
 * No-op stand-ins so the rest of SLUB compiles unchanged when debug
 * support is configured out.
 */
static inline void setup_object_debug(struct kmem_cache *s, void *object) {}
static inline
void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr) {}

static inline bool alloc_debug_processing(struct kmem_cache *s,
	struct slab *slab, void *object, int orig_size) { return true; }

static inline bool free_debug_processing(struct kmem_cache *s,
	struct slab *slab, void *head, void *tail, int *bulk_cnt,
	unsigned long addr, depot_stack_handle_t handle) { return true; }

static inline void slab_pad_check(struct kmem_cache *s, struct slab *slab) {}
static inline int check_object(struct kmem_cache *s, struct slab *slab,
			void *object, u8 val) { return 1; }
static inline depot_stack_handle_t set_track_prepare(void) { return 0; }
static inline void set_track(struct kmem_cache *s, void *object,
			     enum track_item alloc, unsigned long addr) {}
static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
					struct slab *slab) {}
static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
					struct slab *slab) {}
/* Without debug support, no boot-time options can be applied. */
slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name)
{
	return flags;
}
#define slub_debug 0

#define disable_higher_order_debug 0

static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
							{ return 0; }
static inline void inc_slabs_node(struct kmem_cache *s, int node,
							int objects) {}
static inline void dec_slabs_node(struct kmem_cache *s, int node,
							int objects) {}

#ifndef CONFIG_SLUB_TINY
static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab,
			       void **freelist, void *nextfree)
{
	return false;
}
#endif
#endif /* CONFIG_SLUB_DEBUG */

1868 1869 1870 1871 1872 1873 1874 1875 1876 1877 1878 1879 1880 1881 1882 1883 1884 1885 1886 1887 1888 1889 1890 1891 1892
static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
{
	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
}

#ifdef CONFIG_MEMCG_KMEM
/* Free the slab's obj_cgroup pointer vector and clear its memcg state. */
static inline void memcg_free_slab_cgroups(struct slab *slab)
{
	kfree(slab_objcgs(slab));
	slab->memcg_data = 0;
}

/* Per-object size to charge when the object is memcg-accounted. */
static inline size_t obj_full_size(struct kmem_cache *s)
{
	/*
	 * For each accounted object there is an extra space which is used
	 * to store obj_cgroup membership. Charge it too.
	 */
	return s->size + sizeof(struct obj_cgroup *);
}

/*
 * Charge the allocation to the current obj_cgroup before the objects exist.
 *
 * Returns false if the allocation should fail.
 */
static bool __memcg_slab_pre_alloc_hook(struct kmem_cache *s,
					struct list_lru *lru,
					struct obj_cgroup **objcgp,
					size_t objects, gfp_t flags)
{
	/*
	 * The obtained objcg pointer is safe to use within the current scope,
	 * defined by current task or set_active_memcg() pair.
	 * obj_cgroup_get() is used to get a permanent reference.
	 */
	struct obj_cgroup *objcg = current_obj_cgroup();
	if (!objcg)
		return true;

	/* Ensure the list_lru has per-memcg structures before charging. */
	if (lru) {
		int ret;
		struct mem_cgroup *memcg;

		memcg = get_mem_cgroup_from_objcg(objcg);
		ret = memcg_list_lru_alloc(memcg, lru, flags);
		css_put(&memcg->css);

		if (ret)
			return false;
	}

	if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s)))
		return false;

	*objcgp = objcg;
	return true;
}

1926 1927 1928 1929 1930 1931 1932 1933 1934 1935 1936 1937 1938 1939 1940 1941 1942 1943 1944 1945 1946 1947
/*
 * Fast-path wrapper: only enter the memcg charging slow path when kmem
 * accounting is online and the allocation is actually accounted.
 *
 * Returns false if the allocation should fail.
 */
static __fastpath_inline
bool memcg_slab_pre_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
			       struct obj_cgroup **objcgp, size_t objects,
			       gfp_t flags)
{
	bool accounted;

	if (!memcg_kmem_online())
		return true;

	/* Accounting is requested per-call (__GFP_ACCOUNT) or per-cache. */
	accounted = (flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT);
	if (likely(!accounted))
		return true;

	return likely(__memcg_slab_pre_alloc_hook(s, lru, objcgp, objects,
						  flags));
}

/*
 * Record obj_cgroup ownership for each successfully allocated object and
 * uncharge the slots whose allocation failed (p[i] == NULL).
 */
static void __memcg_slab_post_alloc_hook(struct kmem_cache *s,
					 struct obj_cgroup *objcg,
					 gfp_t flags, size_t size,
					 void **p)
{
	struct slab *slab;
	unsigned long off;
	size_t i;

	flags &= gfp_allowed_mask;

	for (i = 0; i < size; i++) {
		if (likely(p[i])) {
			slab = virt_to_slab(p[i]);

			/*
			 * Lazily allocate the slab's objcg vector; on failure
			 * give the charge back rather than leaking it.
			 */
			if (!slab_objcgs(slab) &&
			    memcg_alloc_slab_cgroups(slab, s, flags, false)) {
				obj_cgroup_uncharge(objcg, obj_full_size(s));
				continue;
			}

			off = obj_to_index(s, slab, p[i]);
			obj_cgroup_get(objcg);
			slab_objcgs(slab)[off] = objcg;
			mod_objcg_state(objcg, slab_pgdat(slab),
					cache_vmstat_idx(s), obj_full_size(s));
		} else {
			/* Slot was not allocated: return its pre-charge. */
			obj_cgroup_uncharge(objcg, obj_full_size(s));
		}
	}
}

1976 1977 1978 1979 1980 1981 1982 1983 1984 1985
/* Fast-path wrapper: skip the memcg bookkeeping when nothing was charged. */
static __fastpath_inline
void memcg_slab_post_alloc_hook(struct kmem_cache *s, struct obj_cgroup *objcg,
				gfp_t flags, size_t size, void **p)
{
	if (unlikely(memcg_kmem_online() && objcg))
		__memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
}

1986 1987 1988
/*
 * Drop obj_cgroup ownership for each freed object: clear the slot,
 * uncharge the memcg and release the objcg reference taken at alloc.
 */
static void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
				   void **p, int objects,
				   struct obj_cgroup **objcgs)
{
	for (int i = 0; i < objects; i++) {
		struct obj_cgroup *objcg;
		unsigned int off;

		off = obj_to_index(s, slab, p[i]);
		objcg = objcgs[off];
		/* Object was never accounted (e.g. charged allocation failed). */
		if (!objcg)
			continue;

		objcgs[off] = NULL;
		obj_cgroup_uncharge(objcg, obj_full_size(s));
		mod_objcg_state(objcg, slab_pgdat(slab), cache_vmstat_idx(s),
				-obj_full_size(s));
		obj_cgroup_put(objcg);
	}
}
2006 2007 2008 2009 2010 2011 2012 2013 2014 2015 2016 2017 2018 2019 2020 2021

/* Fast-path wrapper around __memcg_slab_free_hook(). */
static __fastpath_inline
void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
			  int objects)
{
	struct obj_cgroup **objcgs;

	if (!memcg_kmem_online())
		return;

	/* A slab without an objcg vector holds no accounted objects. */
	objcgs = slab_objcgs(slab);
	if (unlikely(objcgs))
		__memcg_slab_free_hook(s, slab, p, objects, objcgs);
}
2022 2023 2024 2025 2026 2027 2028 2029

/* Undo the pre-alloc charge when the allocation itself failed. */
static inline
void memcg_slab_alloc_error_hook(struct kmem_cache *s, int objects,
			   struct obj_cgroup *objcg)
{
	if (!objcg)
		return;

	obj_cgroup_uncharge(objcg, objects * obj_full_size(s));
}
2030 2031 2032 2033 2034 2035 2036 2037 2038 2039 2040 2041 2042 2043 2044 2045 2046 2047 2048 2049 2050 2051 2052 2053
#else /* CONFIG_MEMCG_KMEM */

/* No-op stand-ins when memcg kmem accounting is configured out. */
static inline void memcg_free_slab_cgroups(struct slab *slab)
{
}

static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
					     struct list_lru *lru,
					     struct obj_cgroup **objcgp,
					     size_t objects, gfp_t flags)
{
	return true;
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
					      struct obj_cgroup *objcg,
					      gfp_t flags, size_t size,
					      void **p)
{
}

static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
					void **p, int objects)
{
}

static inline
void memcg_slab_alloc_error_hook(struct kmem_cache *s, int objects,
				 struct obj_cgroup *objcg)
{
}
#endif /* CONFIG_MEMCG_KMEM */

2062 2063 2064
/*
 * Hooks for other subsystems that check memory allocations. In a typical
 * production configuration these hooks all should produce no code at all.
 *
 * Returns true if freeing of the object can proceed, false if its reuse
 * was delayed by KASAN quarantine, or it was returned to KFENCE.
 */
static __always_inline
bool slab_free_hook(struct kmem_cache *s, void *x, bool init)
{
	kmemleak_free_recursive(x, s->flags);
	kmsan_slab_free(s, x);

	debug_check_no_locks_freed(x, s->object_size);

	if (!(s->flags & SLAB_DEBUG_OBJECTS))
		debug_check_no_obj_freed(x, s->object_size);

	/* Use KCSAN to help debug racy use-after-free. */
	if (!(s->flags & SLAB_TYPESAFE_BY_RCU))
		__kcsan_check_access(x, s->object_size,
				     KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT);

	/* KFENCE-managed objects are returned to KFENCE, not to the slab. */
	if (kfence_free(x))
		return false;

	/*
	 * As memory initialization might be integrated into KASAN,
	 * kasan_slab_free and initialization memset's must be
	 * kept together to avoid discrepancies in behavior.
	 *
	 * The initialization memset's clear the object and the metadata,
	 * but don't touch the SLAB redzone.
	 */
	if (unlikely(init)) {
		int rsize;

		if (!kasan_has_integrated_init())
			memset(kasan_reset_tag(x), 0, s->object_size);
		rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad : 0;
		memset((char *)kasan_reset_tag(x) + s->inuse, 0,
		       s->size - s->inuse - rsize);
	}
	/* KASAN might put x into memory quarantine, delaying its reuse. */
	return !kasan_slab_free(s, x, init);
}
2108

2109
/*
 * Run slab_free_hook() on every object of the detached freelist [*head,
 * *tail] and rebuild the list with only the objects whose reuse is not
 * delayed. Returns true if anything is left to actually free.
 */
static inline bool slab_free_freelist_hook(struct kmem_cache *s,
					   void **head, void **tail,
					   int *cnt)
{

	void *object;
	void *next = *head;
	void *old_tail = *tail;
	bool init;

	/* KFENCE objects are freed individually, never via a slab freelist. */
	if (is_kfence_address(next)) {
		slab_free_hook(s, next, false);
		return false;
	}

	/* Head and tail of the reconstructed freelist */
	*head = NULL;
	*tail = NULL;

	init = slab_want_init_on_free(s);

	do {
		object = next;
		next = get_freepointer(s, object);

		/* If object's reuse doesn't have to be delayed */
		if (likely(slab_free_hook(s, object, init))) {
			/* Move object to the new freelist */
			set_freepointer(s, object, *head);
			*head = object;
			if (!*tail)
				*tail = object;
		} else {
			/*
			 * Adjust the reconstructed freelist depth
			 * accordingly if object's reuse is delayed.
			 */
			--(*cnt);
		}
	} while (object != old_tail);

	return *head != NULL;
}

2153
/*
 * Initialize a single object in a new slab: debug metadata, KASAN state,
 * and the cache's constructor (run with the object unpoisoned).
 */
static void *setup_object(struct kmem_cache *s, void *object)
{
	setup_object_debug(s, object);
	object = kasan_init_slab_obj(s, object);
	if (unlikely(s->ctor)) {
		kasan_unpoison_new_object(s, object);
		s->ctor(object);
		kasan_poison_new_object(s, object);
	}
	return object;
}

Christoph Lameter's avatar
Christoph Lameter committed
2165 2166 2167
/*
 * Slab allocation and freeing
 */

/* Allocate the backing pages for a slab and mark the folio as a slab. */
static inline struct slab *alloc_slab_page(gfp_t flags, int node,
		struct kmem_cache_order_objects oo)
{
	struct folio *folio;
	struct slab *slab;
	unsigned int order = oo_order(oo);

	folio = (struct folio *)alloc_pages_node(node, flags, order);
	if (!folio)
		return NULL;

	slab = folio_slab(folio);
	__folio_set_slab(folio);
	/* Make the flag visible before any changes to folio->mapping */
	smp_wmb();
	/* Remember if the pages came from the pfmemalloc reserves. */
	if (folio_is_pfmemalloc(folio))
		slab_set_pfmemalloc(slab);

	return slab;
}

2189 2190 2191 2192
#ifdef CONFIG_SLAB_FREELIST_RANDOM
/* Pre-initialize the random sequence cache */
static int init_cache_random_seq(struct kmem_cache *s)
{
	unsigned int count = oo_objects(s->oo);
	int err;

	/* Bailout if already initialised */
	if (s->random_seq)
		return 0;

	err = cache_random_seq_create(s, count, GFP_KERNEL);
	if (err) {
		pr_err("SLUB: Unable to initialize free list for %s\n",
			s->name);
		return err;
	}

	/* Transform to an offset on the set of pages */
	if (s->random_seq) {
		unsigned int i;

		/* Scale indices into byte offsets within the slab. */
		for (i = 0; i < count; i++)
			s->random_seq[i] *= s->size;
	}
	return 0;
}

/* Initialize each random sequence freelist per cache */
static void __init init_freelist_randomization(void)
{
	struct kmem_cache *s;

	/* slab_mutex protects the global list of caches. */
	mutex_lock(&slab_mutex);

	list_for_each_entry(s, &slab_caches, list)
		init_cache_random_seq(s);

	mutex_unlock(&slab_mutex);
}

/* Get the next entry on the pre-computed freelist randomized */
2231
static void *next_freelist_entry(struct kmem_cache *s,
2232 2233 2234 2235 2236 2237 2238 2239 2240 2241 2242 2243 2244 2245 2246 2247 2248 2249 2250 2251 2252
				unsigned long *pos, void *start,
				unsigned long page_limit,
				unsigned long freelist_count)
{
	unsigned int idx;

	/*
	 * If the target page allocation failed, the number of objects on the
	 * page might be smaller than the usual size defined by the cache.
	 */
	do {
		idx = s->random_seq[*pos];
		*pos += 1;
		if (*pos >= freelist_count)
			*pos = 0;
	} while (unlikely(idx >= page_limit));

	return (char *)start + idx;
}

/* Shuffle the single linked freelist based on a random pre-computed sequence */
static bool shuffle_freelist(struct kmem_cache *s, struct slab *slab)
{
	void *start;
	void *cur;
	void *next;
	unsigned long idx, pos, page_limit, freelist_count;

	/* Nothing to shuffle for tiny slabs or without a precomputed sequence. */
	if (slab->objects < 2 || !s->random_seq)
		return false;

	freelist_count = oo_objects(s->oo);
	/* Random starting point into the precomputed sequence. */
	pos = get_random_u32_below(freelist_count);

	page_limit = slab->objects * s->size;
	start = fixup_red_left(s, slab_address(slab));

	/* First entry is used as the base of the freelist */
	cur = next_freelist_entry(s, &pos, start, page_limit, freelist_count);
	cur = setup_object(s, cur);
	slab->freelist = cur;

	/* Chain the remaining objects in randomized order. */
	for (idx = 1; idx < slab->objects; idx++) {
		next = next_freelist_entry(s, &pos, start, page_limit,
			freelist_count);
		next = setup_object(s, next);
		set_freepointer(s, cur, next);
		cur = next;
	}
	set_freepointer(s, cur, NULL);

	return true;
}
#else
/* Stubs when freelist randomization is configured out. */
static inline int init_cache_random_seq(struct kmem_cache *s)
{
	return 0;
}
static inline void init_freelist_randomization(void) { }
static inline bool shuffle_freelist(struct kmem_cache *s, struct slab *slab)
{
	return false;
}
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

2297 2298 2299 2300 2301 2302 2303 2304 2305 2306 2307 2308 2309 2310 2311 2312 2313 2314 2315 2316
/* Account a newly allocated slab: memcg vector (if needed) and vmstat. */
static __always_inline void account_slab(struct slab *slab, int order,
					 struct kmem_cache *s, gfp_t gfp)
{
	if (memcg_kmem_online() && (s->flags & SLAB_ACCOUNT))
		memcg_alloc_slab_cgroups(slab, s, gfp, true);

	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
			    PAGE_SIZE << order);
}

/* Undo account_slab() when a slab is being freed. */
static __always_inline void unaccount_slab(struct slab *slab, int order,
					   struct kmem_cache *s)
{
	if (memcg_kmem_online())
		memcg_free_slab_cgroups(slab);

	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
			    -(PAGE_SIZE << order));
}

2317
/*
 * Allocate and fully initialize a new slab for @s: get the pages, set up
 * the slab metadata, and build the (optionally shuffled) object freelist.
 */
static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
{
	struct slab *slab;
	struct kmem_cache_order_objects oo = s->oo;
	gfp_t alloc_gfp;
	void *start, *p, *next;
	int idx;
	bool shuffle;

	flags &= gfp_allowed_mask;

	flags |= s->allocflags;

	/*
	 * Let the initial higher-order allocation fail under memory pressure
	 * so we fall-back to the minimum order allocation.
	 */
	alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
	if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min))
		alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~__GFP_RECLAIM;

	slab = alloc_slab_page(alloc_gfp, node, oo);
	if (unlikely(!slab)) {
		oo = s->min;
		alloc_gfp = flags;
		/*
		 * Allocation may have failed due to fragmentation.
		 * Try a lower order alloc if possible
		 */
		slab = alloc_slab_page(alloc_gfp, node, oo);
		if (unlikely(!slab))
			return NULL;
		stat(s, ORDER_FALLBACK);
	}

	slab->objects = oo_objects(oo);
	slab->inuse = 0;
	slab->frozen = 0;

	account_slab(slab, oo_order(oo), s, flags);

	slab->slab_cache = s;

	kasan_poison_slab(slab);

	start = slab_address(slab);

	setup_slab_debug(s, slab, start);

	/* Try the randomized freelist; fall back to a sequential one. */
	shuffle = shuffle_freelist(s, slab);

	if (!shuffle) {
		start = fixup_red_left(s, start);
		start = setup_object(s, start);
		slab->freelist = start;
		for (idx = 0, p = start; idx < slab->objects - 1; idx++) {
			next = p + s->size;
			next = setup_object(s, next);
			set_freepointer(s, p, next);
			p = next;
		}
		set_freepointer(s, p, NULL);
	}

	return slab;
}

2384
/* Validate gfp flags, then allocate a new slab via allocate_slab(). */
static struct slab *new_slab(struct kmem_cache *s, gfp_t flags, int node)
{
	if (unlikely(flags & GFP_SLAB_BUG_MASK))
		flags = kmalloc_fix_flags(flags);

	/* A constructor would run before the zeroing it expects to survive. */
	WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO));

	return allocate_slab(s,
		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
}

2395
/* Tear down a slab's folio state and return its pages to the page allocator. */
static void __free_slab(struct kmem_cache *s, struct slab *slab)
{
	struct folio *folio = slab_folio(slab);
	int order = folio_order(folio);
	int pages = 1 << order;

	__slab_clear_pfmemalloc(slab);
	folio->mapping = NULL;
	/* Make the mapping reset visible before clearing the flag */
	smp_wmb();
	__folio_clear_slab(folio);
	mm_account_reclaimed_pages(pages);
	unaccount_slab(slab, order, s);
	__free_pages(&folio->page, order);
}

/* RCU callback: actually free a SLAB_TYPESAFE_BY_RCU slab after a grace period. */
static void rcu_free_slab(struct rcu_head *h)
{
	struct slab *slab = container_of(h, struct slab, rcu_head);

	__free_slab(slab->slab_cache, slab);
}

2418
/*
 * Free a slab, running consistency checks on debug caches and deferring
 * the actual free through RCU for SLAB_TYPESAFE_BY_RCU caches.
 */
static void free_slab(struct kmem_cache *s, struct slab *slab)
{
	if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) {
		void *p;

		slab_pad_check(s, slab);
		for_each_object(p, s, slab_address(slab), slab->objects)
			check_object(s, slab, p, SLUB_RED_INACTIVE);
	}

	if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU))
		call_rcu(&slab->rcu_head, rcu_free_slab);
	else
		__free_slab(s, slab);
}

2434
/* Drop a slab from the per-node accounting and free it. */
static void discard_slab(struct kmem_cache *s, struct slab *slab)
{
	dec_slabs_node(s, slab_nid(slab), slab->objects);
	free_slab(s, slab);
}

2440 2441 2442 2443 2444 2445 2446 2447 2448 2449 2450 2451 2452 2453 2454 2455 2456 2457 2458
/*
 * SLUB reuses PG_workingset bit to keep track of whether it's on
 * the per-node partial list.
 */
static inline bool slab_test_node_partial(const struct slab *slab)
{
	return folio_test_workingset((struct folio *)slab_folio(slab));
}

static inline void slab_set_node_partial(struct slab *slab)
{
	set_bit(PG_workingset, folio_flags(slab_folio(slab), 0));
}

static inline void slab_clear_node_partial(struct slab *slab)
{
	clear_bit(PG_workingset, folio_flags(slab_folio(slab), 0));
}

Christoph Lameter's avatar
Christoph Lameter committed
2459
/*
 * Management of partially allocated slabs.
 */

/* Put @slab on n->partial (head or tail per @tail) and mark it as listed. */
static inline void
__add_partial(struct kmem_cache_node *n, struct slab *slab, int tail)
{
	n->nr_partial++;
	if (tail == DEACTIVATE_TO_TAIL)
		list_add_tail(&slab->slab_list, &n->partial);
	else
		list_add(&slab->slab_list, &n->partial);
	slab_set_node_partial(slab);
}

2473
/* Locked variant of __add_partial(); caller must hold n->list_lock. */
static inline void add_partial(struct kmem_cache_node *n,
				struct slab *slab, int tail)
{
	lockdep_assert_held(&n->list_lock);
	__add_partial(n, slab, tail);
}
2479

2480
/* Take @slab off n->partial; caller must hold n->list_lock. */
static inline void remove_partial(struct kmem_cache_node *n,
					struct slab *slab)
{
	lockdep_assert_held(&n->list_lock);
	list_del(&slab->slab_list);
	slab_clear_node_partial(slab);
	n->nr_partial--;
}

2489
/*
 * Called only for kmem_cache_debug() caches instead of remove_partial(), with a
 * slab from the n->partial list. Remove only a single object from the slab, do
 * the alloc_debug_processing() checks and leave the slab on the list, or move
 * it to full list if it was the last free object.
 */
static void *alloc_single_from_partial(struct kmem_cache *s,
		struct kmem_cache_node *n, struct slab *slab, int orig_size)
{
	void *object;

	lockdep_assert_held(&n->list_lock);

	object = slab->freelist;
	slab->freelist = get_freepointer(s, object);
	slab->inuse++;

	/* Debug checks failed: take the corrupted slab out of circulation. */
	if (!alloc_debug_processing(s, slab, object, orig_size)) {
		remove_partial(n, slab);
		return NULL;
	}

	if (slab->inuse == slab->objects) {
		remove_partial(n, slab);
		add_full(s, n, slab);
	}

	return object;
}

/*
 * Called only for kmem_cache_debug() caches to allocate from a freshly
 * allocated slab. Allocate a single object instead of whole freelist
 * and put the slab to the partial (or full) list.
 */
static void *alloc_single_from_new_slab(struct kmem_cache *s,
					struct slab *slab, int orig_size)
{
	int nid = slab_nid(slab);
	struct kmem_cache_node *n = get_node(s, nid);
	unsigned long flags;
	void *object;

	object = slab->freelist;
	slab->freelist = get_freepointer(s, object);
	slab->inuse = 1;

	if (!alloc_debug_processing(s, slab, object, orig_size))
		/*
		 * It's not really expected that this would fail on a
		 * freshly allocated slab, but a concurrent memory
		 * corruption in theory could cause that.
		 */
		return NULL;

	spin_lock_irqsave(&n->list_lock, flags);

	if (slab->inuse == slab->objects)
		add_full(s, n, slab);
	else
		add_partial(n, slab, DEACTIVATE_TO_HEAD);

	inc_slabs_node(s, nid, slab->objects);
	spin_unlock_irqrestore(&n->list_lock, flags);

	return object;
}

2558
/* Forward declarations; definitions follow later in the file. */
#ifdef CONFIG_SLUB_CPU_PARTIAL
static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain);
#else
static inline void put_cpu_partial(struct kmem_cache *s, struct slab *slab,
				   int drain) { }
#endif
static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags);
2565

Christoph Lameter's avatar
Christoph Lameter committed
2566
/*
 * Try to allocate a partial slab from a specific node.
 */
static struct slab *get_partial_node(struct kmem_cache *s,
				     struct kmem_cache_node *n,
				     struct partial_context *pc)
{
	struct slab *slab, *slab2, *partial = NULL;
	unsigned long flags;
	unsigned int partial_slabs = 0;

	/*
	 * Racy check. If we mistakenly see no partial slabs then we
	 * just allocate an empty slab. If we mistakenly try to get a
	 * partial slab and there is none available then get_partial()
	 * will return NULL.
	 */
	if (!n || !n->nr_partial)
		return NULL;

	spin_lock_irqsave(&n->list_lock, flags);
	list_for_each_entry_safe(slab, slab2, &n->partial, slab_list) {
		/* Keep pfmemalloc slabs for allocations allowed to use them. */
		if (!pfmemalloc_match(slab, pc->flags))
			continue;

		/* Debug/tiny caches hand out one object and keep the slab listed. */
		if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) {
			void *object = alloc_single_from_partial(s, n, slab,
							pc->orig_size);
			if (object) {
				partial = slab;
				pc->object = object;
				break;
			}
			continue;
		}

		remove_partial(n, slab);

		if (!partial) {
			/* First slab found is returned to the caller. */
			partial = slab;
			stat(s, ALLOC_FROM_PARTIAL);
		} else {
			/* Extra slabs refill the per-cpu partial list. */
			put_cpu_partial(s, slab, 0);
			stat(s, CPU_PARTIAL_NODE);
			partial_slabs++;
		}
#ifdef CONFIG_SLUB_CPU_PARTIAL
		if (!kmem_cache_has_cpu_partial(s)
			|| partial_slabs > s->cpu_partial_slabs / 2)
			break;
#else
		break;
#endif

	}
	spin_unlock_irqrestore(&n->list_lock, flags);
	return partial;
}

/*
 * Get a slab from somewhere. Search in increasing NUMA distances.
 */
static struct slab *get_any_partial(struct kmem_cache *s,
				    struct partial_context *pc)
{
#ifdef CONFIG_NUMA
	struct zonelist *zonelist;
	struct zoneref *z;
	struct zone *zone;
	enum zone_type highest_zoneidx = gfp_zone(pc->flags);
	struct slab *slab;
	unsigned int cpuset_mems_cookie;

	/*
	 * The defrag ratio allows a configuration of the tradeoffs between
	 * inter node defragmentation and node local allocations. A lower
	 * defrag_ratio increases the tendency to do local allocations
	 * instead of attempting to obtain partial slabs from other nodes.
	 *
	 * If the defrag_ratio is set to 0 then kmalloc() always
	 * returns node local objects. If the ratio is higher then kmalloc()
	 * may return off node objects because partial slabs are obtained
	 * from other nodes and filled up.
	 *
	 * If /sys/kernel/slab/xx/remote_node_defrag_ratio is set to 100
	 * (which makes defrag_ratio = 1000) then every (well almost)
	 * allocation will first attempt to defrag slab caches on other nodes.
	 * This means scanning over all nodes to look for partial slabs which
	 * may be expensive if we do it every time we are trying to find a slab
	 * with available objects.
	 */
	if (!s->remote_node_defrag_ratio ||
			get_cycles() % 1024 > s->remote_node_defrag_ratio)
		return NULL;

	do {
		cpuset_mems_cookie = read_mems_allowed_begin();
		zonelist = node_zonelist(mempolicy_slab_node(), pc->flags);
		for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {
			struct kmem_cache_node *n;

			n = get_node(s, zone_to_nid(zone));

			/* Only steal from nodes with a surplus of partial slabs. */
			if (n && cpuset_zone_allowed(zone, pc->flags) &&
					n->nr_partial > s->min_partial) {
				slab = get_partial_node(s, n, pc);
				if (slab) {
					/*
					 * Don't check read_mems_allowed_retry()
					 * here - if mems_allowed was updated in
					 * parallel, that was a harmless race
					 * between allocation and the cpuset
					 * update
					 */
					return slab;
				}
			}
		}
	} while (read_mems_allowed_retry(cpuset_mems_cookie));
#endif	/* CONFIG_NUMA */
	return NULL;
}

/*
 * Get a partial slab, lock it and return it.
 */
static struct slab *get_partial(struct kmem_cache *s, int node,
				struct partial_context *pc)
{
	struct slab *slab;
	int searchnode = node;

	if (node == NUMA_NO_NODE)
		searchnode = numa_mem_id();

	slab = get_partial_node(s, get_node(s, searchnode), pc);
	/* An explicit node request must not fall back to other nodes. */
	if (slab || node != NUMA_NO_NODE)
		return slab;

	return get_any_partial(s, pc);
}

2708 2709
#ifndef CONFIG_SLUB_TINY

#ifdef CONFIG_PREEMPTION
/*
 * Calculate the next globally unique transaction for disambiguation
 * during cmpxchg. The transactions start with the cpu number and are then
 * incremented by CONFIG_NR_CPUS.
 */
#define TID_STEP  roundup_pow_of_two(CONFIG_NR_CPUS)
#else
/*
 * No preemption supported therefore also no need to check for
 * different cpus.
 */
#define TID_STEP 1
#endif /* CONFIG_PREEMPTION */

static inline unsigned long next_tid(unsigned long tid)
{
	return tid + TID_STEP;
}

#ifdef SLUB_DEBUG_CMPXCHG
/* Decompose a tid back into its cpu and per-cpu event counter. */
static inline unsigned int tid_to_cpu(unsigned long tid)
{
	return tid % TID_STEP;
}

static inline unsigned long tid_to_event(unsigned long tid)
{
	return tid / TID_STEP;
}
#endif

static inline unsigned int init_tid(int cpu)
{
	return cpu;
}

/*
 * Record a failed this-cpu cmpxchg in statistics and, with
 * SLUB_DEBUG_CMPXCHG, report why it likely failed.
 */
static inline void note_cmpxchg_failure(const char *n,
		const struct kmem_cache *s, unsigned long tid)
{
#ifdef SLUB_DEBUG_CMPXCHG
	unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid);

	pr_info("%s %s: cmpxchg redo ", n, s->name);

#ifdef CONFIG_PREEMPTION
	if (tid_to_cpu(tid) != tid_to_cpu(actual_tid))
		pr_warn("due to cpu change %d -> %d\n",
			tid_to_cpu(tid), tid_to_cpu(actual_tid));
	else
#endif
	if (tid_to_event(tid) != tid_to_event(actual_tid))
		pr_warn("due to cpu running other code. Event %ld->%ld\n",
			tid_to_event(tid), tid_to_event(actual_tid));
	else
		pr_warn("for unknown reason: actual=%lx was=%lx target=%lx\n",
			actual_tid, tid, next_tid(tid));
#endif
	stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
}

2771
/* Initialize each possible cpu's kmem_cache_cpu: local lock and initial tid. */
static void init_kmem_cache_cpus(struct kmem_cache *s)
{
	int cpu;
	struct kmem_cache_cpu *c;

	for_each_possible_cpu(cpu) {
		c = per_cpu_ptr(s->cpu_slab, cpu);
		local_lock_init(&c->lock);
		c->tid = init_tid(cpu);
	}
}
2782

Christoph Lameter's avatar
Christoph Lameter committed
2783
/*
2784
 * Finishes removing the cpu slab. Merges cpu's freelist with slab's freelist,
2785 2786 2787
 * unfreezes the slabs and puts it on the proper list.
 * Assumes the slab has been already safely taken away from kmem_cache_cpu
 * by the caller.
Christoph Lameter's avatar
Christoph Lameter committed
2788
 */
2789
static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
2790
			    void *freelist)
Christoph Lameter's avatar
Christoph Lameter committed
2791
{
2792
	struct kmem_cache_node *n = get_node(s, slab_nid(slab));
2793
	int free_delta = 0;
2794
	void *nextfree, *freelist_iter, *freelist_tail;
2795
	int tail = DEACTIVATE_TO_HEAD;
2796
	unsigned long flags = 0;
2797 2798
	struct slab new;
	struct slab old;
2799

2800
	if (slab->freelist) {
2801
		stat(s, DEACTIVATE_REMOTE_FREES);
2802
		tail = DEACTIVATE_TO_TAIL;
2803 2804
	}

2805
	/*
2806 2807
	 * Stage one: Count the objects on cpu's freelist as free_delta and
	 * remember the last object in freelist_tail for later splicing.
2808
	 */
2809 2810 2811 2812
	freelist_tail = NULL;
	freelist_iter = freelist;
	while (freelist_iter) {
		nextfree = get_freepointer(s, freelist_iter);
2813

2814 2815
		/*
		 * If 'nextfree' is invalid, it is possible that the object at
2816 2817
		 * 'freelist_iter' is already corrupted.  So isolate all objects
		 * starting at 'freelist_iter' by skipping them.
2818
		 */
2819
		if (freelist_corrupted(s, slab, &freelist_iter, nextfree))
2820 2821
			break;

2822 2823
		freelist_tail = freelist_iter;
		free_delta++;
2824

2825
		freelist_iter = nextfree;
2826 2827
	}

2828
	/*
2829 2830
	 * Stage two: Unfreeze the slab while splicing the per-cpu
	 * freelist to the head of slab's freelist.
2831
	 */
2832 2833 2834 2835 2836 2837 2838 2839 2840 2841 2842 2843 2844 2845 2846 2847 2848 2849 2850
	do {
		old.freelist = READ_ONCE(slab->freelist);
		old.counters = READ_ONCE(slab->counters);
		VM_BUG_ON(!old.frozen);

		/* Determine target state of the slab */
		new.counters = old.counters;
		new.frozen = 0;
		if (freelist_tail) {
			new.inuse -= free_delta;
			set_freepointer(s, freelist_tail, old.freelist);
			new.freelist = freelist;
		} else {
			new.freelist = old.freelist;
		}
	} while (!slab_update_freelist(s, slab,
		old.freelist, old.counters,
		new.freelist, new.counters,
		"unfreezing slab"));
2851

2852 2853 2854
	/*
	 * Stage three: Manipulate the slab list based on the updated state.
	 */
2855
	if (!new.inuse && n->nr_partial >= s->min_partial) {
2856 2857 2858
		stat(s, DEACTIVATE_EMPTY);
		discard_slab(s, slab);
		stat(s, FREE_SLAB);
2859 2860 2861 2862
	} else if (new.freelist) {
		spin_lock_irqsave(&n->list_lock, flags);
		add_partial(n, slab, tail);
		spin_unlock_irqrestore(&n->list_lock, flags);
2863
		stat(s, tail);
2864
	} else {
2865
		stat(s, DEACTIVATE_FULL);
2866
	}
Christoph Lameter's avatar
Christoph Lameter committed
2867 2868
}

#ifdef CONFIG_SLUB_CPU_PARTIAL
/*
 * Move a list of cpu partial slabs onto their nodes' partial lists,
 * discarding any empty slab whose node already has enough partials.
 * Node list_locks are taken per-node, not per-slab, to batch the work.
 */
static void __put_partials(struct kmem_cache *s, struct slab *partial_slab)
{
	struct kmem_cache_node *n = NULL, *n2 = NULL;
	struct slab *slab, *slab_to_discard = NULL;
	unsigned long flags = 0;

	while (partial_slab) {
		slab = partial_slab;
		partial_slab = slab->next;

		n2 = get_node(s, slab_nid(slab));
		if (n != n2) {
			if (n)
				spin_unlock_irqrestore(&n->list_lock, flags);

			n = n2;
			spin_lock_irqsave(&n->list_lock, flags);
		}

		if (unlikely(!slab->inuse && n->nr_partial >= s->min_partial)) {
			slab->next = slab_to_discard;
			slab_to_discard = slab;
		} else {
			add_partial(n, slab, DEACTIVATE_TO_TAIL);
			stat(s, FREE_ADD_PARTIAL);
		}
	}

	if (n)
		spin_unlock_irqrestore(&n->list_lock, flags);

	/* Discard collected empty slabs outside of any list_lock. */
	while (slab_to_discard) {
		slab = slab_to_discard;
		slab_to_discard = slab_to_discard->next;

		stat(s, DEACTIVATE_EMPTY);
		discard_slab(s, slab);
		stat(s, FREE_SLAB);
	}
}

2911
/*
2912
 * Put all the cpu partial slabs to the node partial list.
2913
 */
2914
static void put_partials(struct kmem_cache *s)
2915
{
2916
	struct slab *partial_slab;
2917 2918
	unsigned long flags;

2919
	local_lock_irqsave(&s->cpu_slab->lock, flags);
2920
	partial_slab = this_cpu_read(s->cpu_slab->partial);
2921
	this_cpu_write(s->cpu_slab->partial, NULL);
2922
	local_unlock_irqrestore(&s->cpu_slab->lock, flags);
2923

2924
	if (partial_slab)
2925
		__put_partials(s, partial_slab);
2926 2927
}

2928 2929
static void put_partials_cpu(struct kmem_cache *s,
			     struct kmem_cache_cpu *c)
2930
{
2931
	struct slab *partial_slab;
2932

2933
	partial_slab = slub_percpu_partial(c);
2934 2935
	c->partial = NULL;

2936
	if (partial_slab)
2937
		__put_partials(s, partial_slab);
2938 2939 2940
}

/*
2941
 * Put a slab into a partial slab slot if available.
2942 2943 2944 2945
 *
 * If we did not find a slot then simply move all the partials to the
 * per node partial list.
 */
2946
static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain)
2947
{
2948
	struct slab *oldslab;
2949
	struct slab *slab_to_put = NULL;
2950
	unsigned long flags;
2951
	int slabs = 0;
2952

2953
	local_lock_irqsave(&s->cpu_slab->lock, flags);
2954

2955
	oldslab = this_cpu_read(s->cpu_slab->partial);
2956

2957 2958
	if (oldslab) {
		if (drain && oldslab->slabs >= s->cpu_partial_slabs) {
2959 2960 2961 2962 2963
			/*
			 * Partial array is full. Move the existing set to the
			 * per node partial list. Postpone the actual unfreezing
			 * outside of the critical section.
			 */
2964
			slab_to_put = oldslab;
2965
			oldslab = NULL;
2966
		} else {
2967
			slabs = oldslab->slabs;
2968
		}
2969
	}
2970

2971
	slabs++;
2972

2973 2974
	slab->slabs = slabs;
	slab->next = oldslab;
2975

2976
	this_cpu_write(s->cpu_slab->partial, slab);
2977

2978
	local_unlock_irqrestore(&s->cpu_slab->lock, flags);
2979

2980 2981
	if (slab_to_put) {
		__put_partials(s, slab_to_put);
2982 2983
		stat(s, CPU_PARTIAL_DRAIN);
	}
2984 2985
}

#else	/* CONFIG_SLUB_CPU_PARTIAL */

/* No per-cpu partial lists configured: these become no-ops. */
static inline void put_partials(struct kmem_cache *s) { }
static inline void put_partials_cpu(struct kmem_cache *s,
				    struct kmem_cache_cpu *c) { }

#endif	/* CONFIG_SLUB_CPU_PARTIAL */

2994
static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
Christoph Lameter's avatar
Christoph Lameter committed
2995
{
2996
	unsigned long flags;
2997
	struct slab *slab;
2998 2999
	void *freelist;

3000
	local_lock_irqsave(&s->cpu_slab->lock, flags);
3001

3002
	slab = c->slab;
3003
	freelist = c->freelist;
3004

3005
	c->slab = NULL;
3006
	c->freelist = NULL;
3007
	c->tid = next_tid(c->tid);
3008

3009
	local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3010

3011 3012
	if (slab) {
		deactivate_slab(s, slab, freelist);
3013 3014
		stat(s, CPUSLAB_FLUSH);
	}
Christoph Lameter's avatar
Christoph Lameter committed
3015 3016
}

3017
static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
Christoph Lameter's avatar
Christoph Lameter committed
3018
{
3019
	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
3020
	void *freelist = c->freelist;
3021
	struct slab *slab = c->slab;
Christoph Lameter's avatar
Christoph Lameter committed
3022

3023
	c->slab = NULL;
3024 3025 3026
	c->freelist = NULL;
	c->tid = next_tid(c->tid);

3027 3028
	if (slab) {
		deactivate_slab(s, slab, freelist);
3029 3030
		stat(s, CPUSLAB_FLUSH);
	}
3031

3032
	put_partials_cpu(s, c);
Christoph Lameter's avatar
Christoph Lameter committed
3033 3034
}

3035 3036 3037 3038 3039 3040
struct slub_flush_work {
	struct work_struct work;
	struct kmem_cache *s;
	bool skip;
};

3041 3042 3043
/*
 * Flush cpu slab.
 *
3044
 * Called from CPU work handler with migration disabled.
3045
 */
3046
static void flush_cpu_slab(struct work_struct *w)
Christoph Lameter's avatar
Christoph Lameter committed
3047
{
3048 3049 3050 3051 3052 3053 3054 3055
	struct kmem_cache *s;
	struct kmem_cache_cpu *c;
	struct slub_flush_work *sfw;

	sfw = container_of(w, struct slub_flush_work, work);

	s = sfw->s;
	c = this_cpu_ptr(s->cpu_slab);
3056

3057
	if (c->slab)
3058
		flush_slab(s, c);
Christoph Lameter's avatar
Christoph Lameter committed
3059

3060
	put_partials(s);
Christoph Lameter's avatar
Christoph Lameter committed
3061 3062
}

3063
static bool has_cpu_slab(int cpu, struct kmem_cache *s)
3064 3065 3066
{
	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);

3067
	return c->slab || slub_percpu_partial(c);
3068 3069
}

static DEFINE_MUTEX(flush_lock);
static DEFINE_PER_CPU(struct slub_flush_work, slub_flush);

/*
 * Queue a flush work on every online cpu that holds slabs for @s and wait
 * for all of them. Caller must hold cpus_read_lock(); flush_lock serializes
 * concurrent flushers sharing the per-cpu work items.
 */
static void flush_all_cpus_locked(struct kmem_cache *s)
{
	struct slub_flush_work *sfw;
	unsigned int cpu;

	lockdep_assert_cpus_held();
	mutex_lock(&flush_lock);

	for_each_online_cpu(cpu) {
		sfw = &per_cpu(slub_flush, cpu);
		if (!has_cpu_slab(cpu, s)) {
			sfw->skip = true;
			continue;
		}
		INIT_WORK(&sfw->work, flush_cpu_slab);
		sfw->skip = false;
		sfw->s = s;
		queue_work_on(cpu, flushwq, &sfw->work);
	}

	for_each_online_cpu(cpu) {
		sfw = &per_cpu(slub_flush, cpu);
		if (sfw->skip)
			continue;
		flush_work(&sfw->work);
	}

	mutex_unlock(&flush_lock);
}


/* Flush all cpus' cached slabs for @s, taking the cpu hotplug read lock. */
static void flush_all(struct kmem_cache *s)
{
	cpus_read_lock();
	flush_all_cpus_locked(s);
	cpus_read_unlock();
}

3110 3111 3112 3113 3114 3115 3116 3117 3118
/*
 * Use the cpu notifier to insure that the cpu slabs are flushed when
 * necessary.
 */
static int slub_cpu_dead(unsigned int cpu)
{
	struct kmem_cache *s;

	mutex_lock(&slab_mutex);
3119
	list_for_each_entry(s, &slab_caches, list)
3120 3121 3122 3123 3124
		__flush_cpu_slab(s, cpu);
	mutex_unlock(&slab_mutex);
	return 0;
}

#else /* CONFIG_SLUB_TINY */
/* SLUB_TINY has no per-cpu slabs, so flushing is a no-op. */
static inline void flush_all_cpus_locked(struct kmem_cache *s) { }
static inline void flush_all(struct kmem_cache *s) { }
static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) { }
static inline int slub_cpu_dead(unsigned int cpu) { return 0; }
#endif /* CONFIG_SLUB_TINY */

/*
 * Check if the objects in a per cpu structure fit numa
 * locality expectations.
 */
static inline int node_match(struct slab *slab, int node)
{
#ifdef CONFIG_NUMA
	if (node != NUMA_NO_NODE && slab_nid(slab) != node)
		return 0;
#endif
	return 1;
}

#ifdef CONFIG_SLUB_DEBUG
/* Number of free objects in a slab; callback for count_partial(). */
static int count_free(struct slab *slab)
{
	return slab->objects - slab->inuse;
}

3151 3152 3153 3154
static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
{
	return atomic_long_read(&n->total_objects);
}
3155 3156

/* Supports checking bulk free of a constructed freelist */
3157 3158 3159
static inline bool free_debug_processing(struct kmem_cache *s,
	struct slab *slab, void *head, void *tail, int *bulk_cnt,
	unsigned long addr, depot_stack_handle_t handle)
3160
{
3161
	bool checks_ok = false;
3162 3163 3164 3165 3166 3167 3168 3169
	void *object = head;
	int cnt = 0;

	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
		if (!check_slab(s, slab))
			goto out;
	}

3170
	if (slab->inuse < *bulk_cnt) {
3171
		slab_err(s, slab, "Slab has %d allocated objects but %d are to be freed\n",
3172
			 slab->inuse, *bulk_cnt);
3173 3174 3175
		goto out;
	}

3176
next_object:
3177

3178
	if (++cnt > *bulk_cnt)
3179
		goto out_cnt;
3180 3181 3182 3183 3184 3185 3186 3187 3188 3189 3190 3191 3192 3193 3194 3195 3196

	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
		if (!free_consistency_checks(s, slab, object, addr))
			goto out;
	}

	if (s->flags & SLAB_STORE_USER)
		set_track_update(s, object, TRACK_FREE, addr, handle);
	trace(s, slab, object, 0);
	/* Freepointer not overwritten by init_object(), SLAB_POISON moved it */
	init_object(s, object, SLUB_RED_INACTIVE);

	/* Reached end of constructed freelist yet? */
	if (object != tail) {
		object = get_freepointer(s, object);
		goto next_object;
	}
3197
	checks_ok = true;
3198

3199
out_cnt:
3200
	if (cnt != *bulk_cnt) {
3201
		slab_err(s, slab, "Bulk free expected %d objects but found %d\n",
3202 3203
			 *bulk_cnt, cnt);
		*bulk_cnt = cnt;
3204 3205
	}

3206
out:
3207 3208

	if (!checks_ok)
3209
		slab_fix(s, "Object at 0x%p not freed", object);
3210

3211
	return checks_ok;
3212
}
3213 3214
#endif /* CONFIG_SLUB_DEBUG */

#if defined(CONFIG_SLUB_DEBUG) || defined(SLAB_SUPPORTS_SYSFS)
/*
 * Walk a node's partial list under its list_lock, summing get_count()
 * over every slab on the list.
 */
static unsigned long count_partial(struct kmem_cache_node *n,
					int (*get_count)(struct slab *))
{
	unsigned long flags;
	unsigned long x = 0;
	struct slab *slab;

	spin_lock_irqsave(&n->list_lock, flags);
	list_for_each_entry(slab, &n->partial, slab_list)
		x += get_count(slab);
	spin_unlock_irqrestore(&n->list_lock, flags);
	return x;
}
#endif /* CONFIG_SLUB_DEBUG || SLAB_SUPPORTS_SYSFS */
3230

3231
#ifdef CONFIG_SLUB_DEBUG
3232 3233 3234
static noinline void
slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
{
3235 3236
	static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
3237
	int node;
3238
	struct kmem_cache_node *n;
3239

3240 3241 3242
	if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs))
		return;

3243 3244
	pr_warn("SLUB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n",
		nid, gfpflags, &gfpflags);
3245
	pr_warn("  cache: %s, object size: %u, buffer size: %u, default order: %u, min order: %u\n",
3246 3247
		s->name, s->object_size, s->size, oo_order(s->oo),
		oo_order(s->min));
3248

3249
	if (oo_order(s->min) > get_order(s->object_size))
3250
		pr_warn("  %s debugging increased min order, use slab_debug=O to disable.\n",
3251
			s->name);
3252

3253
	for_each_kmem_cache_node(s, node, n) {
3254 3255 3256 3257
		unsigned long nr_slabs;
		unsigned long nr_objs;
		unsigned long nr_free;

3258 3259 3260
		nr_free  = count_partial(n, count_free);
		nr_slabs = node_nr_slabs(n);
		nr_objs  = node_nr_objs(n);
3261

3262
		pr_warn("  node %d: slabs: %ld, objs: %ld, free: %ld\n",
3263 3264 3265
			node, nr_slabs, nr_objs, nr_free);
	}
}
3266 3267 3268 3269
#else /* CONFIG_SLUB_DEBUG */
static inline void
slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid) { }
#endif
3270

3271
static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags)
3272
{
3273
	if (unlikely(slab_test_pfmemalloc(slab)))
3274 3275 3276 3277 3278
		return gfp_pfmemalloc_allowed(gfpflags);

	return true;
}

3279
#ifndef CONFIG_SLUB_TINY
3280 3281 3282 3283 3284 3285 3286 3287 3288 3289 3290 3291
static inline bool
__update_cpu_freelist_fast(struct kmem_cache *s,
			   void *freelist_old, void *freelist_new,
			   unsigned long tid)
{
	freelist_aba_t old = { .freelist = freelist_old, .counter = tid };
	freelist_aba_t new = { .freelist = freelist_new, .counter = next_tid(tid) };

	return this_cpu_try_cmpxchg_freelist(s->cpu_slab->freelist_tid.full,
					     &old.full, new.full);
}

3292
/*
3293 3294
 * Check the slab->freelist and either transfer the freelist to the
 * per cpu freelist or deactivate the slab.
3295
 *
3296
 * The slab is still frozen if the return value is not NULL.
3297
 *
3298
 * If this function returns NULL then the slab has been unfrozen.
3299
 */
3300
static inline void *get_freelist(struct kmem_cache *s, struct slab *slab)
3301
{
3302
	struct slab new;
3303 3304 3305
	unsigned long counters;
	void *freelist;

3306 3307
	lockdep_assert_held(this_cpu_ptr(&s->cpu_slab->lock));

3308
	do {
3309 3310
		freelist = slab->freelist;
		counters = slab->counters;
3311

3312 3313
		new.counters = counters;

3314
		new.inuse = slab->objects;
3315 3316
		new.frozen = freelist != NULL;

3317
	} while (!__slab_update_freelist(s, slab,
3318 3319 3320 3321 3322 3323 3324
		freelist, counters,
		NULL, new.counters,
		"get_freelist"));

	return freelist;
}

3325 3326 3327 3328 3329 3330 3331 3332 3333 3334 3335 3336 3337 3338 3339 3340 3341 3342 3343 3344 3345 3346 3347 3348 3349 3350 3351
/*
 * Freeze the partial slab and return the pointer to the freelist.
 */
static inline void *freeze_slab(struct kmem_cache *s, struct slab *slab)
{
	struct slab new;
	unsigned long counters;
	void *freelist;

	do {
		freelist = slab->freelist;
		counters = slab->counters;

		new.counters = counters;
		VM_BUG_ON(new.frozen);

		new.inuse = slab->objects;
		new.frozen = 1;

	} while (!slab_update_freelist(s, slab,
		freelist, counters,
		NULL, new.counters,
		"freeze_slab"));

	return freelist;
}

Christoph Lameter's avatar
Christoph Lameter committed
3352
/*
3353 3354 3355 3356 3357 3358
 * Slow path. The lockless freelist is empty or we need to perform
 * debugging duties.
 *
 * Processing is still very fast if new objects have been freed to the
 * regular freelist. In that case we simply take over the regular freelist
 * as the lockless freelist and zap the regular freelist.
Christoph Lameter's avatar
Christoph Lameter committed
3359
 *
3360 3361 3362
 * If that is not working then we fall back to the partial lists. We take the
 * first element of the freelist as the object to allocate now and move the
 * rest of the freelist to the lockless freelist.
Christoph Lameter's avatar
Christoph Lameter committed
3363
 *
3364
 * And if we were unable to get a new slab from the partial slab lists then
Christoph Lameter's avatar
Christoph Lameter committed
3365 3366
 * we need to allocate a new slab. This is the slowest path since it involves
 * a call to the page allocator and the setup of a new slab.
3367
 *
3368
 * Version of __slab_alloc to use when we know that preemption is
3369
 * already disabled (which is the case for bulk allocation).
Christoph Lameter's avatar
Christoph Lameter committed
3370
 */
3371
static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
3372
			  unsigned long addr, struct kmem_cache_cpu *c, unsigned int orig_size)
Christoph Lameter's avatar
Christoph Lameter committed
3373
{
3374
	void *freelist;
3375
	struct slab *slab;
3376
	unsigned long flags;
3377
	struct partial_context pc;
Christoph Lameter's avatar
Christoph Lameter committed
3378

3379 3380
	stat(s, ALLOC_SLOWPATH);

3381
reread_slab:
3382

3383 3384
	slab = READ_ONCE(c->slab);
	if (!slab) {
3385 3386 3387 3388 3389
		/*
		 * if the node is not online or has no normal memory, just
		 * ignore the node constraint
		 */
		if (unlikely(node != NUMA_NO_NODE &&
3390
			     !node_isset(node, slab_nodes)))
3391
			node = NUMA_NO_NODE;
Christoph Lameter's avatar
Christoph Lameter committed
3392
		goto new_slab;
3393
	}
3394

3395
	if (unlikely(!node_match(slab, node))) {
3396 3397 3398 3399
		/*
		 * same as above but node_match() being false already
		 * implies node != NUMA_NO_NODE
		 */
3400
		if (!node_isset(node, slab_nodes)) {
3401 3402
			node = NUMA_NO_NODE;
		} else {
3403
			stat(s, ALLOC_NODE_MISMATCH);
3404
			goto deactivate_slab;
3405
		}
3406
	}
Christoph Lameter's avatar
Christoph Lameter committed
3407

3408 3409 3410 3411 3412
	/*
	 * By rights, we should be searching for a slab page that was
	 * PFMEMALLOC but right now, we are losing the pfmemalloc
	 * information when the page leaves the per-cpu allocator
	 */
3413
	if (unlikely(!pfmemalloc_match(slab, gfpflags)))
3414
		goto deactivate_slab;
3415

3416
	/* must check again c->slab in case we got preempted and it changed */
3417
	local_lock_irqsave(&s->cpu_slab->lock, flags);
3418
	if (unlikely(slab != c->slab)) {
3419
		local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3420
		goto reread_slab;
3421
	}
3422 3423
	freelist = c->freelist;
	if (freelist)
3424
		goto load_freelist;
3425

3426
	freelist = get_freelist(s, slab);
Christoph Lameter's avatar
Christoph Lameter committed
3427

3428
	if (!freelist) {
3429
		c->slab = NULL;
3430
		c->tid = next_tid(c->tid);
3431
		local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3432
		stat(s, DEACTIVATE_BYPASS);
3433
		goto new_slab;
3434
	}
Christoph Lameter's avatar
Christoph Lameter committed
3435

3436
	stat(s, ALLOC_REFILL);
Christoph Lameter's avatar
Christoph Lameter committed
3437

3438
load_freelist:
3439

3440
	lockdep_assert_held(this_cpu_ptr(&s->cpu_slab->lock));
3441

3442 3443
	/*
	 * freelist is pointing to the list of objects to be used.
3444 3445
	 * slab is pointing to the slab from which the objects are obtained.
	 * That slab must be frozen for per cpu allocations to work.
3446
	 */
3447
	VM_BUG_ON(!c->slab->frozen);
3448
	c->freelist = get_freepointer(s, freelist);
3449
	c->tid = next_tid(c->tid);
3450
	local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3451
	return freelist;
Christoph Lameter's avatar
Christoph Lameter committed
3452

3453 3454
deactivate_slab:

3455
	local_lock_irqsave(&s->cpu_slab->lock, flags);
3456
	if (slab != c->slab) {
3457
		local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3458
		goto reread_slab;
3459
	}
3460
	freelist = c->freelist;
3461
	c->slab = NULL;
3462
	c->freelist = NULL;
3463
	c->tid = next_tid(c->tid);
3464
	local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3465
	deactivate_slab(s, slab, freelist);
3466

Christoph Lameter's avatar
Christoph Lameter committed
3467
new_slab:
3468

3469 3470
#ifdef CONFIG_SLUB_CPU_PARTIAL
	while (slub_percpu_partial(c)) {
3471
		local_lock_irqsave(&s->cpu_slab->lock, flags);
3472
		if (unlikely(c->slab)) {
3473
			local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3474
			goto reread_slab;
3475
		}
3476
		if (unlikely(!slub_percpu_partial(c))) {
3477
			local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3478 3479
			/* we were preempted and partial list got empty */
			goto new_objects;
3480
		}
3481

3482
		slab = slub_percpu_partial(c);
3483
		slub_set_percpu_partial(c, slab);
3484

3485 3486 3487 3488 3489 3490 3491
		if (likely(node_match(slab, node) &&
			   pfmemalloc_match(slab, gfpflags))) {
			c->slab = slab;
			freelist = get_freelist(s, slab);
			VM_BUG_ON(!freelist);
			stat(s, CPU_PARTIAL_ALLOC);
			goto load_freelist;
3492 3493
		}

3494 3495 3496 3497
		local_unlock_irqrestore(&s->cpu_slab->lock, flags);

		slab->next = NULL;
		__put_partials(s, slab);
Christoph Lameter's avatar
Christoph Lameter committed
3498
	}
3499
#endif
Christoph Lameter's avatar
Christoph Lameter committed
3500

3501 3502
new_objects:

3503 3504
	pc.flags = gfpflags;
	pc.orig_size = orig_size;
3505 3506
	slab = get_partial(s, node, &pc);
	if (slab) {
3507
		if (kmem_cache_debug(s)) {
3508
			freelist = pc.object;
3509 3510 3511 3512 3513 3514 3515 3516 3517 3518 3519
			/*
			 * For debug caches here we had to go through
			 * alloc_single_from_partial() so just store the
			 * tracking info and return the object.
			 */
			if (s->flags & SLAB_STORE_USER)
				set_track(s, freelist, TRACK_ALLOC, addr);

			return freelist;
		}

3520
		freelist = freeze_slab(s, slab);
3521 3522
		goto retry_load_slab;
	}
3523

3524
	slub_put_cpu_ptr(s->cpu_slab);
3525
	slab = new_slab(s, gfpflags, node);
3526
	c = slub_get_cpu_ptr(s->cpu_slab);
3527

3528
	if (unlikely(!slab)) {
3529
		slab_out_of_memory(s, gfpflags, node);
3530
		return NULL;
Christoph Lameter's avatar
Christoph Lameter committed
3531
	}
3532

3533 3534 3535
	stat(s, ALLOC_SLAB);

	if (kmem_cache_debug(s)) {
3536
		freelist = alloc_single_from_new_slab(s, slab, orig_size);
3537 3538 3539 3540 3541 3542 3543 3544 3545 3546

		if (unlikely(!freelist))
			goto new_objects;

		if (s->flags & SLAB_STORE_USER)
			set_track(s, freelist, TRACK_ALLOC, addr);

		return freelist;
	}

3547
	/*
3548
	 * No other reference to the slab yet so we can
3549 3550
	 * muck around with it freely without cmpxchg
	 */
3551 3552
	freelist = slab->freelist;
	slab->freelist = NULL;
3553 3554
	slab->inuse = slab->objects;
	slab->frozen = 1;
3555

3556
	inc_slabs_node(s, slab_nid(slab), slab->objects);
3557

3558
	if (unlikely(!pfmemalloc_match(slab, gfpflags))) {
3559 3560 3561 3562
		/*
		 * For !pfmemalloc_match() case we don't load freelist so that
		 * we don't make further mismatched allocations easier.
		 */
3563 3564 3565
		deactivate_slab(s, slab, get_freepointer(s, freelist));
		return freelist;
	}
3566

3567
retry_load_slab:
3568

3569
	local_lock_irqsave(&s->cpu_slab->lock, flags);
3570
	if (unlikely(c->slab)) {
3571
		void *flush_freelist = c->freelist;
3572
		struct slab *flush_slab = c->slab;
3573

3574
		c->slab = NULL;
3575 3576 3577
		c->freelist = NULL;
		c->tid = next_tid(c->tid);

3578
		local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3579

3580
		deactivate_slab(s, flush_slab, flush_freelist);
3581 3582 3583

		stat(s, CPUSLAB_FLUSH);

3584
		goto retry_load_slab;
3585
	}
3586
	c->slab = slab;
3587

3588
	goto load_freelist;
3589 3590
}

3591
/*
3592 3593 3594
 * A wrapper for ___slab_alloc() for contexts where preemption is not yet
 * disabled. Compensates for possible cpu changes by refetching the per cpu area
 * pointer.
3595 3596
 */
static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
3597
			  unsigned long addr, struct kmem_cache_cpu *c, unsigned int orig_size)
3598 3599 3600
{
	void *p;

3601
#ifdef CONFIG_PREEMPT_COUNT
3602 3603
	/*
	 * We may have been preempted and rescheduled on a different
3604
	 * cpu before disabling preemption. Need to reload cpu area
3605 3606
	 * pointer.
	 */
3607
	c = slub_get_cpu_ptr(s->cpu_slab);
3608 3609
#endif

3610
	p = ___slab_alloc(s, gfpflags, node, addr, c, orig_size);
3611
#ifdef CONFIG_PREEMPT_COUNT
3612
	slub_put_cpu_ptr(s->cpu_slab);
3613
#endif
3614 3615 3616
	return p;
}

3617
static __always_inline void *__slab_alloc_node(struct kmem_cache *s,
3618
		gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
3619
{
3620
	struct kmem_cache_cpu *c;
3621
	struct slab *slab;
3622
	unsigned long tid;
3623
	void *object;
3624

3625 3626 3627 3628 3629 3630
redo:
	/*
	 * Must read kmem_cache cpu data via this cpu ptr. Preemption is
	 * enabled. We may switch back and forth between cpus while
	 * reading from one cpu area. That does not matter as long
	 * as we end up on the original cpu again when doing the cmpxchg.
3631
	 *
3632 3633 3634 3635 3636
	 * We must guarantee that tid and kmem_cache_cpu are retrieved on the
	 * same cpu. We read first the kmem_cache_cpu pointer and use it to read
	 * the tid. If we are preempted and switched to another cpu between the
	 * two reads, it's OK as the two are still associated with the same cpu
	 * and cmpxchg later will validate the cpu.
3637
	 */
3638 3639
	c = raw_cpu_ptr(s->cpu_slab);
	tid = READ_ONCE(c->tid);
3640 3641 3642 3643

	/*
	 * Irqless object alloc/free algorithm used here depends on sequence
	 * of fetching cpu_slab's data. tid should be fetched before anything
3644
	 * on c to guarantee that object and slab associated with previous tid
3645
	 * won't be used with current tid. If we fetch tid first, object and
3646
	 * slab could be one associated with next tid and our alloc/free
3647 3648 3649
	 * request will be failed. In this case, we will retry. So, no problem.
	 */
	barrier();
3650 3651 3652 3653 3654 3655 3656 3657

	/*
	 * The transaction ids are globally unique per cpu and per operation on
	 * a per cpu queue. Thus they can be guarantee that the cmpxchg_double
	 * occurs on the right processor and that there was no operation on the
	 * linked list in between.
	 */

3658
	object = c->freelist;
3659
	slab = c->slab;
3660 3661

	if (!USE_LOCKLESS_FAST_PATH() ||
3662
	    unlikely(!object || !slab || !node_match(slab, node))) {
3663
		object = __slab_alloc(s, gfpflags, node, addr, c, orig_size);
3664
	} else {
3665 3666
		void *next_object = get_freepointer_safe(s, object);

3667
		/*
Lucas De Marchi's avatar
Lucas De Marchi committed
3668
		 * The cmpxchg will only match if there was no additional
3669 3670
		 * operation and if we are on the right processor.
		 *
3671 3672
		 * The cmpxchg does the following atomically (without lock
		 * semantics!)
3673 3674 3675 3676
		 * 1. Relocate first pointer to the current per cpu area.
		 * 2. Verify that tid and freelist have not been changed
		 * 3. If they were not changed replace tid and freelist
		 *
3677 3678 3679
		 * Since this is without lock semantics the protection is only
		 * against code executing on this cpu *not* from access by
		 * other cpus.
3680
		 */
3681
		if (unlikely(!__update_cpu_freelist_fast(s, object, next_object, tid))) {
3682 3683 3684
			note_cmpxchg_failure("slab_alloc", s, tid);
			goto redo;
		}
3685
		prefetch_freepointer(s, next_object);
3686
		stat(s, ALLOC_FASTPATH);
3687
	}
3688

3689 3690
	return object;
}
3691 3692 3693 3694 3695 3696 3697 3698 3699 3700
#else /* CONFIG_SLUB_TINY */
static void *__slab_alloc_node(struct kmem_cache *s,
		gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
{
	struct partial_context pc;
	struct slab *slab;
	void *object;

	pc.flags = gfpflags;
	pc.orig_size = orig_size;
3701
	slab = get_partial(s, node, &pc);
3702

3703 3704
	if (slab)
		return pc.object;
3705 3706 3707 3708 3709 3710 3711 3712 3713 3714 3715 3716

	slab = new_slab(s, gfpflags, node);
	if (unlikely(!slab)) {
		slab_out_of_memory(s, gfpflags, node);
		return NULL;
	}

	object = alloc_single_from_new_slab(s, slab, orig_size);

	return object;
}
#endif /* CONFIG_SLUB_TINY */
3717 3718 3719 3720 3721 3722 3723 3724 3725 3726 3727 3728 3729

/*
 * If the object has been wiped upon free, make sure it's fully initialized by
 * zeroing out freelist pointer.
 */
static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
						   void *obj)
{
	if (unlikely(slab_want_init_on_free(s)) && obj)
		memset((void *)((char *)kasan_reset_tag(obj) + s->offset),
			0, sizeof(void *));
}

3730 3731 3732 3733 3734 3735 3736 3737
noinline int should_failslab(struct kmem_cache *s, gfp_t gfpflags)
{
	if (__should_failslab(s, gfpflags))
		return -ENOMEM;
	return 0;
}
ALLOW_ERROR_INJECTION(should_failslab, ERRNO);

3738 3739 3740 3741 3742
static __fastpath_inline
struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
				       struct list_lru *lru,
				       struct obj_cgroup **objcgp,
				       size_t size, gfp_t flags)
3743 3744 3745 3746 3747
{
	flags &= gfp_allowed_mask;

	might_alloc(flags);

3748
	if (unlikely(should_failslab(s, flags)))
3749 3750
		return NULL;

3751
	if (unlikely(!memcg_slab_pre_alloc_hook(s, lru, objcgp, size, flags)))
3752 3753 3754 3755 3756
		return NULL;

	return s;
}

3757 3758 3759 3760
static __fastpath_inline
void slab_post_alloc_hook(struct kmem_cache *s,	struct obj_cgroup *objcg,
			  gfp_t flags, size_t size, void **p, bool init,
			  unsigned int orig_size)
3761 3762 3763 3764
{
	unsigned int zero_size = s->object_size;
	bool kasan_init = init;
	size_t i;
3765
	gfp_t init_flags = flags & gfp_allowed_mask;
3766 3767 3768 3769 3770 3771 3772 3773 3774 3775 3776 3777 3778 3779

	/*
	 * For kmalloc object, the allocated memory size(object_size) is likely
	 * larger than the requested size(orig_size). If redzone check is
	 * enabled for the extra space, don't zero it, as it will be redzoned
	 * soon. The redzone operation for this extra space could be seen as a
	 * replacement of current poisoning under certain debug option, and
	 * won't break other sanity checks.
	 */
	if (kmem_cache_debug_flags(s, SLAB_STORE_USER | SLAB_RED_ZONE) &&
	    (s->flags & SLAB_KMALLOC))
		zero_size = orig_size;

	/*
3780
	 * When slab_debug is enabled, avoid memory initialization integrated
3781 3782 3783
	 * into KASAN and instead zero out the memory via the memset below with
	 * the proper size. Otherwise, KASAN might overwrite SLUB redzones and
	 * cause false-positive reports. This does not lead to a performance
3784
	 * penalty on production builds, as slab_debug is not intended to be
3785 3786 3787 3788 3789 3790 3791 3792 3793 3794 3795 3796 3797
	 * enabled there.
	 */
	if (__slub_debug_enabled())
		kasan_init = false;

	/*
	 * As memory initialization might be integrated into KASAN,
	 * kasan_slab_alloc and initialization memset must be
	 * kept together to avoid discrepancies in behavior.
	 *
	 * As p[i] might get tagged, memset and kmemleak hook come after KASAN.
	 */
	for (i = 0; i < size; i++) {
3798
		p[i] = kasan_slab_alloc(s, p[i], init_flags, kasan_init);
3799 3800 3801 3802
		if (p[i] && init && (!kasan_init ||
				     !kasan_has_integrated_init()))
			memset(p[i], 0, zero_size);
		kmemleak_alloc_recursive(p[i], s->object_size, 1,
3803 3804
					 s->flags, init_flags);
		kmsan_slab_alloc(s, p[i], init_flags);
3805 3806 3807 3808 3809
	}

	memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
}

3810 3811 3812 3813 3814 3815 3816 3817 3818 3819
/*
 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
 * have the fastpath folded into their functions. So no function call
 * overhead for requests that can be satisfied on the fastpath.
 *
 * The fastpath works by first checking if the lockless freelist can be used.
 * If not then __slab_alloc is called for slow processing.
 *
 * Otherwise we can simply pick the next object from the lockless free list.
 */
3820
static __fastpath_inline void *slab_alloc_node(struct kmem_cache *s, struct list_lru *lru,
3821 3822 3823 3824 3825 3826 3827
		gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
{
	void *object;
	struct obj_cgroup *objcg = NULL;
	bool init = false;

	s = slab_pre_alloc_hook(s, lru, &objcg, 1, gfpflags);
3828
	if (unlikely(!s))
3829 3830 3831 3832 3833 3834 3835 3836
		return NULL;

	object = kfence_alloc(s, orig_size, gfpflags);
	if (unlikely(object))
		goto out;

	object = __slab_alloc_node(s, gfpflags, node, addr, orig_size);

3837
	maybe_wipe_obj_freeptr(s, object);
3838
	init = slab_want_init_on_alloc(gfpflags, s);
3839

3840
out:
3841 3842 3843 3844 3845
	/*
	 * When init equals 'true', like for kzalloc() family, only
	 * @orig_size bytes might be zeroed instead of s->object_size
	 */
	slab_post_alloc_hook(s, objcg, gfpflags, 1, &object, init, orig_size);
3846

3847
	return object;
Christoph Lameter's avatar
Christoph Lameter committed
3848 3849
}

3850
void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
3851
{
3852 3853
	void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE, _RET_IP_,
				    s->object_size);
3854

3855
	trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, NUMA_NO_NODE);
3856 3857

	return ret;
3858
}
Christoph Lameter's avatar
Christoph Lameter committed
3859
EXPORT_SYMBOL(kmem_cache_alloc);
3860

3861 3862
void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
			   gfp_t gfpflags)
Christoph Lameter's avatar
Christoph Lameter committed
3863
{
3864 3865
	void *ret = slab_alloc_node(s, lru, gfpflags, NUMA_NO_NODE, _RET_IP_,
				    s->object_size);
3866

3867
	trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, NUMA_NO_NODE);
3868 3869

	return ret;
Christoph Lameter's avatar
Christoph Lameter committed
3870
}
3871 3872
EXPORT_SYMBOL(kmem_cache_alloc_lru);

3873 3874 3875 3876 3877 3878 3879 3880 3881 3882 3883 3884 3885
/**
 * kmem_cache_alloc_node - Allocate an object on the specified node
 * @s: The cache to allocate from.
 * @gfpflags: See kmalloc().
 * @node: node number of the target node.
 *
 * Identical to kmem_cache_alloc but it will allocate memory on the given
 * node, which can improve the performance for cpu bound structures.
 *
 * Fallback to other node is possible if __GFP_THISNODE is not set.
 *
 * Return: pointer to the new object or %NULL in case of error
 */
Christoph Lameter's avatar
Christoph Lameter committed
3886 3887
void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
{
3888
	void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, s->object_size);
3889

3890
	trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, node);
3891 3892

	return ret;
Christoph Lameter's avatar
Christoph Lameter committed
3893 3894 3895
}
EXPORT_SYMBOL(kmem_cache_alloc_node);

3896 3897 3898 3899 3900 3901
/*
 * To avoid unnecessary overhead, we pass through large allocation requests
 * directly to the page allocator. We use __GFP_COMP, because we will need to
 * know the allocation order to free the pages properly in kfree.
 */
static void *__kmalloc_large_node(size_t size, gfp_t flags, int node)
3902
{
3903
	struct folio *folio;
3904 3905 3906 3907 3908 3909 3910
	void *ptr = NULL;
	unsigned int order = get_order(size);

	if (unlikely(flags & GFP_SLAB_BUG_MASK))
		flags = kmalloc_fix_flags(flags);

	flags |= __GFP_COMP;
3911 3912 3913 3914
	folio = (struct folio *)alloc_pages_node(node, flags, order);
	if (folio) {
		ptr = folio_address(folio);
		lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B,
3915 3916 3917 3918 3919 3920 3921 3922 3923
				      PAGE_SIZE << order);
	}

	ptr = kasan_kmalloc_large(ptr, size, flags);
	/* As ptr might get tagged, call kmemleak hook after KASAN. */
	kmemleak_alloc(ptr, size, 1, flags);
	kmsan_kmalloc_large(ptr, size, flags);

	return ptr;
3924
}
Christoph Lameter's avatar
Christoph Lameter committed
3925

3926
void *kmalloc_large(size_t size, gfp_t flags)
3927
{
3928 3929 3930 3931 3932
	void *ret = __kmalloc_large_node(size, flags, NUMA_NO_NODE);

	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
		      flags, NUMA_NO_NODE);
	return ret;
3933
}
3934
EXPORT_SYMBOL(kmalloc_large);
3935

3936
void *kmalloc_large_node(size_t size, gfp_t flags, int node)
3937
{
3938 3939 3940 3941 3942
	void *ret = __kmalloc_large_node(size, flags, node);

	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
		      flags, node);
	return ret;
3943
}
3944
EXPORT_SYMBOL(kmalloc_large_node);
3945

3946 3947 3948
static __always_inline
void *__do_kmalloc_node(size_t size, gfp_t flags, int node,
			unsigned long caller)
Christoph Lameter's avatar
Christoph Lameter committed
3949
{
3950 3951
	struct kmem_cache *s;
	void *ret;
3952

3953 3954 3955 3956 3957 3958
	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
		ret = __kmalloc_large_node(size, flags, node);
		trace_kmalloc(caller, ret, size,
			      PAGE_SIZE << get_order(size), flags, node);
		return ret;
	}
3959

3960 3961 3962 3963 3964 3965 3966 3967
	if (unlikely(!size))
		return ZERO_SIZE_PTR;

	s = kmalloc_slab(size, flags, caller);

	ret = slab_alloc_node(s, NULL, flags, node, caller, size);
	ret = kasan_kmalloc(s, ret, size, flags);
	trace_kmalloc(caller, ret, size, s->size, flags, node);
3968
	return ret;
Christoph Lameter's avatar
Christoph Lameter committed
3969
}
3970 3971 3972 3973 3974 3975 3976 3977 3978 3979 3980 3981 3982 3983 3984 3985 3986 3987 3988 3989 3990 3991 3992 3993 3994 3995 3996 3997 3998 3999 4000 4001 4002 4003 4004 4005 4006 4007 4008 4009 4010 4011 4012

/* NUMA-aware kmalloc entry point; thin wrapper around __do_kmalloc_node(). */
void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __do_kmalloc_node(size, flags, node, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc_node);

/* Node-agnostic kmalloc entry point; wraps __do_kmalloc_node(). */
void *__kmalloc(size_t size, gfp_t flags)
{
	return __do_kmalloc_node(size, flags, NUMA_NO_NODE, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc);

/*
 * Like __kmalloc_node() but records @caller (instead of _RET_IP_) as the
 * allocation site for tracing/debugging.
 */
void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
				  int node, unsigned long caller)
{
	return __do_kmalloc_node(size, flags, node, caller);
}
EXPORT_SYMBOL(__kmalloc_node_track_caller);

/*
 * Allocation entry used when the kmalloc size-class cache @s is already
 * known; @size is the originally requested size (may be < s->size).
 */
void *kmalloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
{
	void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE,
					    _RET_IP_, size);

	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, NUMA_NO_NODE);

	/* Unpoison only the requested @size bytes of the object. */
	ret = kasan_kmalloc(s, ret, size, gfpflags);
	return ret;
}
EXPORT_SYMBOL(kmalloc_trace);

/* NUMA-aware counterpart of kmalloc_trace(). */
void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
			 int node, size_t size)
{
	void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, size);

	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, node);

	/* Unpoison only the requested @size bytes of the object. */
	ret = kasan_kmalloc(s, ret, size, gfpflags);
	return ret;
}
EXPORT_SYMBOL(kmalloc_node_trace);
Christoph Lameter's avatar
Christoph Lameter committed
4013

4014 4015 4016 4017 4018 4019 4020 4021 4022 4023 4024 4025 4026 4027 4028 4029 4030 4031 4032 4033 4034 4035 4036 4037 4038 4039 4040 4041 4042 4043 4044 4045 4046 4047 4048 4049 4050 4051 4052 4053 4054 4055 4056 4057 4058 4059 4060 4061 4062 4063 4064 4065 4066 4067 4068 4069 4070 4071 4072 4073 4074
/*
 * Free the chain [head..tail] (bulk_cnt objects) of @slab directly onto the
 * slab's own freelist under the node's list_lock, updating the node partial
 * list as needed. Used by __slab_free() for debug caches and SLUB_TINY,
 * where the per-cpu fastpaths are bypassed.
 */
static noinline void free_to_partial_list(
	struct kmem_cache *s, struct slab *slab,
	void *head, void *tail, int bulk_cnt,
	unsigned long addr)
{
	struct kmem_cache_node *n = get_node(s, slab_nid(slab));
	struct slab *slab_free = NULL;
	int cnt = bulk_cnt;
	unsigned long flags;
	depot_stack_handle_t handle = 0;

	/* Capture the free stack trace before taking the lock. */
	if (s->flags & SLAB_STORE_USER)
		handle = set_track_prepare();

	spin_lock_irqsave(&n->list_lock, flags);

	/* cnt may be reduced if debug checks reject some objects. */
	if (free_debug_processing(s, slab, head, tail, &cnt, addr, handle)) {
		void *prior = slab->freelist;

		/* Perform the actual freeing while we still hold the locks */
		slab->inuse -= cnt;
		set_freepointer(s, tail, prior);
		slab->freelist = head;

		/*
		 * If the slab is empty, and node's partial list is full,
		 * it should be discarded anyway no matter it's on full or
		 * partial list.
		 */
		if (slab->inuse == 0 && n->nr_partial >= s->min_partial)
			slab_free = slab;

		if (!prior) {
			/* was on full list */
			remove_full(s, n, slab);
			if (!slab_free) {
				add_partial(n, slab, DEACTIVATE_TO_TAIL);
				stat(s, FREE_ADD_PARTIAL);
			}
		} else if (slab_free) {
			remove_partial(n, slab);
			stat(s, FREE_REMOVE_PARTIAL);
		}
	}

	if (slab_free) {
		/*
		 * Update the counters while still holding n->list_lock to
		 * prevent spurious validation warnings
		 */
		dec_slabs_node(s, slab_nid(slab_free), slab_free->objects);
	}

	spin_unlock_irqrestore(&n->list_lock, flags);

	/* Actually release the empty slab outside the spinlock. */
	if (slab_free) {
		stat(s, FREE_SLAB);
		free_slab(s, slab_free);
	}
}

Christoph Lameter's avatar
Christoph Lameter committed
4075
/*
4076
 * Slow path handling. This may still be called frequently since objects
4077
 * have a longer lifetime than the cpu slabs in most processing loads.
Christoph Lameter's avatar
Christoph Lameter committed
4078
 *
4079
 * So we still attempt to reduce cache line usage. Just take the slab
4080
 * lock and free the item. If there is no additional partial slab
4081
 * handling required then we can return immediately.
Christoph Lameter's avatar
Christoph Lameter committed
4082
 */
4083
static void __slab_free(struct kmem_cache *s, struct slab *slab,
4084 4085 4086
			void *head, void *tail, int cnt,
			unsigned long addr)

Christoph Lameter's avatar
Christoph Lameter committed
4087 4088
{
	void *prior;
4089
	int was_frozen;
4090
	struct slab new;
4091 4092
	unsigned long counters;
	struct kmem_cache_node *n = NULL;
4093
	unsigned long flags;
4094
	bool on_node_partial;
Christoph Lameter's avatar
Christoph Lameter committed
4095

4096
	stat(s, FREE_SLOWPATH);
Christoph Lameter's avatar
Christoph Lameter committed
4097

4098
	if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) {
4099
		free_to_partial_list(s, slab, head, tail, cnt, addr);
4100
		return;
4101
	}
Christoph Lameter's avatar
Christoph Lameter committed
4102

4103
	do {
4104 4105 4106 4107
		if (unlikely(n)) {
			spin_unlock_irqrestore(&n->list_lock, flags);
			n = NULL;
		}
4108 4109
		prior = slab->freelist;
		counters = slab->counters;
4110
		set_freepointer(s, tail, prior);
4111 4112
		new.counters = counters;
		was_frozen = new.frozen;
4113
		new.inuse -= cnt;
4114
		if ((!new.inuse || !prior) && !was_frozen) {
4115 4116
			/* Needs to be taken off a list */
			if (!kmem_cache_has_cpu_partial(s) || prior) {
4117

4118
				n = get_node(s, slab_nid(slab));
4119 4120 4121 4122 4123 4124 4125 4126 4127 4128
				/*
				 * Speculatively acquire the list_lock.
				 * If the cmpxchg does not succeed then we may
				 * drop the list_lock without any processing.
				 *
				 * Otherwise the list_lock will synchronize with
				 * other processors updating the list of slabs.
				 */
				spin_lock_irqsave(&n->list_lock, flags);

4129
				on_node_partial = slab_test_node_partial(slab);
4130
			}
4131
		}
Christoph Lameter's avatar
Christoph Lameter committed
4132

4133
	} while (!slab_update_freelist(s, slab,
4134
		prior, counters,
4135
		head, new.counters,
4136
		"__slab_free"));
Christoph Lameter's avatar
Christoph Lameter committed
4137

4138
	if (likely(!n)) {
4139

4140 4141 4142 4143 4144 4145
		if (likely(was_frozen)) {
			/*
			 * The list lock was not taken therefore no list
			 * activity can be necessary.
			 */
			stat(s, FREE_FROZEN);
4146
		} else if (kmem_cache_has_cpu_partial(s) && !prior) {
4147
			/*
4148
			 * If we started with a full slab then put it onto the
4149 4150
			 * per cpu partial list.
			 */
4151
			put_cpu_partial(s, slab, 1);
4152 4153
			stat(s, CPU_PARTIAL_FREE);
		}
4154

4155 4156
		return;
	}
Christoph Lameter's avatar
Christoph Lameter committed
4157

4158 4159 4160 4161 4162 4163 4164 4165 4166
	/*
	 * This slab was partially empty but not on the per-node partial list,
	 * in which case we shouldn't manipulate its list, just return.
	 */
	if (prior && !on_node_partial) {
		spin_unlock_irqrestore(&n->list_lock, flags);
		return;
	}

4167
	if (unlikely(!new.inuse && n->nr_partial >= s->min_partial))
4168 4169
		goto slab_empty;

Christoph Lameter's avatar
Christoph Lameter committed
4170
	/*
4171 4172
	 * Objects left in the slab. If it was not on the partial list before
	 * then add it.
Christoph Lameter's avatar
Christoph Lameter committed
4173
	 */
4174
	if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
4175
		add_partial(n, slab, DEACTIVATE_TO_TAIL);
4176
		stat(s, FREE_ADD_PARTIAL);
4177
	}
4178
	spin_unlock_irqrestore(&n->list_lock, flags);
Christoph Lameter's avatar
Christoph Lameter committed
4179 4180 4181
	return;

slab_empty:
4182
	if (prior) {
Christoph Lameter's avatar
Christoph Lameter committed
4183
		/*
4184
		 * Slab on the partial list.
Christoph Lameter's avatar
Christoph Lameter committed
4185
		 */
4186
		remove_partial(n, slab);
4187
		stat(s, FREE_REMOVE_PARTIAL);
4188
	}
4189

4190
	spin_unlock_irqrestore(&n->list_lock, flags);
4191
	stat(s, FREE_SLAB);
4192
	discard_slab(s, slab);
Christoph Lameter's avatar
Christoph Lameter committed
4193 4194
}

4195
#ifndef CONFIG_SLUB_TINY
4196 4197 4198 4199 4200 4201 4202 4203 4204 4205
/*
 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
 * can perform fastpath freeing without additional function calls.
 *
 * The fastpath is only possible if we are freeing to the current cpu slab
 * of this processor. This typically the case if we have just allocated
 * the item before.
 *
 * If fastpath is not possible then fall back to __slab_free where we deal
 * with all sorts of special processing.
4206 4207
 *
 * Bulk free of a freelist with several objects (all pointing to the
4208
 * same slab) possible by specifying head and tail ptr, plus objects
4209
 * count (cnt). Bulk free indicated by tail pointer being set.
4210
 */
4211
static __always_inline void do_slab_free(struct kmem_cache *s,
4212
				struct slab *slab, void *head, void *tail,
4213
				int cnt, unsigned long addr)
4214
{
4215
	struct kmem_cache_cpu *c;
4216
	unsigned long tid;
4217
	void **freelist;
4218

4219 4220 4221 4222 4223
redo:
	/*
	 * Determine the currently cpus per cpu slab.
	 * The cpu may change afterward. However that does not matter since
	 * data is retrieved via this pointer. If we are on the same cpu
4224
	 * during the cmpxchg then the free will succeed.
4225
	 */
4226 4227
	c = raw_cpu_ptr(s->cpu_slab);
	tid = READ_ONCE(c->tid);
4228

4229 4230
	/* Same with comment on barrier() in slab_alloc_node() */
	barrier();
4231

4232
	if (unlikely(slab != c->slab)) {
4233
		__slab_free(s, slab, head, tail, cnt, addr);
4234 4235 4236 4237 4238
		return;
	}

	if (USE_LOCKLESS_FAST_PATH()) {
		freelist = READ_ONCE(c->freelist);
4239

4240
		set_freepointer(s, tail, freelist);
4241

4242
		if (unlikely(!__update_cpu_freelist_fast(s, freelist, head, tid))) {
4243 4244 4245
			note_cmpxchg_failure("slab_free", s, tid);
			goto redo;
		}
4246 4247
	} else {
		/* Update the free list under the local lock */
4248 4249
		local_lock(&s->cpu_slab->lock);
		c = this_cpu_ptr(s->cpu_slab);
4250
		if (unlikely(slab != c->slab)) {
4251 4252 4253 4254 4255 4256
			local_unlock(&s->cpu_slab->lock);
			goto redo;
		}
		tid = c->tid;
		freelist = c->freelist;

4257
		set_freepointer(s, tail, freelist);
4258 4259 4260 4261
		c->freelist = head;
		c->tid = next_tid(tid);

		local_unlock(&s->cpu_slab->lock);
4262
	}
4263
	stat_add(s, FREE_FASTPATH, cnt);
4264
}
4265 4266 4267 4268 4269
#else /* CONFIG_SLUB_TINY */
static void do_slab_free(struct kmem_cache *s,
				struct slab *slab, void *head, void *tail,
				int cnt, unsigned long addr)
{
4270
	__slab_free(s, slab, head, tail, cnt, addr);
4271 4272
}
#endif /* CONFIG_SLUB_TINY */
4273

4274 4275 4276 4277 4278 4279
static __fastpath_inline
void slab_free(struct kmem_cache *s, struct slab *slab, void *object,
	       unsigned long addr)
{
	memcg_slab_free_hook(s, slab, &object, 1);

4280
	if (likely(slab_free_hook(s, object, slab_want_init_on_free(s))))
4281 4282 4283 4284 4285 4286
		do_slab_free(s, slab, object, object, 1, addr);
}

/*
 * Free a detached freelist [head..tail] of @cnt objects from @slab,
 * running the memcg and freelist free hooks first.
 */
static __fastpath_inline
void slab_free_bulk(struct kmem_cache *s, struct slab *slab, void *head,
		    void *tail, void **p, int cnt, unsigned long addr)
{
	memcg_slab_free_hook(s, slab, p, cnt);
	/*
	 * With KASAN enabled slab_free_freelist_hook modifies the freelist
	 * to remove objects, whose reuse must be delayed.
	 */
	if (likely(slab_free_freelist_hook(s, &head, &tail, &cnt)))
		do_slab_free(s, slab, head, tail, cnt, addr);
}

#ifdef CONFIG_KASAN_GENERIC
/* KASAN quarantine drain: free @x directly, bypassing the free hooks. */
void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
{
	do_slab_free(cache, virt_to_slab(x), x, x, 1, addr);
}
#endif

4304
static inline struct kmem_cache *virt_to_cache(const void *obj)
4305
{
4306 4307 4308 4309 4310 4311
	struct slab *slab;

	slab = virt_to_slab(obj);
	if (WARN_ONCE(!slab, "%s: Object is not a Slab page!\n", __func__))
		return NULL;
	return slab->slab_cache;
4312 4313
}

4314 4315 4316 4317 4318 4319 4320 4321 4322 4323 4324 4325 4326 4327 4328 4329
/*
 * Validate that object @x really belongs to cache @s and return the cache
 * it actually came from. Without freelist hardening or consistency checks
 * configured, trust the caller and return @s unchanged.
 */
static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;

	if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
	    !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
		return s;

	cachep = virt_to_cache(x);
	/* Warn on a free to the wrong cache and dump tracking info. */
	if (WARN(cachep && cachep != s,
		 "%s: Wrong slab cache. %s but object is from %s\n",
		 __func__, s->name, cachep->name))
		print_tracking(cachep, x);
	return cachep;
}

4330 4331 4332 4333 4334 4335 4336 4337
/**
 * kmem_cache_free - Deallocate an object
 * @s: The cache the allocation was from.
 * @x: The previously allocated object.
 *
 * Free an object which was previously allocated from this
 * cache.
 */
Christoph Lameter's avatar
Christoph Lameter committed
4338 4339
void kmem_cache_free(struct kmem_cache *s, void *x)
{
4340 4341
	s = cache_from_obj(s, x);
	if (!s)
4342
		return;
4343
	trace_kmem_cache_free(_RET_IP_, x, s);
4344
	slab_free(s, virt_to_slab(x), x, _RET_IP_);
Christoph Lameter's avatar
Christoph Lameter committed
4345 4346 4347
}
EXPORT_SYMBOL(kmem_cache_free);

4348 4349 4350 4351 4352 4353 4354 4355 4356 4357 4358
static void free_large_kmalloc(struct folio *folio, void *object)
{
	unsigned int order = folio_order(folio);

	if (WARN_ON_ONCE(order == 0))
		pr_warn_once("object pointer: 0x%p\n", object);

	kmemleak_free(object);
	kasan_kfree_large(object);
	kmsan_kfree_large(object);

4359
	lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B,
4360
			      -(PAGE_SIZE << order));
4361
	folio_put(folio);
4362 4363 4364 4365 4366 4367 4368 4369 4370 4371 4372 4373 4374 4375 4376 4377 4378 4379 4380 4381 4382 4383 4384 4385 4386 4387 4388 4389
}

/**
 * kfree - free previously allocated memory
 * @object: pointer returned by kmalloc() or kmem_cache_alloc()
 *
 * If @object is NULL, no operation is performed.
 */
void kfree(const void *object)
{
	struct folio *folio;
	struct slab *slab;
	struct kmem_cache *s;
	void *x = (void *)object;

	trace_kfree(_RET_IP_, object);

	if (unlikely(ZERO_OR_NULL_PTR(object)))
		return;

	folio = virt_to_folio(object);
	/* Non-slab folio: allocation came straight from the page allocator. */
	if (unlikely(!folio_test_slab(folio))) {
		free_large_kmalloc(folio, (void *)object);
		return;
	}

	slab = folio_slab(folio);
	s = slab->slab_cache;
	slab_free(s, slab, x, _RET_IP_);
}
EXPORT_SYMBOL(kfree);

/*
 * A freelist detached from its slab by build_detached_freelist():
 * a singly linked chain of @cnt objects from @slab, from @freelist
 * (head) to @tail, belonging to cache @s.
 */
struct detached_freelist {
	struct slab *slab;
	void *tail;
	void *freelist;
	int cnt;
	struct kmem_cache *s;
};

4402 4403 4404
/*
 * This function progressively scans the array with free objects (with
 * a limited look ahead) and extract objects belonging to the same
4405 4406
 * slab.  It builds a detached freelist directly within the given
 * slab/objects.  This can happen without any need for
4407 4408 4409 4410 4411 4412 4413
 * synchronization, because the objects are owned by running process.
 * The freelist is build up as a single linked list in the objects.
 * The idea is, that this detached freelist can then be bulk
 * transferred to the real freelist(s), but only requiring a single
 * synchronization primitive.  Look ahead in the array is limited due
 * to performance reasons.
 */
4414 4415 4416
static inline
int build_detached_freelist(struct kmem_cache *s, size_t size,
			    void **p, struct detached_freelist *df)
4417 4418 4419
{
	int lookahead = 3;
	void *object;
4420
	struct folio *folio;
4421
	size_t same;
4422

4423
	object = p[--size];
4424
	folio = virt_to_folio(object);
4425 4426
	if (!s) {
		/* Handle kalloc'ed objects */
4427
		if (unlikely(!folio_test_slab(folio))) {
4428
			free_large_kmalloc(folio, object);
4429
			df->slab = NULL;
4430 4431 4432
			return size;
		}
		/* Derive kmem_cache from object */
4433 4434
		df->slab = folio_slab(folio);
		df->s = df->slab->slab_cache;
4435
	} else {
4436
		df->slab = folio_slab(folio);
4437 4438
		df->s = cache_from_obj(s, object); /* Support for memcg */
	}
4439

4440 4441 4442 4443 4444
	/* Start new detached freelist */
	df->tail = object;
	df->freelist = object;
	df->cnt = 1;

4445 4446 4447 4448 4449 4450
	if (is_kfence_address(object))
		return size;

	set_freepointer(df->s, object, NULL);

	same = size;
4451 4452
	while (size) {
		object = p[--size];
4453 4454
		/* df->slab is always set at this point */
		if (df->slab == virt_to_slab(object)) {
4455
			/* Opportunity build freelist */
4456
			set_freepointer(df->s, object, df->freelist);
4457 4458
			df->freelist = object;
			df->cnt++;
4459 4460 4461
			same--;
			if (size != same)
				swap(p[size], p[same]);
4462
			continue;
4463
		}
4464 4465 4466 4467

		/* Limit look ahead search */
		if (!--lookahead)
			break;
4468
	}
4469

4470
	return same;
4471 4472
}

4473 4474 4475 4476 4477 4478 4479 4480 4481 4482 4483 4484 4485 4486 4487 4488 4489 4490 4491 4492 4493
/*
 * Internal bulk free of objects that were not initialised by the post alloc
 * hooks and thus should not be processed by the free hooks
 */
static void __kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
{
	if (!size)
		return;

	do {
		struct detached_freelist df;

		/* Peel off a run of objects sharing one slab, then free it. */
		size = build_detached_freelist(s, size, p, &df);
		if (!df.slab)
			continue;

		do_slab_free(df.s, df.slab, df.freelist, df.tail, df.cnt,
			     _RET_IP_);
	} while (likely(size));
}

4494
/* Note that interrupts must be enabled when calling this function. */
4495
void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
4496
{
4497
	if (!size)
4498 4499 4500 4501 4502 4503
		return;

	do {
		struct detached_freelist df;

		size = build_detached_freelist(s, size, p, &df);
4504
		if (!df.slab)
4505 4506
			continue;

4507 4508
		slab_free_bulk(df.s, df.slab, df.freelist, df.tail, &p[size],
			       df.cnt, _RET_IP_);
4509
	} while (likely(size));
4510 4511 4512
}
EXPORT_SYMBOL(kmem_cache_free_bulk);

4513
#ifndef CONFIG_SLUB_TINY
4514 4515 4516
static inline
int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
			    void **p)
4517
{
4518
	struct kmem_cache_cpu *c;
4519
	unsigned long irqflags;
4520 4521 4522 4523 4524 4525 4526
	int i;

	/*
	 * Drain objects in the per cpu slab, while disabling local
	 * IRQs, which protects against PREEMPT and interrupts
	 * handlers invoking normal fastpath.
	 */
4527
	c = slub_get_cpu_ptr(s->cpu_slab);
4528
	local_lock_irqsave(&s->cpu_slab->lock, irqflags);
4529 4530

	for (i = 0; i < size; i++) {
4531
		void *object = kfence_alloc(s, s->object_size, flags);
4532

4533 4534 4535 4536 4537 4538
		if (unlikely(object)) {
			p[i] = object;
			continue;
		}

		object = c->freelist;
4539
		if (unlikely(!object)) {
4540 4541 4542 4543 4544 4545 4546 4547 4548
			/*
			 * We may have removed an object from c->freelist using
			 * the fastpath in the previous iteration; in that case,
			 * c->tid has not been bumped yet.
			 * Since ___slab_alloc() may reenable interrupts while
			 * allocating memory, we should bump c->tid now.
			 */
			c->tid = next_tid(c->tid);

4549
			local_unlock_irqrestore(&s->cpu_slab->lock, irqflags);
4550

4551 4552 4553 4554
			/*
			 * Invoking slow path likely have side-effect
			 * of re-populating per CPU c->freelist
			 */
4555
			p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
4556
					    _RET_IP_, c, s->object_size);
4557 4558 4559
			if (unlikely(!p[i]))
				goto error;

4560
			c = this_cpu_ptr(s->cpu_slab);
4561 4562
			maybe_wipe_obj_freeptr(s, p[i]);

4563
			local_lock_irqsave(&s->cpu_slab->lock, irqflags);
4564

4565 4566
			continue; /* goto for-loop */
		}
4567 4568
		c->freelist = get_freepointer(s, object);
		p[i] = object;
4569
		maybe_wipe_obj_freeptr(s, p[i]);
4570
		stat(s, ALLOC_FASTPATH);
4571 4572
	}
	c->tid = next_tid(c->tid);
4573
	local_unlock_irqrestore(&s->cpu_slab->lock, irqflags);
4574
	slub_put_cpu_ptr(s->cpu_slab);
4575

4576
	return i;
4577

4578
error:
4579
	slub_put_cpu_ptr(s->cpu_slab);
4580
	__kmem_cache_free_bulk(s, i, p);
4581
	return 0;
4582 4583

}
4584 4585
#else /* CONFIG_SLUB_TINY */
static int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
4586
				   size_t size, void **p)
4587 4588 4589 4590 4591 4592 4593 4594 4595 4596 4597 4598 4599 4600 4601 4602 4603 4604 4605 4606 4607 4608
{
	int i;

	for (i = 0; i < size; i++) {
		void *object = kfence_alloc(s, s->object_size, flags);

		if (unlikely(object)) {
			p[i] = object;
			continue;
		}

		p[i] = __slab_alloc_node(s, flags, NUMA_NO_NODE,
					 _RET_IP_, s->object_size);
		if (unlikely(!p[i]))
			goto error;

		maybe_wipe_obj_freeptr(s, p[i]);
	}

	return i;

error:
4609
	__kmem_cache_free_bulk(s, i, p);
4610 4611 4612
	return 0;
}
#endif /* CONFIG_SLUB_TINY */
4613 4614 4615 4616 4617 4618 4619 4620 4621 4622 4623 4624 4625 4626 4627 4628

/* Note that interrupts must be enabled when calling this function. */
int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
			  void **p)
{
	int i;
	struct obj_cgroup *objcg = NULL;

	if (!size)
		return 0;

	/* memcg and kmem_cache debug support */
	s = slab_pre_alloc_hook(s, NULL, &objcg, size, flags);
	if (unlikely(!s))
		return 0;

	i = __kmem_cache_alloc_bulk(s, flags, size, p);

	/*
	 * memcg and kmem_cache debug support and memory initialization.
	 * Done outside of the IRQ disabled fastpath loop.
	 */
	if (likely(i != 0)) {
		slab_post_alloc_hook(s, objcg, flags, size, p,
			slab_want_init_on_alloc(flags, s), s->object_size);
	} else {
		/* Nothing allocated: undo the pre-alloc memcg charge. */
		memcg_slab_alloc_error_hook(s, size, objcg);
	}

	return i;
}
EXPORT_SYMBOL(kmem_cache_alloc_bulk);


Christoph Lameter's avatar
Christoph Lameter committed
4647
/*
Christoph Lameter's avatar
Christoph Lameter committed
4648 4649 4650 4651
 * Object placement in a slab is made very easy because we always start at
 * offset 0. If we tune the size of the object to the alignment then we can
 * get the required alignment by putting one properly sized object after
 * another.
Christoph Lameter's avatar
Christoph Lameter committed
4652 4653 4654 4655
 *
 * Notice that the allocation order determines the sizes of the per cpu
 * caches. Each processor has always one slab available for allocations.
 * Increasing the allocation order reduces the number of times that slabs
Christoph Lameter's avatar
Christoph Lameter committed
4656
 * must be moved on and off the partial lists and is therefore a factor in
Christoph Lameter's avatar
Christoph Lameter committed
4657 4658 4659 4660
 * locking overhead.
 */

/*
Ingo Molnar's avatar
Ingo Molnar committed
4661
 * Minimum / Maximum order of slab pages. This influences locking overhead
Christoph Lameter's avatar
Christoph Lameter committed
4662 4663 4664 4665
 * and slab fragmentation. A higher order reduces the number of partial slabs
 * and increases the number of allocations possible without having to
 * take the list_lock.
 */
4666
static unsigned int slub_min_order;
4667 4668
static unsigned int slub_max_order =
	IS_ENABLED(CONFIG_SLUB_TINY) ? 1 : PAGE_ALLOC_COSTLY_ORDER;
4669
static unsigned int slub_min_objects;
Christoph Lameter's avatar
Christoph Lameter committed
4670 4671 4672 4673

/*
 * Calculate the order of allocation given an slab object size.
 *
Christoph Lameter's avatar
Christoph Lameter committed
4674 4675 4676 4677
 * The order of allocation has significant impact on performance and other
 * system components. Generally order 0 allocations should be preferred since
 * order 0 does not cause fragmentation in the page allocator. Larger objects
 * be problematic to put into order 0 slabs because there may be too much
4678
 * unused space left. We go to a higher order if more than 1/16th of the slab
Christoph Lameter's avatar
Christoph Lameter committed
4679 4680 4681 4682 4683 4684
 * would be wasted.
 *
 * In order to reach satisfactory performance we must ensure that a minimum
 * number of objects is in one slab. Otherwise we may generate too much
 * activity on the partial lists which requires taking the list_lock. This is
 * less a concern for large slabs though which are rarely used.
Christoph Lameter's avatar
Christoph Lameter committed
4685
 *
4686 4687
 * slab_max_order specifies the order where we begin to stop considering the
 * number of objects in a slab as critical. If we reach slab_max_order then
Christoph Lameter's avatar
Christoph Lameter committed
4688 4689
 * we try to keep the page order as low as possible. So we accept more waste
 * of space in favor of a small page order.
Christoph Lameter's avatar
Christoph Lameter committed
4690
 *
Christoph Lameter's avatar
Christoph Lameter committed
4691 4692
 * Higher order allocations also allow the placement of more objects in a
 * slab and thereby reduce object handling overhead. If the user has
4693
 * requested a higher minimum order then we start with that one instead of
Christoph Lameter's avatar
Christoph Lameter committed
4694
 * the smallest order which will fit the object.
Christoph Lameter's avatar
Christoph Lameter committed
4695
 */
4696
static inline unsigned int calc_slab_order(unsigned int size,
4697
		unsigned int min_order, unsigned int max_order,
4698
		unsigned int fract_leftover)
Christoph Lameter's avatar
Christoph Lameter committed
4699
{
4700
	unsigned int order;
Christoph Lameter's avatar
Christoph Lameter committed
4701

4702
	for (order = min_order; order <= max_order; order++) {
Christoph Lameter's avatar
Christoph Lameter committed
4703

4704 4705
		unsigned int slab_size = (unsigned int)PAGE_SIZE << order;
		unsigned int rem;
Christoph Lameter's avatar
Christoph Lameter committed
4706

4707
		rem = slab_size % size;
Christoph Lameter's avatar
Christoph Lameter committed
4708

4709
		if (rem <= slab_size / fract_leftover)
Christoph Lameter's avatar
Christoph Lameter committed
4710 4711
			break;
	}
Christoph Lameter's avatar
Christoph Lameter committed
4712

Christoph Lameter's avatar
Christoph Lameter committed
4713 4714 4715
	return order;
}

4716
static inline int calculate_order(unsigned int size)
4717
{
4718 4719 4720
	unsigned int order;
	unsigned int min_objects;
	unsigned int max_objects;
4721
	unsigned int min_order;
4722 4723

	min_objects = slub_min_objects;
4724 4725 4726 4727 4728 4729 4730 4731 4732 4733
	if (!min_objects) {
		/*
		 * Some architectures will only update present cpus when
		 * onlining them, so don't trust the number if it's just 1. But
		 * we also don't want to use nr_cpu_ids always, as on some other
		 * architectures, there can be many possible cpus, but never
		 * onlined. Here we compromise between trying to avoid too high
		 * order on systems that appear larger than they are, and too
		 * low order on systems that appear smaller than they are.
		 */
4734
		unsigned int nr_cpus = num_present_cpus();
4735 4736 4737 4738
		if (nr_cpus <= 1)
			nr_cpus = nr_cpu_ids;
		min_objects = 4 * (fls(nr_cpus) + 1);
	}
4739 4740
	/* min_objects can't be 0 because get_order(0) is undefined */
	max_objects = max(order_objects(slub_max_order, size), 1U);
4741 4742
	min_objects = min(min_objects, max_objects);

4743 4744 4745 4746 4747
	min_order = max_t(unsigned int, slub_min_order,
			  get_order(min_objects * size));
	if (order_objects(min_order, size) > MAX_OBJS_PER_PAGE)
		return get_order(size * MAX_OBJS_PER_PAGE) - 1;

4748 4749 4750 4751 4752 4753
	/*
	 * Attempt to find best configuration for a slab. This works by first
	 * attempting to generate a layout with the best possible configuration
	 * and backing off gradually.
	 *
	 * We start with accepting at most 1/16 waste and try to find the
4754 4755
	 * smallest order from min_objects-derived/slab_min_order up to
	 * slab_max_order that will satisfy the constraint. Note that increasing
4756 4757 4758
	 * the order can only result in same or less fractional waste, not more.
	 *
	 * If that fails, we increase the acceptable fraction of waste and try
4759 4760
	 * again. The last iteration with fraction of 1/2 would effectively
	 * accept any waste and give us the order determined by min_objects, as
4761
	 * long as at least single object fits within slab_max_order.
4762
	 */
4763
	for (unsigned int fraction = 16; fraction > 1; fraction /= 2) {
4764
		order = calc_slab_order(size, min_order, slub_max_order,
4765 4766 4767
					fraction);
		if (order <= slub_max_order)
			return order;
4768 4769 4770
	}

	/*
4771
	 * Doh this slab cannot be placed using slab_max_order.
4772
	 */
4773
	order = get_order(size);
4774
	if (order <= MAX_PAGE_ORDER)
4775 4776 4777 4778
		return order;
	return -ENOSYS;
}

4779
static void
4780
init_kmem_cache_node(struct kmem_cache_node *n)
Christoph Lameter's avatar
Christoph Lameter committed
4781 4782 4783 4784
{
	n->nr_partial = 0;
	spin_lock_init(&n->list_lock);
	INIT_LIST_HEAD(&n->partial);
4785
#ifdef CONFIG_SLUB_DEBUG
4786
	atomic_long_set(&n->nr_slabs, 0);
4787
	atomic_long_set(&n->total_objects, 0);
4788
	INIT_LIST_HEAD(&n->full);
4789
#endif
Christoph Lameter's avatar
Christoph Lameter committed
4790 4791
}

4792
#ifndef CONFIG_SLUB_TINY
4793
static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
4794
{
4795
	BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
4796 4797
			NR_KMALLOC_TYPES * KMALLOC_SHIFT_HIGH *
			sizeof(struct kmem_cache_cpu));
4798

4799
	/*
4800 4801
	 * Must align to double word boundary for the double cmpxchg
	 * instructions to work; see __pcpu_double_call_return_bool().
4802
	 */
4803 4804
	s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
				     2 * sizeof(void *));
4805 4806 4807 4808 4809

	if (!s->cpu_slab)
		return 0;

	init_kmem_cache_cpus(s);
4810

4811
	return 1;
4812
}
4813 4814 4815 4816 4817 4818
#else
static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
{
	return 1;
}
#endif /* CONFIG_SLUB_TINY */
4819

4820 4821
static struct kmem_cache *kmem_cache_node;

Christoph Lameter's avatar
Christoph Lameter committed
4822 4823 4824 4825 4826
/*
 * No kmalloc_node yet so do it by hand. We know that this is the first
 * slab on the node for this slabcache. There are no concurrent accesses
 * possible.
 *
 * Note that this function only works on the kmem_cache_node
 * when allocating for the kmem_cache_node. This is used for bootstrapping
 * memory on a fresh node that has no slab structures yet.
 */
4831
static void early_kmem_cache_node_alloc(int node)
Christoph Lameter's avatar
Christoph Lameter committed
4832
{
4833
	struct slab *slab;
Christoph Lameter's avatar
Christoph Lameter committed
4834 4835
	struct kmem_cache_node *n;

4836
	BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
Christoph Lameter's avatar
Christoph Lameter committed
4837

4838
	slab = new_slab(kmem_cache_node, GFP_NOWAIT, node);
Christoph Lameter's avatar
Christoph Lameter committed
4839

4840 4841
	BUG_ON(!slab);
	if (slab_nid(slab) != node) {
4842 4843
		pr_err("SLUB: Unable to allocate memory from node %d\n", node);
		pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n");
4844 4845
	}

4846
	n = slab->freelist;
Christoph Lameter's avatar
Christoph Lameter committed
4847
	BUG_ON(!n);
4848
#ifdef CONFIG_SLUB_DEBUG
4849
	init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
4850
	init_tracking(kmem_cache_node, n);
4851
#endif
4852
	n = kasan_slab_alloc(kmem_cache_node, n, GFP_KERNEL, false);
4853 4854
	slab->freelist = get_freepointer(kmem_cache_node, n);
	slab->inuse = 1;
4855
	kmem_cache_node->node[node] = n;
4856
	init_kmem_cache_node(n);
4857
	inc_slabs_node(kmem_cache_node, node, slab->objects);
Christoph Lameter's avatar
Christoph Lameter committed
4858

4859
	/*
4860 4861
	 * No locks need to be taken here as it has just been
	 * initialized and there is no concurrent access.
4862
	 */
4863
	__add_partial(n, slab, DEACTIVATE_TO_HEAD);
Christoph Lameter's avatar
Christoph Lameter committed
4864 4865 4866 4867 4868
}

static void free_kmem_cache_nodes(struct kmem_cache *s)
{
	int node;
4869
	struct kmem_cache_node *n;
Christoph Lameter's avatar
Christoph Lameter committed
4870

4871
	for_each_kmem_cache_node(s, node, n) {
Christoph Lameter's avatar
Christoph Lameter committed
4872
		s->node[node] = NULL;
4873
		kmem_cache_free(kmem_cache_node, n);
Christoph Lameter's avatar
Christoph Lameter committed
4874 4875 4876
	}
}

4877 4878
void __kmem_cache_release(struct kmem_cache *s)
{
4879
	cache_random_seq_destroy(s);
4880
#ifndef CONFIG_SLUB_TINY
4881
	free_percpu(s->cpu_slab);
4882
#endif
4883 4884 4885
	free_kmem_cache_nodes(s);
}

4886
static int init_kmem_cache_nodes(struct kmem_cache *s)
Christoph Lameter's avatar
Christoph Lameter committed
4887 4888 4889
{
	int node;

4890
	for_each_node_mask(node, slab_nodes) {
Christoph Lameter's avatar
Christoph Lameter committed
4891 4892
		struct kmem_cache_node *n;

4893
		if (slab_state == DOWN) {
4894
			early_kmem_cache_node_alloc(node);
4895 4896
			continue;
		}
4897
		n = kmem_cache_alloc_node(kmem_cache_node,
4898
						GFP_KERNEL, node);
Christoph Lameter's avatar
Christoph Lameter committed
4899

4900 4901 4902
		if (!n) {
			free_kmem_cache_nodes(s);
			return 0;
Christoph Lameter's avatar
Christoph Lameter committed
4903
		}
4904

4905
		init_kmem_cache_node(n);
4906
		s->node[node] = n;
Christoph Lameter's avatar
Christoph Lameter committed
4907 4908 4909 4910
	}
	return 1;
}

4911 4912 4913
static void set_cpu_partial(struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_CPU_PARTIAL
	unsigned int objects;

	/*
	 * cpu_partial determined the maximum number of objects kept in the
	 * per cpu partial lists of a processor.
	 *
	 * Per cpu partial lists mainly contain slabs that just have one
	 * object freed. If they are used for allocation then they can be
	 * filled up again with minimal effort. The slab will never hit the
	 * per node partial lists and therefore no locking will be required.
	 *
	 * For backwards compatibility reasons, this is determined as number
	 * of objects, even though we now limit maximum number of pages, see
	 * slub_set_cpu_partial()
	 */
	if (!kmem_cache_has_cpu_partial(s))
		objects = 0;
	else if (s->size >= PAGE_SIZE)
		objects = 6;
	else if (s->size >= 1024)
		objects = 24;
	else if (s->size >= 256)
		objects = 52;
	else
		objects = 120;

	slub_set_cpu_partial(s, objects);
#endif
}

Christoph Lameter's avatar
Christoph Lameter committed
4944 4945 4946 4947
/*
 * calculate_sizes() determines the order and the distribution of data within
 * a slab object.
 */
4948
static int calculate_sizes(struct kmem_cache *s)
Christoph Lameter's avatar
Christoph Lameter committed
4949
{
4950
	slab_flags_t flags = s->flags;
4951
	unsigned int size = s->object_size;
4952
	unsigned int order;
Christoph Lameter's avatar
Christoph Lameter committed
4953

4954 4955 4956 4957 4958 4959 4960 4961
	/*
	 * Round up object size to the next word boundary. We can only
	 * place the free pointer at word boundaries and this determines
	 * the possible location of the free pointer.
	 */
	size = ALIGN(size, sizeof(void *));

#ifdef CONFIG_SLUB_DEBUG
Christoph Lameter's avatar
Christoph Lameter committed
4962 4963 4964 4965 4966
	/*
	 * Determine if we can poison the object itself. If the user of
	 * the slab may touch the object after free or before allocation
	 * then we should never poison the object itself.
	 */
4967
	if ((flags & SLAB_POISON) && !(flags & SLAB_TYPESAFE_BY_RCU) &&
4968
			!s->ctor)
Christoph Lameter's avatar
Christoph Lameter committed
4969 4970 4971 4972 4973 4974
		s->flags |= __OBJECT_POISON;
	else
		s->flags &= ~__OBJECT_POISON;


	/*
Christoph Lameter's avatar
Christoph Lameter committed
4975
	 * If we are Redzoning then check if there is some space between the
Christoph Lameter's avatar
Christoph Lameter committed
4976
	 * end of the object and the free pointer. If not then add an
Christoph Lameter's avatar
Christoph Lameter committed
4977
	 * additional word to have some bytes to store Redzone information.
Christoph Lameter's avatar
Christoph Lameter committed
4978
	 */
4979
	if ((flags & SLAB_RED_ZONE) && size == s->object_size)
Christoph Lameter's avatar
Christoph Lameter committed
4980
		size += sizeof(void *);
4981
#endif
Christoph Lameter's avatar
Christoph Lameter committed
4982 4983

	/*
Christoph Lameter's avatar
Christoph Lameter committed
4984
	 * With that we have determined the number of bytes in actual use
4985
	 * by the object and redzoning.
Christoph Lameter's avatar
Christoph Lameter committed
4986 4987 4988
	 */
	s->inuse = size;

4989 4990
	if (slub_debug_orig_size(s) ||
	    (flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
4991 4992
	    ((flags & SLAB_RED_ZONE) && s->object_size < sizeof(void *)) ||
	    s->ctor) {
Christoph Lameter's avatar
Christoph Lameter committed
4993 4994 4995 4996 4997 4998
		/*
		 * Relocate free pointer after the object if it is not
		 * permitted to overwrite the first word of the object on
		 * kmem_cache_free.
		 *
		 * This is the case if we do RCU, have a constructor or
4999 5000
		 * destructor, are poisoning the objects, or are
		 * redzoning an object smaller than sizeof(void *).
5001 5002 5003 5004 5005
		 *
		 * The assumption that s->offset >= s->inuse means free
		 * pointer is outside of the object is used in the
		 * freeptr_outside_object() function. If that is no
		 * longer true, the function needs to be modified.
Christoph Lameter's avatar
Christoph Lameter committed
5006 5007 5008
		 */
		s->offset = size;
		size += sizeof(void *);
5009
	} else {
5010 5011 5012 5013 5014
		/*
		 * Store freelist pointer near middle of object to keep
		 * it away from the edges of the object to avoid small
		 * sized over/underflows from neighboring allocations.
		 */
5015
		s->offset = ALIGN_DOWN(s->object_size / 2, sizeof(void *));
Christoph Lameter's avatar
Christoph Lameter committed
5016 5017
	}

5018
#ifdef CONFIG_SLUB_DEBUG
5019
	if (flags & SLAB_STORE_USER) {
Christoph Lameter's avatar
Christoph Lameter committed
5020 5021 5022 5023 5024
		/*
		 * Need to store information about allocs and frees after
		 * the object.
		 */
		size += 2 * sizeof(struct track);
5025 5026 5027 5028 5029

		/* Save the original kmalloc request size */
		if (flags & SLAB_KMALLOC)
			size += sizeof(unsigned int);
	}
5030
#endif
Christoph Lameter's avatar
Christoph Lameter committed
5031

5032 5033
	kasan_cache_create(s, &size, &s->flags);
#ifdef CONFIG_SLUB_DEBUG
5034
	if (flags & SLAB_RED_ZONE) {
Christoph Lameter's avatar
Christoph Lameter committed
5035 5036 5037 5038
		/*
		 * Add some empty padding so that we can catch
		 * overwrites from earlier objects rather than let
		 * tracking information or the free pointer be
5039
		 * corrupted if a user writes before the start
Christoph Lameter's avatar
Christoph Lameter committed
5040 5041 5042
		 * of the object.
		 */
		size += sizeof(void *);
5043 5044 5045 5046 5047

		s->red_left_pad = sizeof(void *);
		s->red_left_pad = ALIGN(s->red_left_pad, s->align);
		size += s->red_left_pad;
	}
5048
#endif
Christoph Lameter's avatar
Christoph Lameter committed
5049

Christoph Lameter's avatar
Christoph Lameter committed
5050 5051 5052 5053 5054
	/*
	 * SLUB stores one object immediately after another beginning from
	 * offset 0. In order to align the objects we have to simply size
	 * each object to conform to the alignment.
	 */
5055
	size = ALIGN(size, s->align);
Christoph Lameter's avatar
Christoph Lameter committed
5056
	s->size = size;
5057
	s->reciprocal_size = reciprocal_value(size);
5058
	order = calculate_order(size);
Christoph Lameter's avatar
Christoph Lameter committed
5059

5060
	if ((int)order < 0)
Christoph Lameter's avatar
Christoph Lameter committed
5061 5062
		return 0;

5063
	s->allocflags = 0;
5064
	if (order)
5065 5066 5067
		s->allocflags |= __GFP_COMP;

	if (s->flags & SLAB_CACHE_DMA)
5068
		s->allocflags |= GFP_DMA;
5069

5070 5071 5072
	if (s->flags & SLAB_CACHE_DMA32)
		s->allocflags |= GFP_DMA32;

5073 5074 5075
	if (s->flags & SLAB_RECLAIM_ACCOUNT)
		s->allocflags |= __GFP_RECLAIMABLE;

Christoph Lameter's avatar
Christoph Lameter committed
5076 5077 5078
	/*
	 * Determine the number of objects per slab
	 */
5079 5080
	s->oo = oo_make(order, size);
	s->min = oo_make(get_order(size), size);
Christoph Lameter's avatar
Christoph Lameter committed
5081

5082
	return !!oo_objects(s->oo);
Christoph Lameter's avatar
Christoph Lameter committed
5083 5084
}

5085
/*
 * Finish creating a cache: compute its layout, tunables and per-node/percpu
 * structures. Returns 0 on success, -EINVAL on failure (resources released).
 */
static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
{
	s->flags = kmem_cache_flags(flags, s->name);
#ifdef CONFIG_SLAB_FREELIST_HARDENED
	s->random = get_random_long();
#endif

	if (!calculate_sizes(s))
		goto error;
	if (disable_higher_order_debug) {
		/*
		 * Disable debugging flags that store metadata if the min slab
		 * order increased.
		 */
		if (get_order(s->size) > get_order(s->object_size)) {
			s->flags &= ~DEBUG_METADATA_FLAGS;
			s->offset = 0;
			if (!calculate_sizes(s))
				goto error;
		}
	}

#ifdef system_has_freelist_aba
	if (system_has_freelist_aba() && !(s->flags & SLAB_NO_CMPXCHG)) {
		/* Enable fast mode */
		s->flags |= __CMPXCHG_DOUBLE;
	}
#endif

	/*
	 * The larger the object size is, the more slabs we want on the partial
	 * list to avoid pounding the page allocator excessively.
	 */
	s->min_partial = min_t(unsigned long, MAX_PARTIAL, ilog2(s->size) / 2);
	s->min_partial = max_t(unsigned long, MIN_PARTIAL, s->min_partial);

	set_cpu_partial(s);

#ifdef CONFIG_NUMA
	s->remote_node_defrag_ratio = 1000;
#endif

	/* Initialize the pre-computed randomized freelist if slab is up */
	if (slab_state >= UP) {
		if (init_cache_random_seq(s))
			goto error;
	}

	if (!init_kmem_cache_nodes(s))
		goto error;

	if (alloc_kmem_cache_cpus(s))
		return 0;

error:
	__kmem_cache_release(s);
	return -EINVAL;
}

5144
/*
 * Report every still-allocated object on @slab (debug builds only).
 * Used when a cache is shut down with objects remaining.
 */
static void list_slab_objects(struct kmem_cache *s, struct slab *slab,
			      const char *text)
{
#ifdef CONFIG_SLUB_DEBUG
	void *addr = slab_address(slab);
	void *p;

	slab_err(s, slab, text, s->name);

	/* Mark free objects in object_map; the rest are leaked/live. */
	spin_lock(&object_map_lock);
	__fill_map(object_map, s, slab);

	for_each_object(p, s, addr, slab->objects) {
		if (!test_bit(__obj_to_index(s, addr, p), object_map)) {
			pr_err("Object 0x%p @offset=%tu\n", p, p - addr);
			print_tracking(s, p);
		}
	}
	spin_unlock(&object_map_lock);
#endif
}

Christoph Lameter's avatar
Christoph Lameter committed
5167
/*
 * Attempt to free all partial slabs on a node.
 * This is called from __kmem_cache_shutdown(). We must take list_lock
 * because sysfs file might still access partial list after the shutdowning.
 */
5172
static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
Christoph Lameter's avatar
Christoph Lameter committed
5173
{
5174
	LIST_HEAD(discard);
5175
	struct slab *slab, *h;
Christoph Lameter's avatar
Christoph Lameter committed
5176

5177 5178
	BUG_ON(irqs_disabled());
	spin_lock_irq(&n->list_lock);
5179 5180 5181 5182
	list_for_each_entry_safe(slab, h, &n->partial, slab_list) {
		if (!slab->inuse) {
			remove_partial(n, slab);
			list_add(&slab->slab_list, &discard);
5183
		} else {
5184
			list_slab_objects(s, slab,
5185
			  "Objects remaining in %s on __kmem_cache_shutdown()");
5186
		}
5187
	}
5188
	spin_unlock_irq(&n->list_lock);
5189

5190 5191
	list_for_each_entry_safe(slab, h, &discard, slab_list)
		discard_slab(s, slab);
Christoph Lameter's avatar
Christoph Lameter committed
5192 5193
}

5194 5195 5196 5197 5198 5199
bool __kmem_cache_empty(struct kmem_cache *s)
{
	int node;
	struct kmem_cache_node *n;

	for_each_kmem_cache_node(s, node, n)
5200
		if (n->nr_partial || node_nr_slabs(n))
5201 5202 5203 5204
			return false;
	return true;
}

Christoph Lameter's avatar
Christoph Lameter committed
5205
/*
 * Release all resources used by a slab cache.
 */
5208
int __kmem_cache_shutdown(struct kmem_cache *s)
Christoph Lameter's avatar
Christoph Lameter committed
5209 5210
{
	int node;
5211
	struct kmem_cache_node *n;
Christoph Lameter's avatar
Christoph Lameter committed
5212

5213
	flush_all_cpus_locked(s);
Christoph Lameter's avatar
Christoph Lameter committed
5214
	/* Attempt to free all objects */
5215
	for_each_kmem_cache_node(s, node, n) {
5216
		free_partial(s, n);
5217
		if (n->nr_partial || node_nr_slabs(n))
Christoph Lameter's avatar
Christoph Lameter committed
5218 5219 5220 5221 5222
			return 1;
	}
	return 0;
}

5223
#ifdef CONFIG_PRINTK
/*
 * Fill @kpp with diagnostic information about @object living on @slab:
 * containing cache, object base address, data offset, and (when
 * SLAB_STORE_USER tracking is on) the recorded alloc/free stack traces.
 */
void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
{
	void *base;
	int __maybe_unused i;
	unsigned int objnr;
	void *objp;
	void *objp0;
	struct kmem_cache *s = slab->slab_cache;
	struct track __maybe_unused *trackp;

	kpp->kp_ptr = object;
	kpp->kp_slab = slab;
	kpp->kp_slab_cache = s;
	base = slab_address(slab);
	objp0 = kasan_reset_tag(object);
#ifdef CONFIG_SLUB_DEBUG
	objp = restore_red_left(s, objp0);
#else
	objp = objp0;
#endif
	objnr = obj_to_index(s, slab, objp);
	kpp->kp_data_offset = (unsigned long)((char *)objp0 - (char *)objp);
	/* Recompute the canonical object start from its index. */
	objp = base + s->size * objnr;
	kpp->kp_objp = objp;
	if (WARN_ON_ONCE(objp < base || objp >= base + slab->objects * s->size
			 || (objp - base) % s->size) ||
	    !(s->flags & SLAB_STORE_USER))
		return;
#ifdef CONFIG_SLUB_DEBUG
	objp = fixup_red_left(s, objp);
	trackp = get_track(s, objp, TRACK_ALLOC);
	kpp->kp_ret = (void *)trackp->addr;
#ifdef CONFIG_STACKDEPOT
	{
		depot_stack_handle_t handle;
		unsigned long *entries;
		unsigned int nr_entries;

		/* Allocation stack trace, if one was recorded. */
		handle = READ_ONCE(trackp->handle);
		if (handle) {
			nr_entries = stack_depot_fetch(handle, &entries);
			for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++)
				kpp->kp_stack[i] = (void *)entries[i];
		}

		/* Free stack trace, if one was recorded. */
		trackp = get_track(s, objp, TRACK_FREE);
		handle = READ_ONCE(trackp->handle);
		if (handle) {
			nr_entries = stack_depot_fetch(handle, &entries);
			for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++)
				kpp->kp_free_stack[i] = (void *)entries[i];
		}
	}
#endif
#endif
}
#endif
5281

Christoph Lameter's avatar
Christoph Lameter committed
5282 5283 5284 5285 5286 5287
/********************************************************************
 *		Kmalloc subsystem
 *******************************************************************/

/* Parse "slab_min_order=" / "slub_min_order=" and keep max >= min. */
static int __init setup_slub_min_order(char *str)
{
	get_option(&str, (int *)&slub_min_order);

	if (slub_min_order > slub_max_order)
		slub_max_order = slub_min_order;

	return 1;
}

__setup("slab_min_order=", setup_slub_min_order);
__setup_param("slub_min_order=", slub_min_order, setup_slub_min_order, 0);

Christoph Lameter's avatar
Christoph Lameter committed
5299 5300 5301

/* Parse "slab_max_order=" / "slub_max_order=", clamped to MAX_PAGE_ORDER. */
static int __init setup_slub_max_order(char *str)
{
	get_option(&str, (int *)&slub_max_order);
	slub_max_order = min_t(unsigned int, slub_max_order, MAX_PAGE_ORDER);

	if (slub_min_order > slub_max_order)
		slub_min_order = slub_max_order;

	return 1;
}

__setup("slab_max_order=", setup_slub_max_order);
__setup_param("slub_max_order=", slub_max_order, setup_slub_max_order, 0);
Christoph Lameter's avatar
Christoph Lameter committed
5313 5314 5315

/* Parse "slab_min_objects=" / "slub_min_objects=". */
static int __init setup_slub_min_objects(char *str)
{
	get_option(&str, (int *)&slub_min_objects);

	return 1;
}

__setup("slab_min_objects=", setup_slub_min_objects);
__setup_param("slub_min_objects=", slub_min_objects, setup_slub_min_objects, 0);
Christoph Lameter's avatar
Christoph Lameter committed
5323

5324 5325
#ifdef CONFIG_HARDENED_USERCOPY
/*
 * Rejects incorrectly sized objects and objects that are to be copied
 * to/from userspace but do not fall entirely within the containing slab
 * cache's usercopy region.
 *
 * Aborts (via usercopy_abort) on violation; returns normally when the
 * copy is permitted.
 */
void __check_heap_object(const void *ptr, unsigned long n,
			 const struct slab *slab, bool to_user)
{
	struct kmem_cache *s;
	unsigned int offset;
	bool is_kfence = is_kfence_address(ptr);

	ptr = kasan_reset_tag(ptr);

	/* Find object and usable object size. */
	s = slab->slab_cache;

	/* Reject impossible pointers. */
	if (ptr < slab_address(slab))
		usercopy_abort("SLUB object not in SLUB page?!", NULL,
			       to_user, 0, n);

	/* Find offset within object. */
	if (is_kfence)
		offset = ptr - kfence_object_start(ptr);
	else
		offset = (ptr - slab_address(slab)) % s->size;

	/* Adjust for redzone and reject if within the redzone. */
	if (!is_kfence && kmem_cache_debug_flags(s, SLAB_RED_ZONE)) {
		if (offset < s->red_left_pad)
			usercopy_abort("SLUB object in left red zone",
				       s->name, to_user, offset, n);
		offset -= s->red_left_pad;
	}

	/* Allow address range falling entirely within usercopy region. */
	if (offset >= s->useroffset &&
	    offset - s->useroffset <= s->usersize &&
	    n <= s->useroffset - offset + s->usersize)
		return;

	usercopy_abort("SLUB object", s->name, to_user, offset, n);
}
#endif /* CONFIG_HARDENED_USERCOPY */

5374 5375
#define SHRINK_PROMOTE_MAX 32

5376
/*
5377 5378 5379
 * kmem_cache_shrink discards empty slabs and promotes the slabs filled
 * up most to the head of the partial lists. New allocations will then
 * fill those up and thus they can be removed from the partial lists.
Christoph Lameter's avatar
Christoph Lameter committed
5380 5381 5382 5383
 *
 * The slabs with the least items are placed last. This results in them
 * being allocated from last increasing the chance that the last objects
 * are freed in them.
5384
 */
5385
static int __kmem_cache_do_shrink(struct kmem_cache *s)
5386 5387 5388 5389
{
	int node;
	int i;
	struct kmem_cache_node *n;
5390 5391
	struct slab *slab;
	struct slab *t;
5392 5393
	struct list_head discard;
	struct list_head promote[SHRINK_PROMOTE_MAX];
5394
	unsigned long flags;
5395
	int ret = 0;
5396

5397
	for_each_kmem_cache_node(s, node, n) {
5398 5399 5400
		INIT_LIST_HEAD(&discard);
		for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
			INIT_LIST_HEAD(promote + i);
5401 5402 5403 5404

		spin_lock_irqsave(&n->list_lock, flags);

		/*
5405
		 * Build lists of slabs to discard or promote.
5406
		 *
Christoph Lameter's avatar
Christoph Lameter committed
5407
		 * Note that concurrent frees may occur while we hold the
5408
		 * list_lock. slab->inuse here is the upper limit.
5409
		 */
5410 5411
		list_for_each_entry_safe(slab, t, &n->partial, slab_list) {
			int free = slab->objects - slab->inuse;
5412

5413
			/* Do not reread slab->inuse */
5414 5415 5416 5417 5418
			barrier();

			/* We do not keep full slabs on the list */
			BUG_ON(free <= 0);

5419 5420
			if (free == slab->objects) {
				list_move(&slab->slab_list, &discard);
5421
				slab_clear_node_partial(slab);
5422
				n->nr_partial--;
5423
				dec_slabs_node(s, node, slab->objects);
5424
			} else if (free <= SHRINK_PROMOTE_MAX)
5425
				list_move(&slab->slab_list, promote + free - 1);
5426 5427 5428
		}

		/*
5429 5430
		 * Promote the slabs filled up most to the head of the
		 * partial list.
5431
		 */
5432 5433
		for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
			list_splice(promote + i, &n->partial);
5434 5435

		spin_unlock_irqrestore(&n->list_lock, flags);
5436 5437

		/* Release empty slabs */
5438
		list_for_each_entry_safe(slab, t, &discard, slab_list)
5439
			free_slab(s, slab);
5440

5441
		if (node_nr_slabs(n))
5442
			ret = 1;
5443 5444
	}

5445
	return ret;
5446 5447
}

5448 5449 5450 5451 5452 5453
/* Flush percpu slabs, then shrink the node partial lists. */
int __kmem_cache_shrink(struct kmem_cache *s)
{
	flush_all(s);
	return __kmem_cache_do_shrink(s);
}

5454 5455 5456 5457
static int slab_mem_going_offline_callback(void *arg)
{
	struct kmem_cache *s;

5458
	mutex_lock(&slab_mutex);
5459 5460 5461 5462
	list_for_each_entry(s, &slab_caches, list) {
		flush_all_cpus_locked(s);
		__kmem_cache_do_shrink(s);
	}
5463
	mutex_unlock(&slab_mutex);
5464 5465 5466 5467 5468 5469 5470 5471 5472

	return 0;
}

static void slab_mem_offline_callback(void *arg)
{
	struct memory_notify *marg = arg;
	int offline_node;

5473
	offline_node = marg->status_change_nid_normal;
5474 5475 5476 5477 5478 5479 5480 5481

	/*
	 * If the node still has available memory. we need kmem_cache_node
	 * for it yet.
	 */
	if (offline_node < 0)
		return;

5482
	mutex_lock(&slab_mutex);
5483
	node_clear(offline_node, slab_nodes);
5484 5485 5486 5487 5488
	/*
	 * We no longer free kmem_cache_node structures here, as it would be
	 * racy with all get_node() users, and infeasible to protect them with
	 * slab_mutex.
	 */
5489
	mutex_unlock(&slab_mutex);
5490 5491 5492 5493 5494 5495 5496
}

static int slab_mem_going_online_callback(void *arg)
{
	struct kmem_cache_node *n;
	struct kmem_cache *s;
	struct memory_notify *marg = arg;
5497
	int nid = marg->status_change_nid_normal;
5498 5499 5500 5501 5502 5503 5504 5505 5506 5507
	int ret = 0;

	/*
	 * If the node's memory is already available, then kmem_cache_node is
	 * already created. Nothing to do.
	 */
	if (nid < 0)
		return 0;

	/*
5508
	 * We are bringing a node online. No memory is available yet. We must
5509 5510 5511
	 * allocate a kmem_cache_node structure in order to bring the node
	 * online.
	 */
5512
	mutex_lock(&slab_mutex);
5513
	list_for_each_entry(s, &slab_caches, list) {
5514 5515 5516 5517 5518 5519
		/*
		 * The structure may already exist if the node was previously
		 * onlined and offlined.
		 */
		if (get_node(s, nid))
			continue;
5520 5521 5522 5523 5524
		/*
		 * XXX: kmem_cache_alloc_node will fallback to other nodes
		 *      since memory is not yet available from the node that
		 *      is brought up.
		 */
5525
		n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL);
5526 5527 5528 5529
		if (!n) {
			ret = -ENOMEM;
			goto out;
		}
5530
		init_kmem_cache_node(n);
5531 5532
		s->node[nid] = n;
	}
5533 5534 5535 5536 5537
	/*
	 * Any cache created after this point will also have kmem_cache_node
	 * initialized for the new node.
	 */
	node_set(nid, slab_nodes);
5538
out:
5539
	mutex_unlock(&slab_mutex);
5540 5541 5542 5543 5544 5545 5546 5547 5548 5549 5550 5551 5552 5553 5554 5555 5556 5557 5558 5559 5560 5561 5562
	return ret;
}

/* Dispatch memory-hotplug notifications to the handlers above. */
static int slab_memory_callback(struct notifier_block *self,
				unsigned long action, void *arg)
{
	int ret = 0;

	switch (action) {
	case MEM_GOING_ONLINE:
		ret = slab_mem_going_online_callback(arg);
		break;
	case MEM_GOING_OFFLINE:
		ret = slab_mem_going_offline_callback(arg);
		break;
	case MEM_OFFLINE:
	case MEM_CANCEL_ONLINE:
		slab_mem_offline_callback(arg);
		break;
	case MEM_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}
	return ret ? notifier_from_errno(ret) : NOTIFY_OK;
}

Christoph Lameter's avatar
Christoph Lameter committed
5570 5571 5572 5573
/********************************************************************
 *			Basic setup of slabs
 *******************************************************************/

5574 5575
/*
 * Used for early kmem_cache structures that were allocated using
 * the page allocator. Allocate them properly then fix up the pointers
 * that may be pointing to the wrong kmem_cache structure.
 */

5580
static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
{
	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
	struct kmem_cache_node *n;
	int node;

	memcpy(s, static_cache, kmem_cache->object_size);

	/*
	 * This runs very early, and only the boot processor is supposed to be
	 * up.  Even if it weren't true, IRQs are not up so we couldn't fire
	 * IPIs around.
	 */
	__flush_cpu_slab(s, smp_processor_id());
	for_each_kmem_cache_node(s, node, n) {
		struct slab *p;

		/* Repoint every existing slab at the properly allocated cache. */
		list_for_each_entry(p, &n->partial, slab_list)
			p->slab_cache = s;

#ifdef CONFIG_SLUB_DEBUG
		list_for_each_entry(p, &n->full, slab_list)
			p->slab_cache = s;
#endif
	}
	list_add(&s->list, &slab_caches);
	return s;
}

Christoph Lameter's avatar
Christoph Lameter committed
5609 5610
/*
 * Boot-time initialization of the SLUB allocator: set up the two
 * bootstrap caches, register hotplug callbacks and create the kmalloc
 * caches.  Statement order here is part of the boot contract.
 */
void __init kmem_cache_init(void)
{
	static __initdata struct kmem_cache boot_kmem_cache,
		boot_kmem_cache_node;
	int node;

	if (debug_guardpage_minorder())
		slub_max_order = 0;

	/* Print slub debugging pointers without hashing */
	if (__slub_debug_enabled())
		no_hash_pointers_enable(NULL);

	kmem_cache_node = &boot_kmem_cache_node;
	kmem_cache = &boot_kmem_cache;

	/*
	 * Initialize the nodemask for which we will allocate per node
	 * structures. Here we don't need taking slab_mutex yet.
	 */
	for_each_node_state(node, N_NORMAL_MEMORY)
		node_set(node, slab_nodes);

	create_boot_cache(kmem_cache_node, "kmem_cache_node",
		sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN, 0, 0);

	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);

	/* Able to allocate the per node structures */
	slab_state = PARTIAL;

	create_boot_cache(kmem_cache, "kmem_cache",
			offsetof(struct kmem_cache, node) +
				nr_node_ids * sizeof(struct kmem_cache_node *),
		       SLAB_HWCACHE_ALIGN, 0, 0);

	/* Re-allocate both bootstrap caches from the allocator itself. */
	kmem_cache = bootstrap(&boot_kmem_cache);
	kmem_cache_node = bootstrap(&boot_kmem_cache_node);

	/* Now we can use the kmem_cache to allocate kmalloc slabs */
	setup_kmalloc_cache_index_table();
	create_kmalloc_caches();

	/* Setup random freelists for each cache */
	init_freelist_randomization();

	cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL,
				  slub_cpu_dead);

	pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%u\n",
		cache_line_size(),
		slub_min_order, slub_max_order, slub_min_objects,
		nr_cpu_ids, nr_node_ids);
}

5664 5665
/* Late init: create the slab flush workqueue once workqueues exist. */
void __init kmem_cache_init_late(void)
{
#ifndef CONFIG_SLUB_TINY
	flushwq = alloc_workqueue("slub_flushwq", WQ_MEM_RECLAIM, 0);
	WARN_ON(!flushwq);
#endif
}

5672
struct kmem_cache *
5673
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
5674
		   slab_flags_t flags, void (*ctor)(void *))
Christoph Lameter's avatar
Christoph Lameter committed
5675
{
5676
	struct kmem_cache *s;
Christoph Lameter's avatar
Christoph Lameter committed
5677

5678
	s = find_mergeable(size, align, flags, name, ctor);
Christoph Lameter's avatar
Christoph Lameter committed
5679
	if (s) {
5680 5681 5682
		if (sysfs_slab_alias(s, name))
			return NULL;

Christoph Lameter's avatar
Christoph Lameter committed
5683
		s->refcount++;
5684

Christoph Lameter's avatar
Christoph Lameter committed
5685 5686 5687 5688
		/*
		 * Adjust the object sizes so that we clear
		 * the complete object on kzalloc.
		 */
5689
		s->object_size = max(s->object_size, size);
5690
		s->inuse = max(s->inuse, ALIGN(size, sizeof(void *)));
5691
	}
Christoph Lameter's avatar
Christoph Lameter committed
5692

5693 5694
	return s;
}
5695

5696
/*
 * Final stage of cache creation: open the cache and, once sysfs is up,
 * publish it there (and in debugfs when allocation tracking is on).
 * Returns 0 on success or a negative errno.
 */
int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
{
	int err = kmem_cache_open(s, flags);

	if (err)
		return err;

	/* Mutex is not taken during early boot */
	if (slab_state <= UP)
		return 0;

	err = sysfs_slab_add(s);
	if (err) {
		__kmem_cache_release(s);
		return err;
	}

	if (s->flags & SLAB_STORE_USER)
		debugfs_slab_add(s);

	return 0;
}

5720
#ifdef SLAB_SUPPORTS_SYSFS
/* Number of objects currently allocated from @slab. */
static int count_inuse(struct slab *slab)
{
	return slab->inuse;
}

/* Total object capacity of @slab. */
static int count_total(struct slab *slab)
{
	return slab->objects;
}
#endif
5731

5732
#ifdef CONFIG_SLUB_DEBUG
5733
static void validate_slab(struct kmem_cache *s, struct slab *slab,
5734
			  unsigned long *obj_map)
5735 5736
{
	void *p;
5737
	void *addr = slab_address(slab);
5738

5739
	if (!check_slab(s, slab) || !on_freelist(s, slab, NULL))
5740
		return;
5741 5742

	/* Now we know that a valid freelist exists */
5743 5744
	__fill_map(obj_map, s, slab);
	for_each_object(p, s, addr, slab->objects) {
5745
		u8 val = test_bit(__obj_to_index(s, addr, p), obj_map) ?
5746
			 SLUB_RED_INACTIVE : SLUB_RED_ACTIVE;
5747

5748
		if (!check_object(s, slab, p, val))
5749 5750
			break;
	}
5751 5752
}

5753
static int validate_slab_node(struct kmem_cache *s,
5754
		struct kmem_cache_node *n, unsigned long *obj_map)
5755 5756
{
	unsigned long count = 0;
5757
	struct slab *slab;
5758 5759 5760 5761
	unsigned long flags;

	spin_lock_irqsave(&n->list_lock, flags);

5762 5763
	list_for_each_entry(slab, &n->partial, slab_list) {
		validate_slab(s, slab, obj_map);
5764 5765
		count++;
	}
5766
	if (count != n->nr_partial) {
5767 5768
		pr_err("SLUB %s: %ld partial slabs counted but counter=%ld\n",
		       s->name, count, n->nr_partial);
5769 5770
		slab_add_kunit_errors();
	}
5771 5772 5773 5774

	if (!(s->flags & SLAB_STORE_USER))
		goto out;

5775 5776
	list_for_each_entry(slab, &n->full, slab_list) {
		validate_slab(s, slab, obj_map);
5777 5778
		count++;
	}
5779
	if (count != node_nr_slabs(n)) {
5780
		pr_err("SLUB: %s %ld slabs counted but counter=%ld\n",
5781
		       s->name, count, node_nr_slabs(n));
5782 5783
		slab_add_kunit_errors();
	}
5784 5785 5786 5787 5788 5789

out:
	spin_unlock_irqrestore(&n->list_lock, flags);
	return count;
}

5790
long validate_slab_cache(struct kmem_cache *s)
5791 5792 5793
{
	int node;
	unsigned long count = 0;
5794
	struct kmem_cache_node *n;
5795 5796 5797 5798 5799
	unsigned long *obj_map;

	obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL);
	if (!obj_map)
		return -ENOMEM;
5800 5801

	flush_all(s);
5802
	for_each_kmem_cache_node(s, node, n)
5803 5804 5805
		count += validate_slab_node(s, n, obj_map);

	bitmap_free(obj_map);
5806

5807 5808
	return count;
}
5809 5810
EXPORT_SYMBOL(validate_slab_cache);

5811
#ifdef CONFIG_DEBUG_FS
5812
/*
Christoph Lameter's avatar
Christoph Lameter committed
5813
 * Generate lists of code addresses where slabcache objects are allocated
5814 5815 5816 5817
 * and freed.
 */

struct location {
5818
	depot_stack_handle_t handle;
5819
	unsigned long count;
5820
	unsigned long addr;
5821
	unsigned long waste;
5822 5823 5824 5825 5826
	long long sum_time;
	long min_time;
	long max_time;
	long min_pid;
	long max_pid;
Rusty Russell's avatar
Rusty Russell committed
5827
	DECLARE_BITMAP(cpus, NR_CPUS);
5828
	nodemask_t nodes;
5829 5830 5831 5832 5833 5834
};

struct loc_track {
	unsigned long max;
	unsigned long count;
	struct location *loc;
5835
	loff_t idx;
5836 5837
};

5838 5839
static struct dentry *slab_debugfs_root;

5840 5841 5842 5843 5844 5845 5846
/* Release the page-allocator backing of a location track table. */
static void free_loc_track(struct loc_track *t)
{
	if (t->max)
		free_pages((unsigned long)t->loc,
			get_order(sizeof(struct location) * t->max));
}

5847
/*
 * (Re)size @t to hold @max locations, copying over existing entries.
 * Returns 1 on success, 0 if pages could not be allocated (the old
 * table is left intact in that case).
 */
static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
{
	int order = get_order(sizeof(struct location) * max);
	struct location *l = (void *)__get_free_pages(flags, order);

	if (!l)
		return 0;

	if (t->count) {
		memcpy(l, t->loc, sizeof(struct location) * t->count);
		free_loc_track(t);
	}
	t->max = max;
	t->loc = l;
	return 1;
}

static int add_location(struct loc_track *t, struct kmem_cache *s,
5868 5869
				const struct track *track,
				unsigned int orig_size)
5870 5871 5872
{
	long start, end, pos;
	struct location *l;
5873
	unsigned long caddr, chandle, cwaste;
5874
	unsigned long age = jiffies - track->when;
5875
	depot_stack_handle_t handle = 0;
5876
	unsigned int waste = s->object_size - orig_size;
5877

5878 5879 5880
#ifdef CONFIG_STACKDEPOT
	handle = READ_ONCE(track->handle);
#endif
5881 5882 5883 5884 5885 5886 5887 5888 5889 5890 5891 5892 5893
	start = -1;
	end = t->count;

	for ( ; ; ) {
		pos = start + (end - start + 1) / 2;

		/*
		 * There is nothing at "end". If we end up there
		 * we need to add something to before end.
		 */
		if (pos == end)
			break;

5894 5895 5896 5897 5898 5899
		l = &t->loc[pos];
		caddr = l->addr;
		chandle = l->handle;
		cwaste = l->waste;
		if ((track->addr == caddr) && (handle == chandle) &&
			(waste == cwaste)) {
5900 5901 5902 5903 5904 5905 5906 5907 5908 5909 5910 5911 5912 5913

			l->count++;
			if (track->when) {
				l->sum_time += age;
				if (age < l->min_time)
					l->min_time = age;
				if (age > l->max_time)
					l->max_time = age;

				if (track->pid < l->min_pid)
					l->min_pid = track->pid;
				if (track->pid > l->max_pid)
					l->max_pid = track->pid;

Rusty Russell's avatar
Rusty Russell committed
5914 5915
				cpumask_set_cpu(track->cpu,
						to_cpumask(l->cpus));
5916 5917
			}
			node_set(page_to_nid(virt_to_page(track)), l->nodes);
5918 5919 5920
			return 1;
		}

5921
		if (track->addr < caddr)
5922
			end = pos;
5923 5924
		else if (track->addr == caddr && handle < chandle)
			end = pos;
5925 5926 5927
		else if (track->addr == caddr && handle == chandle &&
				waste < cwaste)
			end = pos;
5928 5929 5930 5931 5932
		else
			start = pos;
	}

	/*
Christoph Lameter's avatar
Christoph Lameter committed
5933
	 * Not found. Insert new tracking element.
5934
	 */
5935
	if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
5936 5937 5938 5939 5940 5941 5942 5943
		return 0;

	l = t->loc + pos;
	if (pos < t->count)
		memmove(l + 1, l,
			(t->count - pos) * sizeof(struct location));
	t->count++;
	l->count = 1;
5944 5945 5946 5947 5948 5949
	l->addr = track->addr;
	l->sum_time = age;
	l->min_time = age;
	l->max_time = age;
	l->min_pid = track->pid;
	l->max_pid = track->pid;
5950
	l->handle = handle;
5951
	l->waste = waste;
Rusty Russell's avatar
Rusty Russell committed
5952 5953
	cpumask_clear(to_cpumask(l->cpus));
	cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
5954 5955
	nodes_clear(l->nodes);
	node_set(page_to_nid(virt_to_page(track)), l->nodes);
5956 5957 5958 5959
	return 1;
}

static void process_slab(struct loc_track *t, struct kmem_cache *s,
5960
		struct slab *slab, enum track_item alloc,
5961
		unsigned long *obj_map)
5962
{
5963
	void *addr = slab_address(slab);
5964
	bool is_alloc = (alloc == TRACK_ALLOC);
5965 5966
	void *p;

5967
	__fill_map(obj_map, s, slab);
5968

5969
	for_each_object(p, s, addr, slab->objects)
5970
		if (!test_bit(__obj_to_index(s, addr, p), obj_map))
5971 5972 5973
			add_location(t, s, get_track(s, p, alloc),
				     is_alloc ? get_orig_size(s, p) :
						s->object_size);
5974
}
5975
#endif  /* CONFIG_DEBUG_FS   */
5976
#endif	/* CONFIG_SLUB_DEBUG */
5977

5978
#ifdef SLAB_SUPPORTS_SYSFS
Christoph Lameter's avatar
Christoph Lameter committed
5979
enum slab_stat_type {
	SL_ALL,			/* All slabs */
	SL_PARTIAL,		/* Only partially allocated slabs */
	SL_CPU,			/* Only slabs used for cpu caches */
	SL_OBJECTS,		/* Determine allocated objects not slabs */
	SL_TOTAL		/* Determine object capacity not slabs */
};

/* Bit-flag forms of the above, combinable in show_slab_objects(). */
#define SO_ALL		(1 << SL_ALL)
#define SO_PARTIAL	(1 << SL_PARTIAL)
#define SO_CPU		(1 << SL_CPU)
#define SO_OBJECTS	(1 << SL_OBJECTS)
#define SO_TOTAL	(1 << SL_TOTAL)
Christoph Lameter's avatar
Christoph Lameter committed
5992

5993
static ssize_t show_slab_objects(struct kmem_cache *s,
5994
				 char *buf, unsigned long flags)
Christoph Lameter's avatar
Christoph Lameter committed
5995 5996 5997 5998 5999
{
	unsigned long total = 0;
	int node;
	int x;
	unsigned long *nodes;
6000
	int len = 0;
Christoph Lameter's avatar
Christoph Lameter committed
6001

6002
	nodes = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL);
6003 6004
	if (!nodes)
		return -ENOMEM;
Christoph Lameter's avatar
Christoph Lameter committed
6005

6006 6007
	if (flags & SO_CPU) {
		int cpu;
Christoph Lameter's avatar
Christoph Lameter committed
6008

6009
		for_each_possible_cpu(cpu) {
6010 6011
			struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab,
							       cpu);
6012
			int node;
6013
			struct slab *slab;
6014

6015 6016
			slab = READ_ONCE(c->slab);
			if (!slab)
6017
				continue;
6018

6019
			node = slab_nid(slab);
6020
			if (flags & SO_TOTAL)
6021
				x = slab->objects;
6022
			else if (flags & SO_OBJECTS)
6023
				x = slab->inuse;
6024 6025
			else
				x = 1;
6026

6027 6028 6029
			total += x;
			nodes[node] += x;

6030
#ifdef CONFIG_SLUB_CPU_PARTIAL
6031 6032 6033
			slab = slub_percpu_partial_read_once(c);
			if (slab) {
				node = slab_nid(slab);
6034 6035 6036 6037 6038
				if (flags & SO_TOTAL)
					WARN_ON_ONCE(1);
				else if (flags & SO_OBJECTS)
					WARN_ON_ONCE(1);
				else
6039
					x = data_race(slab->slabs);
6040 6041
				total += x;
				nodes[node] += x;
6042
			}
6043
#endif
Christoph Lameter's avatar
Christoph Lameter committed
6044 6045 6046
		}
	}

6047 6048 6049 6050 6051 6052 6053 6054 6055 6056 6057
	/*
	 * It is impossible to take "mem_hotplug_lock" here with "kernfs_mutex"
	 * already held which will conflict with an existing lock order:
	 *
	 * mem_hotplug_lock->slab_mutex->kernfs_mutex
	 *
	 * We don't really need mem_hotplug_lock (to hold off
	 * slab_mem_going_offline_callback) here because slab's memory hot
	 * unplug code doesn't destroy the kmem_cache->node[] data.
	 */

6058
#ifdef CONFIG_SLUB_DEBUG
6059
	if (flags & SO_ALL) {
6060 6061 6062
		struct kmem_cache_node *n;

		for_each_kmem_cache_node(s, node, n) {
6063

6064
			if (flags & SO_TOTAL)
6065
				x = node_nr_objs(n);
6066
			else if (flags & SO_OBJECTS)
6067
				x = node_nr_objs(n) - count_partial(n, count_free);
Christoph Lameter's avatar
Christoph Lameter committed
6068
			else
6069
				x = node_nr_slabs(n);
Christoph Lameter's avatar
Christoph Lameter committed
6070 6071 6072 6073
			total += x;
			nodes[node] += x;
		}

6074 6075 6076
	} else
#endif
	if (flags & SO_PARTIAL) {
6077
		struct kmem_cache_node *n;
Christoph Lameter's avatar
Christoph Lameter committed
6078

6079
		for_each_kmem_cache_node(s, node, n) {
6080 6081 6082 6083
			if (flags & SO_TOTAL)
				x = count_partial(n, count_total);
			else if (flags & SO_OBJECTS)
				x = count_partial(n, count_inuse);
Christoph Lameter's avatar
Christoph Lameter committed
6084
			else
6085
				x = n->nr_partial;
Christoph Lameter's avatar
Christoph Lameter committed
6086 6087 6088 6089
			total += x;
			nodes[node] += x;
		}
	}
6090 6091

	len += sysfs_emit_at(buf, len, "%lu", total);
Christoph Lameter's avatar
Christoph Lameter committed
6092
#ifdef CONFIG_NUMA
6093
	for (node = 0; node < nr_node_ids; node++) {
Christoph Lameter's avatar
Christoph Lameter committed
6094
		if (nodes[node])
6095 6096 6097
			len += sysfs_emit_at(buf, len, " N%d=%lu",
					     node, nodes[node]);
	}
Christoph Lameter's avatar
Christoph Lameter committed
6098
#endif
6099
	len += sysfs_emit_at(buf, len, "\n");
Christoph Lameter's avatar
Christoph Lameter committed
6100
	kfree(nodes);
6101 6102

	return len;
Christoph Lameter's avatar
Christoph Lameter committed
6103 6104 6105
}

#define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
6106
#define to_slab(n) container_of(n, struct kmem_cache, kobj)
Christoph Lameter's avatar
Christoph Lameter committed
6107 6108 6109 6110 6111 6112 6113 6114

struct slab_attribute {
	struct attribute attr;
	ssize_t (*show)(struct kmem_cache *s, char *buf);
	ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
};

#define SLAB_ATTR_RO(_name) \
6115
	static struct slab_attribute _name##_attr = __ATTR_RO_MODE(_name, 0400)
Christoph Lameter's avatar
Christoph Lameter committed
6116 6117

#define SLAB_ATTR(_name) \
6118
	static struct slab_attribute _name##_attr = __ATTR_RW_MODE(_name, 0600)
Christoph Lameter's avatar
Christoph Lameter committed
6119 6120 6121

static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
{
6122
	return sysfs_emit(buf, "%u\n", s->size);
Christoph Lameter's avatar
Christoph Lameter committed
6123 6124 6125 6126 6127
}
SLAB_ATTR_RO(slab_size);

static ssize_t align_show(struct kmem_cache *s, char *buf)
{
6128
	return sysfs_emit(buf, "%u\n", s->align);
Christoph Lameter's avatar
Christoph Lameter committed
6129 6130 6131 6132 6133
}
SLAB_ATTR_RO(align);

static ssize_t object_size_show(struct kmem_cache *s, char *buf)
{
6134
	return sysfs_emit(buf, "%u\n", s->object_size);
Christoph Lameter's avatar
Christoph Lameter committed
6135 6136 6137 6138 6139
}
SLAB_ATTR_RO(object_size);

static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
{
6140
	return sysfs_emit(buf, "%u\n", oo_objects(s->oo));
Christoph Lameter's avatar
Christoph Lameter committed
6141 6142 6143 6144 6145
}
SLAB_ATTR_RO(objs_per_slab);

static ssize_t order_show(struct kmem_cache *s, char *buf)
{
6146
	return sysfs_emit(buf, "%u\n", oo_order(s->oo));
Christoph Lameter's avatar
Christoph Lameter committed
6147
}
6148
SLAB_ATTR_RO(order);
Christoph Lameter's avatar
Christoph Lameter committed
6149

6150 6151
static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
{
6152
	return sysfs_emit(buf, "%lu\n", s->min_partial);
6153 6154 6155 6156 6157 6158 6159 6160
}

static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
				 size_t length)
{
	unsigned long min;
	int err;

6161
	err = kstrtoul(buf, 10, &min);
6162 6163 6164
	if (err)
		return err;

6165
	s->min_partial = min;
6166 6167 6168 6169
	return length;
}
SLAB_ATTR(min_partial);

6170 6171
static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
{
	unsigned int nr_partial = 0;
#ifdef CONFIG_SLUB_CPU_PARTIAL
	nr_partial = s->cpu_partial;
#endif

	return sysfs_emit(buf, "%u\n", nr_partial);
}

static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
				 size_t length)
{
	unsigned int objects;
	int err;

	err = kstrtouint(buf, 10, &objects);
	if (err)
		return err;
	/* Non-zero values only make sense with per-cpu partial support. */
	if (objects && !kmem_cache_has_cpu_partial(s))
		return -EINVAL;

	slub_set_cpu_partial(s, objects);
	flush_all(s);
	return length;
}
SLAB_ATTR(cpu_partial);

Christoph Lameter's avatar
Christoph Lameter committed
6198 6199
static ssize_t ctor_show(struct kmem_cache *s, char *buf)
{
6200 6201
	if (!s->ctor)
		return 0;
6202
	return sysfs_emit(buf, "%pS\n", s->ctor);
Christoph Lameter's avatar
Christoph Lameter committed
6203 6204 6205 6206 6207
}
SLAB_ATTR_RO(ctor);

static ssize_t aliases_show(struct kmem_cache *s, char *buf)
{
6208
	return sysfs_emit(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
Christoph Lameter's avatar
Christoph Lameter committed
6209 6210 6211 6212 6213
}
SLAB_ATTR_RO(aliases);

static ssize_t partial_show(struct kmem_cache *s, char *buf)
{
6214
	return show_slab_objects(s, buf, SO_PARTIAL);
Christoph Lameter's avatar
Christoph Lameter committed
6215 6216 6217 6218 6219
}
SLAB_ATTR_RO(partial);

static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
{
6220
	return show_slab_objects(s, buf, SO_CPU);
Christoph Lameter's avatar
Christoph Lameter committed
6221 6222 6223
}
SLAB_ATTR_RO(cpu_slabs);

6224 6225 6226 6227 6228 6229
static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
{
	return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
}
SLAB_ATTR_RO(objects_partial);

6230 6231 6232
static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
{
	int objects = 0;
6233
	int slabs = 0;
6234
	int cpu __maybe_unused;
6235
	int len = 0;
6236

6237
#ifdef CONFIG_SLUB_CPU_PARTIAL
6238
	for_each_online_cpu(cpu) {
6239
		struct slab *slab;
6240

6241
		slab = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
6242

6243
		if (slab)
6244
			slabs += data_race(slab->slabs);
6245
	}
6246
#endif
6247

6248
	/* Approximate half-full slabs, see slub_set_cpu_partial() */
6249 6250
	objects = (slabs * oo_objects(s->oo)) / 2;
	len += sysfs_emit_at(buf, len, "%d(%d)", objects, slabs);
6251

6252
#ifdef CONFIG_SLUB_CPU_PARTIAL
6253
	for_each_online_cpu(cpu) {
6254
		struct slab *slab;
6255

6256 6257
		slab = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
		if (slab) {
6258
			slabs = data_race(slab->slabs);
6259
			objects = (slabs * oo_objects(s->oo)) / 2;
6260
			len += sysfs_emit_at(buf, len, " C%d=%d(%d)",
6261
					     cpu, objects, slabs);
6262
		}
6263 6264
	}
#endif
6265 6266 6267
	len += sysfs_emit_at(buf, len, "\n");

	return len;
6268 6269 6270
}
SLAB_ATTR_RO(slabs_cpu_partial);

6271 6272
static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
{
6273
	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
6274
}
6275
SLAB_ATTR_RO(reclaim_account);
6276 6277 6278

static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
{
6279
	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
6280 6281 6282 6283 6284 6285
}
SLAB_ATTR_RO(hwcache_align);

#ifdef CONFIG_ZONE_DMA
static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
{
6286
	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
6287 6288 6289 6290
}
SLAB_ATTR_RO(cache_dma);
#endif

6291
#ifdef CONFIG_HARDENED_USERCOPY
static ssize_t usersize_show(struct kmem_cache *s, char *buf)
{
	return sysfs_emit(buf, "%u\n", s->usersize);
}
SLAB_ATTR_RO(usersize);
#endif
6298

6299 6300
static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
{
6301
	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU));
6302 6303 6304
}
SLAB_ATTR_RO(destroy_by_rcu);

6305
#ifdef CONFIG_SLUB_DEBUG
6306 6307 6308 6309 6310 6311
static ssize_t slabs_show(struct kmem_cache *s, char *buf)
{
	return show_slab_objects(s, buf, SO_ALL);
}
SLAB_ATTR_RO(slabs);

6312 6313 6314 6315 6316 6317
static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
{
	return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
}
SLAB_ATTR_RO(total_objects);

6318 6319 6320 6321 6322 6323
static ssize_t objects_show(struct kmem_cache *s, char *buf)
{
	return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
}
SLAB_ATTR_RO(objects);

Christoph Lameter's avatar
Christoph Lameter committed
6324 6325
static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
{
6326
	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS));
Christoph Lameter's avatar
Christoph Lameter committed
6327
}
6328
SLAB_ATTR_RO(sanity_checks);
Christoph Lameter's avatar
Christoph Lameter committed
6329 6330 6331

static ssize_t trace_show(struct kmem_cache *s, char *buf)
{
6332
	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TRACE));
Christoph Lameter's avatar
Christoph Lameter committed
6333
}
6334
SLAB_ATTR_RO(trace);
Christoph Lameter's avatar
Christoph Lameter committed
6335 6336 6337

static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
{
6338
	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
Christoph Lameter's avatar
Christoph Lameter committed
6339 6340
}

6341
SLAB_ATTR_RO(red_zone);
Christoph Lameter's avatar
Christoph Lameter committed
6342 6343 6344

static ssize_t poison_show(struct kmem_cache *s, char *buf)
{
6345
	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_POISON));
Christoph Lameter's avatar
Christoph Lameter committed
6346 6347
}

6348
SLAB_ATTR_RO(poison);
Christoph Lameter's avatar
Christoph Lameter committed
6349 6350 6351

static ssize_t store_user_show(struct kmem_cache *s, char *buf)
{
6352
	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
Christoph Lameter's avatar
Christoph Lameter committed
6353 6354
}

6355
SLAB_ATTR_RO(store_user);
Christoph Lameter's avatar
Christoph Lameter committed
6356

6357 6358 6359 6360 6361 6362 6363 6364
static ssize_t validate_show(struct kmem_cache *s, char *buf)
{
	return 0;
}

/* Writing '1' triggers a full consistency check of a debug cache. */
static ssize_t validate_store(struct kmem_cache *s,
			const char *buf, size_t length)
{
	int ret = -EINVAL;

	if (buf[0] == '1' && kmem_cache_debug(s)) {
		ret = validate_slab_cache(s);
		if (ret >= 0)
			ret = length;
	}
	return ret;
}
SLAB_ATTR(validate);
6375 6376 6377 6378 6379 6380

#endif /* CONFIG_SLUB_DEBUG */

#ifdef CONFIG_FAILSLAB
static ssize_t failslab_show(struct kmem_cache *s, char *buf)
{
6381
	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
6382
}
6383 6384 6385 6386 6387 6388 6389 6390 6391 6392 6393 6394 6395 6396 6397

static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
				size_t length)
{
	if (s->refcount > 1)
		return -EINVAL;

	if (buf[0] == '1')
		WRITE_ONCE(s->flags, s->flags | SLAB_FAILSLAB);
	else
		WRITE_ONCE(s->flags, s->flags & ~SLAB_FAILSLAB);

	return length;
}
SLAB_ATTR(failslab);
6398
#endif
6399

6400 6401 6402 6403 6404 6405 6406 6407
static ssize_t shrink_show(struct kmem_cache *s, char *buf)
{
	return 0;
}

/* Writing '1' shrinks the cache; anything else is rejected. */
static ssize_t shrink_store(struct kmem_cache *s,
			const char *buf, size_t length)
{
	if (buf[0] != '1')
		return -EINVAL;

	kmem_cache_shrink(s);
	return length;
}
SLAB_ATTR(shrink);

Christoph Lameter's avatar
Christoph Lameter committed
6416
#ifdef CONFIG_NUMA
6417
static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
Christoph Lameter's avatar
Christoph Lameter committed
6418
{
6419
	return sysfs_emit(buf, "%u\n", s->remote_node_defrag_ratio / 10);
Christoph Lameter's avatar
Christoph Lameter committed
6420 6421
}

6422
static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
Christoph Lameter's avatar
Christoph Lameter committed
6423 6424
				const char *buf, size_t length)
{
6425
	unsigned int ratio;
6426 6427
	int err;

6428
	err = kstrtouint(buf, 10, &ratio);
6429 6430
	if (err)
		return err;
6431 6432
	if (ratio > 100)
		return -ERANGE;
6433

6434
	s->remote_node_defrag_ratio = ratio * 10;
Christoph Lameter's avatar
Christoph Lameter committed
6435 6436 6437

	return length;
}
6438
SLAB_ATTR(remote_node_defrag_ratio);
Christoph Lameter's avatar
Christoph Lameter committed
6439 6440
#endif

6441 6442 6443 6444 6445
#ifdef CONFIG_SLUB_STATS
static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
{
	unsigned long sum  = 0;
	int cpu;
6446
	int len = 0;
6447
	int *data = kmalloc_array(nr_cpu_ids, sizeof(int), GFP_KERNEL);
6448 6449 6450 6451 6452

	if (!data)
		return -ENOMEM;

	for_each_online_cpu(cpu) {
6453
		unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si];
6454 6455 6456 6457 6458

		data[cpu] = x;
		sum += x;
	}

6459
	len += sysfs_emit_at(buf, len, "%lu", sum);
6460

6461
#ifdef CONFIG_SMP
6462
	for_each_online_cpu(cpu) {
6463 6464 6465
		if (data[cpu])
			len += sysfs_emit_at(buf, len, " C%d=%u",
					     cpu, data[cpu]);
6466
	}
6467
#endif
6468
	kfree(data);
6469 6470 6471
	len += sysfs_emit_at(buf, len, "\n");

	return len;
6472 6473
}

6474 6475 6476 6477 6478
static void clear_stat(struct kmem_cache *s, enum stat_item si)
{
	int cpu;

	for_each_online_cpu(cpu)
6479
		per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0;
6480 6481
}

6482 6483 6484 6485 6486
#define STAT_ATTR(si, text) 					\
static ssize_t text##_show(struct kmem_cache *s, char *buf)	\
{								\
	return show_stat(s, buf, si);				\
}								\
6487 6488 6489 6490 6491 6492 6493 6494 6495
static ssize_t text##_store(struct kmem_cache *s,		\
				const char *buf, size_t length)	\
{								\
	if (buf[0] != '0')					\
		return -EINVAL;					\
	clear_stat(s, si);					\
	return length;						\
}								\
SLAB_ATTR(text);						\
6496 6497 6498 6499 6500 6501 6502 6503 6504 6505 6506

STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
STAT_ATTR(FREE_FASTPATH, free_fastpath);
STAT_ATTR(FREE_SLOWPATH, free_slowpath);
STAT_ATTR(FREE_FROZEN, free_frozen);
STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
STAT_ATTR(ALLOC_SLAB, alloc_slab);
STAT_ATTR(ALLOC_REFILL, alloc_refill);
6507
STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch);
6508 6509 6510 6511 6512 6513 6514
STAT_ATTR(FREE_SLAB, free_slab);
STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
6515
STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass);
6516
STAT_ATTR(ORDER_FALLBACK, order_fallback);
6517 6518
STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
6519 6520
STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc);
STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free);
6521 6522
STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node);
STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain);
6523
#endif	/* CONFIG_SLUB_STATS */
6524

6525 6526 6527 6528 6529 6530 6531 6532 6533 6534 6535 6536 6537 6538 6539 6540 6541 6542 6543 6544 6545 6546 6547
#ifdef CONFIG_KFENCE
static ssize_t skip_kfence_show(struct kmem_cache *s, char *buf)
{
	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_SKIP_KFENCE));
}

static ssize_t skip_kfence_store(struct kmem_cache *s,
			const char *buf, size_t length)
{
	int ret = length;

	if (buf[0] == '0')
		s->flags &= ~SLAB_SKIP_KFENCE;
	else if (buf[0] == '1')
		s->flags |= SLAB_SKIP_KFENCE;
	else
		ret = -EINVAL;

	return ret;
}
SLAB_ATTR(skip_kfence);
#endif

6548
static struct attribute *slab_attrs[] = {
Christoph Lameter's avatar
Christoph Lameter committed
6549 6550 6551 6552
	&slab_size_attr.attr,
	&object_size_attr.attr,
	&objs_per_slab_attr.attr,
	&order_attr.attr,
6553
	&min_partial_attr.attr,
6554
	&cpu_partial_attr.attr,
6555
	&objects_partial_attr.attr,
Christoph Lameter's avatar
Christoph Lameter committed
6556 6557 6558 6559 6560 6561 6562 6563
	&partial_attr.attr,
	&cpu_slabs_attr.attr,
	&ctor_attr.attr,
	&aliases_attr.attr,
	&align_attr.attr,
	&hwcache_align_attr.attr,
	&reclaim_account_attr.attr,
	&destroy_by_rcu_attr.attr,
6564
	&shrink_attr.attr,
6565
	&slabs_cpu_partial_attr.attr,
6566
#ifdef CONFIG_SLUB_DEBUG
6567
	&total_objects_attr.attr,
6568
	&objects_attr.attr,
6569 6570 6571
	&slabs_attr.attr,
	&sanity_checks_attr.attr,
	&trace_attr.attr,
Christoph Lameter's avatar
Christoph Lameter committed
6572 6573 6574
	&red_zone_attr.attr,
	&poison_attr.attr,
	&store_user_attr.attr,
6575
	&validate_attr.attr,
6576
#endif
Christoph Lameter's avatar
Christoph Lameter committed
6577 6578 6579 6580
#ifdef CONFIG_ZONE_DMA
	&cache_dma_attr.attr,
#endif
#ifdef CONFIG_NUMA
6581
	&remote_node_defrag_ratio_attr.attr,
6582 6583 6584 6585 6586 6587 6588 6589 6590 6591 6592 6593
#endif
#ifdef CONFIG_SLUB_STATS
	&alloc_fastpath_attr.attr,
	&alloc_slowpath_attr.attr,
	&free_fastpath_attr.attr,
	&free_slowpath_attr.attr,
	&free_frozen_attr.attr,
	&free_add_partial_attr.attr,
	&free_remove_partial_attr.attr,
	&alloc_from_partial_attr.attr,
	&alloc_slab_attr.attr,
	&alloc_refill_attr.attr,
6594
	&alloc_node_mismatch_attr.attr,
6595 6596 6597 6598 6599 6600 6601
	&free_slab_attr.attr,
	&cpuslab_flush_attr.attr,
	&deactivate_full_attr.attr,
	&deactivate_empty_attr.attr,
	&deactivate_to_head_attr.attr,
	&deactivate_to_tail_attr.attr,
	&deactivate_remote_frees_attr.attr,
6602
	&deactivate_bypass_attr.attr,
6603
	&order_fallback_attr.attr,
6604 6605
	&cmpxchg_double_fail_attr.attr,
	&cmpxchg_double_cpu_fail_attr.attr,
6606 6607
	&cpu_partial_alloc_attr.attr,
	&cpu_partial_free_attr.attr,
6608 6609
	&cpu_partial_node_attr.attr,
	&cpu_partial_drain_attr.attr,
Christoph Lameter's avatar
Christoph Lameter committed
6610
#endif
6611 6612 6613
#ifdef CONFIG_FAILSLAB
	&failslab_attr.attr,
#endif
6614
#ifdef CONFIG_HARDENED_USERCOPY
6615
	&usersize_attr.attr,
6616
#endif
6617 6618 6619
#ifdef CONFIG_KFENCE
	&skip_kfence_attr.attr,
#endif
6620

Christoph Lameter's avatar
Christoph Lameter committed
6621 6622 6623
	NULL
};

6624
static const struct attribute_group slab_attr_group = {
Christoph Lameter's avatar
Christoph Lameter committed
6625 6626 6627 6628 6629 6630 6631 6632 6633 6634 6635 6636 6637 6638 6639 6640
	.attrs = slab_attrs,
};

/*
 * sysfs read dispatch: forward to the slab_attribute's show() handler,
 * or fail with -EIO when the attribute is write-only.
 */
static ssize_t slab_attr_show(struct kobject *kobj,
				struct attribute *attr,
				char *buf)
{
	struct slab_attribute *attribute;
	struct kmem_cache *s;

	attribute = to_slab_attr(attr);
	s = to_slab(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(s, buf);
}

/*
 * sysfs write dispatch: forward to the slab_attribute's store() handler,
 * or fail with -EIO when the attribute is read-only.
 */
static ssize_t slab_attr_store(struct kobject *kobj,
				struct attribute *attr,
				const char *buf, size_t len)
{
	struct slab_attribute *attribute;
	struct kmem_cache *s;

	attribute = to_slab_attr(attr);
	s = to_slab(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(s, buf, len);
}

/* kobject release callback: hand the cache to slab_kmem_cache_release(). */
static void kmem_cache_release(struct kobject *k)
{
	slab_kmem_cache_release(to_slab(k));
}

6665
static const struct sysfs_ops slab_sysfs_ops = {
Christoph Lameter's avatar
Christoph Lameter committed
6666 6667 6668 6669
	.show = slab_attr_show,
	.store = slab_attr_store,
};

6670
static const struct kobj_type slab_ktype = {
Christoph Lameter's avatar
Christoph Lameter committed
6671
	.sysfs_ops = &slab_sysfs_ops,
6672
	.release = kmem_cache_release,
Christoph Lameter's avatar
Christoph Lameter committed
6673 6674
};

/* Parent kset for all slab cache kobjects; created in slab_sysfs_init(). */
static struct kset *slab_kset;

6677 6678 6679 6680 6681
static inline struct kset *cache_kset(struct kmem_cache *s)
{
	return slab_kset;
}

6682
#define ID_STR_LENGTH 32
Christoph Lameter's avatar
Christoph Lameter committed
6683 6684

/* Create a unique string id for a slab cache:
Christoph Lameter's avatar
Christoph Lameter committed
6685 6686
 *
 * Format	:[flags-]size
Christoph Lameter's avatar
Christoph Lameter committed
6687 6688 6689 6690 6691 6692
 */
static char *create_unique_id(struct kmem_cache *s)
{
	char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
	char *p = name;

6693 6694
	if (!name)
		return ERR_PTR(-ENOMEM);
Christoph Lameter's avatar
Christoph Lameter committed
6695 6696 6697 6698 6699 6700 6701 6702 6703 6704 6705

	*p++ = ':';
	/*
	 * First flags affecting slabcache operations. We will only
	 * get here for aliasable slabs so we do not need to support
	 * too many flags. The flags here must cover all flags that
	 * are matched during merging to guarantee that the id is
	 * unique.
	 */
	if (s->flags & SLAB_CACHE_DMA)
		*p++ = 'd';
6706 6707
	if (s->flags & SLAB_CACHE_DMA32)
		*p++ = 'D';
Christoph Lameter's avatar
Christoph Lameter committed
6708 6709
	if (s->flags & SLAB_RECLAIM_ACCOUNT)
		*p++ = 'a';
6710
	if (s->flags & SLAB_CONSISTENCY_CHECKS)
Christoph Lameter's avatar
Christoph Lameter committed
6711
		*p++ = 'F';
6712 6713
	if (s->flags & SLAB_ACCOUNT)
		*p++ = 'A';
Christoph Lameter's avatar
Christoph Lameter committed
6714 6715
	if (p != name + 1)
		*p++ = '-';
6716
	p += snprintf(p, ID_STR_LENGTH - (p - name), "%07u", s->size);
6717

6718 6719 6720 6721
	if (WARN_ON(p > name + ID_STR_LENGTH - 1)) {
		kfree(name);
		return ERR_PTR(-EINVAL);
	}
6722
	kmsan_unpoison_memory(name, p - name);
Christoph Lameter's avatar
Christoph Lameter committed
6723 6724 6725 6726 6727 6728 6729
	return name;
}

static int sysfs_slab_add(struct kmem_cache *s)
{
	int err;
	const char *name;
6730
	struct kset *kset = cache_kset(s);
6731
	int unmergeable = slab_unmergeable(s);
Christoph Lameter's avatar
Christoph Lameter committed
6732

6733 6734 6735 6736
	if (!unmergeable && disable_higher_order_debug &&
			(slub_debug & DEBUG_METADATA_FLAGS))
		unmergeable = 1;

Christoph Lameter's avatar
Christoph Lameter committed
6737 6738 6739 6740 6741 6742
	if (unmergeable) {
		/*
		 * Slabcache can never be merged so we can use the name proper.
		 * This is typically the case for debug situations. In that
		 * case we can catch duplicate names easily.
		 */
6743
		sysfs_remove_link(&slab_kset->kobj, s->name);
Christoph Lameter's avatar
Christoph Lameter committed
6744 6745 6746 6747 6748 6749 6750
		name = s->name;
	} else {
		/*
		 * Create a unique name for the slab as a target
		 * for the symlinks.
		 */
		name = create_unique_id(s);
6751 6752
		if (IS_ERR(name))
			return PTR_ERR(name);
Christoph Lameter's avatar
Christoph Lameter committed
6753 6754
	}

6755
	s->kobj.kset = kset;
6756
	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
6757
	if (err)
6758
		goto out;
Christoph Lameter's avatar
Christoph Lameter committed
6759 6760

	err = sysfs_create_group(&s->kobj, &slab_attr_group);
6761 6762
	if (err)
		goto out_del_kobj;
6763

Christoph Lameter's avatar
Christoph Lameter committed
6764 6765 6766 6767
	if (!unmergeable) {
		/* Setup first alias */
		sysfs_slab_alias(s, s->name);
	}
6768 6769 6770 6771 6772 6773 6774
out:
	if (!unmergeable)
		kfree(name);
	return err;
out_del_kobj:
	kobject_del(&s->kobj);
	goto out;
Christoph Lameter's avatar
Christoph Lameter committed
6775 6776
}

6777 6778
void sysfs_slab_unlink(struct kmem_cache *s)
{
6779
	kobject_del(&s->kobj);
6780 6781
}

6782 6783
void sysfs_slab_release(struct kmem_cache *s)
{
6784
	kobject_put(&s->kobj);
Christoph Lameter's avatar
Christoph Lameter committed
6785 6786 6787 6788
}

/*
 * Need to buffer aliases during bootup until sysfs becomes
 * available lest we lose that information.
 */
struct saved_alias {
	struct kmem_cache *s;
	const char *name;
	struct saved_alias *next;
};

/* Singly-linked list of aliases recorded before sysfs is available. */
static struct saved_alias *alias_list;

static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
{
	struct saved_alias *al;

6803
	if (slab_state == FULL) {
Christoph Lameter's avatar
Christoph Lameter committed
6804 6805 6806
		/*
		 * If we have a leftover link then remove it.
		 */
6807 6808
		sysfs_remove_link(&slab_kset->kobj, name);
		return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
Christoph Lameter's avatar
Christoph Lameter committed
6809 6810 6811 6812 6813 6814 6815 6816 6817 6818
	}

	al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
	if (!al)
		return -ENOMEM;

	al->s = s;
	al->name = name;
	al->next = alias_list;
	alias_list = al;
6819
	kmsan_unpoison_memory(al, sizeof(*al));
Christoph Lameter's avatar
Christoph Lameter committed
6820 6821 6822 6823 6824
	return 0;
}

/*
 * Late initcall: create the slab kset, register every cache that was
 * created during boot, then replay the buffered alias list.
 */
static int __init slab_sysfs_init(void)
{
	struct kmem_cache *s;
	int err;

	mutex_lock(&slab_mutex);

	slab_kset = kset_create_and_add("slab", NULL, kernel_kobj);
	if (!slab_kset) {
		mutex_unlock(&slab_mutex);
		pr_err("Cannot register slab subsystem.\n");
		return -ENOMEM;
	}

	slab_state = FULL;

	list_for_each_entry(s, &slab_caches, list) {
		err = sysfs_slab_add(s);
		if (err)
			pr_err("SLUB: Unable to add boot slab %s to sysfs\n",
			       s->name);
	}

	/* Replay aliases queued by sysfs_slab_alias() before sysfs was up. */
	while (alias_list) {
		struct saved_alias *al = alias_list;

		alias_list = alias_list->next;
		err = sysfs_slab_alias(al->s, al->name);
		if (err)
			pr_err("SLUB: Unable to add boot slab alias %s to sysfs\n",
			       al->name);
		kfree(al);
	}

	mutex_unlock(&slab_mutex);
	return 0;
}
late_initcall(slab_sysfs_init);
#endif /* SLAB_SUPPORTS_SYSFS */
6862

#if defined(CONFIG_SLUB_DEBUG) && defined(CONFIG_DEBUG_FS)
/*
 * seq_file show(): print one struct location entry — hit count, caller,
 * optional waste/age/pid/cpu/node summaries and (with stack depot) the
 * saved stack trace.
 */
static int slab_debugfs_show(struct seq_file *seq, void *v)
{
	struct loc_track *t = seq->private;
	struct location *l;
	unsigned long idx;

	idx = (unsigned long) t->idx;
	if (idx < t->count) {
		l = &t->loc[idx];

		seq_printf(seq, "%7ld ", l->count);

		if (l->addr)
			seq_printf(seq, "%pS", (void *)l->addr);
		else
			seq_puts(seq, "<not-available>");

		if (l->waste)
			seq_printf(seq, " waste=%lu/%lu",
				l->count * l->waste, l->waste);

		if (l->sum_time != l->min_time) {
			seq_printf(seq, " age=%ld/%llu/%ld",
				l->min_time, div_u64(l->sum_time, l->count),
				l->max_time);
		} else
			seq_printf(seq, " age=%ld", l->min_time);

		if (l->min_pid != l->max_pid)
			seq_printf(seq, " pid=%ld-%ld", l->min_pid, l->max_pid);
		else
			seq_printf(seq, " pid=%ld",
				l->min_pid);

		if (num_online_cpus() > 1 && !cpumask_empty(to_cpumask(l->cpus)))
			seq_printf(seq, " cpus=%*pbl",
				 cpumask_pr_args(to_cpumask(l->cpus)));

		if (nr_online_nodes > 1 && !nodes_empty(l->nodes))
			seq_printf(seq, " nodes=%*pbl",
				 nodemask_pr_args(&l->nodes));

#ifdef CONFIG_STACKDEPOT
		{
			depot_stack_handle_t handle;
			unsigned long *entries;
			unsigned int nr_entries, j;

			handle = READ_ONCE(l->handle);
			if (handle) {
				nr_entries = stack_depot_fetch(handle, &entries);
				seq_puts(seq, "\n");
				for (j = 0; j < nr_entries; j++)
					seq_printf(seq, "        %pS\n", (void *)entries[j]);
			}
		}
#endif
		seq_puts(seq, "\n");
	}

	if (!idx && !t->count)
		seq_puts(seq, "No data\n");

	return 0;
}

/* seq_file stop(): nothing to tear down per iteration pass. */
static void slab_debugfs_stop(struct seq_file *seq, void *v)
{
}

/*
 * seq_file next(): advance the position and mirror it into t->idx for
 * show(). Iteration ends once *ppos passes the number of locations.
 */
static void *slab_debugfs_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	struct loc_track *t = seq->private;

	t->idx = ++(*ppos);
	if (*ppos <= t->count)
		return ppos;

	return NULL;
}

6945 6946 6947 6948 6949 6950 6951 6952 6953 6954 6955
static int cmp_loc_by_count(const void *a, const void *b, const void *data)
{
	struct location *loc1 = (struct location *)a;
	struct location *loc2 = (struct location *)b;

	if (loc1->count > loc2->count)
		return -1;
	else
		return 1;
}

6956 6957
static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos)
{
6958 6959 6960
	struct loc_track *t = seq->private;

	t->idx = *ppos;
6961 6962 6963 6964 6965 6966 6967 6968 6969 6970 6971 6972 6973 6974 6975 6976 6977 6978 6979
	return ppos;
}

/* seq_file iterator over the location table collected at open time. */
static const struct seq_operations slab_debugfs_sops = {
	.start  = slab_debugfs_start,
	.next   = slab_debugfs_next,
	.stop   = slab_debugfs_stop,
	.show   = slab_debugfs_show,
};

static int slab_debug_trace_open(struct inode *inode, struct file *filep)
{

	struct kmem_cache_node *n;
	enum track_item alloc;
	int node;
	struct loc_track *t = __seq_open_private(filep, &slab_debugfs_sops,
						sizeof(struct loc_track));
	struct kmem_cache *s = file_inode(filep)->i_private;
6980 6981
	unsigned long *obj_map;

6982 6983 6984
	if (!t)
		return -ENOMEM;

6985
	obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL);
6986 6987
	if (!obj_map) {
		seq_release_private(inode, filep);
6988
		return -ENOMEM;
6989
	}
6990 6991 6992 6993 6994 6995

	if (strcmp(filep->f_path.dentry->d_name.name, "alloc_traces") == 0)
		alloc = TRACK_ALLOC;
	else
		alloc = TRACK_FREE;

6996 6997
	if (!alloc_loc_track(t, PAGE_SIZE / sizeof(struct location), GFP_KERNEL)) {
		bitmap_free(obj_map);
6998
		seq_release_private(inode, filep);
6999
		return -ENOMEM;
7000
	}
7001 7002 7003

	for_each_kmem_cache_node(s, node, n) {
		unsigned long flags;
7004
		struct slab *slab;
7005

7006
		if (!node_nr_slabs(n))
7007 7008 7009
			continue;

		spin_lock_irqsave(&n->list_lock, flags);
7010 7011 7012 7013
		list_for_each_entry(slab, &n->partial, slab_list)
			process_slab(t, s, slab, alloc, obj_map);
		list_for_each_entry(slab, &n->full, slab_list)
			process_slab(t, s, slab, alloc, obj_map);
7014 7015 7016
		spin_unlock_irqrestore(&n->list_lock, flags);
	}

7017 7018 7019 7020
	/* Sort locations by count */
	sort_r(t->loc, t->count, sizeof(struct location),
		cmp_loc_by_count, NULL, NULL);

7021
	bitmap_free(obj_map);
7022 7023 7024 7025 7026 7027 7028 7029 7030 7031 7032 7033 7034 7035 7036 7037 7038 7039 7040 7041 7042 7043 7044 7045 7046 7047 7048 7049 7050 7051 7052 7053 7054 7055 7056 7057 7058
	return 0;
}

/* Release handler: free the location table, then the seq_file state. */
static int slab_debug_trace_release(struct inode *inode, struct file *file)
{
	struct seq_file *sf = file->private_data;
	struct loc_track *lt = sf->private;

	free_loc_track(lt);
	return seq_release_private(inode, file);
}

/* File operations for the alloc_traces/free_traces debugfs files. */
static const struct file_operations slab_debugfs_fops = {
	.open    = slab_debug_trace_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = slab_debug_trace_release,
};

/*
 * Create the per-cache debugfs directory with its alloc_traces and
 * free_traces files. No-op when the debugfs root was never created.
 */
static void debugfs_slab_add(struct kmem_cache *s)
{
	struct dentry *dir;

	if (unlikely(!slab_debugfs_root))
		return;

	dir = debugfs_create_dir(s->name, slab_debugfs_root);

	debugfs_create_file("alloc_traces", 0400, dir, s, &slab_debugfs_fops);
	debugfs_create_file("free_traces", 0400, dir, s, &slab_debugfs_fops);
}

void debugfs_slab_release(struct kmem_cache *s)
{
7059
	debugfs_lookup_and_remove(s->name, slab_debugfs_root);
7060 7061 7062 7063 7064 7065 7066 7067 7068 7069 7070 7071 7072 7073 7074 7075 7076
}

/*
 * Boot-time setup: create the "slab" debugfs root and add entries for
 * every existing cache that records user tracking (SLAB_STORE_USER).
 */
static int __init slab_debugfs_init(void)
{
	struct kmem_cache *cache;

	slab_debugfs_root = debugfs_create_dir("slab", NULL);

	list_for_each_entry(cache, &slab_caches, list) {
		if (cache->flags & SLAB_STORE_USER)
			debugfs_slab_add(cache);
	}

	return 0;
}
__initcall(slab_debugfs_init);
#endif
/*
 * The /proc/slabinfo ABI
 */
#ifdef CONFIG_SLUB_DEBUG
/*
 * Fill @sinfo with per-cache totals summed over all nodes: slab count,
 * object count and in-use objects (total minus free on partial slabs).
 */
void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
{
	unsigned long nr_slabs = 0;
	unsigned long nr_objs = 0;
	unsigned long nr_free = 0;
	int node;
	struct kmem_cache_node *n;

	for_each_kmem_cache_node(s, node, n) {
		nr_slabs += node_nr_slabs(n);
		nr_objs += node_nr_objs(n);
		nr_free += count_partial(n, count_free);
	}

	sinfo->active_objs = nr_objs - nr_free;
	sinfo->num_objs = nr_objs;
	sinfo->active_slabs = nr_slabs;
	sinfo->num_slabs = nr_slabs;
	sinfo->objects_per_slab = oo_objects(s->oo);
	sinfo->cache_order = oo_order(s->oo);
}
#endif /* CONFIG_SLUB_DEBUG */