Commit c2092c12 authored by Vlastimil Babka

mm/slub: Finish struct page to struct slab conversion

Update comments mentioning pages to mention slabs where appropriate.
Also update some goto labels to match.
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Roman Gushchin <guro@fb.com>
parent bb192ed9
@@ -99,7 +99,7 @@ struct kmem_cache {
 #ifdef CONFIG_SLUB_CPU_PARTIAL
 /* Number of per cpu partial objects to keep around */
 unsigned int cpu_partial;
-/* Number of per cpu partial pages to keep around */
+/* Number of per cpu partial slabs to keep around */
 unsigned int cpu_partial_slabs;
 #endif
 struct kmem_cache_order_objects oo;
...
@@ -48,7 +48,7 @@
  * 1. slab_mutex (Global Mutex)
  * 2. node->list_lock (Spinlock)
  * 3. kmem_cache->cpu_slab->lock (Local lock)
- * 4. slab_lock(page) (Only on some arches or for debugging)
+ * 4. slab_lock(slab) (Only on some arches or for debugging)
  * 5. object_map_lock (Only for debugging)
  *
  * slab_mutex
@@ -64,19 +64,19 @@
  *
  * The slab_lock is only used for debugging and on arches that do not
  * have the ability to do a cmpxchg_double. It only protects:
- * A. page->freelist -> List of object free in a page
- * B. page->inuse -> Number of objects in use
- * C. page->objects -> Number of objects in page
- * D. page->frozen -> frozen state
+ * A. slab->freelist -> List of free objects in a slab
+ * B. slab->inuse -> Number of objects in use
+ * C. slab->objects -> Number of objects in slab
+ * D. slab->frozen -> frozen state
  *
  * Frozen slabs
  *
  * If a slab is frozen then it is exempt from list management. It is not
  * on any list except per cpu partial list. The processor that froze the
- * slab is the one who can perform list operations on the page. Other
+ * slab is the one who can perform list operations on the slab. Other
  * processors may put objects onto the freelist but the processor that
  * froze the slab is the only one that can retrieve the objects from the
- * page's freelist.
+ * slab's freelist.
  *
  * list_lock
  *
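
To make the frozen-slab rule concrete, here is a hedged conceptual model in plain C11 atomics (hypothetical types, not the kernel's cmpxchg_double-based code): any CPU may push a freed object onto the freelist, but only the CPU that froze the slab detaches the chain.

#include <stdatomic.h>
#include <stddef.h>

/* Hypothetical object and slab types for the sketch. */
struct object {
	struct object *next;
};

struct frozen_slab_model {
	struct object *_Atomic freelist;
};

/* Any CPU: push a freed object onto the frozen slab's freelist. */
static void remote_free(struct frozen_slab_model *s, struct object *obj)
{
	struct object *head = atomic_load(&s->freelist);

	do {
		obj->next = head;
		/* On failure, head is reloaded and the push is retried. */
	} while (!atomic_compare_exchange_weak(&s->freelist, &head, obj));
}

/* Owner CPU only: detach the whole freelist for local allocation. */
static struct object *owner_take_freelist(struct frozen_slab_model *s)
{
	return atomic_exchange(&s->freelist, NULL);
}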
@@ -135,7 +135,7 @@
  * minimal so we rely on the page allocators per cpu caches for
  * fast frees and allocs.
  *
- * page->frozen The slab is frozen and exempt from list processing.
+ * slab->frozen The slab is frozen and exempt from list processing.
  * This means that the slab is dedicated to a purpose
  * such as satisfying allocations for a specific
  * processor. Objects may be freed in the slab while
@@ -250,7 +250,7 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
 #define OO_SHIFT 16
 #define OO_MASK ((1 << OO_SHIFT) - 1)
-#define MAX_OBJS_PER_PAGE 32767 /* since page.objects is u15 */
+#define MAX_OBJS_PER_PAGE 32767 /* since slab.objects is u15 */
 /* Internal SLUB flags */
 /* Poison object */
@@ -423,8 +423,8 @@ static void slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects)
 /*
  * We take the number of objects but actually limit the number of
- * pages on the per cpu partial list, in order to limit excessive
- * growth of the list. For simplicity we assume that the pages will
+ * slabs on the per cpu partial list, in order to limit excessive
+ * growth of the list. For simplicity we assume that the slabs will
  * be half-full.
  */
 nr_slabs = DIV_ROUND_UP(nr_objects * 2, oo_objects(s->oo));
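
For illustration, a minimal user-space model of the half-full heuristic above; the object count and objects-per-slab figures are made-up examples rather than kernel values.

#include <stdio.h>

/* Mirrors the kernel's DIV_ROUND_UP() round-up division. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* Assumed example: each slab of this cache holds 32 objects. */
	unsigned int objects_per_slab = 32;
	/* Assumed example: keep roughly 120 objects on the per cpu partial list. */
	unsigned int nr_objects = 120;

	/*
	 * Slabs on the per cpu partial list are assumed to be half-full, so
	 * cache enough slabs to hold nr_objects at 50% utilization.
	 */
	unsigned int nr_slabs = DIV_ROUND_UP(nr_objects * 2, objects_per_slab);

	printf("cpu_partial_slabs = %u\n", nr_slabs); /* prints 8 */
	return 0;
}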
@@ -594,9 +594,9 @@ static inline bool slab_add_kunit_errors(void) { return false; }
 #endif
 /*
- * Determine a map of object in use on a page.
+ * Determine a map of objects in use in a slab.
  *
- * Node listlock must be held to guarantee that the page does
+ * Node listlock must be held to guarantee that the slab does
  * not vanish from under us.
  */
 static unsigned long *get_map(struct kmem_cache *s, struct slab *slab)
@@ -1139,7 +1139,7 @@ static int check_slab(struct kmem_cache *s, struct slab *slab)
 }
 /*
- * Determine if a certain object on a page is on the freelist. Must hold the
+ * Determine if a certain object in a slab is on the freelist. Must hold the
  * slab lock to guarantee that the chains are in a consistent state.
  */
 static int on_freelist(struct kmem_cache *s, struct slab *slab, void *search)
@@ -2184,7 +2184,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
 }
 /*
- * Get a page from somewhere. Search in increasing NUMA distances.
+ * Get a slab from somewhere. Search in increasing NUMA distances.
  */
 static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
 struct slab **ret_slab)
@@ -2248,7 +2248,7 @@ static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
 }
 /*
- * Get a partial page, lock it and return it.
+ * Get a partial slab, lock it and return it.
  */
 static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
 struct slab **ret_slab)
@@ -2340,7 +2340,7 @@ static void init_kmem_cache_cpus(struct kmem_cache *s)
 }
 /*
- * Finishes removing the cpu slab. Merges cpu's freelist with page's freelist,
+ * Finishes removing the cpu slab. Merges cpu's freelist with slab's freelist,
  * unfreezes the slabs and puts it on the proper list.
  * Assumes the slab has been already safely taken away from kmem_cache_cpu
  * by the caller.
@@ -2387,18 +2387,18 @@ static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
 }
 /*
- * Stage two: Unfreeze the page while splicing the per-cpu
- * freelist to the head of page's freelist.
+ * Stage two: Unfreeze the slab while splicing the per-cpu
+ * freelist to the head of slab's freelist.
  *
- * Ensure that the page is unfrozen while the list presence
+ * Ensure that the slab is unfrozen while the list presence
  * reflects the actual number of objects during unfreeze.
  *
  * We setup the list membership and then perform a cmpxchg
- * with the count. If there is a mismatch then the page
- * is not unfrozen but the page is on the wrong list.
+ * with the count. If there is a mismatch then the slab
+ * is not unfrozen but the slab is on the wrong list.
  *
  * Then we restart the process which may have to remove
- * the page from the list that we just put it on again
+ * the slab from the list that we just put it on again
  * because the number of objects in the slab may have
  * changed.
  */
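
A rough sketch of the restart pattern described above, using plain C11 atomics and a hypothetical packed counters word (the kernel pairs the freelist pointer with the counters in a cmpxchg_double instead): decide placement from a snapshot, try to install the new counters, and redo the decision on a mismatch.

#include <stdatomic.h>

/* Hypothetical packed counters: low 16 bits = inuse, bit 16 = frozen flag. */
#define INUSE_MASK  0xffffu
#define FROZEN_FLAG (1u << 16)

struct slab_counters_model {
	_Atomic unsigned int counters;
};

static void unfreeze_model(struct slab_counters_model *s)
{
	unsigned int old, new;

	do {
		old = atomic_load(&s->counters);

		/*
		 * Here the real code picks the target list (partial, full or
		 * discard) based on this snapshot, then builds the new value
		 * with the frozen bit cleared.
		 */
		new = old & ~FROZEN_FLAG;

		/*
		 * If a concurrent free changed the counters, the exchange
		 * fails and we restart: the list decision is redone with
		 * fresh values, exactly the retry the comment describes.
		 */
	} while (!atomic_compare_exchange_weak(&s->counters, &old, new));
}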
@@ -2426,9 +2426,8 @@ static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
 if (!lock) {
 lock = 1;
 /*
- * Taking the spinlock removes the possibility
- * that acquire_slab() will see a slab page that
- * is frozen
+ * Taking the spinlock removes the possibility that
+ * acquire_slab() will see a slab that is frozen
  */
 spin_lock_irqsave(&n->list_lock, flags);
 }
@@ -2569,8 +2568,8 @@ static void unfreeze_partials_cpu(struct kmem_cache *s,
 }
 /*
- * Put a page that was just frozen (in __slab_free|get_partial_node) into a
- * partial page slot if available.
+ * Put a slab that was just frozen (in __slab_free|get_partial_node) into a
+ * partial slab slot if available.
  *
  * If we did not find a slot then simply move all the partials to the
  * per node partial list.
@@ -2841,12 +2840,12 @@ static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags)
 }
 /*
- * Check the page->freelist of a page and either transfer the freelist to the
- * per cpu freelist or deactivate the page.
+ * Check the slab->freelist and either transfer the freelist to the
+ * per cpu freelist or deactivate the slab.
  *
- * The page is still frozen if the return value is not NULL.
+ * The slab is still frozen if the return value is not NULL.
  *
- * If this function returns NULL then the page has been unfrozen.
+ * If this function returns NULL then the slab has been unfrozen.
  */
 static inline void *get_freelist(struct kmem_cache *s, struct slab *slab)
 {
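
A hedged caller-side sketch of the return-value contract spelled out above; the wrapper and helper names are stand-ins, loosely modeled on how ___slab_alloc() consumes the result.

/* Hypothetical stand-in declarations so the sketch is self-contained. */
struct kmem_cache;
struct slab;

void *get_freelist(struct kmem_cache *s, struct slab *slab);
void enter_slow_path(struct kmem_cache *s);	/* hypothetical helper */

static void *load_freelist_model(struct kmem_cache *s, struct slab *slab)
{
	void *freelist = get_freelist(s, slab);

	if (!freelist) {
		/* NULL: the slab has been unfrozen; find or allocate a new slab. */
		enter_slow_path(s);
		return NULL;
	}

	/* Non-NULL: the slab is still frozen and this chain feeds the per cpu freelist. */
	return freelist;
}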
@@ -2902,7 +2901,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 stat(s, ALLOC_SLOWPATH);
-reread_page:
+reread_slab:
 slab = READ_ONCE(c->slab);
 if (!slab) {
@@ -2939,11 +2938,11 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 if (unlikely(!pfmemalloc_match(slab, gfpflags)))
 goto deactivate_slab;
-/* must check again c->page in case we got preempted and it changed */
+/* must check again c->slab in case we got preempted and it changed */
 local_lock_irqsave(&s->cpu_slab->lock, flags);
 if (unlikely(slab != c->slab)) {
 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
-goto reread_page;
+goto reread_slab;
 }
 freelist = c->freelist;
 if (freelist)
@@ -2966,8 +2965,8 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 /*
  * freelist is pointing to the list of objects to be used.
- * page is pointing to the page from which the objects are obtained.
- * That page must be frozen for per cpu allocations to work.
+ * slab is pointing to the slab from which the objects are obtained.
+ * That slab must be frozen for per cpu allocations to work.
  */
 VM_BUG_ON(!c->slab->frozen);
 c->freelist = get_freepointer(s, freelist);
@@ -2980,7 +2979,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 local_lock_irqsave(&s->cpu_slab->lock, flags);
 if (slab != c->slab) {
 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
-goto reread_page;
+goto reread_slab;
 }
 freelist = c->freelist;
 c->slab = NULL;
@@ -2994,7 +2993,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 local_lock_irqsave(&s->cpu_slab->lock, flags);
 if (unlikely(c->slab)) {
 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
-goto reread_page;
+goto reread_slab;
 }
 if (unlikely(!slub_percpu_partial(c))) {
 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
@@ -3013,7 +3012,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 freelist = get_partial(s, gfpflags, node, &slab);
 if (freelist)
-goto check_new_page;
+goto check_new_slab;
 slub_put_cpu_ptr(s->cpu_slab);
 slab = new_slab(s, gfpflags, node);
@@ -3025,7 +3024,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 }
 /*
- * No other reference to the page yet so we can
+ * No other reference to the slab yet so we can
  * muck around with it freely without cmpxchg
  */
 freelist = slab->freelist;
@@ -3033,7 +3032,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 stat(s, ALLOC_SLAB);
-check_new_page:
+check_new_slab:
 if (kmem_cache_debug(s)) {
 if (!alloc_debug_processing(s, slab, freelist, addr)) {
@@ -3055,7 +3054,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
  */
 goto return_single;
-retry_load_page:
+retry_load_slab:
 local_lock_irqsave(&s->cpu_slab->lock, flags);
 if (unlikely(c->slab)) {
@@ -3072,7 +3071,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 stat(s, CPUSLAB_FLUSH);
-goto retry_load_page;
+goto retry_load_slab;
 }
 c->slab = slab;
@@ -3169,9 +3168,9 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
 /*
  * Irqless object alloc/free algorithm used here depends on sequence
  * of fetching cpu_slab's data. tid should be fetched before anything
- * on c to guarantee that object and page associated with previous tid
+ * on c to guarantee that object and slab associated with previous tid
  * won't be used with current tid. If we fetch tid first, object and
- * page could be one associated with next tid and our alloc/free
+ * slab could be one associated with next tid and our alloc/free
  * request will be failed. In this case, we will retry. So, no problem.
  */
 barrier();
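
The tid rule above matters because the real fast path reads tid and the freelist separately and later commits with a cmpxchg keyed on both, so reading tid first keeps a stale pairing from ever committing. The coarse user-space model below simplifies by snapshotting both fields in one atomic struct, which is why it needs no barrier, but it shows the same retry-on-mismatch structure; all names are hypothetical.

#include <stdatomic.h>
#include <stddef.h>

/* Hypothetical per-cpu fast path state: freelist head plus a transaction id. */
struct fastpath_state {
	void *freelist;
	unsigned long tid;
};

static _Atomic struct fastpath_state cpu_state;

/* Free objects store the pointer to the next free object in their first word. */
static void *next_object(void *obj)
{
	return *(void **)obj;
}

static void *alloc_fastpath_model(void)
{
	for (;;) {
		/* Snapshot tid together with the freelist (the kernel reads
		 * them separately, which is why it must fetch tid first). */
		struct fastpath_state old = atomic_load(&cpu_state);

		if (!old.freelist)
			return NULL;	/* defer to the slow path */

		struct fastpath_state new = {
			.freelist = next_object(old.freelist),
			.tid = old.tid + 1,
		};

		/* Commit only if neither field changed in the meantime;
		 * otherwise retry, just as the comment above says. */
		if (atomic_compare_exchange_weak(&cpu_state, &old, new))
			return old.freelist;
	}
}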
@@ -3295,7 +3294,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
  * have a longer lifetime than the cpu slabs in most processing loads.
  *
  * So we still attempt to reduce cache line usage. Just take the slab
- * lock and free the item. If there is no additional partial page
+ * lock and free the item. If there is no additional partial slab
  * handling required then we can return immediately.
  */
 static void __slab_free(struct kmem_cache *s, struct slab *slab,
@@ -3373,7 +3372,7 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
 stat(s, FREE_FROZEN);
 } else if (new.frozen) {
 /*
- * If we just froze the page then put it onto the
+ * If we just froze the slab then put it onto the
  * per cpu partial list.
  */
 put_cpu_partial(s, slab, 1);
@@ -3427,7 +3426,7 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
  * with all sorts of special processing.
  *
  * Bulk free of a freelist with several objects (all pointing to the
- * same page) possible by specifying head and tail ptr, plus objects
+ * same slab) possible by specifying head and tail ptr, plus objects
  * count (cnt). Bulk free indicated by tail pointer being set.
  */
 static __always_inline void do_slab_free(struct kmem_cache *s,
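
For context, the batched freeing this comment refers to is what backs the public bulk API. A minimal kernel-style usage sketch, where my_cache is an assumed cache created elsewhere with kmem_cache_create():

#include <linux/kernel.h>
#include <linux/slab.h>

/* Assumed: created elsewhere with kmem_cache_create(). */
extern struct kmem_cache *my_cache;

static void bulk_example(void)
{
	void *objs[16];
	int got;

	/* Allocate up to 16 objects in one call; returns the number obtained. */
	got = kmem_cache_alloc_bulk(my_cache, GFP_KERNEL, ARRAY_SIZE(objs), objs);
	if (!got)
		return;

	/* ... use objs[0..got-1] ... */

	/* Free them back in one call; objects that share a slab are handed to
	 * do_slab_free() as a detached head/tail freelist with a count. */
	kmem_cache_free_bulk(my_cache, got, objs);
}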
@@ -4213,7 +4212,7 @@ static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
 #endif
 /*
- * The larger the object size is, the more pages we want on the partial
+ * The larger the object size is, the more slabs we want on the partial
  * list to avoid pounding the page allocator excessively.
  */
 set_min_partial(s, ilog2(s->size) / 2);
@@ -4598,12 +4597,12 @@ static int __kmem_cache_do_shrink(struct kmem_cache *s)
  * Build lists of slabs to discard or promote.
  *
  * Note that concurrent frees may occur while we hold the
- * list_lock. page->inuse here is the upper limit.
+ * list_lock. slab->inuse here is the upper limit.
  */
 list_for_each_entry_safe(slab, t, &n->partial, slab_list) {
 int free = slab->objects - slab->inuse;
-/* Do not reread page->inuse */
+/* Do not reread slab->inuse */
 barrier();
 /* We do not keep full slabs on the list */
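
A rough user-space model of the classification the loop above performs: completely free slabs are queued for discard, and slabs with few free objects are promoted so that the fullest slabs are allocated from first. The SHRINK_PROMOTE_MAX cutoff and the sample numbers are assumptions modeled on mm/slub.c.

#include <stdio.h>

#define SHRINK_PROMOTE_MAX 32	/* modeled on mm/slub.c */

struct slab_model {
	unsigned int objects;	/* slab capacity */
	unsigned int inuse;	/* allocated objects; an upper limit under list_lock */
};

/* Classify one partial slab the way the shrink loop does. */
static const char *classify(const struct slab_model *s)
{
	unsigned int free = s->objects - s->inuse;

	if (free == s->objects)
		return "discard: completely free";
	if (free <= SHRINK_PROMOTE_MAX)
		return "promote toward the head of the partial list";
	return "leave in place";
}

int main(void)
{
	struct slab_model samples[] = {
		{ .objects = 32,  .inuse = 0   },	/* fully free */
		{ .objects = 32,  .inuse = 30  },	/* nearly full */
		{ .objects = 512, .inuse = 100 },	/* many free objects */
	};

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("slab %u: %s\n", i, classify(&samples[i]));

	return 0;
}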
@@ -5482,7 +5481,7 @@ static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
 slabs += slab->slabs;
 }
-/* Approximate half-full pages , see slub_set_cpu_partial() */
+/* Approximate half-full slabs, see slub_set_cpu_partial() */
 objects = (slabs * oo_objects(s->oo)) / 2;
 len += sysfs_emit_at(buf, len, "%d(%d)", objects, slabs);
...