Commit 16cb0ec7 authored by Tobin C. Harding, committed by Linus Torvalds

slab: use slab_list instead of lru

Currently we use the page->lru list for maintaining lists of slabs.  The
page structure already provides a list (slab_list) that can be used for
this purpose.  Using it makes the code cleaner, since we are no longer
overloading the lru list.

Use the slab_list instead of the lru list for maintaining lists of slabs.
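
The rename is purely cosmetic at the memory level: lru and slab_list are
members of the same union in struct page, so they alias the same storage.
As a rough sketch (simplified here; the real definition in
include/linux/mm_types.h nests these inside further anonymous structs):

	struct page {
		unsigned long flags;
		union {
			struct list_head lru;		/* page cache / anon LRU */
			struct list_head slab_list;	/* slab allocator lists */
			/* ... other overlapping users elided ... */
		};
		/* ... */
	};

A page sits on at most one of these lists at a time, so switching the
slab code to slab_list changes no layout or behaviour; it only records
which subsystem owns the field at each use site.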

Link: http://lkml.kernel.org/r/20190402230545.2929-7-tobin@kernel.org
Signed-off-by: Tobin C. Harding <tobin@kernel.org>
Acked-by: Christoph Lameter <cl@linux.com>
Reviewed-by: Roman Gushchin <guro@fb.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 916ac052
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1674,8 +1674,8 @@ static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list)
 {
 	struct page *page, *n;
 
-	list_for_each_entry_safe(page, n, list, lru) {
-		list_del(&page->lru);
+	list_for_each_entry_safe(page, n, list, slab_list) {
+		list_del(&page->slab_list);
 		slab_destroy(cachep, page);
 	}
 }
@@ -2231,8 +2231,8 @@ static int drain_freelist(struct kmem_cache *cache,
 		goto out;
 	}
 
-	page = list_entry(p, struct page, lru);
-	list_del(&page->lru);
+	page = list_entry(p, struct page, slab_list);
+	list_del(&page->slab_list);
 	n->free_slabs--;
 	n->total_slabs--;
 	/*
@@ -2691,13 +2691,13 @@ static void cache_grow_end(struct kmem_cache *cachep, struct page *page)
 	if (!page)
 		return;
 
-	INIT_LIST_HEAD(&page->lru);
+	INIT_LIST_HEAD(&page->slab_list);
 	n = get_node(cachep, page_to_nid(page));
 
 	spin_lock(&n->list_lock);
 	n->total_slabs++;
 	if (!page->active) {
-		list_add_tail(&page->lru, &(n->slabs_free));
+		list_add_tail(&page->slab_list, &n->slabs_free);
 		n->free_slabs++;
 	} else
 		fixup_slab_list(cachep, n, page, &list);
@@ -2806,9 +2806,9 @@ static inline void fixup_slab_list(struct kmem_cache *cachep,
 				void **list)
 {
 	/* move slabp to correct slabp list: */
-	list_del(&page->lru);
+	list_del(&page->slab_list);
 	if (page->active == cachep->num) {
-		list_add(&page->lru, &n->slabs_full);
+		list_add(&page->slab_list, &n->slabs_full);
 		if (OBJFREELIST_SLAB(cachep)) {
 #if DEBUG
 			/* Poisoning will be done without holding the lock */
@@ -2822,7 +2822,7 @@ static inline void fixup_slab_list(struct kmem_cache *cachep,
 			page->freelist = NULL;
 		}
 	} else
-		list_add(&page->lru, &n->slabs_partial);
+		list_add(&page->slab_list, &n->slabs_partial);
 }
 
 /* Try to find non-pfmemalloc slab if needed */
@@ -2845,20 +2845,20 @@ static noinline struct page *get_valid_first_slab(struct kmem_cache_node *n,
 	}
 
 	/* Move pfmemalloc slab to the end of list to speed up next search */
-	list_del(&page->lru);
+	list_del(&page->slab_list);
 	if (!page->active) {
-		list_add_tail(&page->lru, &n->slabs_free);
+		list_add_tail(&page->slab_list, &n->slabs_free);
 		n->free_slabs++;
 	} else
-		list_add_tail(&page->lru, &n->slabs_partial);
+		list_add_tail(&page->slab_list, &n->slabs_partial);
 
-	list_for_each_entry(page, &n->slabs_partial, lru) {
+	list_for_each_entry(page, &n->slabs_partial, slab_list) {
 		if (!PageSlabPfmemalloc(page))
 			return page;
 	}
 
 	n->free_touched = 1;
-	list_for_each_entry(page, &n->slabs_free, lru) {
+	list_for_each_entry(page, &n->slabs_free, slab_list) {
 		if (!PageSlabPfmemalloc(page)) {
 			n->free_slabs--;
 			return page;
@@ -2873,11 +2873,12 @@ static struct page *get_first_slab(struct kmem_cache_node *n, bool pfmemalloc)
 	struct page *page;
 
 	assert_spin_locked(&n->list_lock);
-	page = list_first_entry_or_null(&n->slabs_partial, struct page, lru);
+	page = list_first_entry_or_null(&n->slabs_partial, struct page,
+					slab_list);
 	if (!page) {
 		n->free_touched = 1;
 		page = list_first_entry_or_null(&n->slabs_free, struct page,
-						lru);
+						slab_list);
 		if (page)
 			n->free_slabs--;
 	}
@@ -3378,29 +3379,29 @@ static void free_block(struct kmem_cache *cachep, void **objpp,
 		objp = objpp[i];
 
 		page = virt_to_head_page(objp);
-		list_del(&page->lru);
+		list_del(&page->slab_list);
 		check_spinlock_acquired_node(cachep, node);
 		slab_put_obj(cachep, page, objp);
 		STATS_DEC_ACTIVE(cachep);
 
 		/* fixup slab chains */
 		if (page->active == 0) {
-			list_add(&page->lru, &n->slabs_free);
+			list_add(&page->slab_list, &n->slabs_free);
 			n->free_slabs++;
 		} else {
 			/* Unconditionally move a slab to the end of the
 			 * partial list on free - maximum time for the
 			 * other objects to be freed, too.
 			 */
-			list_add_tail(&page->lru, &n->slabs_partial);
+			list_add_tail(&page->slab_list, &n->slabs_partial);
 		}
 	}
 
 	while (n->free_objects > n->free_limit && !list_empty(&n->slabs_free)) {
 		n->free_objects -= cachep->num;
 
-		page = list_last_entry(&n->slabs_free, struct page, lru);
-		list_move(&page->lru, list);
+		page = list_last_entry(&n->slabs_free, struct page, slab_list);
+		list_move(&page->slab_list, list);
 		n->free_slabs--;
 		n->total_slabs--;
 	}
@@ -3438,7 +3439,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
 		int i = 0;
 		struct page *page;
 
-		list_for_each_entry(page, &n->slabs_free, lru) {
+		list_for_each_entry(page, &n->slabs_free, slab_list) {
 			BUG_ON(page->active);
 
 			i++;
@@ -4302,9 +4303,9 @@ static int leaks_show(struct seq_file *m, void *p)
 		check_irq_on();
 		spin_lock_irq(&n->list_lock);
 
-		list_for_each_entry(page, &n->slabs_full, lru)
+		list_for_each_entry(page, &n->slabs_full, slab_list)
 			handle_slab(x, cachep, page);
-		list_for_each_entry(page, &n->slabs_partial, lru)
+		list_for_each_entry(page, &n->slabs_partial, slab_list)
 			handle_slab(x, cachep, page);
 		spin_unlock_irq(&n->list_lock);
 	}