Commit 61728d1e authored by Christoph Lameter, committed by Pekka Enberg

slub: Pass kmem_cache struct to lock and freeze slab

We need more information about the slab for the cmpxchg implementation.
Signed-off-by: Christoph Lameter <cl@linux.com>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
parent 5cc6eee8
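For context on the one-line rationale above: the lockless work that follows this patch replaces the per-slab lock with a double-word cmpxchg on the page's freelist and counters, and such a helper must consult the kmem_cache (for example, a per-cache flag saying whether the cmpxchg path is usable). Below is a minimal sketch of that kind of helper; the name freeze_slab_cmpxchg, the __CMPXCHG_DOUBLE flag, and the page->counters field are assumptions drawn from the later series, not part of this commit:

/*
 * Sketch only (not in this commit): the shape of freeze helper the
 * cmpxchg conversion enables. It needs the kmem_cache so it can
 * check a per-cache flag choosing between the double-word cmpxchg
 * fast path and a slab-lock fallback. page->counters and
 * __CMPXCHG_DOUBLE are assumed from the later lockless series.
 */
static inline int freeze_slab_cmpxchg(struct kmem_cache *s, struct page *page,
		void *freelist_old, unsigned long counters_old,
		void *freelist_new, unsigned long counters_new)
{
	if (s->flags & __CMPXCHG_DOUBLE) {
		/* Atomically swap freelist and counters together. */
		if (cmpxchg_double(&page->freelist, &page->counters,
				freelist_old, counters_old,
				freelist_new, counters_new))
			return 1;
	} else {
		/* Fallback: serialize with the per-slab lock instead. */
		slab_lock(page);
		if (page->freelist == freelist_old &&
					page->counters == counters_old) {
			page->freelist = freelist_new;
			page->counters = counters_new;
			slab_unlock(page);
			return 1;
		}
		slab_unlock(page);
	}
	return 0;
}

Threading s through lock_and_freeze_slab() and its callers in the diff below is what makes a helper of this shape callable from the partial-list scan without further signature churn.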
@@ -1457,8 +1457,8 @@ static inline void remove_partial(struct kmem_cache_node *n,
  *
  * Must hold list_lock.
  */
-static inline int lock_and_freeze_slab(struct kmem_cache_node *n,
-						struct page *page)
+static inline int lock_and_freeze_slab(struct kmem_cache *s,
+		struct kmem_cache_node *n, struct page *page)
 {
 	if (slab_trylock(page)) {
 		remove_partial(n, page);
@@ -1470,7 +1470,8 @@ static inline int lock_and_freeze_slab(struct kmem_cache_node *n,
 /*
  * Try to allocate a partial slab from a specific node.
  */
-static struct page *get_partial_node(struct kmem_cache_node *n)
+static struct page *get_partial_node(struct kmem_cache *s,
+					struct kmem_cache_node *n)
 {
 	struct page *page;
@@ -1485,7 +1486,7 @@ static struct page *get_partial_node(struct kmem_cache_node *n)
 	spin_lock(&n->list_lock);
 	list_for_each_entry(page, &n->partial, lru)
-		if (lock_and_freeze_slab(n, page))
+		if (lock_and_freeze_slab(s, n, page))
 			goto out;
 	page = NULL;
 out:
@@ -1536,7 +1537,7 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
 		if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
 				n->nr_partial > s->min_partial) {
-			page = get_partial_node(n);
+			page = get_partial_node(s, n);
 			if (page) {
 				put_mems_allowed();
 				return page;
@@ -1556,7 +1557,7 @@ static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
 	struct page *page;
 	int searchnode = (node == NUMA_NO_NODE) ? numa_node_id() : node;
-	page = get_partial_node(get_node(s, searchnode));
+	page = get_partial_node(s, get_node(s, searchnode));
 	if (page || node != NUMA_NO_NODE)
 		return page;
@@ -2081,7 +2082,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 {
 	void *prior;
 	void **object = (void *)x;
-	unsigned long flags;
+	unsigned long uninitialized_var(flags);
 	local_irq_save(flags);
 	slab_lock(page);
...
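The final hunk swaps flags for uninitialized_var(flags) in __slab_free(): GCC cannot always see that local_irq_save() assigns flags before any use, so this quiets a false "may be used uninitialized" warning. For reference, the kernel's GCC compiler header of this era defined the macro as a simple self-assignment:

/*
 * Self-assignment: convinces GCC the variable is "initialized"
 * without emitting any extra code.
 */
#define uninitialized_var(x) x = x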