Commit 0393895b authored by Vlastimil Babka

mm/slub: Convert __slab_lock() and __slab_unlock() to struct slab

These functions operate on the PG_locked page flag, but make them accept
struct slab to encapsulate this implementation detail.
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Roman Gushchin <guro@fb.com>
parent d835eef4
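
For context, a minimal sketch of what the conversion means at a call site (illustrative only, not part of this commit): code that still holds a struct page converts it with page_slab() before taking the per-slab lock, while code that already has a struct slab can pass it directly.

        /* Before this commit: the helpers took the struct page directly. */
        __slab_lock(page);
        /* ... critical section ... */
        __slab_unlock(page);

        /*
         * After: a caller still holding a struct page casts it first;
         * page_slab() assumes the page is known to be a slab page.
         */
        __slab_lock(page_slab(page));
        /* ... critical section ... */
        __slab_unlock(page_slab(page));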
@@ -440,14 +440,18 @@ slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects)
 /*
  * Per slab locking using the pagelock
  */
-static __always_inline void __slab_lock(struct page *page)
+static __always_inline void __slab_lock(struct slab *slab)
 {
+	struct page *page = slab_page(slab);
+
 	VM_BUG_ON_PAGE(PageTail(page), page);
 	bit_spin_lock(PG_locked, &page->flags);
 }
 
-static __always_inline void __slab_unlock(struct page *page)
+static __always_inline void __slab_unlock(struct slab *slab)
 {
+	struct page *page = slab_page(slab);
+
 	VM_BUG_ON_PAGE(PageTail(page), page);
 	__bit_spin_unlock(PG_locked, &page->flags);
 }
@@ -456,12 +460,12 @@ static __always_inline void slab_lock(struct page *page, unsigned long *flags)
 {
 	if (IS_ENABLED(CONFIG_PREEMPT_RT))
 		local_irq_save(*flags);
-	__slab_lock(page);
+	__slab_lock(page_slab(page));
 }
 
 static __always_inline void slab_unlock(struct page *page, unsigned long *flags)
 {
-	__slab_unlock(page);
+	__slab_unlock(page_slab(page));
 	if (IS_ENABLED(CONFIG_PREEMPT_RT))
 		local_irq_restore(*flags);
 }
@@ -530,16 +534,16 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
 		unsigned long flags;
 
 		local_irq_save(flags);
-		__slab_lock(page);
+		__slab_lock(page_slab(page));
 		if (page->freelist == freelist_old &&
 					page->counters == counters_old) {
 			page->freelist = freelist_new;
 			page->counters = counters_new;
-			__slab_unlock(page);
+			__slab_unlock(page_slab(page));
 			local_irq_restore(flags);
 			return true;
 		}
-		__slab_unlock(page);
+		__slab_unlock(page_slab(page));
 		local_irq_restore(flags);
 	}
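
A note on the helpers used above: at this point in the struct slab series, struct slab is laid out on top of struct page, so slab_page() and page_slab() amount to checked casts between two views of the same memory. The sketch below is conceptual only; the kernel's real definitions live in mm/slab.h and include compile-time and debug type checking.

        /* Conceptual sketch, not the kernel's actual definitions. */
        static inline struct page *slab_page(struct slab *slab)
        {
                return (struct page *)slab;     /* same underlying memory */
        }

        static inline struct slab *page_slab(struct page *page)
        {
                return (struct slab *)page;
        }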