Commit 7d27a04b authored by Matthew Wilcox, committed by Linus Torvalds

mm: move 'private' union within struct page

By moving page->private to the fourth word of struct page, we can put the
SLUB counters in the same word as SLAB's s_mem and still do the
cmpxchg_double trick.  Now the SLUB counters no longer overlap with the
mapcount or refcount so we can drop the call to page_mapcount_reset() and
simplify set_page_slub_counters() to a single line.
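
As an illustration only (not part of the commit): after this patch, flags, the mapping union and the index/freelist union occupy the first three words, and the private/s_mem/counters union sits in the fourth, so freelist and counters form the adjacent, double-word-aligned pair that cmpxchg_double needs. A minimal C mock of that layout, with made-up names standing in for the real struct page:

#include <stddef.h>

/* Simplified stand-in for the post-patch struct page word layout */
struct mock_page {
        unsigned long flags;            /* word 0 */
        void *mapping;                  /* word 1 */
        void *freelist;                 /* word 2: index/freelist union */
        union {                         /* word 3: the union this patch moves */
                unsigned long private;
                void *s_mem;            /* SLAB first object */
                unsigned long counters; /* SLUB */
        };
} __attribute__((aligned(2 * sizeof(unsigned long))));

/* cmpxchg_double needs the two words adjacent... */
_Static_assert(offsetof(struct mock_page, counters) ==
               offsetof(struct mock_page, freelist) + sizeof(void *),
               "counters must directly follow freelist");
/* ...and the pair double-word aligned */
_Static_assert(offsetof(struct mock_page, freelist) %
               (2 * sizeof(unsigned long)) == 0,
               "freelist/counters pair must be aligned for cmpxchg_double");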

Link: http://lkml.kernel.org/r/20180518194519.3820-6-willy@infradead.org
Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Jérôme Glisse <jglisse@redhat.com>
Cc: Lai Jiangshan <jiangshanlai@gmail.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent d4fc5069
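
Also for illustration, before the diff: a user-space sketch of the cmpxchg_double trick itself, with invented names — this is not the kernel's arch-specific cmpxchg_double() primitive. The idea is a compare-and-exchange over both words at once, so a concurrent update to either freelist or counters makes the operation fail and the caller retry. The sketch leans on GCC's __atomic_compare_exchange over a 16-byte struct; on x86-64, build with -mcx16 (and possibly link -latomic) so it can compile down to a real cmpxchg16b.

#include <stdbool.h>
#include <stdio.h>

/* Two adjacent, double-word-aligned fields, as in struct page */
struct slub_pair {
        void *freelist;
        unsigned long counters;
} __attribute__((aligned(2 * sizeof(unsigned long))));

/* Compare-and-swap freelist and counters as one atomic unit */
static bool cmpxchg_double_sketch(struct slub_pair *p,
                                  void *free_old, unsigned long cnt_old,
                                  void *free_new, unsigned long cnt_new)
{
        struct slub_pair old = { free_old, cnt_old };
        struct slub_pair new = { free_new, cnt_new };

        return __atomic_compare_exchange(p, &old, &new, false,
                                         __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

int main(void)
{
        struct slub_pair pair = { NULL, 0 };

        /* Succeeds only while both words still hold the expected values */
        if (cmpxchg_double_sketch(&pair, NULL, 0, &pair, 42))
                printf("swapped both words: counters=%lu\n", pair.counters);
        return 0;
}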
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -65,15 +65,9 @@ struct hmm;
  */
 #ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
 #define _struct_page_alignment	__aligned(2 * sizeof(unsigned long))
-#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE)
-#define _slub_counter_t		unsigned long
 #else
-#define _slub_counter_t		unsigned int
-#endif
-#else /* !CONFIG_HAVE_ALIGNED_STRUCT_PAGE */
 #define _struct_page_alignment
-#define _slub_counter_t		unsigned int
-#endif /* !CONFIG_HAVE_ALIGNED_STRUCT_PAGE */
+#endif
 
 struct page {
 	/* First double word block */
@@ -95,6 +89,30 @@ struct page {
 		/* page_deferred_list().prev	-- second tail page */
 	};
 
+	union {
+		/*
+		 * Mapping-private opaque data:
+		 * Usually used for buffer_heads if PagePrivate
+		 * Used for swp_entry_t if PageSwapCache
+		 * Indicates order in the buddy system if PageBuddy
+		 */
+		unsigned long private;
+#if USE_SPLIT_PTE_PTLOCKS
+#if ALLOC_SPLIT_PTLOCKS
+		spinlock_t *ptl;
+#else
+		spinlock_t ptl;
+#endif
+#endif
+		void *s_mem;			/* slab first object */
+		unsigned long counters;		/* SLUB */
+		struct {			/* SLUB */
+			unsigned inuse:16;
+			unsigned objects:15;
+			unsigned frozen:1;
+		};
+	};
+
 	union {
 		/*
 		 * If the page is neither PageSlab nor mappable to userspace,
@@ -104,13 +122,7 @@ struct page {
 		 */
 		unsigned int page_type;
 
-		_slub_counter_t counters;
 		unsigned int active;		/* SLAB */
-		struct {			/* SLUB */
-			unsigned inuse:16;
-			unsigned objects:15;
-			unsigned frozen:1;
-		};
 		int units;			/* SLOB */
 
 		struct {			/* Page cache */
@@ -179,24 +191,6 @@ struct page {
 #endif
 	};
 
-	union {
-		/*
-		 * Mapping-private opaque data:
-		 * Usually used for buffer_heads if PagePrivate
-		 * Used for swp_entry_t if PageSwapCache
-		 * Indicates order in the buddy system if PageBuddy
-		 */
-		unsigned long private;
-#if USE_SPLIT_PTE_PTLOCKS
-#if ALLOC_SPLIT_PTLOCKS
-		spinlock_t *ptl;
-#else
-		spinlock_t ptl;
-#endif
-#endif
-		void *s_mem;			/* slab first object */
-	};
-
 #ifdef CONFIG_MEMCG
 	struct mem_cgroup *mem_cgroup;
 #endif
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -356,21 +356,6 @@ static __always_inline void slab_unlock(struct page *page)
 	__bit_spin_unlock(PG_locked, &page->flags);
 }
 
-static inline void set_page_slub_counters(struct page *page, unsigned long counters_new)
-{
-	struct page tmp;
-	tmp.counters = counters_new;
-	/*
-	 * page->counters can cover frozen/inuse/objects as well
-	 * as page->_refcount.  If we assign to ->counters directly
-	 * we run the risk of losing updates to page->_refcount, so
-	 * be careful and only assign to the fields we need.
-	 */
-	page->frozen  = tmp.frozen;
-	page->inuse   = tmp.inuse;
-	page->objects = tmp.objects;
-}
-
 /* Interrupts must be disabled (for the fallback code to work right) */
 static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
 		void *freelist_old, unsigned long counters_old,
@@ -392,7 +377,7 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page
 		if (page->freelist == freelist_old &&
 					page->counters == counters_old) {
 			page->freelist = freelist_new;
-			set_page_slub_counters(page, counters_new);
+			page->counters = counters_new;
 			slab_unlock(page);
 			return true;
 		}
@@ -431,7 +416,7 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
 	if (page->freelist == freelist_old &&
 				page->counters == counters_old) {
 		page->freelist = freelist_new;
-		set_page_slub_counters(page, counters_new);
+		page->counters = counters_new;
 		slab_unlock(page);
 		local_irq_restore(flags);
 		return true;
@@ -1694,7 +1679,6 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 	__ClearPageSlabPfmemalloc(page);
 	__ClearPageSlab(page);
-	page_mapcount_reset(page);
 	page->mapping = NULL;
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += pages;