Commit 07f910f9 authored by Matthew Wilcox (Oracle), committed by Vlastimil Babka

mm: Remove slab from struct page

All members of struct slab can now be removed from struct page.
This shrinks the definition of struct page by 30 LOC, making
it easier to understand.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
parent 9cc960a1
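Note: struct slab was introduced earlier in this series as a separate type that overlays the struct page memory of slab pages, so converting between the two is a type-checked cast. A sketch of the converters in mm/slab.h at this point in the series (abridged, reproduced from memory of the series; treat the exact form as illustrative):

/*
 * A slab *is* the folio/page it was allocated in, reinterpreted:
 * conversion is a cast, with _Generic preserving constness.
 */
#define folio_slab(folio)	(_Generic((folio),			\
	const struct folio *:	(const struct slab *)(folio),		\
	struct folio *:		(struct slab *)(folio)))

#define slab_folio(s)		(_Generic((s),				\
	const struct slab *:	(const struct folio *)s,		\
	struct slab *:		(struct folio *)s))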
include/linux/mm_types.h

@@ -118,31 +118,6 @@ struct page {
 				atomic_long_t pp_frag_count;
 			};
 		};
-		struct {	/* slab, slob and slub */
-			union {
-				struct list_head slab_list;
-				struct {	/* Partial pages */
-					struct page *next;
-#ifdef CONFIG_64BIT
-					int pages;	/* Nr of pages left */
-#else
-					short int pages;
-#endif
-				};
-			};
-			struct kmem_cache *slab_cache; /* not slob */
-			/* Double-word boundary */
-			void *freelist;		/* first free object */
-			union {
-				void *s_mem;	/* slab: first object */
-				unsigned long counters;		/* SLUB */
-				struct {			/* SLUB */
-					unsigned inuse:16;
-					unsigned objects:15;
-					unsigned frozen:1;
-				};
-			};
-		};
 		struct {	/* Tail pages of compound page */
 			unsigned long compound_head;	/* Bit zero is set */

@@ -206,9 +181,6 @@ struct page {
 		 * which are currently stored here.
 		 */
 		unsigned int page_type;
-
-		unsigned int active;		/* SLAB */
-		int units;			/* SLOB */
 	};
 
 	/* Usage count. *DO NOT USE DIRECTLY*. See page_ref.h */
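The fields deleted from struct page above live on in struct slab, which earlier patches in this series defined in mm/slab.h to mirror the removed union member for member. An abridged sketch of that layout (reproduced from memory of the series; the SLAB_MATCH assertions further down pin the offsets that matter):

struct slab {
	unsigned long __page_flags;
	union {
		struct list_head slab_list;
		struct {	/* Partial pages */
			struct slab *next;
#ifdef CONFIG_64BIT
			int slabs;	/* Nr of slabs left */
#else
			short int slabs;
#endif
		};
		struct rcu_head rcu_head;
	};
	struct kmem_cache *slab_cache; /* not slob */
	/* Double-word boundary */
	void *freelist;		/* first free object */
	union {
		void *s_mem;	/* slab: first object */
		unsigned long counters;		/* SLUB */
		struct {			/* SLUB */
			unsigned inuse:16;
			unsigned objects:15;
			unsigned frozen:1;
		};
	};
	union {
		unsigned int active;	/* SLAB */
		int units;		/* SLOB */
	};
	atomic_t __page_refcount;
#ifdef CONFIG_MEMCG
	unsigned long memcg_data;
#endif
};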
include/linux/page-flags.h

@@ -909,43 +909,6 @@ extern bool is_free_buddy_page(struct page *page);
 __PAGEFLAG(Isolated, isolated, PF_ANY);
 
-/*
- * If network-based swap is enabled, sl*b must keep track of whether pages
- * were allocated from pfmemalloc reserves.
- */
-static inline int PageSlabPfmemalloc(struct page *page)
-{
-	VM_BUG_ON_PAGE(!PageSlab(page), page);
-	return PageActive(page);
-}
-
-/*
- * A version of PageSlabPfmemalloc() for opportunistic checks where the page
- * might have been freed under us and not be a PageSlab anymore.
- */
-static inline int __PageSlabPfmemalloc(struct page *page)
-{
-	return PageActive(page);
-}
-
-static inline void SetPageSlabPfmemalloc(struct page *page)
-{
-	VM_BUG_ON_PAGE(!PageSlab(page), page);
-	SetPageActive(page);
-}
-
-static inline void __ClearPageSlabPfmemalloc(struct page *page)
-{
-	VM_BUG_ON_PAGE(!PageSlab(page), page);
-	__ClearPageActive(page);
-}
-
-static inline void ClearPageSlabPfmemalloc(struct page *page)
-{
-	VM_BUG_ON_PAGE(!PageSlab(page), page);
-	ClearPageActive(page);
-}
-
 #ifdef CONFIG_MMU
 #define __PG_MLOCKED		(1UL << PG_mlocked)
 #else
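The PageSlabPfmemalloc() family removed above already has struct slab based replacements in mm/slab.h from earlier in the series; they keep the same trick of borrowing PG_active on slab pages. A sketch (abridged, reproduced from memory of the series; treat the exact form as illustrative):

/*
 * If network-based swap is enabled, slab must keep track of whether
 * objects were allocated from pfmemalloc reserves.
 */
static inline bool slab_test_pfmemalloc(const struct slab *slab)
{
	return folio_test_active((struct folio *)slab_folio(slab));
}

static inline void slab_set_pfmemalloc(struct slab *slab)
{
	folio_set_active(slab_folio(slab));
}

static inline void slab_clear_pfmemalloc(struct slab *slab)
{
	folio_clear_active(slab_folio(slab));
}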
mm/slab.h

@@ -67,14 +67,8 @@ struct slab {
 	static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
 SLAB_MATCH(flags, __page_flags);
 SLAB_MATCH(compound_head, slab_list);	/* Ensure bit 0 is clear */
-SLAB_MATCH(slab_list, slab_list);
 #ifndef CONFIG_SLOB
 SLAB_MATCH(rcu_head, rcu_head);
-SLAB_MATCH(slab_cache, slab_cache);
-#endif
-#ifdef CONFIG_SLAB
-SLAB_MATCH(s_mem, s_mem);
-SLAB_MATCH(active, active);
 #endif
 SLAB_MATCH(_refcount, __page_refcount);
 #ifdef CONFIG_MEMCG
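The SLAB_MATCH assertions are what make the overlay safe: each one fails the build if a struct slab field drifts from the offset of its struct page counterpart. A minimal standalone illustration of the technique, with hypothetical types rather than kernel code (compiles as C11, using static_assert from <assert.h>):

#include <assert.h>
#include <stddef.h>

struct wide {			/* stand-in for struct page */
	unsigned long flags;
	void *a;
	void *b;
};

struct view {			/* stand-in for struct slab, overlaying struct wide */
	unsigned long __flags;
	void *first;
	void *second;
};

/* Fail the build if a view field drifts from its wide counterpart. */
#define MATCH(pg, sl)						\
	static_assert(offsetof(struct wide, pg) ==		\
		      offsetof(struct view, sl),		\
		      "offset mismatch: " #pg " vs " #sl)

MATCH(flags, __flags);
MATCH(a, first);
MATCH(b, second);

int main(void)
{
	return 0;	/* nothing to do at runtime; the checks ran at compile time */
}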