Commit 8e65d24c authored by Christoph Lameter, committed by Linus Torvalds

SLUB: Do not use page->mapping

After moving the lockless_freelist to kmem_cache_cpu, we no longer need
page->lockless_freelist. Restructure the use of the struct page fields so
that we never touch the mapping field.

This in turn allows us to remove the special casing of SLUB when determining
the mapping of a page (needed for corner cases on machines with virtually
indexed caches, which must flush the caches of all processors mapping a page).
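
For context, a minimal sketch of the per-CPU structure introduced by the
parent commit, which is where the lockless freelist now lives; the exact
field list is an assumption based on that era's mm/slub.c, not part of
this commit:

struct kmem_cache_cpu {
	void **freelist;	/* per-CPU lockless freelist, formerly page->lockless_freelist */
	struct page *page;	/* slab the CPU currently allocates from */
	int node;		/* node of the current slab (assumed field) */
};
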
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent dfb4f096
@@ -568,10 +568,6 @@ static inline struct address_space *page_mapping(struct page *page)
 	VM_BUG_ON(PageSlab(page));
 	if (unlikely(PageSwapCache(page)))
 		mapping = &swapper_space;
-#ifdef CONFIG_SLUB
-	else if (unlikely(PageSlab(page)))
-		mapping = NULL;
-#endif
 	else if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
 		mapping = NULL;
 	return mapping;
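
For reference, page_mapping() as it reads after this hunk, reconstructed
from the context lines above; the opening assignment from page->mapping is
assumed from the function's surrounding body rather than shown in the diff:

static inline struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping = page->mapping;	/* assumed: not visible in the hunk */

	VM_BUG_ON(PageSlab(page));
	if (unlikely(PageSwapCache(page)))
		mapping = &swapper_space;
	else if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
		mapping = NULL;
	return mapping;
}

With the CONFIG_SLUB branch gone, slab pages need no special handling here,
because SLUB no longer stores anything in page->mapping.
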
@@ -62,13 +62,8 @@ struct page {
 #if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
 		spinlock_t ptl;
 #endif
-		struct {			/* SLUB uses */
-			void **lockless_freelist;
-			struct kmem_cache *slab;	/* Pointer to slab */
-		};
-		struct {
-			struct page *first_page;	/* Compound pages */
-		};
+		struct kmem_cache *slab;	/* SLUB: Pointer to slab */
+		struct page *first_page;	/* Compound tail pages */
 	};
 	union {
 		pgoff_t index;		/* Our offset within mapping. */
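
This layout change is the point of the patch: the old two-word SLUB struct
placed slab in the union's second word, where it overlaid page->mapping. A
sketch of the union after this hunk, reconstructed from the context plus the
added lines; the private/mapping pair is an assumption taken from the rest of
this era's struct page, shown only to make the aliasing visible:

	union {
		struct {
			unsigned long private;		/* word 0 of the union (assumed member) */
			struct address_space *mapping;	/* word 1 (assumed member) */
		};
#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
		spinlock_t ptl;
#endif
		struct kmem_cache *slab;	/* SLUB: single word, aliases only 'private' */
		struct page *first_page;	/* Compound tail pages */
	};

Since slab is now a lone pointer at union offset 0, SLUB writes only the
first word and mapping stays untouched, which is what allows removing the
page_mapping() special case and the page->mapping = NULL in __free_slab()
below.
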
@@ -1127,7 +1127,6 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 	set_freepointer(s, last, NULL);
 
 	page->freelist = start;
-	page->lockless_freelist = NULL;
 	page->inuse = 0;
 out:
 	if (flags & __GFP_WAIT)
@@ -1153,7 +1152,6 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
 		- pages);
 
-	page->mapping = NULL;
 	__free_pages(page, s->order);
 }
 