Commit a85cb652 authored by Andrew Morton's avatar Andrew Morton Committed by Linus Torvalds

[PATCH] Use a slab cache for pgd and pmd pages

From Bill Irwin

This allocates pgd's and pmd's using the slab allocator and slab constructors.  It has a
benefit beyond preconstruction in that PAE pmd's are accounted for via
/proc/slabinfo.

Profiling of kernel builds by Martin Bligh shows a 30-40% drop in CPU load
due to pgd_alloc()'s page clearing activity.  But this was already a tiny
fraction of the overall CPU time.
parent 3ac8c845
...@@ -509,15 +509,27 @@ void __init mem_init(void) ...@@ -509,15 +509,27 @@ void __init mem_init(void)
} }
#if CONFIG_X86_PAE

#include <linux/slab.h>

/*
 * Slab caches for PAE page tables.  Using the slab allocator (rather than
 * bare page allocation) means pmd usage shows up in /proc/slabinfo, and the
 * slab constructors pre-initialize objects so pgd_alloc() need not clear
 * them on every allocation.
 */
kmem_cache_t *pae_pmd_cachep;
kmem_cache_t *pae_pgd_cachep;

/* Constructors live in arch/i386/mm/pgtable.c */
void pae_pmd_ctor(void *, kmem_cache_t *, unsigned long);
void pae_pgd_ctor(void *, kmem_cache_t *, unsigned long);

/*
 * Create the slab caches used for PAE pgd and pmd allocation.
 * Panics on failure: the kernel cannot operate without page tables.
 */
void __init pgtable_cache_init(void)
{
	/*
	 * pmds are one page each; PAE pgds must be 16-byte aligned:
	 */
	pae_pmd_cachep = kmem_cache_create("pae_pmd", 4096, 0,
			SLAB_HWCACHE_ALIGN | SLAB_MUST_HWCACHE_ALIGN,
			pae_pmd_ctor, NULL);
	if (!pae_pmd_cachep)
		panic("init_pae(): cannot allocate pae_pmd SLAB cache");

	pae_pgd_cachep = kmem_cache_create("pae_pgd", 32, 0,
			SLAB_HWCACHE_ALIGN | SLAB_MUST_HWCACHE_ALIGN,
			pae_pgd_ctor, NULL);
	if (!pae_pgd_cachep)
		panic("init_pae(): Cannot alloc pae_pgd SLAB cache");
}
......
...@@ -168,38 +168,57 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address) ...@@ -168,38 +168,57 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
#if CONFIG_X86_PAE

/* Cache is created in pgtable_cache_init() (arch/i386/mm/init.c). */
extern kmem_cache_t *pae_pmd_cachep;

/*
 * Slab constructor for PAE pmd pages: zero the page once at cache-fill
 * time so pgd_alloc() does not have to clear each pmd on allocation.
 */
void pae_pmd_ctor(void *__pmd, kmem_cache_t *pmd_cache, unsigned long flags)
{
	clear_page(__pmd);
}

/*
 * Slab constructor for PAE pgds: preload the kernel portion of the pgd
 * from swapper_pg_dir so every new pgd starts with the kernel mappings.
 */
void pae_pgd_ctor(void *__pgd, kmem_cache_t *pgd_cache, unsigned long flags)
{
	pgd_t *pgd = __pgd;

	memcpy(pgd + USER_PTRS_PER_PGD,
		swapper_pg_dir + USER_PTRS_PER_PGD,
		(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
}
pgd_t *pgd_alloc(struct mm_struct *mm) pgd_t *pgd_alloc(struct mm_struct *mm)
{ {
int i; int i;
pgd_t *pgd = kmem_cache_alloc(pae_pgd_cachep, GFP_KERNEL); pgd_t *pgd = kmem_cache_alloc(pae_pgd_cachep, SLAB_KERNEL);
if (pgd) { if (!pgd)
for (i = 0; i < USER_PTRS_PER_PGD; i++) { return NULL;
unsigned long pmd = __get_free_page(GFP_KERNEL);
if (!pmd) for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
goto out_oom; pmd_t *pmd = kmem_cache_alloc(pae_pmd_cachep, SLAB_KERNEL);
clear_page(pmd); if (!pmd)
set_pgd(pgd + i, __pgd(1 + __pa(pmd))); goto out_oom;
else if ((unsigned long)pmd & ~PAGE_MASK) {
printk("kmem_cache_alloc did wrong! death ensues!\n");
goto out_oom;
} }
memcpy(pgd + USER_PTRS_PER_PGD, set_pgd(pgd + i, __pgd(1 + __pa((unsigned long long)((unsigned long)pmd))));
swapper_pg_dir + USER_PTRS_PER_PGD,
(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
} }
return pgd; return pgd;
out_oom: out_oom:
for (i--; i >= 0; i--) for (i--; i >= 0; --i)
free_page((unsigned long)__va(pgd_val(pgd[i])-1)); kmem_cache_free(pae_pmd_cachep, (void *)__va(pgd_val(pgd[i])-1));
kmem_cache_free(pae_pgd_cachep, pgd); kmem_cache_free(pae_pgd_cachep, (void *)pgd);
return NULL; return NULL;
} }
/*
 * Free a PAE pgd and its user-space pmds back to their slab caches.
 *
 * Each pgd entry holds __pa(pmd) + 1 (present bit), so subtract 1 and
 * convert back with __va() to recover the pmd's virtual address.  The
 * entries are cleared so the cached pgd object is returned to the slab
 * in its constructed state (kernel mappings only).
 */
void pgd_free(pgd_t *pgd)
{
	int i;

	for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
		kmem_cache_free(pae_pmd_cachep, (void *)__va(pgd_val(pgd[i])-1));
		set_pgd(pgd + i, __pgd(0));
	}
	kmem_cache_free(pae_pgd_cachep, (void *)pgd);
}
#else #else
......
...@@ -20,11 +20,11 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *p ...@@ -20,11 +20,11 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *p
* Allocate and free page tables. * Allocate and free page tables.
*/ */
/* Page-table allocation entry points (defined in arch/i386/mm/pgtable.c). */
pgd_t *pgd_alloc(struct mm_struct *);
void pgd_free(pgd_t *pgd);
pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
struct page *pte_alloc_one(struct mm_struct *, unsigned long);
static inline void pte_free_kernel(pte_t *pte) static inline void pte_free_kernel(pte_t *pte)
{ {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment