Commit e618c957 authored by Jeremy Fitzhardinge, committed by Ingo Molnar

x86: unify PAE/non-PAE pgd_ctor

The PAE and non-PAE pgd constructors are more or less
identical, and can be made into the same function.
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: William Irwin <wli@holomorphy.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent c66315e0
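
The diff below applies a common kernel pattern: two preprocessor-selected variants of a function are folded into a single body whose branch condition is built from compile-time constants, so the compiler still discards the dead path. Here is a minimal, self-contained sketch of that idea, not kernel code — demo_ctor, CONFIG_DEMO_PAE, DEMO_LEVELS and DEMO_SHARED_KERNEL_PMD are made-up stand-ins for pgd_ctor, CONFIG_X86_PAE, PAGETABLE_LEVELS and SHARED_KERNEL_PMD:

#include <stdio.h>
#include <string.h>

#define CONFIG_DEMO_PAE 1                                /* pretend build-time option */
#define DEMO_LEVELS (CONFIG_DEMO_PAE ? 3 : 2)            /* stand-in for PAGETABLE_LEVELS */
#define DEMO_SHARED_KERNEL_PMD (CONFIG_DEMO_PAE ? 1 : 0) /* stand-in for SHARED_KERNEL_PMD */

/* One constructor for both configurations: the old #if (PTRS_PER_PMD == 1)
 * split becomes an if () on constants, which the compiler folds away. */
static void demo_ctor(char *pgd, int user, int total)
{
	memset(pgd, 0, user);        /* clear the usermode part, as pgd_ctor does */

	if (DEMO_LEVELS == 2 || (DEMO_LEVELS == 3 && DEMO_SHARED_KERNEL_PMD))
		memset(pgd + user, 'K', total - user); /* stand-in for clone_pgd_range() */
}

int main(void)
{
	char pgd[8];

	memset(pgd, 'x', sizeof(pgd));
	demo_ctor(pgd, 4, 8);
	for (int i = 0; i < 8; i++)
		putchar(pgd[i] ? pgd[i] : '.');
	putchar('\n');               /* prints "....KKKK" when sharing is enabled */
	return 0;
}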
@@ -219,50 +219,39 @@ static inline void pgd_list_del(pgd_t *pgd)
 	list_del(&page->lru);
 }
 
-#if (PTRS_PER_PMD == 1)
-/* Non-PAE pgd constructor */
-static void pgd_ctor(void *pgd)
+#define UNSHARED_PTRS_PER_PGD				\
+	(SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD)
+
+static void pgd_ctor(void *p)
 {
+	pgd_t *pgd = p;
 	unsigned long flags;
 
-	/* !PAE, no pagetable sharing */
+	/* Clear usermode parts of PGD */
 	memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
 
 	spin_lock_irqsave(&pgd_lock, flags);
 
-	/* must happen under lock */
-	clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
-			swapper_pg_dir + USER_PTRS_PER_PGD,
-			KERNEL_PGD_PTRS);
-	paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT,
-				__pa(swapper_pg_dir) >> PAGE_SHIFT,
-				USER_PTRS_PER_PGD,
-				KERNEL_PGD_PTRS);
-	pgd_list_add(pgd);
-	spin_unlock_irqrestore(&pgd_lock, flags);
-}
-#else  /* PTRS_PER_PMD > 1 */
-/* PAE pgd constructor */
-static void pgd_ctor(void *pgd)
-{
-	/* PAE, kernel PMD may be shared */
-	if (SHARED_KERNEL_PMD) {
-		clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
+	/* If the pgd points to a shared pagetable level (either the
+	   ptes in non-PAE, or shared PMD in PAE), then just copy the
+	   references from swapper_pg_dir. */
+	if (PAGETABLE_LEVELS == 2 ||
+	    (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD)) {
+		clone_pgd_range(pgd + USER_PTRS_PER_PGD,
 				swapper_pg_dir + USER_PTRS_PER_PGD,
 				KERNEL_PGD_PTRS);
-	} else {
-		unsigned long flags;
+		paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT,
+					__pa(swapper_pg_dir) >> PAGE_SHIFT,
+					USER_PTRS_PER_PGD,
+					KERNEL_PGD_PTRS);
+	}
 
-		memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
-		spin_lock_irqsave(&pgd_lock, flags);
-		pgd_list_add(pgd);
-		spin_unlock_irqrestore(&pgd_lock, flags);
-	}
+	/* list required to sync kernel mapping updates */
+	if (!SHARED_KERNEL_PMD)
+		pgd_list_add(pgd);
+
+	spin_unlock_irqrestore(&pgd_lock, flags);
 }
-#endif	/* PTRS_PER_PMD */
 
 static void pgd_dtor(void *pgd)
 {
@@ -276,9 +265,6 @@ static void pgd_dtor(void *pgd)
 	spin_unlock_irqrestore(&pgd_lock, flags);
 }
 
-#define UNSHARED_PTRS_PER_PGD				\
-	(SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD)
-
 #ifdef CONFIG_X86_PAE
 /*
  * Mop up any pmd pages which may still be attached to the pgd.
...
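
As a side note, the UNSHARED_PTRS_PER_PGD macro that this diff moves above pgd_ctor counts how many pgd entries are private to each pagetable rather than shared with swapper_pg_dir. A standalone illustration with the usual i386 values of the era — 1024/768 entries for non-PAE with a 3G/1G split, 4/3 for PAE; treat the exact numbers as configuration-dependent, and note this is a plain calculation, not kernel code:

#include <stdio.h>

int main(void)
{
	/* non-PAE: 1024 entries, kernel part cloned from swapper_pg_dir */
	int ptrs_per_pgd = 1024, user_ptrs = 768, shared_kernel_pmd = 1;
	int unshared = shared_kernel_pmd ? user_ptrs : ptrs_per_pgd;
	printf("non-PAE/shared kernel: %d unshared pgd entries\n", unshared);

	/* PAE without a shared kernel pmd (e.g. under paravirt):
	 * all 4 top-level entries are per-process */
	ptrs_per_pgd = 4; user_ptrs = 3; shared_kernel_pmd = 0;
	unshared = shared_kernel_pmd ? user_ptrs : ptrs_per_pgd;
	printf("PAE/unshared kernel:   %d unshared pgd entries\n", unshared);
	return 0;
}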