Commit fab177a4 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] ppc64: uninline __pte_free_tlb()

The pgalloc.h changes broke ppc64:

In file included from include/asm-generic/tlb.h:18,
                 from include/asm/tlb.h:24,
                 from arch/ppc64/mm/hash_utils.c:48:
include/asm/pgalloc.h: In function `__pte_free_tlb':
include/asm/pgalloc.h:110: dereferencing pointer to incomplete type
include/asm/pgalloc.h:111: dereferencing pointer to incomplete type

Uninlining __pte_free_tlb() fixes that.
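
For context on the error: asm-generic/tlb.h includes asm/pgalloc.h near its top (line 18 in the trace above), before it defines struct mmu_gather, so the inline __pte_free_tlb() in the header dereferences tlb->mm while struct mmu_gather is still an incomplete type and the compiler has no member layout to work with. Moving the body into a .c file that sees the full definition sidesteps this; a prototype taking a pointer to the incomplete type is fine to keep in the header. A minimal standalone sketch of the failure mode, with hypothetical names rather than the real kernel types:

    /* sketch.c -- illustration only, not kernel code */
    struct gather;                       /* forward declaration: incomplete type */

    static inline int bad_peek(struct gather *g)
    {
            /* Uncommenting the next line reproduces the error
             * "dereferencing pointer to incomplete type", because no
             * definition of struct gather is visible at this point.
             */
            /* return g->users; */
            (void)g;
            return 0;
    }

    int good_peek(struct gather *g);     /* the fix: declaration only in the header */

    struct gather { int users; };        /* full definition appears later */

    int good_peek(struct gather *g)
    {
            return g->users;             /* legal: the type is complete here */
    }
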
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent d01034ea
@@ -41,6 +41,33 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
 unsigned long pte_freelist_forced_free;
 
+void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage)
+{
+	/* This is safe as we are holding page_table_lock */
+	cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
+	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
+
+	if (atomic_read(&tlb->mm->mm_users) < 2 ||
+	    cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
+		pte_free(ptepage);
+		return;
+	}
+
+	if (*batchp == NULL) {
+		*batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
+		if (*batchp == NULL) {
+			pte_free_now(ptepage);
+			return;
+		}
+		(*batchp)->index = 0;
+	}
+	(*batchp)->pages[(*batchp)->index++] = ptepage;
+	if ((*batchp)->index == PTE_FREELIST_SIZE) {
+		pte_free_submit(*batchp);
+		*batchp = NULL;
+	}
+}
+
 /*
  * Update the MMU hash table to correspond with a change to
  * a Linux PTE.  If wrprot is true, it is permissible to
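The hunk above adds the out-of-line definition on the arch side; the hunk below removes the inline copy from the ppc64 pgalloc.h header (the include/asm/pgalloc.h named in the error log), leaving only a prototype. The function batches PTE pages per CPU: if the mm has a single user or is resident only on the local CPU it frees the page immediately, otherwise pages accumulate until PTE_FREELIST_SIZE is reached and the batch is handed to pte_free_submit() for deferred freeing, with pte_free_now() as the fallback when no batch page can be allocated. For orientation, a sketch of the batch structure the code assumes (declared elsewhere in the header; an approximation, not the literal kernel source):

    struct pte_freelist_batch {
            struct rcu_head rcu;       /* assumed: deferred-free callback linkage */
            unsigned int    index;     /* next free slot in pages[] */
            struct page    *pages[0];  /* PTE pages queued for freeing */
    };

    /* A batch occupies a single page, so its capacity follows from PAGE_SIZE: */
    #define PTE_FREELIST_SIZE \
            ((PAGE_SIZE - sizeof(struct pte_freelist_batch)) / sizeof(struct page *))
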
@@ -101,33 +101,7 @@ extern void pte_free_submit(struct pte_freelist_batch *batch);
 DECLARE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
 
-static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage)
-{
-	/* This is safe as we are holding page_table_lock */
-	cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
-	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
-
-	if (atomic_read(&tlb->mm->mm_users) < 2 ||
-	    cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
-		pte_free(ptepage);
-		return;
-	}
-
-	if (*batchp == NULL) {
-		*batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
-		if (*batchp == NULL) {
-			pte_free_now(ptepage);
-			return;
-		}
-		(*batchp)->index = 0;
-	}
-	(*batchp)->pages[(*batchp)->index++] = ptepage;
-	if ((*batchp)->index == PTE_FREELIST_SIZE) {
-		pte_free_submit(*batchp);
-		*batchp = NULL;
-	}
-}
+void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage);
 
 #define __pmd_free_tlb(tlb, pmd)	__pte_free_tlb(tlb, virt_to_page(pmd))
 
 #define check_pgt_cache()	do { } while (0)