Commit 0f9578b7 authored by Benjamin Herrenschmidt, committed by Linus Torvalds

[PATCH] ppc64: More hugepage fixes

My previous patch fixing invalidation of huge PTEs wasn't good enough, we
still had an issue if a PTE invalidation batch contained both small and
large pages.  This patch fixes this by making sure the batch is flushed if
the page size fed to it changes.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 485ef69e
...@@ -343,7 +343,7 @@ static void native_flush_hash_range(unsigned long context, ...@@ -343,7 +343,7 @@ static void native_flush_hash_range(unsigned long context,
hpte_t *hptep; hpte_t *hptep;
unsigned long hpte_v; unsigned long hpte_v;
struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
unsigned long large; unsigned long large = batch->large;
local_irq_save(flags); local_irq_save(flags);
...@@ -356,7 +356,6 @@ static void native_flush_hash_range(unsigned long context, ...@@ -356,7 +356,6 @@ static void native_flush_hash_range(unsigned long context,
va = (vsid << 28) | (batch->addr[i] & 0x0fffffff); va = (vsid << 28) | (batch->addr[i] & 0x0fffffff);
batch->vaddr[j] = va; batch->vaddr[j] = va;
large = pte_huge(batch->pte[i]);
if (large) if (large)
vpn = va >> HPAGE_SHIFT; vpn = va >> HPAGE_SHIFT;
else else
...@@ -406,7 +405,7 @@ static void native_flush_hash_range(unsigned long context, ...@@ -406,7 +405,7 @@ static void native_flush_hash_range(unsigned long context,
asm volatile("ptesync":::"memory"); asm volatile("ptesync":::"memory");
for (i = 0; i < j; i++) for (i = 0; i < j; i++)
__tlbie(batch->vaddr[i], 0); __tlbie(batch->vaddr[i], large);
asm volatile("eieio; tlbsync; ptesync":::"memory"); asm volatile("eieio; tlbsync; ptesync":::"memory");
......
...@@ -143,7 +143,8 @@ void hpte_update(struct mm_struct *mm, unsigned long addr, ...@@ -143,7 +143,8 @@ void hpte_update(struct mm_struct *mm, unsigned long addr,
* up scanning and resetting referenced bits then our batch context * up scanning and resetting referenced bits then our batch context
* will change mid stream. * will change mid stream.
*/ */
if (unlikely(i != 0 && context != batch->context)) { if (i != 0 && (context != batch->context ||
batch->large != pte_huge(pte))) {
flush_tlb_pending(); flush_tlb_pending();
i = 0; i = 0;
} }
...@@ -151,6 +152,7 @@ void hpte_update(struct mm_struct *mm, unsigned long addr, ...@@ -151,6 +152,7 @@ void hpte_update(struct mm_struct *mm, unsigned long addr,
if (i == 0) { if (i == 0) {
batch->context = context; batch->context = context;
batch->mm = mm; batch->mm = mm;
batch->large = pte_huge(pte);
} }
batch->pte[i] = __pte(pte); batch->pte[i] = __pte(pte);
batch->addr[i] = addr; batch->addr[i] = addr;
......
...@@ -25,6 +25,7 @@ struct ppc64_tlb_batch { ...@@ -25,6 +25,7 @@ struct ppc64_tlb_batch {
pte_t pte[PPC64_TLB_BATCH_NR]; pte_t pte[PPC64_TLB_BATCH_NR];
unsigned long addr[PPC64_TLB_BATCH_NR]; unsigned long addr[PPC64_TLB_BATCH_NR];
unsigned long vaddr[PPC64_TLB_BATCH_NR]; unsigned long vaddr[PPC64_TLB_BATCH_NR];
unsigned int large;
}; };
DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch); DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment