Commit 11c03dc8 authored by John David Anglin, committed by Helge Deller

parisc: Update huge TLB page support to use per-pagetable spinlock

This patch updates the parisc huge TLB page support to use per-pagetable spinlocks.

This patch requires Mikulas' per-pagetable spinlock patch and the revised TLB
serialization patch from Helge and myself.  With Mikulas' patch, we need to use
the per-pagetable spinlock for page table updates.  The TLB lock is only used
to serialize TLB flushes on machines with the Merced bus.
Signed-off-by: John David Anglin <dave.anglin@bell.net>
Signed-off-by: Helge Deller <deller@gmx.de>
parent b37d1c18
...@@ -139,9 +139,9 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, ...@@ -139,9 +139,9 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
{ {
unsigned long flags; unsigned long flags;
purge_tlb_start(flags); spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);
__set_huge_pte_at(mm, addr, ptep, entry); __set_huge_pte_at(mm, addr, ptep, entry);
purge_tlb_end(flags); spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);
} }
/*
 * Atomically read and clear a huge-page PTE, returning the old entry.
 * The read-then-clear pair must be one critical section so no update
 * can slip in between; the per-pagetable spinlock provides that.
 */
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	unsigned long flags;
	pte_t entry;

	spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);
	entry = *ptep;
	__set_huge_pte_at(mm, addr, ptep, __pte(0));
	spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);

	return entry;
}
/*
 * Write-protect a huge-page PTE.  Reading the old entry and writing
 * back the write-protected copy happens under the per-pagetable
 * spinlock so a concurrent update cannot be lost.
 */
void huge_ptep_set_wrprotect(struct mm_struct *mm,
			     unsigned long addr, pte_t *ptep)
{
	unsigned long flags;
	pte_t old_pte;

	spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);
	old_pte = *ptep;
	__set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
	spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);
}
int huge_ptep_set_access_flags(struct vm_area_struct *vma, int huge_ptep_set_access_flags(struct vm_area_struct *vma,
...@@ -178,13 +178,14 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma, ...@@ -178,13 +178,14 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
{ {
unsigned long flags; unsigned long flags;
int changed; int changed;
struct mm_struct *mm = vma->vm_mm;
purge_tlb_start(flags); spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);
changed = !pte_same(*ptep, pte); changed = !pte_same(*ptep, pte);
if (changed) { if (changed) {
__set_huge_pte_at(vma->vm_mm, addr, ptep, pte); __set_huge_pte_at(mm, addr, ptep, pte);
} }
purge_tlb_end(flags); spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);
return changed; return changed;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment