Commit f069ff39 authored by Aneesh Kumar K.V, committed by Michael Ellerman

powerpc/mm/hugetlb: Update huge_ptep_set_access_flags to call __ptep_set_access_flags directly

In a later patch, we want to update __ptep_set_access_flags to take a
page size argument. That change will make ptep_set_access_flags work
only with mmu_virtual_psize. To simplify the code, make
huge_ptep_set_access_flags call __ptep_set_access_flags directly, so
that the hugetlb page size can be computed in the hugetlb function.

Now that ptep_set_access_flags won't be called for hugetlb, remove the
is_vm_hugetlb_page() check and assert the pte lock unconditionally.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 721c551d
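
To make the motivation concrete, here is a minimal sketch of where this
refactor is headed, assuming a hypothetical extra page-size argument on
__ptep_set_access_flags (the "later patch" is not part of this commit,
so the final signature may differ). hstate_vma() and huge_page_size()
are existing hugetlb helpers; everything else mirrors the code added in
the diff below:

/*
 * Illustrative sketch only, not this commit: assumes
 * __ptep_set_access_flags() has grown a page-size argument.
 */
int huge_ptep_set_access_flags(struct vm_area_struct *vma,
			       unsigned long addr, pte_t *ptep,
			       pte_t pte, int dirty)
{
	int changed;
	/* A hugetlb VMA knows its page size via its hstate. */
	unsigned long psize = huge_page_size(hstate_vma(vma));

	pte = set_access_flags_filter(pte, vma, dirty);
	changed = !pte_same(*ptep, pte);
	if (changed) {
		/* 'psize' is the assumed new argument. */
		__ptep_set_access_flags(vma->vm_mm, ptep, pte, addr, psize);
		flush_hugetlb_page(vma, addr);
	}
	return changed;
}

In this commit the page size is not passed anywhere yet; the point is
only that the hugetlb-specific copy is now the natural place to compute
it.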
arch/powerpc/include/asm/hugetlb.h
@@ -166,22 +166,9 @@ static inline pte_t huge_pte_wrprotect(pte_t pte)
 	return pte_wrprotect(pte);
 }
 
-static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
-					     unsigned long addr, pte_t *ptep,
-					     pte_t pte, int dirty)
-{
-#ifdef HUGETLB_NEED_PRELOAD
-	/*
-	 * The "return 1" forces a call of update_mmu_cache, which will write a
-	 * TLB entry.  Without this, platforms that don't do a write of the TLB
-	 * entry in the TLB miss handler asm will fault ad infinitum.
-	 */
-	ptep_set_access_flags(vma, addr, ptep, pte, dirty);
-	return 1;
-#else
-	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
-#endif
-}
+extern int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+				      unsigned long addr, pte_t *ptep,
+				      pte_t pte, int dirty);
 
 static inline pte_t huge_ptep_get(pte_t *ptep)
 {
arch/powerpc/mm/pgtable.c
@@ -221,7 +221,6 @@ int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
 	entry = set_access_flags_filter(entry, vma, dirty);
 	changed = !pte_same(*(ptep), entry);
 	if (changed) {
-		if (!is_vm_hugetlb_page(vma))
-			assert_pte_locked(vma->vm_mm, address);
+		assert_pte_locked(vma->vm_mm, address);
 		__ptep_set_access_flags(vma->vm_mm, ptep, entry, address);
 		flush_tlb_page(vma, address);
@@ -229,6 +228,36 @@ int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
 	return changed;
 }
 
+#ifdef CONFIG_HUGETLB_PAGE
+extern int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+				      unsigned long addr, pte_t *ptep,
+				      pte_t pte, int dirty)
+{
+#ifdef HUGETLB_NEED_PRELOAD
+	/*
+	 * The "return 1" forces a call of update_mmu_cache, which will write a
+	 * TLB entry.  Without this, platforms that don't do a write of the TLB
+	 * entry in the TLB miss handler asm will fault ad infinitum.
+	 */
+	ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+	return 1;
+#else
+	int changed;
+
+	pte = set_access_flags_filter(pte, vma, dirty);
+	changed = !pte_same(*(ptep), pte);
+	if (changed) {
+#ifdef CONFIG_DEBUG_VM
+		assert_spin_locked(&vma->vm_mm->page_table_lock);
+#endif
+		__ptep_set_access_flags(vma->vm_mm, ptep, pte, addr);
+		flush_hugetlb_page(vma, addr);
+	}
+	return changed;
+#endif
+}
+#endif /* CONFIG_HUGETLB_PAGE */
+
 #ifdef CONFIG_DEBUG_VM
 void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
 {
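
As context for the "return 1" comment carried over into pgtable.c
above: the generic hugetlb fault path preloads the TLB/MMU cache only
when this helper reports a change. A simplified sketch of that caller
pattern, paraphrased from mm/hugetlb.c (not part of this diff):

	entry = pte_mkyoung(entry);
	if (huge_ptep_set_access_flags(vma, address, ptep, entry,
				       flags & FAULT_FLAG_WRITE))
		update_mmu_cache(vma, address, ptep);

A non-zero return tells the core MM that the PTE changed, which is why
HUGETLB_NEED_PRELOAD platforms return 1 unconditionally.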