Commit e4c1112c authored by Aneesh Kumar K.V, committed by Michael Ellerman

powerpc/mm: Change function prototype

In a later patch, we use the vma and psize to do the TLB flush. Do the prototype
update in a separate patch to make the review easier.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 044003b5
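
The change itself is mechanical: every __ptep_set_access_flags() / radix__ptep_set_access_flags() implementation and caller gains a struct vm_area_struct * (replacing the bare struct mm_struct *) and an int psize argument. A minimal caller-side sketch is below; the flush mentioned in the comment is only an assumption about how a later patch could use the extra arguments (radix__flush_tlb_page_psize() is shown purely as an illustrative page-size-aware flush), not something this patch does.

	/*
	 * Hedged sketch only, not part of the patch: a caller now passes the
	 * vma and an MMU page-size index instead of just the mm.  The mm is
	 * still reachable via vma->vm_mm inside the helper.
	 */
	static void example_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
					     pte_t entry, unsigned long address)
	{
		/* base page mapping: use the kernel's base page-size index */
		__ptep_set_access_flags(vma, ptep, entry, address, mmu_virtual_psize);

		/*
		 * With psize available, a follow-up patch could do a
		 * page-size-aware TLB flush inside the radix implementation,
		 * e.g. something like
		 *	radix__flush_tlb_page_psize(vma->vm_mm, address, psize);
		 * (assumed usage, for illustration only).
		 */
	}
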
@@ -235,9 +235,10 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
 }
 
-static inline void __ptep_set_access_flags(struct mm_struct *mm,
+static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
 					   pte_t *ptep, pte_t entry,
-					   unsigned long address)
+					   unsigned long address,
+					   int psize)
 {
 	unsigned long set = pte_val(entry) &
 		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
@@ -767,12 +767,14 @@ static inline bool check_pte_access(unsigned long access, unsigned long ptev)
  * Generic functions with hash/radix callbacks
  */
-static inline void __ptep_set_access_flags(struct mm_struct *mm,
+static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
 					   pte_t *ptep, pte_t entry,
-					   unsigned long address)
+					   unsigned long address,
+					   int psize)
 {
 	if (radix_enabled())
-		return radix__ptep_set_access_flags(mm, ptep, entry, address);
+		return radix__ptep_set_access_flags(vma, ptep, entry,
+						    address, psize);
 	return hash__ptep_set_access_flags(ptep, entry);
 }
@@ -124,8 +124,9 @@ extern void radix__mark_rodata_ro(void);
 extern void radix__mark_initmem_nx(void);
 #endif
-extern void radix__ptep_set_access_flags(struct mm_struct *mm, pte_t *ptep,
-					 pte_t entry, unsigned long address);
+extern void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
+					 pte_t entry, unsigned long address,
+					 int psize);
 static inline unsigned long __radix_pte_update(pte_t *ptep, unsigned long clr,
 					       unsigned long set)
@@ -256,9 +256,10 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
 }
 
-static inline void __ptep_set_access_flags(struct mm_struct *mm,
+static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
 					   pte_t *ptep, pte_t entry,
-					   unsigned long address)
+					   unsigned long address,
+					   int psize)
 {
 	unsigned long set = pte_val(entry) &
 		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
@@ -281,9 +281,10 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
 /* Set the dirty and/or accessed bits atomically in a linux PTE, this
  * function doesn't need to flush the hash entry
  */
-static inline void __ptep_set_access_flags(struct mm_struct *mm,
+static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
 					   pte_t *ptep, pte_t entry,
-					   unsigned long address)
+					   unsigned long address,
+					   int psize)
 {
 	unsigned long bits = pte_val(entry) &
 		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
@@ -46,8 +46,12 @@ int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
 #endif
 	changed = !pmd_same(*(pmdp), entry);
 	if (changed) {
-		__ptep_set_access_flags(vma->vm_mm, pmdp_ptep(pmdp),
-					pmd_pte(entry), address);
+		/*
+		 * We can use MMU_PAGE_2M here, because only radix
+		 * path look at the psize.
+		 */
+		__ptep_set_access_flags(vma, pmdp_ptep(pmdp),
+					pmd_pte(entry), address, MMU_PAGE_2M);
 		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 	}
 	return changed;
@@ -1085,10 +1085,10 @@ int radix__has_transparent_hugepage(void)
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
-void radix__ptep_set_access_flags(struct mm_struct *mm,
-				  pte_t *ptep, pte_t entry,
-				  unsigned long address)
+void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
+				  pte_t entry, unsigned long address, int psize)
 {
+	struct mm_struct *mm = vma->vm_mm;
 	unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED |
 					      _PAGE_RW | _PAGE_EXEC);
@@ -222,7 +222,8 @@ int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
 	changed = !pte_same(*(ptep), entry);
 	if (changed) {
 		assert_pte_locked(vma->vm_mm, address);
-		__ptep_set_access_flags(vma->vm_mm, ptep, entry, address);
+		__ptep_set_access_flags(vma, ptep, entry,
+					address, mmu_virtual_psize);
 		flush_tlb_page(vma, address);
 	}
 	return changed;
@@ -242,15 +243,26 @@ extern int huge_ptep_set_access_flags(struct vm_area_struct *vma,
 	ptep_set_access_flags(vma, addr, ptep, pte, dirty);
 	return 1;
 #else
-	int changed;
+	int changed, psize;
 
 	pte = set_access_flags_filter(pte, vma, dirty);
 	changed = !pte_same(*(ptep), pte);
 	if (changed) {
+#ifdef CONFIG_PPC_BOOK3S_64
+		struct hstate *hstate = hstate_file(vma->vm_file);
+
+		psize = hstate_get_psize(hstate);
+#else
+		/*
+		 * Not used on non book3s64 platforms. But 8xx
+		 * can possibly use tsize derived from hstate.
+		 */
+		psize = 0;
+#endif
 #ifdef CONFIG_DEBUG_VM
 		assert_spin_locked(&vma->vm_mm->page_table_lock);
 #endif
-		__ptep_set_access_flags(vma->vm_mm, ptep, pte, addr);
+		__ptep_set_access_flags(vma, ptep, pte, addr, psize);
 		flush_hugetlb_page(vma, addr);
 	}
 	return changed;
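
On book3s64 the hugetlb path above derives psize from the file's hstate via hstate_get_psize(). As a rough sketch of the idea (simplified, not the verbatim kernel helper, which covers more sizes and warns on an unknown shift), the huge page shift is resolved against mmu_psize_defs[] to an MMU page-size index:

	/*
	 * Simplified sketch of the idea behind hstate_get_psize() (book3s64);
	 * example_hstate_psize is a hypothetical name used here for illustration.
	 */
	static int example_hstate_psize(struct hstate *hstate)
	{
		unsigned long shift = huge_page_shift(hstate);

		if (shift == mmu_psize_defs[MMU_PAGE_2M].shift)
			return MMU_PAGE_2M;
		if (shift == mmu_psize_defs[MMU_PAGE_16M].shift)
			return MMU_PAGE_16M;
		/* fall back to the base page-size index */
		return mmu_virtual_psize;
	}
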