Commit 044003b5 authored by Aneesh Kumar K.V, committed by Michael Ellerman

powerpc/mm/radix: Move function from radix.h to pgtable-radix.c

A later patch will update this function in a way that requires it to be moved
to pgtable-radix.c. Keeping the function in radix.h results in build errors
like the ones below.

./arch/powerpc/include/asm/book3s/64/radix.h: In function ‘radix__ptep_set_access_flags’:
./arch/powerpc/include/asm/book3s/64/radix.h:196:28: error: dereferencing pointer to incomplete type ‘struct vm_area_struct’
  struct mm_struct *mm = vma->vm_mm;
                            ^~
./arch/powerpc/include/asm/book3s/64/radix.h:204:6: error: implicit declaration of function ‘atomic_read’; did you mean ‘__atomic_load’? [-Werror=implicit-function-declaration]
      atomic_read(&mm->context.copros) > 0) {
      ^~~~~~~~~~~
      __atomic_load
./arch/powerpc/include/asm/book3s/64/radix.h:204:21: error: dereferencing pointer to incomplete type ‘struct mm_struct’
      atomic_read(&mm->context.copros) > 0) {

Instead of fixing the header dependencies, move the function to pgtable-radix.c.
The function has also grown too large to be a static inline. Doing the move in a
separate patch helps with review.

No functional change in this patch. Only code movement.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent f069ff39
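
For context, here is a minimal sketch (not part of the patch, using the hypothetical names example.h, example.c and example_set_flags()) of the header/source split the diff below performs: a declaration in the header only needs the struct type forward-declared, while the definition in the .c file can include the headers that supply the complete types and helpers such as atomic_read().

/* example.h -- a declaration only passes pointers around, so a forward
 * declaration of the struct is sufficient and no extra #include is needed. */
struct mm_struct;
void example_set_flags(struct mm_struct *mm, unsigned long address);

/* example.c -- the definition lives where the full types are visible. */
#include <linux/mm_types.h>	/* complete definition of struct mm_struct */
#include <linux/atomic.h>	/* atomic_read() */
#include "example.h"

void example_set_flags(struct mm_struct *mm, unsigned long address)
{
	/*
	 * Dereferencing mm and calling atomic_read() compile cleanly here,
	 * unlike in a header that never pulls in these definitions.
	 */
	if (atomic_read(&mm->mm_users) > 0)
		return;
}

This is the same split the patch applies to radix__ptep_set_access_flags(): keep only the extern declaration in radix.h and move the body to pgtable-radix.c.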

arch/powerpc/include/asm/book3s/64/radix.h

@@ -124,6 +124,9 @@ extern void radix__mark_rodata_ro(void);
 extern void radix__mark_initmem_nx(void);
 #endif
+extern void radix__ptep_set_access_flags(struct mm_struct *mm, pte_t *ptep,
+					 pte_t entry, unsigned long address);
 static inline unsigned long __radix_pte_update(pte_t *ptep, unsigned long clr,
					       unsigned long set)
 {

@@ -190,34 +193,6 @@ static inline pte_t radix__ptep_get_and_clear_full(struct mm_struct *mm,
 	return __pte(old_pte);
 }
-/*
- * Set the dirty and/or accessed bits atomically in a linux PTE, this
- * function doesn't need to invalidate tlb.
- */
-static inline void radix__ptep_set_access_flags(struct mm_struct *mm,
-						pte_t *ptep, pte_t entry,
-						unsigned long address)
-{
-	unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED |
-					      _PAGE_RW | _PAGE_EXEC);
-	if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
-		unsigned long old_pte, new_pte;
-		old_pte = __radix_pte_update(ptep, ~0, 0);
-		/*
-		 * new value of pte
-		 */
-		new_pte = old_pte | set;
-		radix__flush_tlb_pte_p9_dd1(old_pte, mm, address);
-		__radix_pte_update(ptep, 0, new_pte);
-	} else
-		__radix_pte_update(ptep, 0, set);
-	asm volatile("ptesync" : : : "memory");
-}
 static inline int radix__pte_same(pte_t pte_a, pte_t pte_b)
 {
 	return ((pte_raw(pte_a) ^ pte_raw(pte_b)) == 0);

arch/powerpc/mm/pgtable-radix.c

@@ -1084,3 +1084,25 @@ int radix__has_transparent_hugepage(void)
 	return 0;
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+void radix__ptep_set_access_flags(struct mm_struct *mm,
+				  pte_t *ptep, pte_t entry,
+				  unsigned long address)
+{
+	unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED |
+					      _PAGE_RW | _PAGE_EXEC);
+	if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
+		unsigned long old_pte, new_pte;
+		old_pte = __radix_pte_update(ptep, ~0, 0);
+		/*
+		 * new value of pte
+		 */
+		new_pte = old_pte | set;
+		radix__flush_tlb_pte_p9_dd1(old_pte, mm, address);
+		__radix_pte_update(ptep, 0, new_pte);
+	} else
+		__radix_pte_update(ptep, 0, set);
+	asm volatile("ptesync" : : : "memory");
+}