Commit a74791dd authored by Christophe Leroy, committed by Michael Ellerman

powerpc/mm: add helpers to get/set mm.context->pte_frag

In order to handle pte_fragment functions with single fragment
without adding pte_frag in all mm_context_t, this patch creates
two helpers which do nothing on platforms using a single fragment.
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent d09780f3
...@@ -110,6 +110,31 @@ void mark_initmem_nx(void); ...@@ -110,6 +110,31 @@ void mark_initmem_nx(void);
static inline void mark_initmem_nx(void) { } static inline void mark_initmem_nx(void) { }
#endif #endif
/*
 * When used, PTE_FRAG_NR is defined in subarch pgtable.h
 * so we are sure it is included when arriving here.
 */
#ifdef PTE_FRAG_NR
/*
 * Accessors for the per-mm PTE fragment pointer kept in mm_context_t.
 * Callers (see pte_frag.c) read/update it under mm->page_table_lock;
 * these helpers take no lock themselves.
 */
static inline void *pte_frag_get(mm_context_t *ctx)
{
	/* Returns the cached partially-used PTE fragment, or NULL if none. */
	return ctx->pte_frag;
}
static inline void pte_frag_set(mm_context_t *ctx, void *p)
{
	/* Record the next free fragment (NULL once the page is exhausted). */
	ctx->pte_frag = p;
}
#else
/*
 * Platforms using a single fragment per page have no pte_frag field in
 * mm_context_t: stub the helpers out so common pte_fragment code compiles
 * unchanged on them.
 */
static inline void *pte_frag_get(mm_context_t *ctx)
{
	return NULL;
}
static inline void pte_frag_set(mm_context_t *ctx, void *p)
{
}
#endif
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_PGTABLE_H */ #endif /* _ASM_POWERPC_PGTABLE_H */
...@@ -38,7 +38,7 @@ static pte_t *get_pte_from_cache(struct mm_struct *mm) ...@@ -38,7 +38,7 @@ static pte_t *get_pte_from_cache(struct mm_struct *mm)
return NULL; return NULL;
spin_lock(&mm->page_table_lock); spin_lock(&mm->page_table_lock);
ret = mm->context.pte_frag; ret = pte_frag_get(&mm->context);
if (ret) { if (ret) {
pte_frag = ret + PTE_FRAG_SIZE; pte_frag = ret + PTE_FRAG_SIZE;
/* /*
...@@ -46,7 +46,7 @@ static pte_t *get_pte_from_cache(struct mm_struct *mm) ...@@ -46,7 +46,7 @@ static pte_t *get_pte_from_cache(struct mm_struct *mm)
*/ */
if (((unsigned long)pte_frag & ~PAGE_MASK) == 0) if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
pte_frag = NULL; pte_frag = NULL;
mm->context.pte_frag = pte_frag; pte_frag_set(&mm->context, pte_frag);
} }
spin_unlock(&mm->page_table_lock); spin_unlock(&mm->page_table_lock);
return (pte_t *)ret; return (pte_t *)ret;
...@@ -86,9 +86,9 @@ static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel) ...@@ -86,9 +86,9 @@ static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
* the allocated page with single fragment * count.
* count. * count.
*/ */
if (likely(!mm->context.pte_frag)) { if (likely(!pte_frag_get(&mm->context))) {
atomic_set(&page->pt_frag_refcount, PTE_FRAG_NR); atomic_set(&page->pt_frag_refcount, PTE_FRAG_NR);
mm->context.pte_frag = ret + PTE_FRAG_SIZE; pte_frag_set(&mm->context, ret + PTE_FRAG_SIZE);
} }
spin_unlock(&mm->page_table_lock); spin_unlock(&mm->page_table_lock);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment