Commit 227be799 authored by Martin Schwidefsky's avatar Martin Schwidefsky

s390/mm: uninline pmdp_xxx functions from pgtable.h

The pmdp_xxx functions are smaller than their ptep_xxx counterparts,
but to keep things symmetrical, uninline them as well.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent ebde765c
...@@ -520,15 +520,6 @@ static inline int pmd_bad(pmd_t pmd) ...@@ -520,15 +520,6 @@ static inline int pmd_bad(pmd_t pmd)
return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0; return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
} }
#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp,
pmd_t entry, int dirty);
#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp);
#define __HAVE_ARCH_PMD_WRITE #define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd) static inline int pmd_write(pmd_t pmd)
{ {
...@@ -1203,54 +1194,51 @@ static inline void __pmdp_idte_local(unsigned long address, pmd_t *pmdp) ...@@ -1203,54 +1194,51 @@ static inline void __pmdp_idte_local(unsigned long address, pmd_t *pmdp)
: "cc" ); : "cc" );
} }
static inline void pmdp_flush_direct(struct mm_struct *mm, pmd_t pmdp_xchg_direct(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
unsigned long address, pmd_t *pmdp) pmd_t pmdp_xchg_lazy(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
{
int active, count;
if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID) #ifdef CONFIG_TRANSPARENT_HUGEPAGE
return;
if (!MACHINE_HAS_IDTE) { #define __HAVE_ARCH_PGTABLE_DEPOSIT
__pmdp_csp(pmdp); void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
return; pgtable_t pgtable);
}
active = (mm == current->active_mm) ? 1 : 0; #define __HAVE_ARCH_PGTABLE_WITHDRAW
count = atomic_add_return(0x10000, &mm->context.attach_count); pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
__pmdp_idte_local(address, pmdp);
else
__pmdp_idte(address, pmdp);
atomic_sub(0x10000, &mm->context.attach_count);
}
static inline void pmdp_flush_lazy(struct mm_struct *mm, #define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
unsigned long address, pmd_t *pmdp) static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
unsigned long addr, pmd_t *pmdp,
pmd_t entry, int dirty)
{ {
int active, count; VM_BUG_ON(addr & ~HPAGE_MASK);
if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID) entry = pmd_mkyoung(entry);
return; if (dirty)
active = (mm == current->active_mm) ? 1 : 0; entry = pmd_mkdirty(entry);
count = atomic_add_return(0x10000, &mm->context.attach_count); if (pmd_val(*pmdp) == pmd_val(entry))
if ((count & 0xffff) <= active) { return 0;
pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID; pmdp_xchg_direct(vma->vm_mm, addr, pmdp, entry);
mm->context.flush_mm = 1; return 1;
} else if (MACHINE_HAS_IDTE)
__pmdp_idte(address, pmdp);
else
__pmdp_csp(pmdp);
atomic_sub(0x10000, &mm->context.attach_count);
} }
#ifdef CONFIG_TRANSPARENT_HUGEPAGE #define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
unsigned long addr, pmd_t *pmdp)
{
pmd_t pmd = *pmdp;
#define __HAVE_ARCH_PGTABLE_DEPOSIT pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd_mkold(pmd));
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, return pmd_young(pmd);
pgtable_t pgtable); }
#define __HAVE_ARCH_PGTABLE_WITHDRAW #define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp); static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
unsigned long addr, pmd_t *pmdp)
{
VM_BUG_ON(addr & ~HPAGE_MASK);
return pmdp_test_and_clear_young(vma, addr, pmdp);
}
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t entry) pmd_t *pmdp, pmd_t entry)
...@@ -1266,66 +1254,48 @@ static inline pmd_t pmd_mkhuge(pmd_t pmd) ...@@ -1266,66 +1254,48 @@ static inline pmd_t pmd_mkhuge(pmd_t pmd)
return pmd; return pmd;
} }
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp)
{
pmd_t pmd;
pmd = *pmdp;
pmdp_flush_direct(vma->vm_mm, address, pmdp);
*pmdp = pmd_mkold(pmd);
return pmd_young(pmd);
}
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
unsigned long address, pmd_t *pmdp) unsigned long addr, pmd_t *pmdp)
{ {
pmd_t pmd = *pmdp; return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_INVALID));
pmdp_flush_direct(mm, address, pmdp);
pmd_clear(pmdp);
return pmd;
} }
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm, static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
unsigned long address, unsigned long addr,
pmd_t *pmdp, int full) pmd_t *pmdp, int full)
{ {
pmd_t pmd = *pmdp; if (full) {
pmd_t pmd = *pmdp;
if (!full) *pmdp = __pmd(_SEGMENT_ENTRY_INVALID);
pmdp_flush_lazy(mm, address, pmdp); return pmd;
pmd_clear(pmdp); }
return pmd; return pmdp_xchg_lazy(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_INVALID));
} }
#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH #define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp) unsigned long addr, pmd_t *pmdp)
{ {
return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp); return pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
} }
#define __HAVE_ARCH_PMDP_INVALIDATE #define __HAVE_ARCH_PMDP_INVALIDATE
static inline void pmdp_invalidate(struct vm_area_struct *vma, static inline void pmdp_invalidate(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp) unsigned long addr, pmd_t *pmdp)
{ {
pmdp_flush_direct(vma->vm_mm, address, pmdp); pmdp_xchg_direct(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_INVALID));
} }
#define __HAVE_ARCH_PMDP_SET_WRPROTECT #define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm, static inline void pmdp_set_wrprotect(struct mm_struct *mm,
unsigned long address, pmd_t *pmdp) unsigned long addr, pmd_t *pmdp)
{ {
pmd_t pmd = *pmdp; pmd_t pmd = *pmdp;
if (pmd_write(pmd)) { if (pmd_write(pmd))
pmdp_flush_direct(mm, address, pmdp); pmd = pmdp_xchg_lazy(mm, addr, pmdp, pmd_wrprotect(pmd));
set_pmd_at(mm, address, pmdp, pmd_wrprotect(pmd));
}
} }
static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
......
...@@ -105,11 +105,10 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, ...@@ -105,11 +105,10 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep) unsigned long addr, pte_t *ptep)
{ {
pmd_t *pmdp = (pmd_t *) ptep; pmd_t *pmdp = (pmd_t *) ptep;
pte_t pte = huge_ptep_get(ptep); pmd_t old;
pmdp_flush_direct(mm, addr, pmdp); old = pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY; return __pmd_to_pte(old);
return pte;
} }
pte_t *huge_pte_alloc(struct mm_struct *mm, pte_t *huge_pte_alloc(struct mm_struct *mm,
......
...@@ -1418,6 +1418,74 @@ void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr, ...@@ -1418,6 +1418,74 @@ void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
} }
EXPORT_SYMBOL(ptep_modify_prot_commit); EXPORT_SYMBOL(ptep_modify_prot_commit);
static inline pmd_t pmdp_flush_direct(struct mm_struct *mm,
unsigned long addr, pmd_t *pmdp)
{
int active, count;
pmd_t old;
old = *pmdp;
if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
return old;
if (!MACHINE_HAS_IDTE) {
__pmdp_csp(pmdp);
return old;
}
active = (mm == current->active_mm) ? 1 : 0;
count = atomic_add_return(0x10000, &mm->context.attach_count);
if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
__pmdp_idte_local(addr, pmdp);
else
__pmdp_idte(addr, pmdp);
atomic_sub(0x10000, &mm->context.attach_count);
return old;
}
static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
unsigned long addr, pmd_t *pmdp)
{
int active, count;
pmd_t old;
old = *pmdp;
if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
return old;
active = (mm == current->active_mm) ? 1 : 0;
count = atomic_add_return(0x10000, &mm->context.attach_count);
if ((count & 0xffff) <= active) {
pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
mm->context.flush_mm = 1;
} else if (MACHINE_HAS_IDTE)
__pmdp_idte(addr, pmdp);
else
__pmdp_csp(pmdp);
atomic_sub(0x10000, &mm->context.attach_count);
return old;
}
pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t new)
{
pmd_t old;
old = pmdp_flush_direct(mm, addr, pmdp);
*pmdp = new;
return old;
}
EXPORT_SYMBOL(pmdp_xchg_direct);
pmd_t pmdp_xchg_lazy(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t new)
{
pmd_t old;
old = pmdp_flush_lazy(mm, addr, pmdp);
*pmdp = new;
return old;
}
EXPORT_SYMBOL(pmdp_xchg_lazy);
/* /*
* switch on pgstes for its userspace process (for kvm) * switch on pgstes for its userspace process (for kvm)
*/ */
...@@ -1525,31 +1593,6 @@ void s390_reset_cmma(struct mm_struct *mm) ...@@ -1525,31 +1593,6 @@ void s390_reset_cmma(struct mm_struct *mm)
EXPORT_SYMBOL_GPL(s390_reset_cmma); EXPORT_SYMBOL_GPL(s390_reset_cmma);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE #ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmdp)
{
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
/* No need to flush TLB
* On s390 reference bits are in storage key and never in TLB */
return pmdp_test_and_clear_young(vma, address, pmdp);
}
int pmdp_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp,
pmd_t entry, int dirty)
{
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
entry = pmd_mkyoung(entry);
if (dirty)
entry = pmd_mkdirty(entry);
if (pmd_same(*pmdp, entry))
return 0;
pmdp_invalidate(vma, address, pmdp);
set_pmd_at(vma->vm_mm, address, pmdp, entry);
return 1;
}
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
pgtable_t pgtable) pgtable_t pgtable)
{ {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment