Commit 7d6e7f7f authored by Aneesh Kumar K.V, committed by Michael Ellerman

powerpc/mm/thp: Return pte address if we find trans_splitting.

For a THP that is marked trans splitting, we now return the pte.
This requires the callers to handle the pmd_trans_splitting scenario
themselves, if they care. All the current callers only look at the pfn
or at write_ok, hence we don't need to update them.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 691e95fd
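A caller that does care about an in-progress THP split now has to repeat the check that used to live inside the walker. The sketch below illustrates that; example_lookup() and its surrounding context are hypothetical, while find_linux_pte_or_hugepte(), pmd_trans_splitting() and pte_pmd() are the existing helpers touched or used by this patch, and the check mirrors the one removed from kvmppc_read_update_linux_pte().

/*
 * Hypothetical caller sketch, not part of this patch: it repeats the
 * pmd_trans_splitting() check that kvmppc_read_update_linux_pte() and
 * __find_linux_pte_or_hugepte() no longer perform on the caller's behalf.
 */
static bool example_lookup(struct mm_struct *mm, unsigned long ea)
{
	unsigned int shift;
	unsigned long flags;
	pte_t *ptep, pte;
	bool present = false;

	/* IRQs off so the page table cannot be freed or collapsed under us. */
	local_irq_save(flags);
	ptep = find_linux_pte_or_hugepte(mm->pgd, ea, &shift);
	if (ptep) {
		pte = *ptep;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		/* A hugepage that is being split is treated as unmapped. */
		if (shift && pmd_trans_splitting(pte_pmd(pte)))
			ptep = NULL;
#endif
		if (ptep && pte_present(pte))
			present = true;
	}
	local_irq_restore(flags);
	return present;
}

The shift returned by find_linux_pte_or_hugepte() is non-zero only for huge mappings, which is why the removed code gated the splitting test on it; the sketch keeps that gating.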
@@ -281,11 +281,9 @@ static inline int hpte_cache_flags_ok(unsigned long ptel, unsigned long io_type)
 
 /*
  * If it's present and writable, atomically set dirty and referenced bits and
- * return the PTE, otherwise return 0. If we find a transparent hugepage
- * and if it is marked splitting we return 0;
+ * return the PTE, otherwise return 0.
  */
-static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing,
-						 unsigned int hugepage)
+static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing)
 {
 	pte_t old_pte, new_pte = __pte(0);
 
@@ -301,12 +299,6 @@ static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing,
 			cpu_relax();
 			continue;
 		}
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-		/* If hugepage and is trans splitting return None */
-		if (unlikely(hugepage &&
-			     pmd_trans_splitting(pte_pmd(old_pte))))
-			return __pte(0);
-#endif
 		/* If pte is not present return None */
 		if (unlikely(!(pte_val(old_pte) & _PAGE_PRESENT)))
 			return __pte(0);
...
@@ -537,20 +537,17 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		}
 		/* if the guest wants write access, see if that is OK */
 		if (!writing && hpte_is_writable(r)) {
-			unsigned int hugepage_shift;
 			pte_t *ptep, pte;
 			unsigned long flags;
-
 			/*
 			 * We need to protect against page table destruction
-			 * while looking up and updating the pte.
+			 * hugepage split and collapse.
 			 */
 			local_irq_save(flags);
 			ptep = find_linux_pte_or_hugepte(current->mm->pgd,
-							 hva, &hugepage_shift);
+							 hva, NULL);
 			if (ptep) {
-				pte = kvmppc_read_update_linux_pte(ptep, 1,
-								   hugepage_shift);
+				pte = kvmppc_read_update_linux_pte(ptep, 1);
 				if (pte_write(pte))
 					write_ok = 1;
 			}
...
@@ -219,7 +219,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 			local_irq_restore(flags);
 			return H_PARAMETER;
 		}
-		pte = kvmppc_read_update_linux_pte(ptep, writing, hpage_shift);
+		pte = kvmppc_read_update_linux_pte(ptep, writing);
 		if (pte_present(pte) && !pte_protnone(pte)) {
 			if (writing && !pte_write(pte))
 				/* make the actual HPTE be read-only */
...
@@ -1014,12 +1014,11 @@ pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
 			 * A hugepage collapse is captured by pmd_none, because
 			 * it mark the pmd none and do a hpte invalidate.
 			 *
-			 * A hugepage split is captured by pmd_trans_splitting
-			 * because we mark the pmd trans splitting and do a
-			 * hpte invalidate
-			 *
+			 * We don't worry about pmd_trans_splitting here, The
+			 * caller if it needs to handle the splitting case
+			 * should check for that.
 			 */
-			if (pmd_none(pmd) || pmd_trans_splitting(pmd))
+			if (pmd_none(pmd))
 				return NULL;
 
 			if (pmd_huge(pmd) || pmd_large(pmd)) {
...