Commit 7e48c101 authored by Paul Mackerras, committed by Alexander Graf

KVM: PPC: Book3S PR: Rework kvmppc_mmu_book3s_64_xlate()

This reworks kvmppc_mmu_book3s_64_xlate() to make it check the large
page bit in the hashed page table entries (HPTEs) it looks at, and
to simplify and streamline the code.  The checking of the first dword
of each HPTE is now done with a single mask and compare operation,
and all the code dealing with the matching HPTE, if we find one,
is consolidated in one place in the main line of the function flow.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
parent 8b23de29
...@@ -182,10 +182,13 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, ...@@ -182,10 +182,13 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
hva_t ptegp; hva_t ptegp;
u64 pteg[16]; u64 pteg[16];
u64 avpn = 0; u64 avpn = 0;
u64 v, r;
u64 v_val, v_mask;
u64 eaddr_mask;
int i; int i;
u8 key = 0; u8 pp, key = 0;
bool found = false; bool found = false;
int second = 0; bool second = false;
ulong mp_ea = vcpu->arch.magic_page_ea; ulong mp_ea = vcpu->arch.magic_page_ea;
/* Magic page override */ /* Magic page override */
...@@ -208,8 +211,16 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, ...@@ -208,8 +211,16 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
goto no_seg_found; goto no_seg_found;
avpn = kvmppc_mmu_book3s_64_get_avpn(slbe, eaddr); avpn = kvmppc_mmu_book3s_64_get_avpn(slbe, eaddr);
v_val = avpn & HPTE_V_AVPN;
if (slbe->tb) if (slbe->tb)
avpn |= SLB_VSID_B_1T; v_val |= SLB_VSID_B_1T;
if (slbe->large)
v_val |= HPTE_V_LARGE;
v_val |= HPTE_V_VALID;
v_mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_LARGE | HPTE_V_VALID |
HPTE_V_SECONDARY;
do_second: do_second:
ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu_book3s, slbe, eaddr, second); ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu_book3s, slbe, eaddr, second);
...@@ -227,91 +238,74 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, ...@@ -227,91 +238,74 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
key = 4; key = 4;
for (i=0; i<16; i+=2) { for (i=0; i<16; i+=2) {
u64 v = pteg[i]; /* Check all relevant fields of 1st dword */
u64 r = pteg[i+1]; if ((pteg[i] & v_mask) == v_val) {
/* Valid check */
if (!(v & HPTE_V_VALID))
continue;
/* Hash check */
if ((v & HPTE_V_SECONDARY) != second)
continue;
/* AVPN compare */
if (HPTE_V_COMPARE(avpn, v)) {
u8 pp = (r & HPTE_R_PP) | key;
int eaddr_mask = 0xFFF;
gpte->eaddr = eaddr;
gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu,
eaddr,
data);
if (slbe->large)
eaddr_mask = 0xFFFFFF;
gpte->raddr = (r & HPTE_R_RPN) | (eaddr & eaddr_mask);
gpte->may_execute = ((r & HPTE_R_N) ? false : true);
gpte->may_read = false;
gpte->may_write = false;
switch (pp) {
case 0:
case 1:
case 2:
case 6:
gpte->may_write = true;
/* fall through */
case 3:
case 5:
case 7:
gpte->may_read = true;
break;
}
dprintk("KVM MMU: Translated 0x%lx [0x%llx] -> 0x%llx "
"-> 0x%lx\n",
eaddr, avpn, gpte->vpage, gpte->raddr);
found = true; found = true;
break; break;
} }
} }
/* Update PTE R and C bits, so the guest's swapper knows we used the if (!found) {
* page */ if (second)
if (found) { goto no_page_found;
u32 oldr = pteg[i+1]; v_val |= HPTE_V_SECONDARY;
second = true;
goto do_second;
}
if (gpte->may_read) { v = pteg[i];
/* Set the accessed flag */ r = pteg[i+1];
pteg[i+1] |= HPTE_R_R; pp = (r & HPTE_R_PP) | key;
} eaddr_mask = 0xFFF;
if (gpte->may_write) {
/* Set the dirty flag */ gpte->eaddr = eaddr;
pteg[i+1] |= HPTE_R_C; gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);
} else { if (slbe->large)
dprintk("KVM: Mapping read-only page!\n"); eaddr_mask = 0xFFFFFF;
} gpte->raddr = (r & HPTE_R_RPN & ~eaddr_mask) | (eaddr & eaddr_mask);
gpte->may_execute = ((r & HPTE_R_N) ? false : true);
gpte->may_read = false;
gpte->may_write = false;
switch (pp) {
case 0:
case 1:
case 2:
case 6:
gpte->may_write = true;
/* fall through */
case 3:
case 5:
case 7:
gpte->may_read = true;
break;
}
/* Write back into the PTEG */ dprintk("KVM MMU: Translated 0x%lx [0x%llx] -> 0x%llx "
if (pteg[i+1] != oldr) "-> 0x%lx\n",
copy_to_user((void __user *)ptegp, pteg, sizeof(pteg)); eaddr, avpn, gpte->vpage, gpte->raddr);
if (!gpte->may_read) /* Update PTE R and C bits, so the guest's swapper knows we used the
return -EPERM; * page */
return 0; if (gpte->may_read) {
} else { /* Set the accessed flag */
dprintk("KVM MMU: No PTE found (ea=0x%lx sdr1=0x%llx " r |= HPTE_R_R;
"ptegp=0x%lx)\n", }
eaddr, to_book3s(vcpu)->sdr1, ptegp); if (data && gpte->may_write) {
for (i = 0; i < 16; i += 2) /* Set the dirty flag -- XXX even if not writing */
dprintk(" %02d: 0x%llx - 0x%llx (0x%llx)\n", r |= HPTE_R_C;
i, pteg[i], pteg[i+1], avpn); }
if (!second) { /* Write back into the PTEG */
second = HPTE_V_SECONDARY; if (pteg[i+1] != r) {
goto do_second; pteg[i+1] = r;
} copy_to_user((void __user *)ptegp, pteg, sizeof(pteg));
} }
if (!gpte->may_read)
return -EPERM;
return 0;
no_page_found: no_page_found:
return -ENOENT; return -ENOENT;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment