Commit 6396b852 authored by Punit Agrawal, committed by Marc Zyngier

KVM: arm/arm64: Re-factor setting the Stage 2 entry to exec on fault

Stage 2 fault handler marks a page as executable if it is handling an
execution fault or if it was a permission fault in which case the
executable bit needs to be preserved.

The logic to decide if the page should be marked executable is
duplicated for PMD and PTE entries. To avoid creating another copy
when support for PUD hugepages is introduced refactor the code to
share the checks needed to mark a page table entry as executable.
Signed-off-by: Punit Agrawal <punit.agrawal@arm.com>
Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Reviewed-by: Christoffer Dall <christoffer.dall@arm.com>
Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
parent 3f58bf63
...@@ -1475,7 +1475,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, ...@@ -1475,7 +1475,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
unsigned long fault_status) unsigned long fault_status)
{ {
int ret; int ret;
bool write_fault, exec_fault, writable, force_pte = false; bool write_fault, writable, force_pte = false;
bool exec_fault, needs_exec;
unsigned long mmu_seq; unsigned long mmu_seq;
gfn_t gfn = fault_ipa >> PAGE_SHIFT; gfn_t gfn = fault_ipa >> PAGE_SHIFT;
struct kvm *kvm = vcpu->kvm; struct kvm *kvm = vcpu->kvm;
...@@ -1598,19 +1599,25 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, ...@@ -1598,19 +1599,25 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
if (exec_fault) if (exec_fault)
invalidate_icache_guest_page(pfn, vma_pagesize); invalidate_icache_guest_page(pfn, vma_pagesize);
/*
* If we took an execution fault we have made the
* icache/dcache coherent above and should now let the s2
* mapping be executable.
*
* Write faults (!exec_fault && FSC_PERM) are orthogonal to
* execute permissions, and we preserve whatever we have.
*/
needs_exec = exec_fault ||
(fault_status == FSC_PERM && stage2_is_exec(kvm, fault_ipa));
if (vma_pagesize == PMD_SIZE) { if (vma_pagesize == PMD_SIZE) {
pmd_t new_pmd = pfn_pmd(pfn, mem_type); pmd_t new_pmd = pfn_pmd(pfn, mem_type);
new_pmd = pmd_mkhuge(new_pmd); new_pmd = pmd_mkhuge(new_pmd);
if (writable) if (writable)
new_pmd = kvm_s2pmd_mkwrite(new_pmd); new_pmd = kvm_s2pmd_mkwrite(new_pmd);
if (exec_fault) { if (needs_exec)
new_pmd = kvm_s2pmd_mkexec(new_pmd); new_pmd = kvm_s2pmd_mkexec(new_pmd);
} else if (fault_status == FSC_PERM) {
/* Preserve execute if XN was already cleared */
if (stage2_is_exec(kvm, fault_ipa))
new_pmd = kvm_s2pmd_mkexec(new_pmd);
}
ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd); ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
} else { } else {
...@@ -1621,13 +1628,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, ...@@ -1621,13 +1628,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
mark_page_dirty(kvm, gfn); mark_page_dirty(kvm, gfn);
} }
if (exec_fault) { if (needs_exec)
new_pte = kvm_s2pte_mkexec(new_pte); new_pte = kvm_s2pte_mkexec(new_pte);
} else if (fault_status == FSC_PERM) {
/* Preserve execute if XN was already cleared */
if (stage2_is_exec(kvm, fault_ipa))
new_pte = kvm_s2pte_mkexec(new_pte);
}
ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, flags); ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, flags);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment