Commit 056aad67 authored by Suzuki K Poulose, committed by Christoffer Dall

kvm: arm/arm64: Rework gpa callback handlers

In order to perform an operation on a gpa range, we currently iterate
over each page in a user memory slot for the given range. This is
inefficient when dealing with a large range (e.g., a VMA), especially
when unmapping a range. At present, with a stage2 unmap on a range
backed by a hugepage, we clear the PMD when we unmap the first page in
the loop; the remaining iterations simply traverse the page table down
to the PMD level only to find that nothing is there.

This patch reworks the code to invoke the callback handlers on the
biggest range possible within the memory slot, to reduce the number of
times the handler is called.

Cc: Marc Zyngier <marc.zyngier@arm.com>
Reviewed-by: Christoffer Dall <cdall@linaro.org>
Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Christoffer Dall <cdall@linaro.org>
parent 97da3854
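
The core of the change is the loop in handle_hva_to_gpa(). The following
before/after sketch is distilled from the hunks below (the surrounding
memslot iteration and hva clamping are elided), so read it as an
illustration rather than the verbatim patch:

	/* Before: one handler call per 4K page in the overlapping range. */
	gfn = hva_to_gfn_memslot(hva_start, memslot);
	gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
	for (; gfn < gfn_end; ++gfn)
		ret |= handler(kvm, gfn << PAGE_SHIFT, data);

	/* After: one handler call covering the whole overlap; the size is
	 * passed down so a handler such as kvm_unmap_hva_handler() can hand
	 * the entire range to unmap_stage2_range() in one go.
	 */
	gpa = hva_to_gfn_memslot(hva_start, memslot) << PAGE_SHIFT;
	ret |= handler(kvm, gpa, (u64)(hva_end - hva_start), data);

With 4K pages, a 2MB hugepage-backed region thus goes from 512 handler
invocations to a single one.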
@@ -1512,7 +1512,8 @@ static int handle_hva_to_gpa(struct kvm *kvm,
 			     unsigned long start,
 			     unsigned long end,
 			     int (*handler)(struct kvm *kvm,
-					    gpa_t gpa, void *data),
+					    gpa_t gpa, u64 size,
+					    void *data),
 			     void *data)
 {
 	struct kvm_memslots *slots;
@@ -1524,7 +1525,7 @@ static int handle_hva_to_gpa(struct kvm *kvm,
 	/* we only care about the pages that the guest sees */
 	kvm_for_each_memslot(memslot, slots) {
 		unsigned long hva_start, hva_end;
-		gfn_t gfn, gfn_end;
+		gfn_t gpa;
 
 		hva_start = max(start, memslot->userspace_addr);
 		hva_end = min(end, memslot->userspace_addr +
@@ -1532,25 +1533,16 @@ static int handle_hva_to_gpa(struct kvm *kvm,
 		if (hva_start >= hva_end)
 			continue;
 
-		/*
-		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
-		 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
-		 */
-		gfn = hva_to_gfn_memslot(hva_start, memslot);
-		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
-
-		for (; gfn < gfn_end; ++gfn) {
-			gpa_t gpa = gfn << PAGE_SHIFT;
-			ret |= handler(kvm, gpa, data);
-		}
+		gpa = hva_to_gfn_memslot(hva_start, memslot) << PAGE_SHIFT;
+		ret |= handler(kvm, gpa, (u64)(hva_end - hva_start), data);
 	}
 
 	return ret;
 }
 
-static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
+static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
 {
-	unmap_stage2_range(kvm, gpa, PAGE_SIZE);
+	unmap_stage2_range(kvm, gpa, size);
 	return 0;
 }
 
@@ -1577,10 +1569,11 @@ int kvm_unmap_hva_range(struct kvm *kvm,
 	return 0;
 }
 
-static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
+static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
 {
 	pte_t *pte = (pte_t *)data;
 
+	WARN_ON(size != PAGE_SIZE);
 	/*
 	 * We can always call stage2_set_pte with KVM_S2PTE_FLAG_LOGGING_ACTIVE
 	 * flag clear because MMU notifiers will have unmapped a huge PMD before
@@ -1606,11 +1599,12 @@ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
 	handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
 }
 
-static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
+static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
 {
 	pmd_t *pmd;
 	pte_t *pte;
 
+	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE);
 	pmd = stage2_get_pmd(kvm, NULL, gpa);
 	if (!pmd || pmd_none(*pmd))	/* Nothing there */
 		return 0;
@@ -1625,11 +1619,12 @@ static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
 	return stage2_ptep_test_and_clear_young(pte);
 }
 
-static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
+static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
 {
 	pmd_t *pmd;
 	pte_t *pte;
 
+	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE);
 	pmd = stage2_get_pmd(kvm, NULL, gpa);
 	if (!pmd || pmd_none(*pmd))	/* Nothing there */
 		return 0;
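
For orientation, this is roughly how the reworked helper is driven on the
unmap path. kvm_unmap_hva_range() itself only appears in a hunk header
above, so the body below is a minimal sketch of the caller shape rather
than a quote from this patch:

	int kvm_unmap_hva_range(struct kvm *kvm,
				unsigned long start, unsigned long end)
	{
		if (!kvm->arch.pgd)	/* No stage2 tables allocated yet */
			return 0;

		/* One kvm_unmap_hva_handler() call per overlapping memslot,
		 * covering the whole overlap, instead of one call per page.
		 */
		handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
		return 0;
	}

The WARN_ONs added to the other handlers record the expectation that the
MMU notifier invokes them on a single page (or, for the aging handlers, at
most a huge PMD); unlike the unmap handler, they do not walk an arbitrary
range.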