Commit df6556ad authored by Oliver Upton

KVM: arm64: Correctly handle page aging notifiers for unaligned memslot

Userspace is allowed to select any PAGE_SIZE-aligned hva to back guest
memory. This is even the case with hugepages, although it is a rather
suboptimal configuration, as PTE-level mappings are then used at stage-2.
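As a rough illustration (not part of this patch; vm_fd, the slot number,
and all addresses below are invented), userspace could produce such a
layout with nothing more than KVM_SET_USER_MEMORY_REGION:

  #include <linux/kvm.h>
  #include <stdint.h>
  #include <sys/ioctl.h>
  #include <sys/mman.h>

  /*
   * Back 4MiB of guest memory with a THP-eligible mapping, then hand KVM
   * an hva that is PAGE_SIZE-aligned but deliberately one page off a
   * 2MiB boundary -- legal, but pinning stage-2 to PTE-level mappings.
   */
  static int add_unaligned_memslot(int vm_fd)
  {
  	const size_t hpage = 2UL << 20;
  	const size_t len = 4UL << 20;
  	struct kvm_userspace_memory_region region;
  	uintptr_t hva;
  	void *mem;

  	mem = mmap(NULL, len + 2 * hpage, PROT_READ | PROT_WRITE,
  		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  	if (mem == MAP_FAILED)
  		return -1;
  	madvise(mem, len + 2 * hpage, MADV_HUGEPAGE);

  	/* Round up to a hugepage boundary, then offset by one page. */
  	hva = (((uintptr_t)mem + hpage - 1) & ~(hpage - 1)) + 4096;

  	region = (struct kvm_userspace_memory_region) {
  		.slot		 = 0,
  		.guest_phys_addr = 1UL << 30,
  		.memory_size	 = len,
  		.userspace_addr	 = hva,
  	};
  	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
  }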

The arm64 page aging handlers assume that the specified range is exactly
one page/block of memory, which in the aforementioned case is not
necessarily true. Altogether, this leads to the WARN() in kvm_age_gfn()
firing.

However, the WARN is only part of the issue, as the table walkers visit
at most a single leaf PTE. For hugepage-backed memory in a memslot that
isn't hugepage-aligned, page aging entirely misses accesses to the
hugepage beyond the first page in the memslot.
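To make the failure mode concrete, here is a hedged, userspace-compilable
sketch of the arithmetic (all gfn values are invented):

  #include <stdio.h>

  /*
   * Invented numbers: a 2MiB THP backs a memslot whose hva starts one
   * page into the hugepage, so the notifier range, clamped to the
   * memslot, covers 511 pages rather than an exact page/block.
   */
  int main(void)
  {
  	const unsigned long page_size = 1UL << 12;
  	const unsigned long pmd_size  = 1UL << 21;
  	const unsigned long pud_size  = 1UL << 30;
  	unsigned long start = 0x100001;	/* gfn: one page into the THP */
  	unsigned long end   = 0x100200;	/* gfn: end of the THP */
  	unsigned long size  = (end - start) << 12;

  	/* Mirrors the old WARN_ON() condition in kvm_age_gfn(). */
  	if (size != page_size && size != pmd_size && size != pud_size)
  		printf("WARN fires: size = %#lx (2MiB - 4KiB)\n", size);

  	/* The old walk visited one leaf PTE, leaving 510 pages unaged. */
  	return 0;
  }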

Add a new walker dedicated to handling the page aging MMU notifiers that
is capable of walking a range of PTEs. Convert kvm(_test)_age_gfn() over
to the new walker and drop the WARN() that caught the issue in the first
place. The implementation of this walker was inspired by the
test_clear_young() implementation by Yu Zhao [*], but repurposed to
address a bug in the existing aging implementation.

Cc: stable@vger.kernel.org # v5.15
Fixes: 056aad67 ("kvm: arm/arm64: Rework gpa callback handlers")
Link: https://lore.kernel.org/kvmarm/20230526234435.662652-6-yuzhao@google.com/ [*]
Co-developed-by: Yu Zhao <yuzhao@google.com>
Signed-off-by: Yu Zhao <yuzhao@google.com>
Reported-by: Reiji Watanabe <reijiw@google.com>
Reviewed-by: Marc Zyngier <maz@kernel.org>
Reviewed-by: Shaoqin Huang <shahuang@redhat.com>
Link: https://lore.kernel.org/r/20230627235405.4069823-1-oliver.upton@linux.dev
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
parent 970dee09
--- a/arch/arm64/include/asm/kvm_pgtable.h
+++ b/arch/arm64/include/asm/kvm_pgtable.h
@@ -608,22 +608,26 @@ int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size);
 kvm_pte_t kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr);
 
 /**
- * kvm_pgtable_stage2_mkold() - Clear the access flag in a page-table entry.
+ * kvm_pgtable_stage2_test_clear_young() - Test and optionally clear the access
+ *					    flag in a page-table entry.
  * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
  * @addr:	Intermediate physical address to identify the page-table entry.
+ * @size:	Size of the address range to visit.
+ * @mkold:	True if the access flag should be cleared.
  *
  * The offset of @addr within a page is ignored.
  *
- * If there is a valid, leaf page-table entry used to translate @addr, then
- * clear the access flag in that entry.
+ * Tests and conditionally clears the access flag for every valid, leaf
+ * page-table entry used to translate the range [@addr, @addr + @size).
  *
  * Note that it is the caller's responsibility to invalidate the TLB after
  * calling this function to ensure that the updated permissions are visible
  * to the CPUs.
  *
- * Return: The old page-table entry prior to clearing the flag, 0 on failure.
+ * Return: True if any of the visited PTEs had the access flag set.
  */
-kvm_pte_t kvm_pgtable_stage2_mkold(struct kvm_pgtable *pgt, u64 addr);
+bool kvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr,
+					 u64 size, bool mkold);
 
 /**
  * kvm_pgtable_stage2_relax_perms() - Relax the permissions enforced by a
@@ -645,18 +649,6 @@ kvm_pte_t kvm_pgtable_stage2_mkold(struct kvm_pgtable *pgt, u64 addr);
 int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
 				   enum kvm_pgtable_prot prot);
 
-/**
- * kvm_pgtable_stage2_is_young() - Test whether a page-table entry has the
- *				   access flag set.
- * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
- * @addr:	Intermediate physical address to identify the page-table entry.
- *
- * The offset of @addr within a page is ignored.
- *
- * Return: True if the page-table entry has the access flag set, false otherwise.
- */
-bool kvm_pgtable_stage2_is_young(struct kvm_pgtable *pgt, u64 addr);
-
 /**
  * kvm_pgtable_stage2_flush_range() - Clean and invalidate data cache to Point
  *				      of Coherency for guest stage-2 address
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -1195,25 +1195,54 @@ kvm_pte_t kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr)
 	return pte;
 }
 
-kvm_pte_t kvm_pgtable_stage2_mkold(struct kvm_pgtable *pgt, u64 addr)
+struct stage2_age_data {
+	bool	mkold;
+	bool	young;
+};
+
+static int stage2_age_walker(const struct kvm_pgtable_visit_ctx *ctx,
+			     enum kvm_pgtable_walk_flags visit)
 {
-	kvm_pte_t pte = 0;
-	stage2_update_leaf_attrs(pgt, addr, 1, 0, KVM_PTE_LEAF_ATTR_LO_S2_AF,
-				 &pte, NULL, 0);
+	kvm_pte_t new = ctx->old & ~KVM_PTE_LEAF_ATTR_LO_S2_AF;
+	struct stage2_age_data *data = ctx->arg;
+
+	if (!kvm_pte_valid(ctx->old) || new == ctx->old)
+		return 0;
+
+	data->young = true;
+
+	/*
+	 * stage2_age_walker() is always called while holding the MMU lock for
+	 * write, so this will always succeed. Nonetheless, this deliberately
+	 * follows the race detection pattern of the other stage-2 walkers in
+	 * case the locking mechanics of the MMU notifiers is ever changed.
+	 */
+	if (data->mkold && !stage2_try_set_pte(ctx, new))
+		return -EAGAIN;
+
 	/*
 	 * "But where's the TLBI?!", you scream.
 	 * "Over in the core code", I sigh.
 	 *
 	 * See the '->clear_flush_young()' callback on the KVM mmu notifier.
 	 */
-	return pte;
+	return 0;
 }
 
-bool kvm_pgtable_stage2_is_young(struct kvm_pgtable *pgt, u64 addr)
+bool kvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr,
+					 u64 size, bool mkold)
 {
-	kvm_pte_t pte = 0;
-	stage2_update_leaf_attrs(pgt, addr, 1, 0, 0, &pte, NULL, 0);
-	return pte & KVM_PTE_LEAF_ATTR_LO_S2_AF;
+	struct stage2_age_data data = {
+		.mkold		= mkold,
+	};
+	struct kvm_pgtable_walker walker = {
+		.cb		= stage2_age_walker,
+		.arg		= &data,
+		.flags		= KVM_PGTABLE_WALK_LEAF,
+	};
+
+	WARN_ON(kvm_pgtable_walk(pgt, addr, size, &walker));
+	return data.young;
 }
 
 int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1756,27 +1756,25 @@ bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 {
 	u64 size = (range->end - range->start) << PAGE_SHIFT;
-	kvm_pte_t kpte;
-	pte_t pte;
 
 	if (!kvm->arch.mmu.pgt)
 		return false;
 
-	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
-
-	kpte = kvm_pgtable_stage2_mkold(kvm->arch.mmu.pgt,
-					range->start << PAGE_SHIFT);
-	pte = __pte(kpte);
-	return pte_valid(pte) && pte_young(pte);
+	return kvm_pgtable_stage2_test_clear_young(kvm->arch.mmu.pgt,
+						   range->start << PAGE_SHIFT,
+						   size, true);
 }
 
 bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 {
+	u64 size = (range->end - range->start) << PAGE_SHIFT;
+
 	if (!kvm->arch.mmu.pgt)
 		return false;
 
-	return kvm_pgtable_stage2_is_young(kvm->arch.mmu.pgt,
-					   range->start << PAGE_SHIFT);
+	return kvm_pgtable_stage2_test_clear_young(kvm->arch.mmu.pgt,
+						   range->start << PAGE_SHIFT,
+						   size, false);
 }
 
 phys_addr_t kvm_mmu_get_httbr(void)