Commit fa002e8e authored by Oliver Upton, committed by Marc Zyngier

KVM: arm64: Don't pass kvm_pgtable through kvm_pgtable_walk_data

In order to tear down page tables from outside the context of a
kvm_pgtable (such as from an RCU callback), stop passing the struct
kvm_pgtable pointer through kvm_pgtable_walk_data; pass it explicitly
to the walker functions instead.

No functional change intended.
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
Reviewed-by: Ben Gardon <bgardon@google.com>
Reviewed-by: Gavin Shan <gshan@redhat.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20221107215644.1895162-5-oliver.upton@linux.dev
parent 2a611c7f
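
Before the diff, a minimal, self-contained C sketch of the pattern being applied, using illustrative stand-in names rather than the kernel's own types: the walk context stops carrying a back-pointer to the table being walked, and the table becomes an explicit argument, so a caller that never had a live table object (an RCU callback, say) can still drive a walk.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel types; not the real KVM structures. */
struct pgtable {
        uint32_t ia_bits;
        uint32_t start_level;
};

struct walk_data {
        /*
         * Before the patch, a 'struct pgtable *pgt' member lived here,
         * tying every walk to a live table object.
         */
        uint64_t addr;
        uint64_t end;
};

/*
 * After the patch: the table is an explicit parameter, so the walk
 * context stays meaningful even for callers that never had a table
 * object to stash in it.
 */
static int walk(const struct pgtable *pgt, struct walk_data *data)
{
        uint64_t limit = 1ULL << pgt->ia_bits;

        if (data->addr > limit || data->end > limit)
                return -1;

        printf("walk [%#jx, %#jx) from level %u\n",
               (uintmax_t)data->addr, (uintmax_t)data->end, pgt->start_level);
        return 0;
}

int main(void)
{
        struct pgtable pgt = { .ia_bits = 48, .start_level = 0 };
        struct walk_data data = { .addr = 0x1000, .end = 0x2000 };

        return walk(&pgt, &data);
}

The kernel change below is this same signature shuffle applied to _kvm_pgtable_walk() and kvm_pgd_page_idx().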
arch/arm64/kvm/hyp/pgtable.c
@@ -50,7 +50,6 @@
 #define KVM_MAX_OWNER_ID		1
 
 struct kvm_pgtable_walk_data {
-	struct kvm_pgtable		*pgt;
 	struct kvm_pgtable_walker	*walker;
 
 	u64				addr;
@@ -88,7 +87,7 @@ static u32 kvm_pgtable_idx(struct kvm_pgtable_walk_data *data, u32 level)
 	return (data->addr >> shift) & mask;
 }
 
-static u32 __kvm_pgd_page_idx(struct kvm_pgtable *pgt, u64 addr)
+static u32 kvm_pgd_page_idx(struct kvm_pgtable *pgt, u64 addr)
 {
 	u64 shift = kvm_granule_shift(pgt->start_level - 1); /* May underflow */
 	u64 mask = BIT(pgt->ia_bits) - 1;
@@ -96,11 +95,6 @@ static u32 __kvm_pgd_page_idx(struct kvm_pgtable *pgt, u64 addr)
 	return (addr & mask) >> shift;
 }
 
-static u32 kvm_pgd_page_idx(struct kvm_pgtable_walk_data *data)
-{
-	return __kvm_pgd_page_idx(data->pgt, data->addr);
-}
-
 static u32 kvm_pgd_pages(u32 ia_bits, u32 start_level)
 {
 	struct kvm_pgtable pgt = {
@@ -108,7 +102,7 @@ static u32 kvm_pgd_pages(u32 ia_bits, u32 start_level)
 		.start_level	= start_level,
 	};
 
-	return __kvm_pgd_page_idx(&pgt, -1ULL) + 1;
+	return kvm_pgd_page_idx(&pgt, -1ULL) + 1;
 }
 
 static bool kvm_pte_table(kvm_pte_t pte, u32 level)
@@ -255,11 +249,10 @@ static int __kvm_pgtable_walk(struct kvm_pgtable_walk_data *data,
 	return ret;
 }
 
-static int _kvm_pgtable_walk(struct kvm_pgtable_walk_data *data)
+static int _kvm_pgtable_walk(struct kvm_pgtable *pgt, struct kvm_pgtable_walk_data *data)
 {
 	u32 idx;
 	int ret = 0;
-	struct kvm_pgtable *pgt = data->pgt;
 	u64 limit = BIT(pgt->ia_bits);
 
 	if (data->addr > limit || data->end > limit)
@@ -268,7 +261,7 @@ static int _kvm_pgtable_walk(struct kvm_pgtable_walk_data *data)
 	if (!pgt->pgd)
 		return -EINVAL;
 
-	for (idx = kvm_pgd_page_idx(data); data->addr < data->end; ++idx) {
+	for (idx = kvm_pgd_page_idx(pgt, data->addr); data->addr < data->end; ++idx) {
 		kvm_pte_t *ptep = &pgt->pgd[idx * PTRS_PER_PTE];
 
 		ret = __kvm_pgtable_walk(data, pgt->mm_ops, ptep, pgt->start_level);
@@ -283,13 +276,12 @@ int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
 		     struct kvm_pgtable_walker *walker)
 {
 	struct kvm_pgtable_walk_data walk_data = {
-		.pgt	= pgt,
 		.addr	= ALIGN_DOWN(addr, PAGE_SIZE),
 		.end	= PAGE_ALIGN(walk_data.addr + size),
 		.walker	= walker,
 	};
 
-	return _kvm_pgtable_walk(&walk_data);
+	return _kvm_pgtable_walk(pgt, &walk_data);
 }
 
 struct leaf_walk_data {
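
For a sense of why the decoupling matters, here is a hedged, self-contained sketch of the kind of teardown this enables: once the walk machinery no longer dereferences a kvm_pgtable, a subtree can be freed given only its PTE page and level. All names below are hypothetical illustrations, not the kernel API.

#include <stdint.h>
#include <stdlib.h>

#define PTES_PER_TABLE	8	/* tiny, for illustration */
#define PTE_VALID	1ULL

typedef uint64_t pte_t;

/* Hypothetical helpers standing in for table-walking predicates. */
static int pte_is_table(pte_t pte, uint32_t level)
{
	return (pte & PTE_VALID) && level < 2;
}

static pte_t *pte_follow(pte_t pte)
{
	return (pte_t *)(uintptr_t)(pte & ~PTE_VALID);
}

/*
 * Tear down a subtree given only its PTE page and level -- no
 * enclosing table object required, which is the property a
 * deferred-teardown caller needs.
 */
static void free_subtree(pte_t *ptep, uint32_t level)
{
	for (int i = 0; i < PTES_PER_TABLE; i++)
		if (pte_is_table(ptep[i], level))
			free_subtree(pte_follow(ptep[i]), level + 1);
	free(ptep);
}

int main(void)
{
	pte_t *root = calloc(PTES_PER_TABLE, sizeof(pte_t));
	pte_t *child = calloc(PTES_PER_TABLE, sizeof(pte_t));

	root[0] = (pte_t)(uintptr_t)child | PTE_VALID;
	free_subtree(root, 0);	/* frees child, then root */
	return 0;
}

An RCU callback receives exactly this kind of minimal context, a pointer and little else, which is why the back-pointer in kvm_pgtable_walk_data had to go.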