Commit e37f37a0 authored by Quentin Perret, committed by Marc Zyngier

KVM: arm64: Make memcache anonymous in pgtable allocator

The current stage2 page-table allocator uses a memcache to get
pre-allocated pages when it needs any. To allow re-using this code at
EL2, which uses a concept of memory pools, make the memcache argument of
kvm_pgtable_stage2_map() anonymous, and let the mm_ops zalloc_page()
callbacks use it the way they need to.
Acked-by: Will Deacon <will@kernel.org>
Signed-off-by: Quentin Perret <qperret@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20210319100146.1149909-26-qperret@google.com
parent 159b859b
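
With the argument reduced to a void *, each set of mm_ops decides for itself what the cookie passed to kvm_pgtable_stage2_map() actually is. As a rough sketch (the callback and pool names below are illustrative, not the ones introduced by this series): a host-side zalloc_page() callback can cast the cookie back to a struct kvm_mmu_memory_cache, while an EL2 implementation could instead treat it as a hypervisor memory pool:

	/* Host at EL1: the anonymous cookie is still a memcache. */
	static void *host_zalloc_page(void *arg)
	{
		struct kvm_mmu_memory_cache *mc = arg;

		/* Cache pages are pre-allocated and zeroed, so hand one out as-is. */
		return kvm_mmu_memory_cache_alloc(mc);
	}

	/* EL2: the same cookie could instead describe a hyp memory pool. */
	static void *hyp_zalloc_page(void *arg)
	{
		struct hyp_pool *pool = arg;		/* illustrative pool type */

		return hyp_alloc_pages(pool, 0);	/* illustrative helper */
	}

Callers then simply pass whichever object their mm_ops expect as the mc argument of kvm_pgtable_stage2_map().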
arch/arm64/include/asm/kvm_pgtable.h

@@ -213,8 +213,8 @@ void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
  * @size:	Size of the mapping.
  * @phys:	Physical address of the memory to map.
  * @prot:	Permissions and attributes for the mapping.
- * @mc:		Cache of pre-allocated GFP_PGTABLE_USER memory from which to
- *		allocate page-table pages.
+ * @mc:		Cache of pre-allocated and zeroed memory from which to allocate
+ *		page-table pages.
  *
  * The offset of @addr within a page is ignored, @size is rounded-up to
  * the next page boundary and @phys is rounded-down to the previous page
@@ -236,7 +236,7 @@ void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
  */
 int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
 			   u64 phys, enum kvm_pgtable_prot prot,
-			   struct kvm_mmu_memory_cache *mc);
+			   void *mc);
 
 /**
  * kvm_pgtable_stage2_unmap() - Remove a mapping from a guest stage-2 page-table.

arch/arm64/kvm/hyp/pgtable.c

@@ -446,7 +446,7 @@ struct stage2_map_data {
 	kvm_pte_t			*anchor;
 
 	struct kvm_s2_mmu		*mmu;
-	struct kvm_mmu_memory_cache	*memcache;
+	void				*memcache;
 
 	struct kvm_pgtable_mm_ops	*mm_ops;
 };
@@ -670,7 +670,7 @@ static int stage2_map_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
 
 int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
 			   u64 phys, enum kvm_pgtable_prot prot,
-			   struct kvm_mmu_memory_cache *mc)
+			   void *mc)
 {
 	int ret;
 	struct stage2_map_data map_data = {