Commit d978b9cf authored by Quentin Perret, committed by Marc Zyngier

KVM: arm64: Remove hyp_pool pointer from struct hyp_page

Each struct hyp_page currently contains a pointer to a hyp_pool struct
where the page should be freed if its refcount reaches 0. However, this
information can always be inferred from the context in the EL2 code, so
drop the pointer to save a few bytes in the vmemmap.
Signed-off-by: Quentin Perret <qperret@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20210608114518.748712-6-qperret@google.com
parent 7c350ea3
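For context, the change boils down to two things: struct hyp_page loses its back-pointer, and every user of the page allocator passes the owning hyp_pool explicitly, typically through thin per-pool wrappers such as host_s2_get_page() and hpool_get_page() in the diff below. The following standalone C sketch is illustrative only and is not the commit's code: get_page(), hyp_page_before/hyp_page_after and the bare refcount bump are simplified stand-ins. It shows the wrapper pattern and why dropping the pointer saves space in the vmemmap on a typical 64-bit build.

#include <stdio.h>

struct hyp_pool { int dummy; };	/* placeholder; the real pool has a lock and free lists */

/* Before: each page carries a pointer to the pool that owns it. */
struct hyp_page_before {
	unsigned int refcount;
	unsigned int order;
	struct hyp_pool *pool;
};

/* After: the pool is no longer stored per page. */
struct hyp_page_after {
	unsigned int refcount;
	unsigned int order;
};

/* Core helper now takes the pool explicitly, as hyp_get_page() does after this patch. */
static void get_page(struct hyp_pool *pool, void *addr)
{
	struct hyp_page_after *p = addr;	/* stand-in for hyp_virt_to_page() */

	(void)pool;	/* the real code takes pool->lock around the refcount update */
	p->refcount++;
}

static struct hyp_pool host_s2_pool;

/* Per-pool wrapper with the (void *) callback shape used in the diff. */
static void host_s2_get_page(void *addr)
{
	get_page(&host_s2_pool, addr);
}

int main(void)
{
	struct hyp_page_after page = { 0, 0 };

	host_s2_get_page(&page);
	printf("per-page metadata: %zu bytes before, %zu bytes after\n",
	       sizeof(struct hyp_page_before), sizeof(struct hyp_page_after));
	return 0;
}

On an LP64 build this typically prints 16 and 8 bytes respectively, which is the per-page saving the commit message refers to.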
@@ -24,8 +24,8 @@ struct hyp_pool {

 /* Allocation */
 void *hyp_alloc_pages(struct hyp_pool *pool, unsigned int order);
-void hyp_get_page(void *addr);
-void hyp_put_page(void *addr);
+void hyp_get_page(struct hyp_pool *pool, void *addr);
+void hyp_put_page(struct hyp_pool *pool, void *addr);

 /* Used pages cannot be freed */
 int hyp_pool_init(struct hyp_pool *pool, u64 pfn, unsigned int nr_pages,
@@ -7,11 +7,9 @@

 #include <linux/types.h>

-struct hyp_pool;
 struct hyp_page {
 	unsigned int refcount;
 	unsigned int order;
-	struct hyp_pool *pool;
 };

 extern u64 __hyp_vmemmap;
@@ -43,6 +43,16 @@ static void *host_s2_zalloc_page(void *pool)
 	return hyp_alloc_pages(pool, 0);
 }

+static void host_s2_get_page(void *addr)
+{
+	hyp_get_page(&host_s2_pool, addr);
+}
+
+static void host_s2_put_page(void *addr)
+{
+	hyp_put_page(&host_s2_pool, addr);
+}
+
 static int prepare_s2_pool(void *pgt_pool_base)
 {
 	unsigned long nr_pages, pfn;
@@ -60,8 +70,8 @@ static int prepare_s2_pool(void *pgt_pool_base)
 		.phys_to_virt = hyp_phys_to_virt,
 		.virt_to_phys = hyp_virt_to_phys,
 		.page_count = hyp_page_count,
-		.get_page = hyp_get_page,
-		.put_page = hyp_put_page,
+		.get_page = host_s2_get_page,
+		.put_page = host_s2_put_page,
 	};

 	return 0;
@@ -174,20 +174,18 @@ static void __hyp_put_page(struct hyp_pool *pool, struct hyp_page *p)
  * section to guarantee transient states (e.g. a page with null refcount but
  * not yet attached to a free list) can't be observed by well-behaved readers.
  */
-void hyp_put_page(void *addr)
+void hyp_put_page(struct hyp_pool *pool, void *addr)
 {
 	struct hyp_page *p = hyp_virt_to_page(addr);
-	struct hyp_pool *pool = hyp_page_to_pool(p);

 	hyp_spin_lock(&pool->lock);
 	__hyp_put_page(pool, p);
 	hyp_spin_unlock(&pool->lock);
 }

-void hyp_get_page(void *addr)
+void hyp_get_page(struct hyp_pool *pool, void *addr)
 {
 	struct hyp_page *p = hyp_virt_to_page(addr);
-	struct hyp_pool *pool = hyp_page_to_pool(p);

 	hyp_spin_lock(&pool->lock);
 	hyp_page_ref_inc(p);
@@ -236,7 +234,6 @@ int hyp_pool_init(struct hyp_pool *pool, u64 pfn, unsigned int nr_pages,
 	/* Init the vmemmap portion */
 	p = hyp_phys_to_page(phys);
 	for (i = 0; i < nr_pages; i++) {
-		p[i].pool = pool;
 		p[i].order = 0;
 		hyp_set_page_refcounted(&p[i]);
 	}
@@ -137,6 +137,16 @@ static void *hyp_zalloc_hyp_page(void *arg)
 	return hyp_alloc_pages(&hpool, 0);
 }

+static void hpool_get_page(void *addr)
+{
+	hyp_get_page(&hpool, addr);
+}
+
+static void hpool_put_page(void *addr)
+{
+	hyp_put_page(&hpool, addr);
+}
+
 void __noreturn __pkvm_init_finalise(void)
 {
 	struct kvm_host_data *host_data = this_cpu_ptr(&kvm_host_data);
@@ -160,8 +170,8 @@ void __noreturn __pkvm_init_finalise(void)
 		.zalloc_page = hyp_zalloc_hyp_page,
 		.phys_to_virt = hyp_phys_to_virt,
 		.virt_to_phys = hyp_virt_to_phys,
-		.get_page = hyp_get_page,
-		.put_page = hyp_put_page,
+		.get_page = hpool_get_page,
+		.put_page = hpool_put_page,
 	};

 	pkvm_pgtable.mm_ops = &pkvm_pgtable_mm_ops;