Commit f67a46f4 authored by Anthony Liguori, committed by Avi Kivity

KVM: MMU: Clean up MMU functions to take struct kvm when appropriate

Some of the MMU functions take a struct kvm_vcpu even though they affect all
VCPUs.  This patch cleans up some of them to instead take a struct kvm.  This
makes things a bit clearer.

The main thing that was confusing me was whether certain functions need to be
called on all VCPUs.
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
parent 043405e1
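
For illustration, here is a minimal sketch of the pattern this patch applies, using simplified stand-in types rather than the kernel's real definitions (struct kvm and struct kvm_vcpu are reduced mock-ups here, and lookup_bucket/lookup_from_vcpu are hypothetical helpers, not functions from the patch): a function that touches only VM-wide state takes struct kvm directly, and per-vCPU callers pass vcpu->kvm at the call site.

/* Simplified stand-in types for illustration; not the kernel's definitions. */
struct kvm {
        unsigned long mmu_page_hash[16];        /* placeholder for per-VM MMU state */
};

struct kvm_vcpu {
        struct kvm *kvm;                        /* each vCPU points back to its VM */
};

/* Touches only VM-wide state, so it takes struct kvm rather than a vCPU. */
static unsigned long lookup_bucket(struct kvm *kvm, unsigned long gfn)
{
        return kvm->mmu_page_hash[gfn % 16];
}

/* Per-vCPU code simply passes vcpu->kvm at the call site, as the patch does. */
static unsigned long lookup_from_vcpu(struct kvm_vcpu *vcpu, unsigned long gfn)
{
        return lookup_bucket(vcpu->kvm, gfn);
}
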
@@ -606,7 +606,7 @@ static void mmu_page_remove_parent_pte(struct kvm_mmu_page *page,
                 BUG();
 }
 
-static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm_vcpu *vcpu,
+static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm,
                                                 gfn_t gfn)
 {
         unsigned index;
@@ -616,7 +616,7 @@ static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm_vcpu *vcpu,
         pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
         index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
-        bucket = &vcpu->kvm->mmu_page_hash[index];
+        bucket = &kvm->mmu_page_hash[index];
         hlist_for_each_entry(page, node, bucket, hash_link)
                 if (page->gfn == gfn && !page->role.metaphysical) {
                         pgprintk("%s: found role %x\n",
@@ -782,7 +782,7 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
         kvm->n_alloc_mmu_pages = kvm_nr_mmu_pages;
 }
 
-static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn)
+static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 {
         unsigned index;
         struct hlist_head *bucket;
@@ -793,25 +793,25 @@ static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn)
         pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
         r = 0;
         index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
-        bucket = &vcpu->kvm->mmu_page_hash[index];
+        bucket = &kvm->mmu_page_hash[index];
         hlist_for_each_entry_safe(page, node, n, bucket, hash_link)
                 if (page->gfn == gfn && !page->role.metaphysical) {
                         pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
                                  page->role.word);
-                        kvm_mmu_zap_page(vcpu->kvm, page);
+                        kvm_mmu_zap_page(kvm, page);
                         r = 1;
                 }
         return r;
 }
 
-static void mmu_unshadow(struct kvm_vcpu *vcpu, gfn_t gfn)
+static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
 {
         struct kvm_mmu_page *page;
 
-        while ((page = kvm_mmu_lookup_page(vcpu, gfn)) != NULL) {
+        while ((page = kvm_mmu_lookup_page(kvm, gfn)) != NULL) {
                 pgprintk("%s: zap %lx %x\n",
                          __FUNCTION__, gfn, page->role.word);
-                kvm_mmu_zap_page(vcpu->kvm, page);
+                kvm_mmu_zap_page(kvm, page);
         }
 }
@@ -1299,7 +1299,7 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 {
         gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
 
-        return kvm_mmu_unprotect_page(vcpu, gpa >> PAGE_SHIFT);
+        return kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
 }
 
 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
@@ -268,11 +268,11 @@ static void FNAME(set_pte_common)(struct kvm_vcpu *vcpu,
                 spte |= PT_WRITABLE_MASK;
                 if (user_fault) {
-                        mmu_unshadow(vcpu, gfn);
+                        mmu_unshadow(vcpu->kvm, gfn);
                         goto unshadowed;
                 }
 
-                shadow = kvm_mmu_lookup_page(vcpu, gfn);
+                shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
                 if (shadow) {
                         pgprintk("%s: found shadow page for %lx, marking ro\n",
                                  __FUNCTION__, gfn);