Commit 74b566e6 authored by Junaid Shahid, committed by Paolo Bonzini

kvm: x86: Refactor mmu_free_roots()

Extract the logic to free a root page into a separate function, to avoid
code duplication in mmu_free_roots(). Also change it to an exported
function, i.e. kvm_mmu_free_roots().
Signed-off-by: Junaid Shahid <junaids@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent a780a3ea
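
The diff below does this in two steps: the per-root bookkeeping (looking up the shadow page, dropping root_count, queueing invalid pages for zapping) moves into a new helper, mmu_free_root_page(), and the exported kvm_mmu_free_roots() becomes a thin caller that chooses between the single shadow root and the four PAE roots. For orientation, here is a minimal, self-contained sketch of that shape; the types and names below (fake_mmu, free_root_page, free_roots) are illustrative stand-ins, not the kernel API:

/*
 * Illustrative sketch only: stand-in types, no locking, no zap lists.
 * The point is the shape of the refactoring, not the kernel details.
 */
#include <stdio.h>

#define INVALID_PAGE 0UL

struct fake_mmu {
	unsigned long root_hpa;
	unsigned long pae_root[4];
};

/* The duplicated "free one root" logic, pulled out into a single helper. */
static void free_root_page(unsigned long *root_hpa)
{
	if (*root_hpa == INVALID_PAGE)
		return;
	printf("freeing root %#lx\n", *root_hpa);
	*root_hpa = INVALID_PAGE;
}

/* The caller now only decides which roots to hand to the helper. */
static void free_roots(struct fake_mmu *mmu, int single_root)
{
	int i;

	if (single_root) {
		free_root_page(&mmu->root_hpa);
	} else {
		for (i = 0; i < 4; ++i)
			free_root_page(&mmu->pae_root[i]);
		mmu->root_hpa = INVALID_PAGE;
	}
}

int main(void)
{
	struct fake_mmu mmu = {
		.root_hpa = 0x1000,
		.pae_root = { 0x2000, 0, 0x3000, 0x4000 },
	};

	free_roots(&mmu, 0);	/* PAE-style path: frees each valid pae_root[i] */
	free_roots(&mmu, 0);	/* second call is a no-op: everything is INVALID_PAGE */
	return 0;
}
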
@@ -1277,6 +1277,7 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
 int kvm_mmu_load(struct kvm_vcpu *vcpu);
 void kvm_mmu_unload(struct kvm_vcpu *vcpu);
 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
+void kvm_mmu_free_roots(struct kvm_vcpu *vcpu);
 gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
 			   struct x86_exception *exception);
 gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
@@ -222,7 +222,6 @@ static const u64 shadow_acc_track_saved_bits_mask = PT64_EPT_READABLE_MASK |
 static const u64 shadow_acc_track_saved_bits_shift = PT64_SECOND_AVAIL_BITS_SHIFT;
 
 static void mmu_spte_set(u64 *sptep, u64 spte);
-static void mmu_free_roots(struct kvm_vcpu *vcpu);
 
 void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value)
 {
@@ -3342,51 +3341,48 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
 	return RET_PF_RETRY;
 }
 
-static void mmu_free_roots(struct kvm_vcpu *vcpu)
+static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
+			       struct list_head *invalid_list)
 {
-	int i;
 	struct kvm_mmu_page *sp;
-	LIST_HEAD(invalid_list);
 
-	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+	if (!VALID_PAGE(*root_hpa))
 		return;
 
-	if (vcpu->arch.mmu.shadow_root_level >= PT64_ROOT_4LEVEL &&
-	    (vcpu->arch.mmu.root_level >= PT64_ROOT_4LEVEL ||
-	     vcpu->arch.mmu.direct_map)) {
-		hpa_t root = vcpu->arch.mmu.root_hpa;
+	sp = page_header(*root_hpa & PT64_BASE_ADDR_MASK);
+	--sp->root_count;
+	if (!sp->root_count && sp->role.invalid)
+		kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
 
-		spin_lock(&vcpu->kvm->mmu_lock);
-		sp = page_header(root);
-		--sp->root_count;
-		if (!sp->root_count && sp->role.invalid) {
-			kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
-			kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
-		}
-		spin_unlock(&vcpu->kvm->mmu_lock);
-		vcpu->arch.mmu.root_hpa = INVALID_PAGE;
+	*root_hpa = INVALID_PAGE;
+}
+
+void kvm_mmu_free_roots(struct kvm_vcpu *vcpu)
+{
+	int i;
+	LIST_HEAD(invalid_list);
+	struct kvm_mmu *mmu = &vcpu->arch.mmu;
+
+	if (!VALID_PAGE(mmu->root_hpa))
 		return;
-	}
 
 	spin_lock(&vcpu->kvm->mmu_lock);
-	for (i = 0; i < 4; ++i) {
-		hpa_t root = vcpu->arch.mmu.pae_root[i];
 
-		if (root) {
-			root &= PT64_BASE_ADDR_MASK;
-			sp = page_header(root);
-			--sp->root_count;
-			if (!sp->root_count && sp->role.invalid)
-				kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
-							 &invalid_list);
-		}
-		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
+	if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
+	    (mmu->root_level >= PT64_ROOT_4LEVEL || mmu->direct_map)) {
+		mmu_free_root_page(vcpu->kvm, &mmu->root_hpa, &invalid_list);
+	} else {
+		for (i = 0; i < 4; ++i)
+			if (mmu->pae_root[i] != 0)
+				mmu_free_root_page(vcpu->kvm, &mmu->pae_root[i],
						   &invalid_list);
+		mmu->root_hpa = INVALID_PAGE;
 	}
+
 	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
 	spin_unlock(&vcpu->kvm->mmu_lock);
-	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
 }
+EXPORT_SYMBOL_GPL(kvm_mmu_free_roots);
 
 static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
 {
@@ -3950,7 +3946,7 @@ static void nonpaging_init_context(struct kvm_vcpu *vcpu,
 
 void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu)
 {
-	mmu_free_roots(vcpu);
+	kvm_mmu_free_roots(vcpu);
 }
 
 static unsigned long get_cr3(struct kvm_vcpu *vcpu)
@@ -4663,7 +4659,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_load);
 
 void kvm_mmu_unload(struct kvm_vcpu *vcpu)
 {
-	mmu_free_roots(vcpu);
+	kvm_mmu_free_roots(vcpu);
 	WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa));
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_unload);