Commit 49edf878 authored by Xiao Guangrong, committed by Avi Kivity

KVM: MMU: improve active sp audit

Both audit_rmap() and audit_write_protection() need to walk all active sps,
so we can do both checks in a single sp walk.
Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 2f4f3372
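The structural change in the diff below is the new sp_handler typedef plus
walk_all_active_sps(): instead of each audit routine iterating
kvm->arch.active_mmu_pages itself, the list is walked once and a handler is
applied to every shadow page, with audit_sp() bundling the individual checks.
The following user-space sketch shows the same callback-walk pattern in
isolation; all names in it (page_node, walk_all_pages, audit_page, and the
check_* helpers) are illustrative stand-ins, not kernel code.

/*
 * Minimal sketch of the pattern this commit introduces: walk a list
 * once and apply a function-pointer handler to every node.  A plain
 * singly-linked list stands in for the kernel's active_mmu_pages list.
 */
#include <stdio.h>

struct page_node {
	unsigned long gfn;
	int level;
	struct page_node *next;
};

typedef void (*page_handler)(struct page_node *p);

/* Analogue of walk_all_active_sps(): one traversal, handler per node. */
static void walk_all_pages(struct page_node *head, page_handler fn)
{
	struct page_node *p;

	for (p = head; p; p = p->next)
		fn(p);
}

static void check_level(struct page_node *p)
{
	if (p->level != 1)
		printf("gfn %lx: unexpected level %d\n", p->gfn, p->level);
}

static void check_gfn(struct page_node *p)
{
	if (p->gfn == 0)
		printf("page has zero gfn\n");
}

/*
 * Analogue of audit_sp(): bundle the per-page checks into one handler
 * so adding a new check never adds another list traversal.
 */
static void audit_page(struct page_node *p)
{
	check_level(p);
	check_gfn(p);
}

int main(void)
{
	struct page_node b = { .gfn = 0x2, .level = 2, .next = NULL };
	struct page_node a = { .gfn = 0x1, .level = 1, .next = &b };

	walk_all_pages(&a, audit_page);	/* one walk, all checks */
	return 0;
}

This is why the commit can delete the two separate list_for_each_entry()
loops in check_mappings_rmap() and audit_write_protection(): each function
shrinks to a per-page check, and a new check only needs a call added to
audit_sp().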
@@ -65,6 +65,16 @@ static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn)
 	return;
 }
 
+typedef void (*sp_handler) (struct kvm *kvm, struct kvm_mmu_page *sp);
+
+static void walk_all_active_sps(struct kvm *kvm, sp_handler fn)
+{
+	struct kvm_mmu_page *sp;
+
+	list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link)
+		fn(kvm, sp);
+}
+
 static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
 				gva_t va, int level)
 {
@@ -175,67 +185,59 @@ void audit_sptes_have_rmaps(struct kvm_vcpu *vcpu)
 	mmu_spte_walk(vcpu, inspect_spte_has_rmap);
 }
 
-static void check_mappings_rmap(struct kvm_vcpu *vcpu)
+static void check_mappings_rmap(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
-	struct kvm_mmu_page *sp;
 	int i;
 
-	list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
-		u64 *pt = sp->spt;
-
-		if (sp->role.level != PT_PAGE_TABLE_LEVEL)
-			continue;
+	if (sp->role.level != PT_PAGE_TABLE_LEVEL)
+		return;
 
-		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
-			if (!is_rmap_spte(pt[i]))
-				continue;
-
-			inspect_spte_has_rmap(vcpu->kvm, &pt[i]);
-		}
+	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
+		if (!is_rmap_spte(sp->spt[i]))
+			continue;
+
+		inspect_spte_has_rmap(kvm, sp->spt + i);
 	}
-
-	return;
 }
 
-static void audit_rmap(struct kvm_vcpu *vcpu)
+void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
-	check_mappings_rmap(vcpu);
-}
-
-static void audit_write_protection(struct kvm_vcpu *vcpu)
-{
-	struct kvm_mmu_page *sp;
 	struct kvm_memory_slot *slot;
 	unsigned long *rmapp;
 	u64 *spte;
 
-	list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
-		if (sp->role.direct)
-			continue;
-		if (sp->unsync)
-			continue;
-		if (sp->role.invalid)
-			continue;
+	if (sp->role.direct || sp->unsync || sp->role.invalid)
+		return;
 
-		slot = gfn_to_memslot(vcpu->kvm, sp->gfn);
-		rmapp = &slot->rmap[sp->gfn - slot->base_gfn];
+	slot = gfn_to_memslot(kvm, sp->gfn);
+	rmapp = &slot->rmap[sp->gfn - slot->base_gfn];
 
-		spte = rmap_next(vcpu->kvm, rmapp, NULL);
-		while (spte) {
-			if (is_writable_pte(*spte))
-				printk(KERN_ERR "%s: (%s) shadow page has "
-				"writable mappings: gfn %llx role %x\n",
-				__func__, audit_msg, sp->gfn,
-				sp->role.word);
-			spte = rmap_next(vcpu->kvm, rmapp, spte);
-		}
+	spte = rmap_next(kvm, rmapp, NULL);
+	while (spte) {
+		if (is_writable_pte(*spte))
+			printk(KERN_ERR "%s: (%s) shadow page has "
+			"writable mappings: gfn %llx role %x\n",
+			__func__, audit_msg, sp->gfn,
+			sp->role.word);
+		spte = rmap_next(kvm, rmapp, spte);
 	}
 }
 
+static void audit_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+	check_mappings_rmap(kvm, sp);
+	audit_write_protection(kvm, sp);
+}
+
+static void audit_all_active_sps(struct kvm *kvm)
+{
+	walk_all_active_sps(kvm, audit_sp);
+}
+
 static void kvm_mmu_audit(void *ignore, struct kvm_vcpu *vcpu, int audit_point)
 {
 	audit_msg = audit_point_name[audit_point];
-	audit_rmap(vcpu);
-	audit_write_protection(vcpu);
+	audit_all_active_sps(vcpu->kvm);
 	if (strcmp("pre pte write", audit_msg) != 0)
 		audit_mappings(vcpu);
 	audit_sptes_have_rmaps(vcpu);
...