Commit 714b93da authored by Avi Kivity, committed by Linus Torvalds

[PATCH] KVM: MMU: Replace atomic allocations by preallocated objects

The mmu sometimes needs memory for reverse mapping and parent pte chains.
However, we can't allocate from within the mmu because of the atomic context.

So, move the allocations to a central place that can be executed before the
main mmu machinery, where we can bail out on failure before any damage is
done.

(Error handling is deferred for now, but the basic structure is there.)
Signed-off-by: Avi Kivity <avi@qumranet.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent f51234c2
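To make the commit message concrete: the fix introduces a small fixed-size cache of preallocated objects per vcpu. The cache is filled at a central point where sleeping allocations and failure reporting are still possible, and the mmu code later draws from it instead of allocating in atomic context. Below is a minimal sketch of the fill side, in kernel style; the helper name mmu_topup_memory_cache() and its body are assumptions for illustration (only mmu_topup_memory_caches(), plural, appears later in the visible diff).

/* Sketch only, not the commit's exact code; needs <linux/slab.h> for kzalloc(). */
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
				  size_t objsize)
{
	void *obj;

	/* Fill the fixed-size cache before the mmu proper runs, so a failed
	 * allocation can be reported before any damage is done. */
	while (cache->nobjs < KVM_NR_MEM_OBJS) {
		obj = kzalloc(objsize, GFP_KERNEL);
		if (!obj)
			return -ENOMEM;
		cache->objects[cache->nobjs++] = obj;
	}
	return 0;
}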
@@ -168,6 +168,17 @@ struct kvm_mmu {
 	u64 *pae_root;
 };
 
+#define KVM_NR_MEM_OBJS 20
+
+struct kvm_mmu_memory_cache {
+	int nobjs;
+	void *objects[KVM_NR_MEM_OBJS];
+};
+
+/*
+ * We don't want allocation failures within the mmu code, so we preallocate
+ * enough memory for a single page fault in a cache.
+ */
 struct kvm_guest_debug {
 	int enabled;
 	unsigned long bp[4];
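The structure above is just an array used as a small stack of spare objects. Inside the mmu, where allocation is not allowed, the consumer side would simply pop one of them; a hedged sketch follows, with the helper name mmu_memory_cache_alloc() assumed, since the consuming code is in the collapsed part of this diff.

/* Assumed helper name; illustrative only. */
static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
	/* Intended to run under the kvm lock: never allocates, only hands
	 * out an object that was stocked in advance. */
	BUG_ON(!mc->nobjs);
	return mc->objects[--mc->nobjs];
}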
@@ -239,6 +250,9 @@ struct kvm_vcpu {
 	struct kvm_mmu_page page_header_buf[KVM_NUM_MMU_PAGES];
 	struct kvm_mmu mmu;
 
+	struct kvm_mmu_memory_cache mmu_pte_chain_cache;
+	struct kvm_mmu_memory_cache mmu_rmap_desc_cache;
+
 	gfn_t last_pt_write_gfn;
 	int last_pt_write_count;
@@ -381,7 +395,7 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu);
 int kvm_mmu_setup(struct kvm_vcpu *vcpu);
 
 int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
-void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
+void kvm_mmu_slot_remove_write_access(struct kvm_vcpu *vcpu, int slot);
 
 hpa_t gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa);
 
 #define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
@@ -702,6 +702,13 @@ static int kvm_dev_ioctl_set_memory_region(struct kvm *kvm,
 	return r;
 }
 
+static void do_remove_write_access(struct kvm_vcpu *vcpu, int slot)
+{
+	spin_lock(&vcpu->kvm->lock);
+	kvm_mmu_slot_remove_write_access(vcpu, slot);
+	spin_unlock(&vcpu->kvm->lock);
+}
+
 /*
  * Get (and clear) the dirty memory log for a memory slot.
  */
@@ -711,6 +718,7 @@ static int kvm_dev_ioctl_get_dirty_log(struct kvm *kvm,
 	struct kvm_memory_slot *memslot;
 	int r, i;
 	int n;
+	int cleared;
 	unsigned long any = 0;
 
 	spin_lock(&kvm->lock);
@@ -741,15 +749,17 @@ static int kvm_dev_ioctl_get_dirty_log(struct kvm *kvm,
 	if (any) {
-		spin_lock(&kvm->lock);
-		kvm_mmu_slot_remove_write_access(kvm, log->slot);
-		spin_unlock(&kvm->lock);
-		memset(memslot->dirty_bitmap, 0, n);
+		cleared = 0;
 		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
 			struct kvm_vcpu *vcpu = vcpu_load(kvm, i);
 
 			if (!vcpu)
 				continue;
+			if (!cleared) {
+				do_remove_write_access(vcpu, log->slot);
+				memset(memslot->dirty_bitmap, 0, n);
+				cleared = 1;
+			}
 			kvm_arch_ops->tlb_flush(vcpu);
 			vcpu_put(vcpu);
 		}
[The diff for one further file is collapsed in this view and not shown.]
@@ -323,7 +323,7 @@ static int FNAME(fix_write_pf)(struct kvm_vcpu *vcpu,
 		mark_page_dirty(vcpu->kvm, gfn);
 		*shadow_ent |= PT_WRITABLE_MASK;
 		*guest_ent |= PT_DIRTY_MASK;
-		rmap_add(vcpu->kvm, shadow_ent);
+		rmap_add(vcpu, shadow_ent);
 
 		return 1;
 	}
@@ -353,6 +353,9 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 	int write_pt = 0;
 
 	pgprintk("%s: addr %lx err %x\n", __FUNCTION__, addr, error_code);
 
+	mmu_topup_memory_caches(vcpu);
+
 	/*
 	 * Look up the shadow pte for the faulting address.
 	 */