Commit 5dc99b23 authored by Takuya Yoshikawa, committed by Avi Kivity

KVM: Avoid checking huge page mappings in get_dirty_log()

Huge page mappings are dropped when we enable dirty logging, and we never
create new ones until we stop the logging, so get_dirty_log() does not need
to check them.

For this, we introduce a new function which can be used to write protect a
range of PT level pages. Although we do not need to care about a range of
pages at this point, the following patch will need this feature to optimize
the write protection of many pages.
Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent a0ed4607
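
The new function (declared in the first hunk below) addresses pages with a slot-relative
offset plus a bitmask: each set bit in the mask selects one small (PT level) page starting
at gfn_offset. A minimal, standalone sketch of that addressing, using __builtin_ctzl in
place of the kernel's __ffs() (illustration only, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Print which slot-relative page offsets a (gfn_offset, mask) pair selects. */
static void show_protected_pages(uint64_t gfn_offset, unsigned long mask)
{
	while (mask) {
		/* the lowest set bit picks the next page to write protect */
		printf("protect slot page %llu\n",
		       (unsigned long long)(gfn_offset + __builtin_ctzl(mask)));
		mask &= mask - 1;	/* clear that bit and continue */
	}
}

int main(void)
{
	/* e.g. pages 65, 67 and 71 of the slot: the window starts at 64 */
	show_protected_pages(64, (1UL << 1) | (1UL << 3) | (1UL << 7));
	return 0;
}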
@@ -712,8 +712,9 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
 int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
-int kvm_mmu_rmap_write_protect(struct kvm *kvm, u64 gfn,
-			       struct kvm_memory_slot *slot);
+void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
+				     struct kvm_memory_slot *slot,
+				     gfn_t gfn_offset, unsigned long mask);
 void kvm_mmu_zap_all(struct kvm *kvm);
 unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
@@ -1037,27 +1037,47 @@ static int __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp, int level
 	return write_protected;
 }
 
-int kvm_mmu_rmap_write_protect(struct kvm *kvm, u64 gfn,
-			       struct kvm_memory_slot *slot)
+/**
+ * kvm_mmu_write_protect_pt_masked - write protect selected PT level pages
+ * @kvm: kvm instance
+ * @slot: slot to protect
+ * @gfn_offset: start of the BITS_PER_LONG pages we care about
+ * @mask: indicates which pages we should protect
+ *
+ * Used when we do not need to care about huge page mappings: e.g. during dirty
+ * logging we do not have any such mappings.
+ */
+void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
+				     struct kvm_memory_slot *slot,
+				     gfn_t gfn_offset, unsigned long mask)
 {
 	unsigned long *rmapp;
-	int i, write_protected = 0;
 
-	for (i = PT_PAGE_TABLE_LEVEL;
-	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
-		rmapp = __gfn_to_rmap(gfn, i, slot);
-		write_protected |= __rmap_write_protect(kvm, rmapp, i);
-	}
+	while (mask) {
+		rmapp = &slot->rmap[gfn_offset + __ffs(mask)];
+		__rmap_write_protect(kvm, rmapp, PT_PAGE_TABLE_LEVEL);
 
-	return write_protected;
+		/* clear the first set bit */
+		mask &= mask - 1;
+	}
 }
 
 static int rmap_write_protect(struct kvm *kvm, u64 gfn)
 {
 	struct kvm_memory_slot *slot;
+	unsigned long *rmapp;
+	int i;
+	int write_protected = 0;
 
 	slot = gfn_to_memslot(kvm, gfn);
-	return kvm_mmu_rmap_write_protect(kvm, gfn, slot);
+
+	for (i = PT_PAGE_TABLE_LEVEL;
+	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
+		rmapp = __gfn_to_rmap(gfn, i, slot);
+		write_protected |= __rmap_write_protect(kvm, rmapp, i);
+	}
+
+	return write_protected;
 }
 
 static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
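
As a usage illustration (a hypothetical caller, not part of this patch), write protecting a
few specific small pages of a memslot now takes a single call; gfn_offset is relative to the
slot (it indexes slot->rmap directly) and each mask bit selects one page:

	/* assumes a valid kvm instance and memslot pointer are already in hand */
	unsigned long mask = (1UL << 2) | (1UL << 5) | (1UL << 9);

	/* write protects slot pages 2, 5 and 9 at PT_PAGE_TABLE_LEVEL only */
	kvm_mmu_write_protect_pt_masked(kvm, slot, 0, mask);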
@@ -3095,13 +3095,11 @@ static void write_protect_slot(struct kvm *kvm,
 
 	/* Not many dirty pages compared to # of shadow pages. */
 	if (nr_dirty_pages < kvm->arch.n_used_mmu_pages) {
-		unsigned long gfn_offset;
+		gfn_t offset;
 
-		for_each_set_bit(gfn_offset, dirty_bitmap, memslot->npages) {
-			unsigned long gfn = memslot->base_gfn + gfn_offset;
+		for_each_set_bit(offset, dirty_bitmap, memslot->npages)
+			kvm_mmu_write_protect_pt_masked(kvm, memslot, offset, 1);
 
-			kvm_mmu_rmap_write_protect(kvm, gfn, memslot);
-		}
 		kvm_flush_remote_tlbs(kvm);
 	} else
 		kvm_mmu_slot_remove_write_access(kvm, memslot->id);
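
The commit message notes that a following patch will use this to optimize the write
protection of many pages: the mask parameter allows batching up to BITS_PER_LONG dirty
pages per call instead of passing a single-bit mask as done above. A rough sketch of
that shape (hypothetical helper, not the actual follow-up patch):

static void write_protect_dirty_bitmap(struct kvm *kvm,
				       struct kvm_memory_slot *memslot,
				       unsigned long *dirty_bitmap,
				       unsigned long nr_words)
{
	unsigned long i;

	/* one call per bitmap word that has any dirty bits set */
	for (i = 0; i < nr_words; ++i)
		if (dirty_bitmap[i])
			kvm_mmu_write_protect_pt_masked(kvm, memslot,
							i * BITS_PER_LONG,
							dirty_bitmap[i]);
}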