
Commit 7bb44401 authored by Takuya Yoshikawa, committed by Greg Kroah-Hartman

KVM: fix the handling of dirty bitmaps to avoid overflows

(Cherry-picked from commit 87bf6e7d)

Int is not long enough to store the size of a dirty bitmap.

This patch fixes this problem with the introduction of a wrapper
function to calculate the sizes of dirty bitmaps.
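
For illustration only (not part of the patch), a minimal userspace sketch of the truncation: ALIGN and BITS_PER_LONG are simplified stand-ins for the kernel macros, and the slot size is a deliberately extreme hypothetical.

    #include <stdio.h>

    #define BITS_PER_LONG (8 * sizeof(unsigned long))
    #define ALIGN(x, a)   (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
            /* hypothetical slot large enough that the bitmap byte
             * count exceeds INT_MAX: 2^35 pages -> 2^32 bytes */
            unsigned long npages = 1UL << 35;
            unsigned long bytes  = ALIGN(npages, BITS_PER_LONG) / 8;
            int truncated = (int)bytes;   /* wraps to 0 on LP64 */

            /* a memset(bitmap, 0, truncated) would clear nothing */
            printf("bytes=%lu truncated=%d\n", bytes, truncated);
            return 0;
    }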

Note: in mark_page_dirty(), we have to consider the fact that
  __set_bit() takes the offset as int, not long.
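
For illustration only, a userspace analogue of the resulting mark_page_dirty() logic (with a plain, non-atomic stand-in for the kernel bit helpers) showing how splitting rel_gfn keeps the int offset argument in range:

    #define BITS_PER_LONG (8 * sizeof(unsigned long))

    static void mark_rel_gfn_dirty(unsigned long *dirty_bitmap,
                                   unsigned long rel_gfn)
    {
            /* index the containing word with long arithmetic... */
            unsigned long *p = dirty_bitmap + rel_gfn / BITS_PER_LONG;
            /* ...so the bit offset is always 0..BITS_PER_LONG-1
             * and safely fits in an int */
            int offset = rel_gfn % BITS_PER_LONG;

            if (!(*p & (1UL << offset)))    /* avoid RMW */
                    *p |= 1UL << offset;
    }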
Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
parent 3218dbfc
arch/ia64/kvm/kvm-ia64.c

@@ -1797,7 +1797,8 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
 {
         struct kvm_memory_slot *memslot;
         int r, i;
-        long n, base;
+        long base;
+        unsigned long n;
         unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base +
                         offsetof(struct kvm_vm_data, kvm_mem_dirty_log));
@@ -1810,7 +1811,7 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
         if (!memslot->dirty_bitmap)
                 goto out;

-        n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+        n = kvm_dirty_bitmap_bytes(memslot);
         base = memslot->base_gfn / BITS_PER_LONG;
         for (i = 0; i < n/sizeof(long); ++i) {
@@ -1826,7 +1827,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                 struct kvm_dirty_log *log)
 {
         int r;
-        int n;
+        unsigned long n;
         struct kvm_memory_slot *memslot;
         int is_dirty = 0;
@@ -1844,7 +1845,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
         if (is_dirty) {
                 kvm_flush_remote_tlbs(kvm);
                 memslot = &kvm->memslots[log->slot];
-                n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+                n = kvm_dirty_bitmap_bytes(memslot);
                 memset(memslot->dirty_bitmap, 0, n);
         }
         r = 0;

arch/x86/kvm/x86.c

@@ -2133,7 +2133,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                 struct kvm_dirty_log *log)
 {
         int r;
-        int n;
+        unsigned long n;
         struct kvm_memory_slot *memslot;
         int is_dirty = 0;
@@ -2149,7 +2149,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                 kvm_mmu_slot_remove_write_access(kvm, log->slot);
                 spin_unlock(&kvm->mmu_lock);
                 memslot = &kvm->memslots[log->slot];
-                n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+                n = kvm_dirty_bitmap_bytes(memslot);
                 memset(memslot->dirty_bitmap, 0, n);
         }
         r = 0;

include/linux/kvm_host.h

@@ -116,6 +116,11 @@ struct kvm_memory_slot {
         int user_alloc;
 };

+static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
+{
+        return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+}
+
 struct kvm_kernel_irq_routing_entry {
         u32 gsi;
         u32 type;

virt/kvm/kvm_main.c

@@ -1226,7 +1226,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
         /* Allocate page dirty bitmap if needed */
         if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
-                unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;
+                unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(&new);

                 new.dirty_bitmap = vmalloc(dirty_bytes);
                 if (!new.dirty_bitmap)
@@ -1309,7 +1309,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
 {
         struct kvm_memory_slot *memslot;
         int r, i;
-        int n;
+        unsigned long n;
         unsigned long any = 0;

         r = -EINVAL;
@@ -1321,7 +1321,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
         if (!memslot->dirty_bitmap)
                 goto out;

-        n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+        n = kvm_dirty_bitmap_bytes(memslot);

         for (i = 0; !any && i < n/sizeof(long); ++i)
                 any = memslot->dirty_bitmap[i];
@@ -1663,10 +1663,13 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
         memslot = gfn_to_memslot_unaliased(kvm, gfn);
         if (memslot && memslot->dirty_bitmap) {
                 unsigned long rel_gfn = gfn - memslot->base_gfn;
+                unsigned long *p = memslot->dirty_bitmap +
+                                        rel_gfn / BITS_PER_LONG;
+                int offset = rel_gfn % BITS_PER_LONG;

                 /* avoid RMW */
-                if (!test_bit(rel_gfn, memslot->dirty_bitmap))
-                        set_bit(rel_gfn, memslot->dirty_bitmap);
+                if (!test_bit(offset, p))
+                        set_bit(offset, p);
         }
 }