Commit 2a49f61d authored by Sean Christopherson, committed by Paolo Bonzini

KVM: Ensure validity of memslot with respect to kvm_get_dirty_log()

Rework kvm_get_dirty_log() so that it "returns" the associated memslot
on success.  A future patch will rework memslot handling such that
id_to_memslot() can return NULL; returning the memslot makes it more
obvious that the validity of the memslot has been verified, i.e.
precludes the need to add validity checks in the arch code that are
technically unnecessary.
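
In miniature, the new contract looks like the sketch below. This is a
toy illustration of the out-parameter pattern, not the kernel code; the
names (toy_slot, toy_get_dirty_log) are hypothetical. The point is that
the lookup and every validity check live in one helper, and the caller
receives a pointer it may trust whenever the helper returns 0.

#include <errno.h>

struct toy_slot {
	unsigned long *dirty_bitmap;	/* NULL: dirty logging disabled */
};

/*
 * Toy model of the contract: validate the slot id, look the slot up,
 * and hand the validated pointer back through @out.  @out is NULL on
 * every error path, so a caller can never consume an unchecked slot.
 */
static int toy_get_dirty_log(struct toy_slot *slots, int nslots, int id,
			     struct toy_slot **out)
{
	*out = NULL;
	if (id < 0 || id >= nslots)
		return -EINVAL;		/* bad slot id */
	if (!slots[id].dirty_bitmap)
		return -ENOENT;		/* slot has no dirty bitmap */
	*out = &slots[id];
	return 0;			/* *out is guaranteed valid here */
}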

To maintain ordering in s390, move the call to kvm_arch_sync_dirty_log()
from s390's kvm_vm_ioctl_get_dirty_log() to the new kvm_get_dirty_log().
This is a nop for PPC, the only other arch that doesn't select
KVM_GENERIC_DIRTYLOG_READ_PROTECT, as its sync_dirty_log() is empty.
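
The ordering constraint is easy to see in a toy model (hypothetical
names, not the s390 code): the arch hook folds hardware dirty state
into the software bitmap, so it must run before the bitmap is read out,
and after the move it still does, just inside the common helper.

struct toy_vm {
	unsigned long dirty_bitmap;	/* software bitmap, one word */
	unsigned long hw_dirty;		/* e.g. s390 PGSTE dirty bits */
};

static void toy_arch_sync_dirty_log(struct toy_vm *vm)
{
	vm->dirty_bitmap |= vm->hw_dirty;	/* fold hardware state in */
	vm->hw_dirty = 0;
}

static unsigned long toy_common_get_dirty_log(struct toy_vm *vm)
{
	toy_arch_sync_dirty_log(vm);	/* the call this patch hoists in */
	return vm->dirty_bitmap;	/* only now safe to report */
}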

Ideally, moving the sync_dirty_log() call would be done in a separate
patch, but it can't be done in a follow-on patch because that would
temporarily break s390's ordering.  Making the move in a preparatory
patch would be functionally correct, but would create an odd scenario
where the moved sync_dirty_log() would operate on a "different" memslot
due to consuming the result of a different id_to_memslot().  The
memslot couldn't actually be different as slots_lock is held, but the
code is confusing enough as it is, i.e. moving sync_dirty_log() in this
patch is the lesser of all evils.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 0dff0846
arch/powerpc/kvm/book3s_pr.c
@@ -1884,7 +1884,6 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm,
 					 struct kvm_dirty_log *log)
 {
-	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;
 	struct kvm_vcpu *vcpu;
 	ulong ga, ga_end;
@@ -1894,15 +1893,12 @@ static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm,
 
 	mutex_lock(&kvm->slots_lock);
 
-	r = kvm_get_dirty_log(kvm, log, &is_dirty);
+	r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot);
 	if (r)
 		goto out;
 
 	/* If nothing is dirty, don't bother messing with page tables. */
 	if (is_dirty) {
-		slots = kvm_memslots(kvm);
-		memslot = id_to_memslot(slots, log->slot);
-
 		ga = memslot->base_gfn << PAGE_SHIFT;
 		ga_end = ga + (memslot->npages << PAGE_SHIFT);
...
arch/s390/kvm/kvm-s390.c
@@ -611,9 +611,8 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 {
 	int r;
 	unsigned long n;
-	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;
-	int is_dirty = 0;
+	int is_dirty;
 
 	if (kvm_is_ucontrol(kvm))
 		return -EINVAL;
@@ -624,14 +623,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 	if (log->slot >= KVM_USER_MEM_SLOTS)
 		goto out;
 
-	slots = kvm_memslots(kvm);
-	memslot = id_to_memslot(slots, log->slot);
-	r = -ENOENT;
-	if (!memslot->dirty_bitmap)
-		goto out;
-
-	kvm_arch_sync_dirty_log(kvm, memslot);
-
-	r = kvm_get_dirty_log(kvm, log, &is_dirty);
+	r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot);
 	if (r)
 		goto out;
...
include/linux/kvm_host.h
@@ -828,7 +828,7 @@ void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
 #else /* !CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log);
 int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
-		      int *is_dirty);
+		      int *is_dirty, struct kvm_memory_slot **memslot);
 #endif
 
 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
...
virt/kvm/kvm_main.c
@@ -1199,31 +1199,42 @@ static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
 }
 
 #ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
-int kvm_get_dirty_log(struct kvm *kvm,
-			struct kvm_dirty_log *log, int *is_dirty)
+/**
+ * kvm_get_dirty_log - get a snapshot of dirty pages
+ * @kvm:	pointer to kvm instance
+ * @log:	slot id and address to which we copy the log
+ * @is_dirty:	set to '1' if any dirty pages were found
+ * @memslot:	set to the associated memslot, always valid on success
+ */
+int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
+		      int *is_dirty, struct kvm_memory_slot **memslot)
 {
 	struct kvm_memslots *slots;
-	struct kvm_memory_slot *memslot;
 	int i, as_id, id;
 	unsigned long n;
 	unsigned long any = 0;
 
+	*memslot = NULL;
+	*is_dirty = 0;
+
 	as_id = log->slot >> 16;
 	id = (u16)log->slot;
 	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
 		return -EINVAL;
 
 	slots = __kvm_memslots(kvm, as_id);
-	memslot = id_to_memslot(slots, id);
-	if (!memslot->dirty_bitmap)
+	*memslot = id_to_memslot(slots, id);
+	if (!(*memslot)->dirty_bitmap)
 		return -ENOENT;
 
-	n = kvm_dirty_bitmap_bytes(memslot);
+	kvm_arch_sync_dirty_log(kvm, *memslot);
+
+	n = kvm_dirty_bitmap_bytes(*memslot);
 	for (i = 0; !any && i < n/sizeof(long); ++i)
-		any = memslot->dirty_bitmap[i];
+		any = (*memslot)->dirty_bitmap[i];
 
-	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
+	if (copy_to_user(log->dirty_bitmap, (*memslot)->dirty_bitmap, n))
 		return -EFAULT;
 
 	if (any)
...
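
For completeness, a caller under the new contract reduces to the shape
below; a minimal sketch with a hypothetical use_slot() consumer,
mirroring what the PPC and s390 hunks above do for real.

/* Hypothetical caller; memslot is NULL on error, validated on success. */
static void use_slot(struct kvm_memory_slot *memslot);

static int toy_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	int r, is_dirty;

	r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot);
	if (r)
		return r;		/* no slot to re-check or clean up */

	if (is_dirty)
		use_slot(memslot);	/* no second id_to_memslot() needed */
	return 0;
}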