Commit afdad616 authored by Claudio Imbrenda, committed by Christian Borntraeger

KVM: s390: Fix storage attributes migration with memory slots

This is a fix for several issues that were found in the original code
for storage attributes migration.

Now no bitmap is allocated to keep track of dirty storage attributes;
the extra bits of the per-memslot bitmap that are always present anyway
are now used for this purpose.

The code has also been refactored a little to improve readability.

Fixes: 190df4a2 ("KVM: s390: CMMA tracking, ESSA emulation, migration mode")
Fixes: 4036e387 ("KVM: s390: ioctls to get and set guest storage attributes")
Acked-by: Janosch Frank <frankja@linux.vnet.ibm.com>
Signed-off-by: Claudio Imbrenda <imbrenda@linux.vnet.ibm.com>
Message-Id: <1525106005-13931-3-git-send-email-imbrenda@linux.vnet.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
parent 03133347
...@@ -793,12 +793,6 @@ struct kvm_s390_vsie { ...@@ -793,12 +793,6 @@ struct kvm_s390_vsie {
struct page *pages[KVM_MAX_VCPUS]; struct page *pages[KVM_MAX_VCPUS];
}; };
struct kvm_s390_migration_state {
unsigned long bitmap_size; /* in bits (number of guest pages) */
atomic64_t dirty_pages; /* number of dirty pages */
unsigned long *pgste_bitmap;
};
struct kvm_arch{ struct kvm_arch{
void *sca; void *sca;
int use_esca; int use_esca;
...@@ -828,7 +822,8 @@ struct kvm_arch{ ...@@ -828,7 +822,8 @@ struct kvm_arch{
struct kvm_s390_vsie vsie; struct kvm_s390_vsie vsie;
u8 epdx; u8 epdx;
u64 epoch; u64 epoch;
struct kvm_s390_migration_state *migration_state; int migration_mode;
atomic64_t cmma_dirty_pages;
/* subset of available cpu features enabled by user space */ /* subset of available cpu features enabled by user space */
DECLARE_BITMAP(cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS); DECLARE_BITMAP(cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
struct kvm_s390_gisa *gisa; struct kvm_s390_gisa *gisa;
......
This diff is collapsed.
...@@ -987,9 +987,11 @@ static int handle_pfmf(struct kvm_vcpu *vcpu) ...@@ -987,9 +987,11 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
return 0; return 0;
} }
static inline int do_essa(struct kvm_vcpu *vcpu, const int orc) /*
* Must be called with relevant read locks held (kvm->mm->mmap_sem, kvm->srcu)
*/
static inline int __do_essa(struct kvm_vcpu *vcpu, const int orc)
{ {
struct kvm_s390_migration_state *ms = vcpu->kvm->arch.migration_state;
int r1, r2, nappended, entries; int r1, r2, nappended, entries;
unsigned long gfn, hva, res, pgstev, ptev; unsigned long gfn, hva, res, pgstev, ptev;
unsigned long *cbrlo; unsigned long *cbrlo;
...@@ -1039,10 +1041,12 @@ static inline int do_essa(struct kvm_vcpu *vcpu, const int orc) ...@@ -1039,10 +1041,12 @@ static inline int do_essa(struct kvm_vcpu *vcpu, const int orc)
cbrlo[entries] = gfn << PAGE_SHIFT; cbrlo[entries] = gfn << PAGE_SHIFT;
} }
if (orc && gfn < ms->bitmap_size) { if (orc) {
/* increment only if we are really flipping the bit to 1 */ struct kvm_memory_slot *ms = gfn_to_memslot(vcpu->kvm, gfn);
if (!test_and_set_bit(gfn, ms->pgste_bitmap))
atomic64_inc(&ms->dirty_pages); /* Increment only if we are really flipping the bit */
if (ms && !test_and_set_bit(gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
atomic64_inc(&vcpu->kvm->arch.cmma_dirty_pages);
} }
return nappended; return nappended;
...@@ -1071,7 +1075,7 @@ static int handle_essa(struct kvm_vcpu *vcpu) ...@@ -1071,7 +1075,7 @@ static int handle_essa(struct kvm_vcpu *vcpu)
: ESSA_SET_STABLE_IF_RESIDENT)) : ESSA_SET_STABLE_IF_RESIDENT))
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
if (likely(!vcpu->kvm->arch.migration_state)) { if (!vcpu->kvm->arch.migration_mode) {
/* /*
* CMMA is enabled in the KVM settings, but is disabled in * CMMA is enabled in the KVM settings, but is disabled in
* the SIE block and in the mm_context, and we are not doing * the SIE block and in the mm_context, and we are not doing
...@@ -1099,10 +1103,16 @@ static int handle_essa(struct kvm_vcpu *vcpu) ...@@ -1099,10 +1103,16 @@ static int handle_essa(struct kvm_vcpu *vcpu)
/* Retry the ESSA instruction */ /* Retry the ESSA instruction */
kvm_s390_retry_instr(vcpu); kvm_s390_retry_instr(vcpu);
} else { } else {
/* Account for the possible extra cbrl entry */ int srcu_idx;
i = do_essa(vcpu, orc);
down_read(&vcpu->kvm->mm->mmap_sem);
srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
i = __do_essa(vcpu, orc);
srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
up_read(&vcpu->kvm->mm->mmap_sem);
if (i < 0) if (i < 0)
return i; return i;
/* Account for the possible extra cbrl entry */
entries += i; entries += i;
} }
vcpu->arch.sie_block->cbrlo &= PAGE_MASK; /* reset nceo */ vcpu->arch.sie_block->cbrlo &= PAGE_MASK; /* reset nceo */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment