Commit 6c3f5bec authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm fixes from Paolo Bonzini:
 "ARM:

   - Correctly expose GICv3 support even if no irqchip is created so
     that userspace doesn't observe it changing pointlessly (fixing a
     regression with QEMU)

   - Don't issue a hypercall to set the id-mapped vectors when protected
     mode is enabled (fix for pKVM in combination with CPUs affected by
     Spectre-v3a)

  x86 (five one-liners, of which the most interesting two are):

   - a NULL pointer dereference on INVPCID executed with paging
     disabled, but only if KVM is using shadow paging

   - an incorrect bsearch comparison function which could truncate the
     result and apply PMU event filtering incorrectly. This one comes
     with a selftests update too"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: x86/mmu: fix NULL pointer dereference on guest INVPCID
  KVM: x86: hyper-v: fix type of valid_bank_mask
  KVM: Free new dirty bitmap if creating a new memslot fails
  KVM: eventfd: Fix false positive RCU usage warning
  selftests: kvm/x86: Verify the pmu event filter matches the correct event
  selftests: kvm/x86: Add the helper function create_pmu_event_filter
  kvm: x86/pmu: Fix the compare function used by the pmu event filter
  KVM: arm64: Don't hypercall before EL2 init
  KVM: arm64: vgic-v3: Consistently populate ID_AA64PFR0_EL1.GIC
  KVM: x86/mmu: Update number of zapped pages even if page list is stable
parents b3454ce0 9f46c187
@@ -1436,7 +1436,8 @@ static int kvm_init_vector_slots(void)
 	base = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
 	kvm_init_vector_slot(base, HYP_VECTOR_SPECTRE_DIRECT);
 
-	if (kvm_system_needs_idmapped_vectors() && !has_vhe()) {
+	if (kvm_system_needs_idmapped_vectors() &&
+	    !is_protected_kvm_enabled()) {
 		err = create_hyp_exec_mappings(__pa_symbol(__bp_harden_hyp_vecs),
 					       __BP_HARDEN_HYP_VECS_SZ, &base);
 		if (err)
@@ -1123,8 +1123,7 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
 		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2);
 		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3);
 		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3);
-		if (irqchip_in_kernel(vcpu->kvm) &&
-		    vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
+		if (kvm_vgic_global_state.type == VGIC_V3) {
 			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_GIC);
 			val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_GIC), 1);
 		}
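Editor's note on the register plumbing in the hunk above: ID_AA64PFR0_EL1.GIC is a 4-bit field at bits [27:24], and the two FIELD_PREP lines clear the field and then report the value 1 (GICv3 system registers supported). A minimal userspace sketch of the same mask-and-set idiom, with the GENMASK/FIELD_PREP helpers expanded by hand (the kernel gets them from <linux/bits.h> and <linux/bitfield.h>):

#include <stdint.h>
#include <stdio.h>

/* ID_AA64PFR0_EL1.GIC occupies bits [27:24]. */
#define GIC_SHIFT 24
#define GIC_MASK  (0xfull << GIC_SHIFT)

int main(void)
{
	uint64_t val = 0x1122334455667788ull;	/* arbitrary register value */

	val &= ~GIC_MASK;			/* clear the GIC field */
	val |= (1ull << GIC_SHIFT);		/* FIELD_PREP(GIC_MASK, 1) */

	printf("GIC field = %llu\n",
	       (unsigned long long)((val & GIC_MASK) >> GIC_SHIFT));
	return 0;
}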
@@ -1914,7 +1914,7 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
 	struct hv_send_ipi_ex send_ipi_ex;
 	struct hv_send_ipi send_ipi;
 	DECLARE_BITMAP(vcpu_mask, KVM_MAX_VCPUS);
-	unsigned long valid_bank_mask;
+	u64 valid_bank_mask;
 	u64 sparse_banks[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
 	u32 vector;
 	bool all_cpus;
@@ -1956,7 +1956,7 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
 		valid_bank_mask = send_ipi_ex.vp_set.valid_bank_mask;
 		all_cpus = send_ipi_ex.vp_set.format == HV_GENERIC_SET_ALL;
 
-		if (hc->var_cnt != bitmap_weight(&valid_bank_mask, 64))
+		if (hc->var_cnt != bitmap_weight((unsigned long *)&valid_bank_mask, 64))
 			return HV_STATUS_INVALID_HYPERCALL_INPUT;
 
 		if (all_cpus)
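Editor's note on the type change above: on 32-bit builds sizeof(unsigned long) is 4, so storing the 64-bit Hyper-V bank mask in an unsigned long drops its top half, and passing its address to bitmap_weight(..., 64) reads past the variable. A minimal userspace sketch of the truncation (illustrative only, not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t valid_bank_mask = 0xffffffff00000001ull;
	unsigned long narrow = valid_bank_mask;	/* lossy on ILP32 targets */

	printf("sizeof(unsigned long) = %zu, sizeof(uint64_t) = %zu\n",
	       sizeof(unsigned long), sizeof(uint64_t));
	printf("mask = 0x%llx, as unsigned long = 0x%lx\n",
	       (unsigned long long)valid_bank_mask, narrow);
	return 0;
}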
@@ -5470,14 +5470,16 @@ void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
 	uint i;
 
 	if (pcid == kvm_get_active_pcid(vcpu)) {
-		mmu->invlpg(vcpu, gva, mmu->root.hpa);
+		if (mmu->invlpg)
+			mmu->invlpg(vcpu, gva, mmu->root.hpa);
 		tlb_flush = true;
 	}
 
 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
 		if (VALID_PAGE(mmu->prev_roots[i].hpa) &&
 		    pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd)) {
-			mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
+			if (mmu->invlpg)
+				mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
 			tlb_flush = true;
 		}
 	}
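Editor's note: the shape of the fix above is a guard on a possibly-NULL function pointer. With shadow paging and CR0.PG=0 the MMU context installs no INVLPG handler, so mmu->invlpg is NULL and a guest INVPCID would call through it. A minimal sketch of the pattern, using hypothetical stand-in types rather than the kernel structures:

#include <stdio.h>
#include <stddef.h>

struct mmu {
	void (*invlpg)(unsigned long gva);	/* NULL when paging is off */
};

static void invpcid_one(struct mmu *mmu, unsigned long gva)
{
	if (mmu->invlpg)			/* the guard the fix adds */
		mmu->invlpg(gva);
}

int main(void)
{
	struct mmu nonpaging = { .invlpg = NULL };

	invpcid_one(&nonpaging, 0x1000);	/* no longer dereferences NULL */
	printf("survived\n");
	return 0;
}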
@@ -5665,6 +5667,7 @@ static void kvm_zap_obsolete_pages(struct kvm *kvm)
 {
 	struct kvm_mmu_page *sp, *node;
 	int nr_zapped, batch = 0;
+	bool unstable;
 
 restart:
 	list_for_each_entry_safe_reverse(sp, node,
@@ -5696,11 +5699,12 @@ static void kvm_zap_obsolete_pages(struct kvm *kvm)
 			goto restart;
 		}
 
-		if (__kvm_mmu_prepare_zap_page(kvm, sp,
-				&kvm->arch.zapped_obsolete_pages, &nr_zapped)) {
-			batch += nr_zapped;
+		unstable = __kvm_mmu_prepare_zap_page(kvm, sp,
+				&kvm->arch.zapped_obsolete_pages, &nr_zapped);
+		batch += nr_zapped;
+
+		if (unstable)
 			goto restart;
-		}
 	}
 
 	/*
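Editor's note on the restructuring above: batch was previously incremented only inside the if, i.e. only when __kvm_mmu_prepare_zap_page() reported the page list as unstable, so pages zapped on the stable path were never counted and the batching heuristic ran longer than intended. A toy sketch of the difference, with a hypothetical stand-in for the zap helper:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in: "zaps" one page, reports whether the list became unstable. */
static bool prepare_zap(int i, int *nr_zapped)
{
	*nr_zapped = 1;
	return i % 4 == 0;	/* pretend every 4th zap unsettles the list */
}

int main(void)
{
	int nr_zapped, buggy = 0, fixed = 0;

	for (int i = 1; i <= 10; i++) {
		bool unstable = prepare_zap(i, &nr_zapped);

		if (unstable)
			buggy += nr_zapped;	/* old code: counted only here */
		fixed += nr_zapped;		/* fix: count unconditionally */
	}
	printf("buggy batch = %d, fixed batch = %d\n", buggy, fixed);
	return 0;
}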
@@ -171,9 +171,12 @@ static bool pmc_resume_counter(struct kvm_pmc *pmc)
 	return true;
 }
 
-static int cmp_u64(const void *a, const void *b)
+static int cmp_u64(const void *pa, const void *pb)
 {
-	return *(__u64 *)a - *(__u64 *)b;
+	u64 a = *(u64 *)pa;
+	u64 b = *(u64 *)pb;
+
+	return (a > b) - (a < b);
 }
 
 void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
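Editor's note: the comparator fix above addresses the truncation bug called out in the merge message. Returning the u64 difference through an int keeps only the low 32 bits, so the sign can be wrong and bsearch() over the sorted PMU event filter can miss entries that are present. A standalone sketch of the failure (userspace demo, not the kernel code):

#include <stdint.h>
#include <stdio.h>

static int cmp_u64_broken(const void *pa, const void *pb)
{
	return *(const uint64_t *)pa - *(const uint64_t *)pb; /* truncates */
}

static int cmp_u64_fixed(const void *pa, const void *pb)
{
	uint64_t a = *(const uint64_t *)pa;
	uint64_t b = *(const uint64_t *)pb;

	return (a > b) - (a < b);
}

int main(void)
{
	uint64_t a = 0x100000000ull, b = 0;	/* differ only above bit 31 */

	printf("broken: %d, fixed: %d\n",
	       cmp_u64_broken(&a, &b), cmp_u64_fixed(&a, &b));
	/* broken reports 0 ("equal"); fixed reports 1 (a > b). */
	return 0;
}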
@@ -208,7 +208,7 @@ static bool sanity_check_pmu(struct kvm_vm *vm)
 	return success;
 }
 
-static struct kvm_pmu_event_filter *make_pmu_event_filter(uint32_t nevents)
+static struct kvm_pmu_event_filter *alloc_pmu_event_filter(uint32_t nevents)
 {
 	struct kvm_pmu_event_filter *f;
 	int size = sizeof(*f) + nevents * sizeof(f->events[0]);
@@ -220,19 +220,29 @@ static struct kvm_pmu_event_filter *make_pmu_event_filter(uint32_t nevents)
 	return f;
 }
 
-static struct kvm_pmu_event_filter *event_filter(uint32_t action)
+static struct kvm_pmu_event_filter *
+create_pmu_event_filter(const uint64_t event_list[],
+			int nevents, uint32_t action)
 {
 	struct kvm_pmu_event_filter *f;
 	int i;
 
-	f = make_pmu_event_filter(ARRAY_SIZE(event_list));
+	f = alloc_pmu_event_filter(nevents);
 	f->action = action;
-	for (i = 0; i < ARRAY_SIZE(event_list); i++)
+	for (i = 0; i < nevents; i++)
 		f->events[i] = event_list[i];
 
 	return f;
 }
 
+static struct kvm_pmu_event_filter *event_filter(uint32_t action)
+{
+	return create_pmu_event_filter(event_list,
+				       ARRAY_SIZE(event_list),
+				       action);
+}
+
 /*
  * Remove the first occurrence of 'event' (if any) from the filter's
  * event list.
@@ -271,6 +281,22 @@ static uint64_t test_with_filter(struct kvm_vm *vm,
 	return run_vm_to_sync(vm);
 }
 
+static void test_amd_deny_list(struct kvm_vm *vm)
+{
+	uint64_t event = EVENT(0x1C2, 0);
+	struct kvm_pmu_event_filter *f;
+	uint64_t count;
+
+	f = create_pmu_event_filter(&event, 1, KVM_PMU_EVENT_DENY);
+	count = test_with_filter(vm, f);
+
+	free(f);
+	if (count != NUM_BRANCHES)
+		pr_info("%s: Branch instructions retired = %lu (expected %u)\n",
+			__func__, count, NUM_BRANCHES);
+	TEST_ASSERT(count, "Allowed PMU event is not counting");
+}
+
 static void test_member_deny_list(struct kvm_vm *vm)
 {
 	struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_DENY);
@@ -453,6 +479,9 @@ int main(int argc, char *argv[])
 		exit(KSFT_SKIP);
 	}
 
+	if (use_amd_pmu())
+		test_amd_deny_list(vm);
+
 	test_without_filter(vm);
 	test_member_deny_list(vm);
 	test_member_allow_list(vm);
@@ -77,7 +77,8 @@ irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian)
 
 	idx = srcu_read_lock(&kvm->irq_srcu);
 
-	list_for_each_entry_rcu(irqfd, &resampler->list, resampler_link)
+	list_for_each_entry_srcu(irqfd, &resampler->list, resampler_link,
+				 srcu_read_lock_held(&kvm->irq_srcu))
 		eventfd_signal(irqfd->resamplefd, 1);
 
 	srcu_read_unlock(&kvm->irq_srcu, idx);
@@ -1560,7 +1560,7 @@ static int kvm_prepare_memory_region(struct kvm *kvm,
 	r = kvm_arch_prepare_memory_region(kvm, old, new, change);
 
 	/* Free the bitmap on failure if it was allocated above. */
-	if (r && new && new->dirty_bitmap && old && !old->dirty_bitmap)
+	if (r && new && new->dirty_bitmap && (!old || !old->dirty_bitmap))
 		kvm_destroy_dirty_bitmap(new);
 
 	return r;
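Editor's note on the condition change above: when a brand-new memslot is created, 'old' is NULL, so the previous check 'old && !old->dirty_bitmap' never fired and the freshly allocated bitmap leaked on failure; '!old || !old->dirty_bitmap' also covers the NULL case. A tiny sketch with hypothetical stand-in types (not the KVM structures):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct slot { unsigned long *dirty_bitmap; };

static bool should_free_old(const struct slot *old)
{
	return old && !old->dirty_bitmap;	/* misses old == NULL */
}

static bool should_free_fixed(const struct slot *old)
{
	return !old || !old->dirty_bitmap;	/* covers old == NULL too */
}

int main(void)
{
	printf("new slot (old == NULL): old check %d, fixed check %d\n",
	       should_free_old(NULL), should_free_fixed(NULL));
	return 0;
}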