Commit dd03bcaa authored by Peter Xu, committed by Paolo Bonzini

KVM: X86: Force ASYNC_PF_PER_VCPU to be power of two

Forcing ASYNC_PF_PER_VCPU to be a power of two is easier than calling
roundup_pow_of_two() at every use site.  Enforce this by adding a
BUILD_BUG_ON() inside the hash function.

Moreover, async pf never allows more than ASYNC_PF_PER_VCPU pending
requests per vCPU anyway (see kvm_setup_async_pf()), so a
non-power-of-two value makes little sense: rounding it up would only
waste the extra hash-table entries.
Signed-off-by: Peter Xu <peterx@redhat.com>
Message-Id: <20200416155859.267366-1-peterx@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent c16312f4
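
To illustrate the point of the commit message, here is a minimal userspace
sketch (not kernel code) of the power-of-two trick: when the table size N
is a power of two, wrapping the probe index with "(key + 1) & (N - 1)"
gives the same result as a modulo, using a plain AND instead of a division.
N = 64 below mirrors the current value of ASYNC_PF_PER_VCPU.

/*
 * Sketch only: with a power-of-two table size N, masking with (N - 1)
 * wraps the probe index exactly like "% N" would.
 */
#include <assert.h>
#include <stdint.h>

#define N 64				/* must be a power of two */

static uint32_t next_probe(uint32_t key)
{
	/* Same wrap-around trick as kvm_async_pf_next_probe(). */
	return (key + 1) & (N - 1);
}

int main(void)
{
	uint32_t key;

	for (key = 0; key < N; key++)
		assert(next_probe(key) == (key + 1) % N);	/* mask == modulo */
	return 0;
}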
@@ -763,7 +763,7 @@ struct kvm_vcpu_arch {
 	struct {
 		bool halted;
-		gfn_t gfns[roundup_pow_of_two(ASYNC_PF_PER_VCPU)];
+		gfn_t gfns[ASYNC_PF_PER_VCPU];
 		struct gfn_to_hva_cache data;
 		u64 msr_val;
 		u32 id;
...
@@ -257,7 +257,7 @@ static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);
 static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
 {
 	int i;
 
-	for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU); i++)
+	for (i = 0; i < ASYNC_PF_PER_VCPU; i++)
 		vcpu->arch.apf.gfns[i] = ~0;
 }
@@ -10310,12 +10310,14 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
 
 static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
 {
+	BUILD_BUG_ON(!is_power_of_2(ASYNC_PF_PER_VCPU));
+
 	return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU));
 }
 
 static inline u32 kvm_async_pf_next_probe(u32 key)
 {
-	return (key + 1) & (roundup_pow_of_two(ASYNC_PF_PER_VCPU) - 1);
+	return (key + 1) & (ASYNC_PF_PER_VCPU - 1);
 }
 
 static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
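
BUILD_BUG_ON() and is_power_of_2() are kernel-internal helpers.  A rough
userspace equivalent of the new compile-time guard, assuming C11, would be:

/* Sketch of the guard above, using C11's _Static_assert. */

#define ASYNC_PF_PER_VCPU 64

/* n is a power of two iff it is nonzero and has a single bit set. */
#define IS_POWER_OF_2(n)	((n) != 0 && ((n) & ((n) - 1)) == 0)

/* Build fails here if the table size ever stops being a power of two. */
_Static_assert(IS_POWER_OF_2(ASYNC_PF_PER_VCPU),
	       "ASYNC_PF_PER_VCPU must be a power of two");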
@@ -10333,7 +10335,7 @@ static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn)
 	int i;
 	u32 key = kvm_async_pf_hash_fn(gfn);
 
-	for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU) &&
+	for (i = 0; i < ASYNC_PF_PER_VCPU &&
 		     (vcpu->arch.apf.gfns[key] != gfn &&
 		      vcpu->arch.apf.gfns[key] != ~0); i++)
 		key = kvm_async_pf_next_probe(key);
...
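
Putting the pieces together, here is a hedged, self-contained userspace
sketch of the open-addressing scheme these functions implement: hash a gfn
to a slot, then probe linearly with the masked increment, treating ~0 as
"empty".  hash_32() and order_base_2() are kernel helpers, so a simple
multiplicative hash stands in for them here; the names are illustrative
only.

#include <stdint.h>
#include <string.h>

#define N 64			/* stand-in for ASYNC_PF_PER_VCPU */

typedef uint64_t gfn_t;
static gfn_t gfns[N];

static uint32_t hash_fn(gfn_t gfn)
{
	/* Toy multiplicative hash into log2(N) = 6 bits; the kernel uses
	 * hash_32(gfn, order_base_2(ASYNC_PF_PER_VCPU)) instead. */
	return ((uint32_t)gfn * 2654435761u) >> (32 - 6);
}

static uint32_t next_probe(uint32_t key)
{
	return (key + 1) & (N - 1);	/* power-of-two wrap-around */
}

static void hash_reset(void)
{
	memset(gfns, 0xff, sizeof(gfns));	/* every slot becomes ~0 (empty) */
}

/* Linear probing: stop at the gfn, at an empty slot, or after N steps. */
static uint32_t gfn_slot(gfn_t gfn)
{
	uint32_t key = hash_fn(gfn);
	int i;

	for (i = 0; i < N && gfns[key] != gfn && gfns[key] != (gfn_t)~0; i++)
		key = next_probe(key);
	return key;
}

static void add_gfn(gfn_t gfn)
{
	gfns[gfn_slot(gfn)] = gfn;	/* lands on the gfn or the first free slot */
}

int main(void)
{
	hash_reset();
	add_gfn(0x1234);
	return gfns[gfn_slot(0x1234)] == 0x1234 ? 0 : 1;
}

After hash_reset() every lookup stops at the first empty slot, which is why
the kernel fills the table with ~0 on reset.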