Commit b9877ce2 authored by Alexander Graf, committed by Avi Kivity

KVM: PPC: Revert "KVM: PPC: Use kernel hash function"

It turns out the in-kernel hash function is sub-optimal for our subtle
hash inputs where every bit is significant. So let's revert to the original
hash functions.

This reverts commit 05340ab4f9a6626f7a2e8f9fe5397c61d494f445.
Signed-off-by: Alexander Graf <agraf@suse.de>
parent 928d78be
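
For reference, the function being restored (see both hunks below) XOR-folds the 64-bit gvsid into a SID_MAP_BITS-wide index, so every input bit contributes to exactly one slice of the result. A minimal user-space sketch of the same fold, assuming illustrative values for the SID_MAP_* constants (in the kernel they come from asm/kvm_book3s.h):

    #include <stdint.h>

    /* Illustrative values; the kernel defines these in asm/kvm_book3s.h. */
    #define SID_MAP_BITS	9
    #define SID_MAP_MASK	((1 << SID_MAP_BITS) - 1)

    /* XOR-fold a 64-bit guest VSID down to SID_MAP_BITS bits: XOR together
     * eight SID_MAP_BITS-wide slices, so every input bit lands in exactly
     * one slice and therefore affects the result. This is the loop form of
     * the unrolled expression in the diff below. */
    static uint16_t sid_hash(uint64_t gvsid)
    {
    	uint16_t hash = 0;
    	int i;

    	for (i = 0; i < 8; i++)
    		hash ^= (gvsid >> (SID_MAP_BITS * i)) & SID_MAP_MASK;
    	return hash;
    }
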
arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -19,7 +19,6 @@
  */

 #include <linux/kvm_host.h>
-#include <linux/hash.h>

 #include <asm/kvm_ppc.h>
 #include <asm/kvm_book3s.h>
@@ -77,7 +76,14 @@ void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
  * a hash, so we don't waste cycles on looping */
 static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
 {
-	return hash_64(gvsid, SID_MAP_BITS);
+	return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
+		     ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
+		     ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
+		     ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
+		     ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
+		     ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
+		     ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
+		     ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
 }
arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -20,7 +20,6 @@
  */

 #include <linux/kvm_host.h>
-#include <linux/hash.h>

 #include <asm/kvm_ppc.h>
 #include <asm/kvm_book3s.h>
@@ -44,9 +43,17 @@ void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
  * a hash, so we don't waste cycles on looping */
 static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
 {
-	return hash_64(gvsid, SID_MAP_BITS);
+	return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
+		     ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
+		     ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
+		     ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
+		     ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
+		     ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
+		     ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
+		     ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
 }

 static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
 {
 	struct kvmppc_sid_map *map;
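
A hypothetical user-space harness (not part of the commit) for comparing how the two hashes spread a given gvsid pattern across the SID map. Here mul_hash only approximates the multiplicative hash_64() of that era, the golden-ratio constant and SID_MAP_BITS value are assumptions, and actual collision behavior depends on the VSID patterns a real guest produces:

    #include <stdint.h>
    #include <stdio.h>

    #define SID_MAP_BITS	9		/* illustrative value */
    #define SID_MAP_NUM	(1 << SID_MAP_BITS)
    #define SID_MAP_MASK	(SID_MAP_NUM - 1)

    /* Multiplicative hash in the style of hash_64() of that era: multiply by
     * a golden-ratio constant and keep the top SID_MAP_BITS bits. */
    static uint16_t mul_hash(uint64_t v)
    {
    	return (uint16_t)((v * 0x9e37fffffffc0001ULL) >> (64 - SID_MAP_BITS));
    }

    /* The XOR-fold hash this commit restores, in loop form. */
    static uint16_t fold_hash(uint64_t v)
    {
    	uint16_t hash = 0;
    	int i;

    	for (i = 0; i < 8; i++)
    		hash ^= (v >> (SID_MAP_BITS * i)) & SID_MAP_MASK;
    	return hash;
    }

    int main(void)
    {
    	static unsigned int mul_seen[SID_MAP_NUM], fold_seen[SID_MAP_NUM];
    	unsigned int mul_coll = 0, fold_coll = 0;
    	uint64_t gvsid;

    	/* Synthetic pattern: sequential guest VSIDs; real guests differ. */
    	for (gvsid = 0; gvsid < 4096; gvsid++) {
    		if (mul_seen[mul_hash(gvsid)]++)
    			mul_coll++;
    		if (fold_seen[fold_hash(gvsid)]++)
    			fold_coll++;
    	}
    	printf("collisions over 4096 gvsids: hash_64-style=%u xor-fold=%u\n",
    	       mul_coll, fold_coll);
    	return 0;
    }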