Commit 054efb64 authored by Borislav Petkov, committed by Ingo Molnar

x86/cpufeature: Remove cpu_has_xmm2

Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-crypto@vger.kernel.org
Link: http://lkml.kernel.org/r/1459266123-21878-8-git-send-email-bp@alien8.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 906bf7fd
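The hunks below all follow the same conversion: the cpu_has_xmm2 wrapper macro is dropped and each call site uses the accessor that matches its context, i.e. boot_cpu_has() in module init/probe paths, cpu_has(c, ...) where a struct cpuinfo_x86 pointer is already at hand, and static_cpu_has() on the usercopy fast path. A minimal sketch of that pattern follows; it is not part of the patch, and the xmm2_example_*() helpers are hypothetical names used only to show which accessor fits which kind of call site.

/* Sketch only: illustrates the replacement forms for cpu_has_xmm2. */
#include <linux/errno.h>
#include <linux/types.h>
#include <asm/cpufeature.h>

/* Init/probe path: query the boot CPU's feature bits once. */
static int xmm2_example_probe(void)
{
	return boot_cpu_has(X86_FEATURE_XMM2) ? 0 : -ENODEV;
}

/* Per-CPU setup code that already has a struct cpuinfo_x86 *c. */
static void xmm2_example_init_cpu(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_XMM2))
		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
}

/* Hot path: static_cpu_has() is patched in via alternatives at boot. */
static bool xmm2_example_fast_path(void)
{
	return static_cpu_has(X86_FEATURE_XMM2);
}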
@@ -179,7 +179,7 @@ static struct shash_alg alg = {
 static int __init poly1305_simd_mod_init(void)
 {
-	if (!cpu_has_xmm2)
+	if (!boot_cpu_has(X86_FEATURE_XMM2))
 		return -ENODEV;
 #ifdef CONFIG_AS_AVX2
@@ -600,7 +600,7 @@ static struct crypto_alg serpent_algs[10] = { {
 static int __init serpent_sse2_init(void)
 {
-	if (!cpu_has_xmm2) {
+	if (!boot_cpu_has(X86_FEATURE_XMM2)) {
 		printk(KERN_INFO "SSE2 instructions are not detected.\n");
 		return -ENODEV;
 	}
@@ -125,7 +125,6 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
 #define cpu_has_apic		boot_cpu_has(X86_FEATURE_APIC)
 #define cpu_has_fxsr		boot_cpu_has(X86_FEATURE_FXSR)
 #define cpu_has_xmm		boot_cpu_has(X86_FEATURE_XMM)
-#define cpu_has_xmm2		boot_cpu_has(X86_FEATURE_XMM2)
 #define cpu_has_aes		boot_cpu_has(X86_FEATURE_AES)
 #define cpu_has_avx		boot_cpu_has(X86_FEATURE_AVX)
 #define cpu_has_avx2		boot_cpu_has(X86_FEATURE_AVX2)
@@ -750,7 +750,7 @@ static void init_amd(struct cpuinfo_x86 *c)
 	if (c->x86 >= 0xf)
		set_cpu_cap(c, X86_FEATURE_K8);
-	if (cpu_has_xmm2) {
+	if (cpu_has(c, X86_FEATURE_XMM2)) {
 		/* MFENCE stops RDTSC speculation */
 		set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
 	}
@@ -456,7 +456,7 @@ static void init_intel(struct cpuinfo_x86 *c)
 		set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
 	}
-	if (cpu_has_xmm2)
+	if (cpu_has(c, X86_FEATURE_XMM2))
 		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
 	if (boot_cpu_has(X86_FEATURE_DS)) {
@@ -612,7 +612,7 @@ unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from,
 {
 	stac();
 #ifdef CONFIG_X86_INTEL_USERCOPY
-	if (n > 64 && cpu_has_xmm2)
+	if (n > 64 && static_cpu_has(X86_FEATURE_XMM2))
 		n = __copy_user_zeroing_intel_nocache(to, from, n);
 	else
 		__copy_user_zeroing(to, from, n);
@@ -629,7 +629,7 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
 {
 	stac();
 #ifdef CONFIG_X86_INTEL_USERCOPY
-	if (n > 64 && cpu_has_xmm2)
+	if (n > 64 && static_cpu_has(X86_FEATURE_XMM2))
 		n = __copy_user_intel_nocache(to, from, n);
 	else
 		__copy_user(to, from, n);