Commit b02a765f authored by Borislav Petkov, committed by Stefan Bader

x86/cpufeature: Remove unused and seldomly used cpu_has_xx macros

BugLink: https://bugs.launchpad.net/bugs/1777389

commit 362f924b upstream.

Those are stupid and code should use static_cpu_has_safe() or
boot_cpu_has() instead. Kill the least used and unused ones.

The remaining ones need more careful inspection before a conversion can
happen. On the TODO.
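
A minimal before/after sketch of the conversion (boot_cpu_has() and the
X86_FEATURE_* bits are real kernel interfaces; the init function below is a
made-up illustration, not code from this patch):

    #include <asm/cpufeature.h>

    /* Before: the cpu_has_xx convenience macro. */
    static int __init example_init_old(void)
    {
            if (!cpu_has_ssse3)     /* expanded to boot_cpu_has(X86_FEATURE_SSSE3) */
                    return -ENODEV;
            return 0;
    }

    /* After: spell out the feature bit at the call site. */
    static int __init example_init_new(void)
    {
            if (!boot_cpu_has(X86_FEATURE_SSSE3))
                    return -ENODEV;
            return 0;
    }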
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: http://lkml.kernel.org/r/1449481182-27541-4-git-send-email-bp@alien8.de
Cc: David Sterba <dsterba@suse.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Matt Mackall <mpm@selenic.com>
Cc: Chris Mason <clm@fb.com>
Cc: Josef Bacik <jbacik@fb.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Juerg Haefliger <juergh@canonical.com>
Signed-off-by: Khalid Elmously <khalid.elmously@canonical.com>
parent fc4830ce
@@ -125,7 +125,7 @@ static struct crypto_alg alg = {
 static int __init chacha20_simd_mod_init(void)
 {
-        if (!cpu_has_ssse3)
+        if (!boot_cpu_has(X86_FEATURE_SSSE3))
                 return -ENODEV;
 #ifdef CONFIG_AS_AVX2
...
@@ -257,7 +257,7 @@ static int __init crc32c_intel_mod_init(void)
         if (!x86_match_cpu(crc32c_cpu_id))
                 return -ENODEV;
 #ifdef CONFIG_X86_64
-        if (cpu_has_pclmulqdq) {
+        if (boot_cpu_has(X86_FEATURE_PCLMULQDQ)) {
                 alg.update = crc32c_pcl_intel_update;
                 alg.finup = crc32c_pcl_intel_finup;
                 alg.digest = crc32c_pcl_intel_digest;
...
@@ -160,7 +160,7 @@ static inline int amd_pmu_addr_offset(int index, bool eventsel)
         if (offset)
                 return offset;
-        if (!cpu_has_perfctr_core)
+        if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
                 offset = index;
         else
                 offset = index << 1;
@@ -652,7 +652,7 @@ static __initconst const struct x86_pmu amd_pmu = {
 static int __init amd_core_pmu_init(void)
 {
-        if (!cpu_has_perfctr_core)
+        if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
                 return 0;
         switch (boot_cpu_data.x86) {
...
@@ -523,10 +523,10 @@ static int __init amd_uncore_init(void)
         if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
                 goto fail_nodev;
-        if (!cpu_has_topoext)
+        if (!boot_cpu_has(X86_FEATURE_TOPOEXT))
                 goto fail_nodev;
-        if (cpu_has_perfctr_nb) {
+        if (boot_cpu_has(X86_FEATURE_PERFCTR_NB)) {
                 amd_uncore_nb = alloc_percpu(struct amd_uncore *);
                 if (!amd_uncore_nb) {
                         ret = -ENOMEM;
@@ -540,7 +540,7 @@ static int __init amd_uncore_init(void)
                 ret = 0;
         }
-        if (cpu_has_perfctr_l2) {
+        if (boot_cpu_has(X86_FEATURE_PERFCTR_L2)) {
                 amd_uncore_l2 = alloc_percpu(struct amd_uncore *);
                 if (!amd_uncore_l2) {
                         ret = -ENOMEM;
@@ -583,10 +583,11 @@ static int __init amd_uncore_init(void)
         /* amd_uncore_nb/l2 should have been freed by cleanup_cpu_online */
         amd_uncore_nb = amd_uncore_l2 = NULL;
-        if (cpu_has_perfctr_l2)
+
+        if (boot_cpu_has(X86_FEATURE_PERFCTR_L2))
                 perf_pmu_unregister(&amd_l2_pmu);
 fail_l2:
-        if (cpu_has_perfctr_nb)
+        if (boot_cpu_has(X86_FEATURE_PERFCTR_NB))
                 perf_pmu_unregister(&amd_nb_pmu);
         if (amd_uncore_l2)
                 free_percpu(amd_uncore_l2);
...
@@ -109,6 +109,6 @@ static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
 #endif
-#define system_has_cmpxchg_double() cpu_has_cx8
+#define system_has_cmpxchg_double() boot_cpu_has(X86_FEATURE_CX8)
 #endif /* _ASM_X86_CMPXCHG_32_H */
@@ -18,6 +18,6 @@ static inline void set_64bit(volatile u64 *ptr, u64 val)
         cmpxchg_local((ptr), (o), (n));         \
 })
-#define system_has_cmpxchg_double() cpu_has_cx16
+#define system_has_cmpxchg_double() boot_cpu_has(X86_FEATURE_CX16)
 #endif /* _ASM_X86_CMPXCHG_64_H */
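
For context, system_has_cmpxchg_double() is the gate callers test before using
the double-word cmpxchg fast path. A sketch modeled on the mm/slub.c caller
(the kmem_cache flags shown are that file's, reproduced from memory):

    #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
        defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
            /* Enable the lockless cmpxchg_double() fast path only when the
             * CPU advertises CX8 (32-bit) / CX16 (64-bit). */
            if (system_has_cmpxchg_double() &&
                (s->flags & SLAB_DEBUG_FLAGS) == 0)
                    s->flags |= __CMPXCHG_DOUBLE;
    #endif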
@@ -128,58 +128,29 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
 #define setup_force_cpu_bug(bit) setup_force_cpu_cap(bit)
 #define cpu_has_fpu             boot_cpu_has(X86_FEATURE_FPU)
-#define cpu_has_de              boot_cpu_has(X86_FEATURE_DE)
 #define cpu_has_pse             boot_cpu_has(X86_FEATURE_PSE)
 #define cpu_has_tsc             boot_cpu_has(X86_FEATURE_TSC)
 #define cpu_has_pge             boot_cpu_has(X86_FEATURE_PGE)
 #define cpu_has_apic            boot_cpu_has(X86_FEATURE_APIC)
-#define cpu_has_sep             boot_cpu_has(X86_FEATURE_SEP)
-#define cpu_has_mtrr            boot_cpu_has(X86_FEATURE_MTRR)
-#define cpu_has_mmx             boot_cpu_has(X86_FEATURE_MMX)
 #define cpu_has_fxsr            boot_cpu_has(X86_FEATURE_FXSR)
 #define cpu_has_xmm             boot_cpu_has(X86_FEATURE_XMM)
 #define cpu_has_xmm2            boot_cpu_has(X86_FEATURE_XMM2)
-#define cpu_has_xmm3            boot_cpu_has(X86_FEATURE_XMM3)
-#define cpu_has_ssse3           boot_cpu_has(X86_FEATURE_SSSE3)
 #define cpu_has_aes             boot_cpu_has(X86_FEATURE_AES)
 #define cpu_has_avx             boot_cpu_has(X86_FEATURE_AVX)
 #define cpu_has_avx2            boot_cpu_has(X86_FEATURE_AVX2)
-#define cpu_has_ht              boot_cpu_has(X86_FEATURE_HT)
-#define cpu_has_nx              boot_cpu_has(X86_FEATURE_NX)
-#define cpu_has_xstore          boot_cpu_has(X86_FEATURE_XSTORE)
-#define cpu_has_xstore_enabled  boot_cpu_has(X86_FEATURE_XSTORE_EN)
-#define cpu_has_xcrypt          boot_cpu_has(X86_FEATURE_XCRYPT)
-#define cpu_has_xcrypt_enabled  boot_cpu_has(X86_FEATURE_XCRYPT_EN)
-#define cpu_has_ace2            boot_cpu_has(X86_FEATURE_ACE2)
-#define cpu_has_ace2_enabled    boot_cpu_has(X86_FEATURE_ACE2_EN)
-#define cpu_has_phe             boot_cpu_has(X86_FEATURE_PHE)
-#define cpu_has_phe_enabled     boot_cpu_has(X86_FEATURE_PHE_EN)
-#define cpu_has_pmm             boot_cpu_has(X86_FEATURE_PMM)
-#define cpu_has_pmm_enabled     boot_cpu_has(X86_FEATURE_PMM_EN)
-#define cpu_has_ds              boot_cpu_has(X86_FEATURE_DS)
-#define cpu_has_pebs            boot_cpu_has(X86_FEATURE_PEBS)
 #define cpu_has_clflush         boot_cpu_has(X86_FEATURE_CLFLUSH)
-#define cpu_has_bts             boot_cpu_has(X86_FEATURE_BTS)
 #define cpu_has_gbpages         boot_cpu_has(X86_FEATURE_GBPAGES)
 #define cpu_has_arch_perfmon    boot_cpu_has(X86_FEATURE_ARCH_PERFMON)
 #define cpu_has_pat             boot_cpu_has(X86_FEATURE_PAT)
-#define cpu_has_xmm4_1          boot_cpu_has(X86_FEATURE_XMM4_1)
-#define cpu_has_xmm4_2          boot_cpu_has(X86_FEATURE_XMM4_2)
 #define cpu_has_x2apic          boot_cpu_has(X86_FEATURE_X2APIC)
 #define cpu_has_xsave           boot_cpu_has(X86_FEATURE_XSAVE)
-#define cpu_has_xsaveopt        boot_cpu_has(X86_FEATURE_XSAVEOPT)
 #define cpu_has_xsaves          boot_cpu_has(X86_FEATURE_XSAVES)
 #define cpu_has_osxsave         boot_cpu_has(X86_FEATURE_OSXSAVE)
 #define cpu_has_hypervisor      boot_cpu_has(X86_FEATURE_HYPERVISOR)
-#define cpu_has_pclmulqdq       boot_cpu_has(X86_FEATURE_PCLMULQDQ)
-#define cpu_has_perfctr_core    boot_cpu_has(X86_FEATURE_PERFCTR_CORE)
-#define cpu_has_perfctr_nb      boot_cpu_has(X86_FEATURE_PERFCTR_NB)
-#define cpu_has_perfctr_l2      boot_cpu_has(X86_FEATURE_PERFCTR_L2)
-#define cpu_has_cx8             boot_cpu_has(X86_FEATURE_CX8)
-#define cpu_has_cx16            boot_cpu_has(X86_FEATURE_CX16)
-#define cpu_has_eager_fpu       boot_cpu_has(X86_FEATURE_EAGER_FPU)
-#define cpu_has_topoext         boot_cpu_has(X86_FEATURE_TOPOEXT)
-#define cpu_has_bpext           boot_cpu_has(X86_FEATURE_BPEXT)
+/*
+ * Do not add any more of those clumsy macros - use static_cpu_has_safe() for
+ * fast paths and boot_cpu_has() otherwise!
+ */
 #if __GNUC__ >= 4
 extern void warn_pre_alternatives(void);
...
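
The comment added above is the rule of thumb for future callers; a hedged
illustration of the split it asks for (the two functions are hypothetical,
the helpers are real):

    /* Slow path (module init, setup code): read the boot CPU's bitmap. */
    static int __init example_setup(void)
    {
            return boot_cpu_has(X86_FEATURE_AVX2) ? 0 : -ENODEV;
    }

    /* Hot path: static_cpu_has_safe() compiles to a branch patched via
     * alternatives once boot is done, avoiding the bitmap load. */
    static bool example_fast_path_ok(void)
    {
            return static_cpu_has_safe(X86_FEATURE_AVX2);
    }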
@@ -553,7 +553,7 @@ do { \
         if (cpu_has_xmm) {                              \
                 xor_speed(&xor_block_pIII_sse);         \
                 xor_speed(&xor_block_sse_pf64);         \
-        } else if (cpu_has_mmx) {                       \
+        } else if (boot_cpu_has(X86_FEATURE_MMX)) {     \
                 xor_speed(&xor_block_pII_mmx);          \
                 xor_speed(&xor_block_p5_mmx);           \
         } else {                                        \
...
@@ -313,7 +313,7 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
         int cpu = smp_processor_id();
         /* get information required for multi-node processors */
-        if (cpu_has_topoext) {
+        if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
                 u32 eax, ebx, ecx, edx;
                 cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
@@ -1015,7 +1015,7 @@ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
 void set_dr_addr_mask(unsigned long mask, int dr)
 {
-        if (!cpu_has_bpext)
+        if (!boot_cpu_has(X86_FEATURE_BPEXT))
                 return;
         switch (dr) {
...
@@ -1664,7 +1664,9 @@ void cpu_init(void)
         printk(KERN_INFO "Initializing CPU#%d\n", cpu);
-        if (cpu_feature_enabled(X86_FEATURE_VME) || cpu_has_tsc || cpu_has_de)
+        if (cpu_feature_enabled(X86_FEATURE_VME) ||
+            cpu_has_tsc ||
+            boot_cpu_has(X86_FEATURE_DE))
                 cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
         load_current_idt();
...
@@ -466,7 +466,8 @@ static void init_intel(struct cpuinfo_x86 *c)
         if (cpu_has_xmm2)
                 set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
-        if (cpu_has_ds) {
+
+        if (boot_cpu_has(X86_FEATURE_DS)) {
                 unsigned int l1;
                 rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
                 if (!(l1 & (1<<11)))
...
@@ -591,7 +591,7 @@ cpuid4_cache_lookup_regs(int index, struct _cpuid4_info_regs *this_leaf)
         unsigned edx;
         if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
-                if (cpu_has_topoext)
+                if (boot_cpu_has(X86_FEATURE_TOPOEXT))
                         cpuid_count(0x8000001d, index, &eax.full,
                                     &ebx.full, &ecx.full, &edx);
                 else
@@ -637,7 +637,7 @@ static int find_num_cache_leaves(struct cpuinfo_x86 *c)
 void init_amd_cacheinfo(struct cpuinfo_x86 *c)
 {
-        if (cpu_has_topoext) {
+        if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
                 num_cache_leaves = find_num_cache_leaves(c);
         } else if (c->extended_cpuid_level >= 0x80000006) {
                 if (cpuid_edx(0x80000006) & 0xf000)
@@ -809,7 +809,7 @@ static int __cache_amd_cpumap_setup(unsigned int cpu, int index,
         struct cacheinfo *this_leaf;
         int i, sibling;
-        if (cpu_has_topoext) {
+        if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
                 unsigned int apicid, nshared, first, last;
                 this_leaf = this_cpu_ci->info_list + index;
...
@@ -349,7 +349,7 @@ static void get_fixed_ranges(mtrr_type *frs)
 void mtrr_save_fixed_ranges(void *info)
 {
-        if (cpu_has_mtrr)
+        if (boot_cpu_has(X86_FEATURE_MTRR))
                 get_fixed_ranges(mtrr_state.fixed_ranges);
 }
...
@@ -682,7 +682,7 @@ void __init mtrr_bp_init(void)
         phys_addr = 32;
-        if (cpu_has_mtrr) {
+        if (boot_cpu_has(X86_FEATURE_MTRR)) {
                 mtrr_if = &generic_mtrr_ops;
                 size_or_mask = SIZE_OR_MASK_BITS(36);
                 size_and_mask = 0x00f00000;
...
@@ -15,7 +15,7 @@
  */
 static void fpu__init_cpu_ctx_switch(void)
 {
-        if (!cpu_has_eager_fpu)
+        if (!boot_cpu_has(X86_FEATURE_EAGER_FPU))
                 stts();
         else
                 clts();
@@ -321,7 +321,7 @@ static void __init fpu__init_system_ctx_switch(void)
         WARN_ON_FPU(current->thread.fpu.fpstate_active);
         current_thread_info()->status = 0;
-        if (cpu_has_xsaveopt && eagerfpu != DISABLE)
+        if (boot_cpu_has(X86_FEATURE_XSAVEOPT) && eagerfpu != DISABLE)
                 eagerfpu = ENABLE;
         if (xfeatures_mask & XFEATURE_MASK_EAGER)
...
@@ -301,6 +301,10 @@ static int arch_build_bp_info(struct perf_event *bp)
                         return -EINVAL;
                 if (bp->attr.bp_addr & (bp->attr.bp_len - 1))
                         return -EINVAL;
+
+                if (!boot_cpu_has(X86_FEATURE_BPEXT))
+                        return -EOPNOTSUPP;
+
                 /*
                  * It's impossible to use a range breakpoint to fake out
                  * user vs kernel detection because bp_len - 1 can't
@@ -308,8 +312,6 @@ static int arch_build_bp_info(struct perf_event *bp)
                  * breakpoints, then we'll have to check for kprobe-blacklisted
                  * addresses anywhere in the range.
                  */
-                if (!cpu_has_bpext)
-                        return -EOPNOTSUPP;
                 info->mask = bp->attr.bp_len - 1;
                 info->len = X86_BREAKPOINT_LEN_1;
         }
...
@@ -445,7 +445,7 @@ do { \
 static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 {
-        if (cpu_has_topoext) {
+        if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
                 int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
                 if (c->phys_proc_id == o->phys_proc_id &&
...
@@ -357,8 +357,10 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
         tss = &per_cpu(cpu_tss, get_cpu());
         /* make room for real-mode segments */
         tsk->thread.sp0 += 16;
-        if (cpu_has_sep)
+
+        if (static_cpu_has_safe(X86_FEATURE_SEP))
                 tsk->thread.sysenter_cs = 0;
+
         load_sp0(tss, &tsk->thread);
         put_cpu();
...
@@ -32,7 +32,7 @@ early_param("noexec", noexec_setup);
 void x86_configure_nx(void)
 {
-        if (cpu_has_nx && !disable_nx)
+        if (boot_cpu_has(X86_FEATURE_NX) && !disable_nx)
                 __supported_pte_mask |= _PAGE_NX;
         else
                 __supported_pte_mask &= ~_PAGE_NX;
@@ -40,7 +40,7 @@ void x86_configure_nx(void)
 void __init x86_report_nx(void)
 {
-        if (!cpu_has_nx) {
+        if (!boot_cpu_has(X86_FEATURE_NX)) {
                 printk(KERN_NOTICE "Notice: NX (Execute Disable) protection "
                        "missing in CPU!\n");
         } else {
...
@@ -140,7 +140,7 @@ static int via_rng_init(struct hwrng *rng)
          * RNG configuration like it used to be the case in this
          * register */
         if ((c->x86 == 6) && (c->x86_model >= 0x0f)) {
-                if (!cpu_has_xstore_enabled) {
+                if (!boot_cpu_has(X86_FEATURE_XSTORE_EN)) {
                         pr_err(PFX "can't enable hardware RNG "
                                 "if XSTORE is not enabled\n");
                         return -ENODEV;
@@ -200,8 +200,9 @@ static int __init mod_init(void)
 {
         int err;
-        if (!cpu_has_xstore)
+        if (!boot_cpu_has(X86_FEATURE_XSTORE))
                 return -ENODEV;
+
         pr_info("VIA RNG detected\n");
         err = hwrng_register(&via_rng);
         if (err) {
...
@@ -515,7 +515,7 @@ static int __init padlock_init(void)
         if (!x86_match_cpu(padlock_cpu_id))
                 return -ENODEV;
-        if (!cpu_has_xcrypt_enabled) {
+        if (!boot_cpu_has(X86_FEATURE_XCRYPT_EN)) {
                 printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
                 return -ENODEV;
         }
...
@@ -540,7 +540,7 @@ static int __init padlock_init(void)
         struct shash_alg *sha1;
         struct shash_alg *sha256;
-        if (!x86_match_cpu(padlock_sha_ids) || !cpu_has_phe_enabled)
+        if (!x86_match_cpu(padlock_sha_ids) || !boot_cpu_has(X86_FEATURE_PHE_EN))
                 return -ENODEV;
         /* Register the newly added algorithm module if on *
...
@@ -753,7 +753,7 @@ static inline void set_irq_posting_cap(void)
          * should have X86_FEATURE_CX16 support, this has been confirmed
          * with Intel hardware guys.
          */
-        if ( cpu_has_cx16 )
+        if (boot_cpu_has(X86_FEATURE_CX16))
                 intel_irq_remap_ops.capability |= 1 << IRQ_POSTING_CAP;
         for_each_iommu(iommu, drhd)
...
@@ -923,7 +923,7 @@ static int check_async_write(struct inode *inode, unsigned long bio_flags)
         if (bio_flags & EXTENT_BIO_TREE_LOG)
                 return 0;
 #ifdef CONFIG_X86
-        if (cpu_has_xmm4_2)
+        if (static_cpu_has_safe(X86_FEATURE_XMM4_2))
                 return 0;
 #endif
         return 1;
...