Commit cb44d0cf
Authored Apr 13, 2016 by Ingo Molnar

Merge branch 'x86/cpu' into x86/asm, to merge more patches

Signed-off-by: Ingo Molnar <mingo@kernel.org>

Parents: 482dd2ef d7847a70

Showing 41 changed files with 67 additions and 69 deletions
arch/ia64/include/asm/iommu.h                    +0 -1
arch/nios2/kernel/prom.c                         +1 -2
arch/x86/crypto/camellia_aesni_avx2_glue.c       +2 -1
arch/x86/crypto/camellia_aesni_avx_glue.c        +1 -1
arch/x86/crypto/poly1305_glue.c                  +1 -1
arch/x86/crypto/serpent_avx2_glue.c              +1 -1
arch/x86/crypto/serpent_sse2_glue.c              +1 -1
arch/x86/events/intel/cstate.c                   +1 -1
arch/x86/events/intel/uncore.c                   +1 -1
arch/x86/include/asm/apic.h                      +2 -2
arch/x86/include/asm/cpufeature.h                +0 -9
arch/x86/include/asm/pgtable.h                   +1 -1
arch/x86/include/asm/tlbflush.h                  +1 -1
arch/x86/include/asm/xor_avx.h                   +2 -2
arch/x86/kernel/apic/apic.c                      +1 -1
arch/x86/kernel/cpu/amd.c                        +1 -1
arch/x86/kernel/cpu/intel.c                      +5 -5
arch/x86/kernel/cpu/mtrr/cyrix.c                 +2 -2
arch/x86/kernel/cpu/mtrr/generic.c               +2 -2
arch/x86/kernel/cpu/vmware.c                     +1 -1
arch/x86/kernel/kvm.c                            +1 -1
arch/x86/kernel/tce_64.c                         +1 -1
arch/x86/kvm/mmu.c                               +2 -1
arch/x86/lib/usercopy_32.c                       +2 -2
arch/x86/mm/hugetlbpage.c                        +2 -2
arch/x86/mm/init.c                               +4 -4
arch/x86/mm/init_32.c                            +1 -1
arch/x86/mm/init_64.c                            +2 -2
arch/x86/mm/ioremap.c                            +2 -2
arch/x86/mm/pageattr.c                           +2 -2
arch/x86/oprofile/nmi_int.c                      +2 -2
arch/x86/oprofile/op_model_ppro.c                +1 -1
arch/x86/power/hibernate_32.c                    +1 -1
arch/x86/xen/enlighten.c                         +2 -2
crypto/asymmetric_keys/pkcs7_trust.c             +2 -0
drivers/gpu/drm/drm_cache.c                      +3 -3
drivers/gpu/drm/i915/i915_gem_execbuffer.c       +1 -1
drivers/hwmon/max1111.c                          +6 -0
drivers/lguest/x86/core.c                        +1 -1
drivers/staging/unisys/visorbus/visorchipset.c   +1 -1
fs/dlm/config.c                                  +1 -2
arch/ia64/include/asm/iommu.h

 #ifndef _ASM_IA64_IOMMU_H
 #define _ASM_IA64_IOMMU_H 1
 
-#define cpu_has_x2apic 0
 /* 10 seconds */
 #define DMAR_OPERATION_TIMEOUT (((cycles_t) local_cpu_data->itc_freq)*10)
arch/nios2/kernel/prom.c

@@ -97,8 +97,7 @@ static int __init early_init_dt_scan_serial(unsigned long node,
         return 0;
 #endif
 
-    *addr64 = fdt_translate_address((const void *)initial_boot_params,
-                    node);
+    *addr64 = of_flat_dt_translate_address(node);
 
     return *addr64 == OF_BAD_ADDR ? 0 : 1;
 }
arch/x86/crypto/camellia_aesni_avx2_glue.c

@@ -562,7 +562,8 @@ static int __init camellia_aesni_init(void)
 {
     const char *feature_name;
 
-    if (!cpu_has_avx2 || !cpu_has_avx || !cpu_has_aes || !cpu_has_osxsave) {
+    if (!cpu_has_avx2 || !cpu_has_avx || !cpu_has_aes ||
+        !boot_cpu_has(X86_FEATURE_OSXSAVE)) {
         pr_info("AVX2 or AES-NI instructions are not detected.\n");
         return -ENODEV;
     }
arch/x86/crypto/camellia_aesni_avx_glue.c

@@ -554,7 +554,7 @@ static int __init camellia_aesni_init(void)
 {
     const char *feature_name;
 
-    if (!cpu_has_avx || !cpu_has_aes || !cpu_has_osxsave) {
+    if (!cpu_has_avx || !cpu_has_aes || !boot_cpu_has(X86_FEATURE_OSXSAVE)) {
         pr_info("AVX or AES-NI instructions are not detected.\n");
         return -ENODEV;
     }
arch/x86/crypto/poly1305_glue.c

@@ -179,7 +179,7 @@ static struct shash_alg alg = {
 
 static int __init poly1305_simd_mod_init(void)
 {
-    if (!cpu_has_xmm2)
+    if (!boot_cpu_has(X86_FEATURE_XMM2))
         return -ENODEV;
 
 #ifdef CONFIG_AS_AVX2
arch/x86/crypto/serpent_avx2_glue.c

@@ -538,7 +538,7 @@ static int __init init(void)
 {
     const char *feature_name;
 
-    if (!cpu_has_avx2 || !cpu_has_osxsave) {
+    if (!cpu_has_avx2 || !boot_cpu_has(X86_FEATURE_OSXSAVE)) {
         pr_info("AVX2 instructions are not detected.\n");
         return -ENODEV;
     }
arch/x86/crypto/serpent_sse2_glue.c

@@ -600,7 +600,7 @@ static struct crypto_alg serpent_algs[10] = { {
 
 static int __init serpent_sse2_init(void)
 {
-    if (!cpu_has_xmm2) {
+    if (!boot_cpu_has(X86_FEATURE_XMM2)) {
         printk(KERN_INFO "SSE2 instructions are not detected.\n");
         return -ENODEV;
     }
arch/x86/events/intel/cstate.c

@@ -677,7 +677,7 @@ static int __init cstate_pmu_init(void)
 {
     int err;
 
-    if (cpu_has_hypervisor)
+    if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
         return -ENODEV;
 
     err = cstate_init();
arch/x86/events/intel/uncore.c

@@ -1383,7 +1383,7 @@ static int __init intel_uncore_init(void)
     if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
         return -ENODEV;
 
-    if (cpu_has_hypervisor)
+    if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
         return -ENODEV;
 
     max_packages = topology_max_packages();
arch/x86/include/asm/apic.h

@@ -239,10 +239,10 @@ extern void __init check_x2apic(void);
 extern void x2apic_setup(void);
 static inline int x2apic_enabled(void)
 {
-    return cpu_has_x2apic && apic_is_x2apic_enabled();
+    return boot_cpu_has(X86_FEATURE_X2APIC) && apic_is_x2apic_enabled();
 }
 
-#define x2apic_supported()    (cpu_has_x2apic)
+#define x2apic_supported()    (boot_cpu_has(X86_FEATURE_X2APIC))
 #else /* !CONFIG_X86_X2APIC */
 static inline void check_x2apic(void) { }
 static inline void x2apic_setup(void) { }
arch/x86/include/asm/cpufeature.h

@@ -119,25 +119,16 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
 } while (0)
 
 #define cpu_has_fpu             boot_cpu_has(X86_FEATURE_FPU)
-#define cpu_has_pse             boot_cpu_has(X86_FEATURE_PSE)
 #define cpu_has_tsc             boot_cpu_has(X86_FEATURE_TSC)
-#define cpu_has_pge             boot_cpu_has(X86_FEATURE_PGE)
 #define cpu_has_apic            boot_cpu_has(X86_FEATURE_APIC)
 #define cpu_has_fxsr            boot_cpu_has(X86_FEATURE_FXSR)
 #define cpu_has_xmm             boot_cpu_has(X86_FEATURE_XMM)
-#define cpu_has_xmm2            boot_cpu_has(X86_FEATURE_XMM2)
 #define cpu_has_aes             boot_cpu_has(X86_FEATURE_AES)
 #define cpu_has_avx             boot_cpu_has(X86_FEATURE_AVX)
 #define cpu_has_avx2            boot_cpu_has(X86_FEATURE_AVX2)
-#define cpu_has_clflush         boot_cpu_has(X86_FEATURE_CLFLUSH)
-#define cpu_has_gbpages         boot_cpu_has(X86_FEATURE_GBPAGES)
-#define cpu_has_arch_perfmon    boot_cpu_has(X86_FEATURE_ARCH_PERFMON)
 #define cpu_has_pat             boot_cpu_has(X86_FEATURE_PAT)
-#define cpu_has_x2apic          boot_cpu_has(X86_FEATURE_X2APIC)
 #define cpu_has_xsave           boot_cpu_has(X86_FEATURE_XSAVE)
 #define cpu_has_xsaves          boot_cpu_has(X86_FEATURE_XSAVES)
-#define cpu_has_osxsave         boot_cpu_has(X86_FEATURE_OSXSAVE)
-#define cpu_has_hypervisor      boot_cpu_has(X86_FEATURE_HYPERVISOR)
 /*
  * Do not add any more of those clumsy macros - use static_cpu_has() for
  * fast paths and boot_cpu_has() otherwise!
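The comment kept in cpufeature.h above states the rule the merged patches apply throughout this diff: use static_cpu_has() on fast paths and boot_cpu_has() elsewhere, instead of per-feature cpu_has_* macros. A minimal sketch of the conversion pattern, assembled only from helpers and feature flags that already appear in the hunks below (illustrative, not part of this commit):

    /* Callers include <asm/cpufeature.h> for these helpers and flags. */

    /* Before: per-feature convenience macro, now removed from cpufeature.h. */
    if (cpu_has_clflush)
        clflush(addr);

    /* After, ordinary path: query the boot CPU's capability bits directly. */
    if (boot_cpu_has(X86_FEATURE_CLFLUSH))
        clflush(addr);

    /* After, hot path: static_cpu_has() is patched via alternatives at boot. */
    if (static_cpu_has(X86_FEATURE_CLFLUSH))
        clflush(addr);

    /* Testing a specific CPU rather than the boot CPU, as in cpu/intel.c: */
    if (cpu_has(c, X86_FEATURE_XMM2))
        set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);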
arch/x86/include/asm/pgtable.h

@@ -183,7 +183,7 @@ static inline int pmd_trans_huge(pmd_t pmd)
 
 static inline int has_transparent_hugepage(void)
 {
-    return cpu_has_pse;
+    return boot_cpu_has(X86_FEATURE_PSE);
 }
 
 #ifdef __HAVE_ARCH_PTE_DEVMAP
arch/x86/include/asm/tlbflush.h

@@ -181,7 +181,7 @@ static inline void __native_flush_tlb_single(unsigned long addr)
 
 static inline void __flush_tlb_all(void)
 {
-    if (cpu_has_pge)
+    if (static_cpu_has(X86_FEATURE_PGE))
         __flush_tlb_global();
     else
         __flush_tlb();
arch/x86/include/asm/xor_avx.h

@@ -167,12 +167,12 @@ static struct xor_block_template xor_block_avx = {
 
 #define AVX_XOR_SPEED \
 do { \
-    if (cpu_has_avx && cpu_has_osxsave) \
+    if (cpu_has_avx && boot_cpu_has(X86_FEATURE_OSXSAVE)) \
         xor_speed(&xor_block_avx); \
 } while (0)
 
 #define AVX_SELECT(FASTEST) \
-    (cpu_has_avx && cpu_has_osxsave ? &xor_block_avx : FASTEST)
+    (cpu_has_avx && boot_cpu_has(X86_FEATURE_OSXSAVE) ? &xor_block_avx : FASTEST)
 
 #else
arch/x86/kernel/apic/apic.c

@@ -1561,7 +1561,7 @@ void __init check_x2apic(void)
         pr_info("x2apic: enabled by BIOS, switching to x2apic ops\n");
         x2apic_mode = 1;
         x2apic_state = X2APIC_ON;
-    } else if (!cpu_has_x2apic) {
+    } else if (!boot_cpu_has(X86_FEATURE_X2APIC)) {
         x2apic_state = X2APIC_DISABLED;
     }
 }
arch/x86/kernel/cpu/amd.c

@@ -751,7 +751,7 @@ static void init_amd(struct cpuinfo_x86 *c)
     if (c->x86 >= 0xf)
         set_cpu_cap(c, X86_FEATURE_K8);
 
-    if (cpu_has_xmm2) {
+    if (cpu_has(c, X86_FEATURE_XMM2)) {
         /* MFENCE stops RDTSC speculation */
         set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
     }
arch/x86/kernel/cpu/intel.c

@@ -152,9 +152,9 @@ static void early_init_intel(struct cpuinfo_x86 *c)
      * the TLB when any changes are made to any of the page table entries.
      * The operating system must reload CR3 to cause the TLB to be flushed"
      *
-     * As a result cpu_has_pge() in arch/x86/include/asm/tlbflush.h should
-     * be false so that __flush_tlb_all() causes CR3 insted of CR4.PGE
-     * to be modified
+     * As a result, boot_cpu_has(X86_FEATURE_PGE) in arch/x86/include/asm/tlbflush.h
+     * should be false so that __flush_tlb_all() causes CR3 insted of CR4.PGE
+     * to be modified.
      */
     if (c->x86 == 5 && c->x86_model == 9) {
         pr_info("Disabling PGE capability bit\n");

@@ -456,7 +456,7 @@ static void init_intel(struct cpuinfo_x86 *c)
             set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
     }
 
-    if (cpu_has_xmm2)
+    if (cpu_has(c, X86_FEATURE_XMM2))
         set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
 
     if (boot_cpu_has(X86_FEATURE_DS)) {

@@ -468,7 +468,7 @@ static void init_intel(struct cpuinfo_x86 *c)
             set_cpu_cap(c, X86_FEATURE_PEBS);
     }
 
-    if (c->x86 == 6 && cpu_has_clflush &&
+    if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_CLFLUSH) &&
         (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
         set_cpu_bug(c, X86_BUG_CLFLUSH_MONITOR);
arch/x86/kernel/cpu/mtrr/cyrix.c

@@ -137,7 +137,7 @@ static void prepare_set(void)
     u32 cr0;
 
     /* Save value of CR4 and clear Page Global Enable (bit 7) */
-    if (cpu_has_pge) {
+    if (boot_cpu_has(X86_FEATURE_PGE)) {
         cr4 = __read_cr4();
         __write_cr4(cr4 & ~X86_CR4_PGE);
     }

@@ -170,7 +170,7 @@ static void post_set(void)
     write_cr0(read_cr0() & ~X86_CR0_CD);
 
     /* Restore value of CR4 */
-    if (cpu_has_pge)
+    if (boot_cpu_has(X86_FEATURE_PGE))
         __write_cr4(cr4);
 }
arch/x86/kernel/cpu/mtrr/generic.c

@@ -741,7 +741,7 @@ static void prepare_set(void) __acquires(set_atomicity_lock)
     wbinvd();
 
     /* Save value of CR4 and clear Page Global Enable (bit 7) */
-    if (cpu_has_pge) {
+    if (boot_cpu_has(X86_FEATURE_PGE)) {
         cr4 = __read_cr4();
         __write_cr4(cr4 & ~X86_CR4_PGE);
     }

@@ -771,7 +771,7 @@ static void post_set(void) __releases(set_atomicity_lock)
     write_cr0(read_cr0() & ~X86_CR0_CD);
 
     /* Restore value of CR4 */
-    if (cpu_has_pge)
+    if (boot_cpu_has(X86_FEATURE_PGE))
         __write_cr4(cr4);
 
     raw_spin_unlock(&set_atomicity_lock);
 }
arch/x86/kernel/cpu/vmware.c

@@ -94,7 +94,7 @@ static void __init vmware_platform_setup(void)
  */
 static uint32_t __init vmware_platform(void)
 {
-    if (cpu_has_hypervisor) {
+    if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
         unsigned int eax;
         unsigned int hyper_vendor_id[3];
arch/x86/kernel/kvm.c

@@ -522,7 +522,7 @@ static noinline uint32_t __kvm_cpuid_base(void)
     if (boot_cpu_data.cpuid_level < 0)
         return 0;    /* So we don't blow up on old processors */
 
-    if (cpu_has_hypervisor)
+    if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
         return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);
 
     return 0;
arch/x86/kernel/tce_64.c

@@ -40,7 +40,7 @@
 static inline void flush_tce(void* tceaddr)
 {
     /* a single tce can't cross a cache line */
-    if (cpu_has_clflush)
+    if (boot_cpu_has(X86_FEATURE_CLFLUSH))
         clflush(tceaddr);
     else
         wbinvd();
arch/x86/kvm/mmu.c

@@ -3836,7 +3836,8 @@ reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
         __reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check,
                     boot_cpu_data.x86_phys_bits,
                     context->shadow_root_level, false,
-                    cpu_has_gbpages, true, true);
+                    boot_cpu_has(X86_FEATURE_GBPAGES),
+                    true, true);
     else
         __reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
                         boot_cpu_data.x86_phys_bits,
arch/x86/lib/usercopy_32.c

@@ -612,7 +612,7 @@ unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from,
 {
     stac();
 #ifdef CONFIG_X86_INTEL_USERCOPY
-    if (n > 64 && cpu_has_xmm2)
+    if (n > 64 && static_cpu_has(X86_FEATURE_XMM2))
         n = __copy_user_zeroing_intel_nocache(to, from, n);
     else
         __copy_user_zeroing(to, from, n);

@@ -629,7 +629,7 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
 {
     stac();
 #ifdef CONFIG_X86_INTEL_USERCOPY
-    if (n > 64 && cpu_has_xmm2)
+    if (n > 64 && static_cpu_has(X86_FEATURE_XMM2))
         n = __copy_user_intel_nocache(to, from, n);
     else
         __copy_user(to, from, n);
arch/x86/mm/hugetlbpage.c

@@ -162,7 +162,7 @@ static __init int setup_hugepagesz(char *opt)
     unsigned long ps = memparse(opt, &opt);
     if (ps == PMD_SIZE) {
         hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
-    } else if (ps == PUD_SIZE && cpu_has_gbpages) {
+    } else if (ps == PUD_SIZE && boot_cpu_has(X86_FEATURE_GBPAGES)) {
         hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
     } else {
         printk(KERN_ERR "hugepagesz: Unsupported page size %lu M\n",

@@ -177,7 +177,7 @@ __setup("hugepagesz=", setup_hugepagesz);
 static __init int gigantic_pages_init(void)
 {
     /* With compaction or CMA we can allocate gigantic pages at runtime */
-    if (cpu_has_gbpages && !size_to_hstate(1UL << PUD_SHIFT))
+    if (boot_cpu_has(X86_FEATURE_GBPAGES) && !size_to_hstate(1UL << PUD_SHIFT))
         hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
     return 0;
 }
arch/x86/mm/init.c

@@ -157,23 +157,23 @@ static void __init probe_page_size_mask(void)
      * This will simplify cpa(), which otherwise needs to support splitting
      * large pages into small in interrupt context, etc.
      */
-    if (cpu_has_pse && !debug_pagealloc_enabled())
+    if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled())
         page_size_mask |= 1 << PG_LEVEL_2M;
 #endif
 
     /* Enable PSE if available */
-    if (cpu_has_pse)
+    if (boot_cpu_has(X86_FEATURE_PSE))
         cr4_set_bits_and_update_boot(X86_CR4_PSE);
 
     /* Enable PGE if available */
-    if (cpu_has_pge) {
+    if (boot_cpu_has(X86_FEATURE_PGE)) {
         cr4_set_bits_and_update_boot(X86_CR4_PGE);
         __supported_pte_mask |= _PAGE_GLOBAL;
     } else
         __supported_pte_mask &= ~_PAGE_GLOBAL;
 
     /* Enable 1 GB linear kernel mappings if available: */
-    if (direct_gbpages && cpu_has_gbpages) {
+    if (direct_gbpages && boot_cpu_has(X86_FEATURE_GBPAGES)) {
         printk(KERN_INFO "Using GB pages for direct mapping\n");
         page_size_mask |= 1 << PG_LEVEL_1G;
     } else {
arch/x86/mm/init_32.c

@@ -284,7 +284,7 @@ kernel_physical_mapping_init(unsigned long start,
      */
     mapping_iter = 1;
 
-    if (!cpu_has_pse)
+    if (!boot_cpu_has(X86_FEATURE_PSE))
         use_pse = 0;
 
 repeat:
arch/x86/mm/init_64.c

@@ -1295,7 +1295,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
     struct vmem_altmap *altmap = to_vmem_altmap(start);
     int err;
 
-    if (cpu_has_pse)
+    if (boot_cpu_has(X86_FEATURE_PSE))
         err = vmemmap_populate_hugepages(start, end, node, altmap);
     else if (altmap) {
         pr_err_once("%s: no cpu support for altmap allocations\n",

@@ -1338,7 +1338,7 @@ void register_page_bootmem_memmap(unsigned long section_nr,
         }
         get_page_bootmem(section_nr, pud_page(*pud), MIX_SECTION_INFO);
 
-        if (!cpu_has_pse) {
+        if (!boot_cpu_has(X86_FEATURE_PSE)) {
             next = (addr + PAGE_SIZE) & PAGE_MASK;
             pmd = pmd_offset(pud, addr);
             if (pmd_none(*pmd))
arch/x86/mm/ioremap.c

@@ -378,7 +378,7 @@ EXPORT_SYMBOL(iounmap);
 int __init arch_ioremap_pud_supported(void)
 {
 #ifdef CONFIG_X86_64
-    return cpu_has_gbpages;
+    return boot_cpu_has(X86_FEATURE_GBPAGES);
 #else
     return 0;
 #endif

@@ -386,7 +386,7 @@ int __init arch_ioremap_pud_supported(void)
 
 int __init arch_ioremap_pmd_supported(void)
 {
-    return cpu_has_pse;
+    return boot_cpu_has(X86_FEATURE_PSE);
 }
 
 /*
arch/x86/mm/pageattr.c

@@ -1055,7 +1055,7 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
     /*
      * Map everything starting from the Gb boundary, possibly with 1G pages
      */
-    while (cpu_has_gbpages && end - start >= PUD_SIZE) {
+    while (boot_cpu_has(X86_FEATURE_GBPAGES) && end - start >= PUD_SIZE) {
         set_pud(pud, __pud(cpa->pfn << PAGE_SHIFT | _PAGE_PSE |
                    massage_pgprot(pud_pgprot)));
 

@@ -1460,7 +1460,7 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
      * error case we fall back to cpa_flush_all (which uses
      * WBINVD):
      */
-    if (!ret && cpu_has_clflush) {
+    if (!ret && boot_cpu_has(X86_FEATURE_CLFLUSH)) {
         if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) {
             cpa_flush_array(addr, numpages, cache,
                     cpa.flags, pages);
arch/x86/oprofile/nmi_int.c

@@ -636,7 +636,7 @@ static int __init ppro_init(char **cpu_type)
     __u8 cpu_model = boot_cpu_data.x86_model;
     struct op_x86_model_spec *spec = &op_ppro_spec;    /* default */
 
-    if (force_cpu_type == arch_perfmon && cpu_has_arch_perfmon)
+    if (force_cpu_type == arch_perfmon && boot_cpu_has(X86_FEATURE_ARCH_PERFMON))
         return 0;
 
     /*

@@ -761,7 +761,7 @@ int __init op_nmi_init(struct oprofile_operations *ops)
         if (cpu_type)
             break;
 
-        if (!cpu_has_arch_perfmon)
+        if (!boot_cpu_has(X86_FEATURE_ARCH_PERFMON))
             return -ENODEV;
 
         /* use arch perfmon as fallback */
arch/x86/oprofile/op_model_ppro.c

@@ -75,7 +75,7 @@ static void ppro_setup_ctrs(struct op_x86_model_spec const *model,
     u64 val;
     int i;
 
-    if (cpu_has_arch_perfmon) {
+    if (boot_cpu_has(X86_FEATURE_ARCH_PERFMON)) {
         union cpuid10_eax eax;
         eax.full = cpuid_eax(0xa);
 
arch/x86/power/hibernate_32.c

@@ -106,7 +106,7 @@ static int resume_physical_mapping_init(pgd_t *pgd_base)
              * normal page tables.
              * NOTE: We can mark everything as executable here
              */
-            if (cpu_has_pse) {
+            if (boot_cpu_has(X86_FEATURE_PSE)) {
                 set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
                 pfn += PTRS_PER_PTE;
             } else {
arch/x86/xen/enlighten.c

@@ -1469,10 +1469,10 @@ static void xen_pvh_set_cr_flags(int cpu)
      * For BSP, PSE PGE are set in probe_page_size_mask(), for APs
      * set them here. For all, OSFXSR OSXMMEXCPT are set in fpu__init_cpu().
     */
-    if (cpu_has_pse)
+    if (boot_cpu_has(X86_FEATURE_PSE))
         cr4_set_bits_and_update_boot(X86_CR4_PSE);
 
-    if (cpu_has_pge)
+    if (boot_cpu_has(X86_FEATURE_PGE))
         cr4_set_bits_and_update_boot(X86_CR4_PGE);
 }
crypto/asymmetric_keys/pkcs7_trust.c

@@ -178,6 +178,8 @@ int pkcs7_validate_trust(struct pkcs7_message *pkcs7,
     int cached_ret = -ENOKEY;
     int ret;
 
+    *_trusted = false;
+
     for (p = pkcs7->certs; p; p = p->next)
         p->seen = false;
 
drivers/gpu/drm/drm_cache.c

@@ -72,7 +72,7 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages)
 {
 #if defined(CONFIG_X86)
-    if (cpu_has_clflush) {
+    if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
         drm_cache_flush_clflush(pages, num_pages);
         return;
     }

@@ -105,7 +105,7 @@ void
 drm_clflush_sg(struct sg_table *st)
 {
 #if defined(CONFIG_X86)
-    if (cpu_has_clflush) {
+    if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
         struct sg_page_iter sg_iter;
 
         mb();

@@ -129,7 +129,7 @@ void
 drm_clflush_virt_range(void *addr, unsigned long length)
 {
 #if defined(CONFIG_X86)
-    if (cpu_has_clflush) {
+    if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
         const int size = boot_cpu_data.x86_clflush_size;
         void *end = addr + length;
         addr = (void *)(((unsigned long)addr) & -size);
drivers/gpu/drm/i915/i915_gem_execbuffer.c

@@ -488,7 +488,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
         ret = relocate_entry_cpu(obj, reloc, target_offset);
     else if (obj->map_and_fenceable)
         ret = relocate_entry_gtt(obj, reloc, target_offset);
-    else if (cpu_has_clflush)
+    else if (static_cpu_has(X86_FEATURE_CLFLUSH))
         ret = relocate_entry_clflush(obj, reloc, target_offset);
     else {
         WARN_ONCE(1, "Impossible case in relocation handling\n");
drivers/hwmon/max1111.c

@@ -85,6 +85,9 @@ static struct max1111_data *the_max1111;
 int max1111_read_channel(int channel)
 {
+    if (!the_max1111 || !the_max1111->spi)
+        return -ENODEV;
+
     return max1111_read(&the_max1111->spi->dev, channel);
 }
 EXPORT_SYMBOL(max1111_read_channel);

@@ -258,6 +261,9 @@ static int max1111_remove(struct spi_device *spi)
 {
     struct max1111_data *data = spi_get_drvdata(spi);
 
+#ifdef CONFIG_SHARPSL_PM
+    the_max1111 = NULL;
+#endif
     hwmon_device_unregister(data->hwmon_dev);
     sysfs_remove_group(&spi->dev.kobj, &max1110_attr_group);
     sysfs_remove_group(&spi->dev.kobj, &max1111_attr_group);
drivers/lguest/x86/core.c

@@ -599,7 +599,7 @@ void __init lguest_arch_host_init(void)
      * doing this.
      */
     get_online_cpus();
-    if (cpu_has_pge) { /* We have a broader idea of "global". */
+    if (boot_cpu_has(X86_FEATURE_PGE)) { /* We have a broader idea of "global". */
         /* Remember that this was originally set (for cleanup). */
         cpu_had_pge = 1;
         /*
drivers/staging/unisys/visorbus/visorchipset.c

@@ -2425,7 +2425,7 @@ static __init uint32_t visorutil_spar_detect(void)
 {
     unsigned int eax, ebx, ecx, edx;
 
-    if (cpu_has_hypervisor) {
+    if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
         /* check the ID */
         cpuid(UNISYS_SPAR_LEAF_ID, &eax, &ebx, &ecx, &edx);
         return  (ebx == UNISYS_SPAR_ID_EBX) &&
fs/dlm/config.c

@@ -343,13 +343,12 @@ static struct config_group *make_cluster(struct config_group *g,
     struct dlm_cluster *cl = NULL;
     struct dlm_spaces *sps = NULL;
     struct dlm_comms *cms = NULL;
-    void *gps = NULL;
 
     cl = kzalloc(sizeof(struct dlm_cluster), GFP_NOFS);
     sps = kzalloc(sizeof(struct dlm_spaces), GFP_NOFS);
     cms = kzalloc(sizeof(struct dlm_comms), GFP_NOFS);
 
-    if (!cl || !gps || !sps || !cms)
+    if (!cl || !sps || !cms)
         goto fail;
 
     config_group_init_type_name(&cl->group, name, &cluster_type);