Commit 499f8f84 authored by Andreas Herrmann's avatar Andreas Herrmann Committed by Ingo Molnar

x86: rename pat_wc_enabled to pat_enabled

BTW, what does pat_wc_enabled stand for? Does it mean
"write-combining"?

Currently it is used to globally switch on or off PAT support.
Thus I renamed it to pat_enabled.
I think this increases readability (and hope that I didn't miss
something).
Signed-off-by: Andreas Herrmann <andreas.herrmann3@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent cd7a4e93
...@@ -261,7 +261,7 @@ void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size) ...@@ -261,7 +261,7 @@ void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{ {
/* /*
* Ideally, this should be: * Ideally, this should be:
* pat_wc_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS; * pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
* *
* Till we fix all X drivers to use ioremap_wc(), we will use * Till we fix all X drivers to use ioremap_wc(), we will use
* UC MINUS. * UC MINUS.
...@@ -285,7 +285,7 @@ EXPORT_SYMBOL(ioremap_nocache); ...@@ -285,7 +285,7 @@ EXPORT_SYMBOL(ioremap_nocache);
*/ */
void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size) void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
{ {
if (pat_wc_enabled) if (pat_enabled)
return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC, return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
__builtin_return_address(0)); __builtin_return_address(0));
else else
......
...@@ -805,7 +805,7 @@ int _set_memory_wc(unsigned long addr, int numpages) ...@@ -805,7 +805,7 @@ int _set_memory_wc(unsigned long addr, int numpages)
int set_memory_wc(unsigned long addr, int numpages) int set_memory_wc(unsigned long addr, int numpages)
{ {
if (!pat_wc_enabled) if (!pat_enabled)
return set_memory_uc(addr, numpages); return set_memory_uc(addr, numpages);
if (reserve_memtype(addr, addr + numpages * PAGE_SIZE, if (reserve_memtype(addr, addr + numpages * PAGE_SIZE,
......
...@@ -26,11 +26,11 @@ ...@@ -26,11 +26,11 @@
#include <asm/io.h> #include <asm/io.h>
#ifdef CONFIG_X86_PAT #ifdef CONFIG_X86_PAT
int __read_mostly pat_wc_enabled = 1; int __read_mostly pat_enabled = 1;
void __cpuinit pat_disable(char *reason) void __cpuinit pat_disable(char *reason)
{ {
pat_wc_enabled = 0; pat_enabled = 0;
printk(KERN_INFO "%s\n", reason); printk(KERN_INFO "%s\n", reason);
} }
...@@ -72,7 +72,7 @@ void pat_init(void) ...@@ -72,7 +72,7 @@ void pat_init(void)
{ {
u64 pat; u64 pat;
if (!pat_wc_enabled) if (!pat_enabled)
return; return;
/* Paranoia check. */ /* Paranoia check. */
...@@ -225,8 +225,8 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type, ...@@ -225,8 +225,8 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
unsigned long actual_type; unsigned long actual_type;
int err = 0; int err = 0;
/* Only track when pat_wc_enabled */ /* Only track when pat_enabled */
if (!pat_wc_enabled) { if (!pat_enabled) {
/* This is identical to page table setting without PAT */ /* This is identical to page table setting without PAT */
if (ret_type) { if (ret_type) {
if (req_type == -1) { if (req_type == -1) {
...@@ -440,8 +440,8 @@ int free_memtype(u64 start, u64 end) ...@@ -440,8 +440,8 @@ int free_memtype(u64 start, u64 end)
struct memtype *ml; struct memtype *ml;
int err = -EINVAL; int err = -EINVAL;
/* Only track when pat_wc_enabled */ /* Only track when pat_enabled */
if (!pat_wc_enabled) { if (!pat_enabled) {
return 0; return 0;
} }
...@@ -535,7 +535,7 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, ...@@ -535,7 +535,7 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
* caching for the high addresses through the KEN pin, but * caching for the high addresses through the KEN pin, but
* we maintain the tradition of paranoia in this code. * we maintain the tradition of paranoia in this code.
*/ */
if (!pat_wc_enabled && if (!pat_enabled &&
!(boot_cpu_has(X86_FEATURE_MTRR) || !(boot_cpu_has(X86_FEATURE_MTRR) ||
boot_cpu_has(X86_FEATURE_K6_MTRR) || boot_cpu_has(X86_FEATURE_K6_MTRR) ||
boot_cpu_has(X86_FEATURE_CYRIX_ARR) || boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
......
...@@ -299,9 +299,9 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, ...@@ -299,9 +299,9 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
return -EINVAL; return -EINVAL;
prot = pgprot_val(vma->vm_page_prot); prot = pgprot_val(vma->vm_page_prot);
if (pat_wc_enabled && write_combine) if (pat_enabled && write_combine)
prot |= _PAGE_CACHE_WC; prot |= _PAGE_CACHE_WC;
else if (pat_wc_enabled || boot_cpu_data.x86 > 3) else if (pat_enabled || boot_cpu_data.x86 > 3)
/* /*
* ioremap() and ioremap_nocache() defaults to UC MINUS for now. * ioremap() and ioremap_nocache() defaults to UC MINUS for now.
* To avoid attribute conflicts, request UC MINUS here * To avoid attribute conflicts, request UC MINUS here
......
...@@ -4,10 +4,10 @@ ...@@ -4,10 +4,10 @@
#include <linux/types.h> #include <linux/types.h>
#ifdef CONFIG_X86_PAT #ifdef CONFIG_X86_PAT
extern int pat_wc_enabled; extern int pat_enabled;
extern void validate_pat_support(struct cpuinfo_x86 *c); extern void validate_pat_support(struct cpuinfo_x86 *c);
#else #else
static const int pat_wc_enabled; static const int pat_enabled;
static inline void validate_pat_support(struct cpuinfo_x86 *c) { } static inline void validate_pat_support(struct cpuinfo_x86 *c) { }
#endif #endif
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment