Commit b21d1ff2 authored by Olof Johansson, committed by Linus Torvalds

[PATCH] PPC/PPC64: Abstract cpu_feature checks.

Abstract most manual mask checks of cpu_features with cpu_has_feature()
Signed-off-by: Olof Johansson <olof@austin.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 1eeae015
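
The change is mechanical: every open-coded test of cur_cpu_spec->cpu_features (cur_cpu_spec[0]-> on ppc32) becomes a call to a new cpu_has_feature() inline, added per architecture in the cputable.h hunks near the end of this diff. A minimal sketch of the pattern, using names from the patch itself; the before/after lines are illustrative rather than one specific hunk:

        /* ppc32 variant; the ppc64 one is identical except that
         * cur_cpu_spec is a plain pointer and the types are unsigned long. */
        static inline unsigned int cpu_has_feature(unsigned int feature)
        {
                return cur_cpu_spec[0]->cpu_features & feature;
        }

        /* Before: open-coded mask test. Note the precedence trap that bit a
         * few call sites below ('!' binds tighter than '&', so a test like
         * the following is effectively always false):
         *
         *        if (!cur_cpu_spec->cpu_features & CPU_FTR_SMT)
         *
         * After: the helper keeps negation unambiguous:
         *
         *        if (!cpu_has_feature(CPU_FTR_SMT))
         */

Besides reading better, the helper quietly fixes that precedence bug at the call sites in ppc_htab_write(), smt_setup() and setup_smt_snooze_delay() below.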
@@ -108,7 +108,7 @@ static int ppc_htab_show(struct seq_file *m, void *v)
         PTE *ptr;
 #endif /* CONFIG_PPC_STD_MMU */
-        if (cur_cpu_spec[0]->cpu_features & CPU_FTR_604_PERF_MON) {
+        if (cpu_has_feature(CPU_FTR_604_PERF_MON)) {
                 mmcr0 = mfspr(SPRN_MMCR0);
                 pmc1 = mfspr(SPRN_PMC1);
                 pmc2 = mfspr(SPRN_PMC2);
@@ -209,7 +209,7 @@ static ssize_t ppc_htab_write(struct file * file, const char __user * ubuffer,
         if ( !strncmp( buffer, "reset", 5) )
         {
-                if (cur_cpu_spec[0]->cpu_features & CPU_FTR_604_PERF_MON) {
+                if (cpu_has_feature(CPU_FTR_604_PERF_MON)) {
                         /* reset PMC1 and PMC2 */
                         mtspr(SPRN_PMC1, 0);
                         mtspr(SPRN_PMC2, 0);
@@ -221,7 +221,7 @@ static ssize_t ppc_htab_write(struct file * file, const char __user * ubuffer,
         }
         /* Everything below here requires the performance monitor feature. */
-        if ( !cur_cpu_spec[0]->cpu_features & CPU_FTR_604_PERF_MON )
+        if (!cpu_has_feature(CPU_FTR_604_PERF_MON))
                 return count;
         /* turn off performance monitoring */
@@ -339,7 +339,7 @@ int proc_dol2crvec(ctl_table *table, int write, struct file *filp,
                 "0.5", "1.0", "(reserved2)", "(reserved3)"
         };
-        if (!(cur_cpu_spec[0]->cpu_features & CPU_FTR_L2CR))
+        if (!cpu_has_feature(CPU_FTR_L2CR))
                 return -EFAULT;
         if ( /*!table->maxlen ||*/ (*ppos && !write)) {
...
@@ -619,7 +619,7 @@ machine_init(unsigned long r3, unsigned long r4, unsigned long r5,
 /* Checks "l2cr=xxxx" command-line option */
 int __init ppc_setup_l2cr(char *str)
 {
-        if (cur_cpu_spec[0]->cpu_features & CPU_FTR_L2CR) {
+        if (cpu_has_feature(CPU_FTR_L2CR)) {
                 unsigned long val = simple_strtoul(str, NULL, 0);
                 printk(KERN_INFO "l2cr set to %lx\n", val);
                 _set_L2CR(0);  /* force invalidate by disable cache */
@@ -720,7 +720,7 @@ void __init setup_arch(char **cmdline_p)
          * Systems with OF can look in the properties on the cpu node(s)
          * for a possibly more accurate value.
          */
-        if (cur_cpu_spec[0]->cpu_features & CPU_FTR_SPLIT_ID_CACHE) {
+        if (cpu_has_feature(CPU_FTR_SPLIT_ID_CACHE)) {
                 dcache_bsize = cur_cpu_spec[0]->dcache_bsize;
                 icache_bsize = cur_cpu_spec[0]->icache_bsize;
                 ucache_bsize = 0;
...
@@ -223,7 +223,7 @@ int __init TAU_init(void)
         /* We assume in SMP that if one CPU has TAU support, they
          * all have it --BenH
          */
-        if (!(cur_cpu_spec[0]->cpu_features & CPU_FTR_TAU)) {
+        if (!cpu_has_feature(CPU_FTR_TAU)) {
                 printk("Thermal assist unit not available\n");
                 tau_initialized = 0;
                 return 1;
...
@@ -75,7 +75,7 @@ static inline void flush_HPTE(unsigned context, unsigned long va,
                               unsigned long pdval)
 {
         if ((Hash != 0) &&
-            (cur_cpu_spec[0]->cpu_features & CPU_FTR_HPTE_TABLE))
+            cpu_has_feature(CPU_FTR_HPTE_TABLE))
                 flush_hash_pages(0, va, pdval, 1);
         else
                 _tlbie(va);
...
@@ -138,7 +138,7 @@ void __init setbat(int index, unsigned long virt, unsigned long phys,
         union ubat *bat = BATS[index];
         if (((flags & _PAGE_NO_CACHE) == 0) &&
-            (cur_cpu_spec[0]->cpu_features & CPU_FTR_NEED_COHERENT))
+            cpu_has_feature(CPU_FTR_NEED_COHERENT))
                 flags |= _PAGE_COHERENT;
         bl = (size >> 17) - 1;
@@ -191,7 +191,7 @@ void __init MMU_init_hw(void)
         extern unsigned int hash_page[];
         extern unsigned int flush_hash_patch_A[], flush_hash_patch_B[];
-        if ((cur_cpu_spec[0]->cpu_features & CPU_FTR_HPTE_TABLE) == 0) {
+        if (!cpu_has_feature(CPU_FTR_HPTE_TABLE)) {
                 /*
                  * Put a blr (procedure return) instruction at the
                  * start of hash_page, since we can still get DSI
...
@@ -230,7 +230,7 @@ static int __pmac pmu_set_cpu_speed(int low_speed)
         enable_kernel_fp();
 #ifdef CONFIG_ALTIVEC
-        if (cur_cpu_spec[0]->cpu_features & CPU_FTR_ALTIVEC)
+        if (cpu_has_feature(CPU_FTR_ALTIVEC))
                 enable_kernel_altivec();
 #endif /* CONFIG_ALTIVEC */
...
@@ -274,7 +274,7 @@ pmac_setup_arch(void)
         pmac_find_bridges();
         /* Checks "l2cr-value" property in the registry */
-        if (cur_cpu_spec[0]->cpu_features & CPU_FTR_L2CR) {
+        if (cpu_has_feature(CPU_FTR_L2CR)) {
                 struct device_node *np = find_devices("cpus");
                 if (np == 0)
                         np = find_type_devices("cpu");
...
@@ -119,7 +119,7 @@ static volatile int sec_tb_reset = 0;
 static void __init core99_init_caches(int cpu)
 {
-        if (!(cur_cpu_spec[0]->cpu_features & CPU_FTR_L2CR))
+        if (!cpu_has_feature(CPU_FTR_L2CR))
                 return;
         if (cpu == 0) {
@@ -132,7 +132,7 @@ static void __init core99_init_caches(int cpu)
                 printk("CPU%d: L2CR set to %lx\n", cpu, core99_l2_cache);
         }
-        if (!(cur_cpu_spec[0]->cpu_features & CPU_FTR_L3CR))
+        if (!cpu_has_feature(CPU_FTR_L3CR))
                 return;
         if (cpu == 0){
...
@@ -319,10 +319,10 @@ sandpoint_setup_arch(void)
          * We will do this now with good known values. Future versions
          * of DINK32 are supposed to get this correct.
          */
-        if (cur_cpu_spec[0]->cpu_features & CPU_FTR_SPEC7450)
+        if (cpu_has_feature(CPU_FTR_SPEC7450))
                 /* 745x is different. We only want to pass along enable. */
                 _set_L2CR(L2CR_L2E);
-        else if (cur_cpu_spec[0]->cpu_features & CPU_FTR_L2CR)
+        else if (cpu_has_feature(CPU_FTR_L2CR))
                 /* All modules have 1MB of L2. We also assume that an
                  * L2 divisor of 3 will work.
                  */
@@ -330,7 +330,7 @@ sandpoint_setup_arch(void)
                            | L2CR_L2RAM_PIPE | L2CR_L2OH_1_0 | L2CR_L2DF);
 #if 0
         /* Untested right now. */
-        if (cur_cpu_spec[0]->cpu_features & CPU_FTR_L3CR) {
+        if (cpu_has_feature(CPU_FTR_L3CR)) {
                 /* Magic value. */
                 _set_L3CR(0x8f032000);
         }
...
@@ -238,7 +238,7 @@ fix_alignment(struct pt_regs *regs)
         dsisr = regs->dsisr;
-        if (cur_cpu_spec->cpu_features & CPU_FTR_NODSISRALIGN) {
+        if (cpu_has_feature(CPU_FTR_NODSISRALIGN)) {
                 unsigned int real_instr;
                 if (__get_user(real_instr, (unsigned int __user *)regs->nip))
                         return 0;
...
@@ -267,7 +267,7 @@ unsigned long iSeries_process_mainstore_vpd(struct MemoryBlock *mb_array,
         unsigned long i;
         unsigned long mem_blocks = 0;
-        if (cur_cpu_spec->cpu_features & CPU_FTR_SLB)
+        if (cpu_has_feature(CPU_FTR_SLB))
                 mem_blocks = iSeries_process_Regatta_mainstore_vpd(mb_array,
                                 max_entries);
         else
...
@@ -505,7 +505,7 @@ void pSeries_lpar_flush_hash_range(unsigned long context, unsigned long number,
         int i;
         unsigned long flags = 0;
         struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
-        int lock_tlbie = !(cur_cpu_spec->cpu_features & CPU_FTR_LOCKLESS_TLBIE);
+        int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);
         if (lock_tlbie)
                 spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);
...
@@ -388,12 +388,12 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
         kregs = (struct pt_regs *) sp;
         sp -= STACK_FRAME_OVERHEAD;
         p->thread.ksp = sp;
-        if (cur_cpu_spec->cpu_features & CPU_FTR_SLB) {
+        if (cpu_has_feature(CPU_FTR_SLB)) {
                 unsigned long sp_vsid = get_kernel_vsid(sp);
                 sp_vsid <<= SLB_VSID_SHIFT;
                 sp_vsid |= SLB_VSID_KERNEL;
-                if (cur_cpu_spec->cpu_features & CPU_FTR_16M_PAGE)
+                if (cpu_has_feature(CPU_FTR_16M_PAGE))
                         sp_vsid |= SLB_VSID_L;
                 p->thread.ksp_vsid = sp_vsid;
...
@@ -315,7 +315,7 @@ static void __init setup_cpu_maps(void)
                 maxcpus = ireg[num_addr_cell + num_size_cell];
                 /* Double maxcpus for processors which have SMT capability */
-                if (cur_cpu_spec->cpu_features & CPU_FTR_SMT)
+                if (cpu_has_feature(CPU_FTR_SMT))
                         maxcpus *= 2;
                 if (maxcpus > NR_CPUS) {
@@ -339,7 +339,7 @@ static void __init setup_cpu_maps(void)
          */
         for_each_cpu(cpu) {
                 cpu_set(cpu, cpu_sibling_map[cpu]);
-                if (cur_cpu_spec->cpu_features & CPU_FTR_SMT)
+                if (cpu_has_feature(CPU_FTR_SMT))
                         cpu_set(cpu ^ 0x1, cpu_sibling_map[cpu]);
         }
@@ -767,7 +767,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
                 seq_printf(m, "unknown (%08x)", pvr);
 #ifdef CONFIG_ALTIVEC
-        if (cur_cpu_spec->cpu_features & CPU_FTR_ALTIVEC)
+        if (cpu_has_feature(CPU_FTR_ALTIVEC))
                 seq_printf(m, ", altivec supported");
 #endif /* CONFIG_ALTIVEC */
...
@@ -416,7 +416,7 @@ int __devinit __cpu_up(unsigned int cpu)
         paca[cpu].default_decr = tb_ticks_per_jiffy / decr_overclock;
-        if (!(cur_cpu_spec->cpu_features & CPU_FTR_SLB)) {
+        if (!cpu_has_feature(CPU_FTR_SLB)) {
                 void *tmp;
                 /* maximum of 48 CPUs on machines with a segment table */
...
@@ -63,7 +63,7 @@ static int __init smt_setup(void)
         unsigned int *val;
         unsigned int cpu;
-        if (!cur_cpu_spec->cpu_features & CPU_FTR_SMT)
+        if (!cpu_has_feature(CPU_FTR_SMT))
                 return 1;
         options = find_path_device("/options");
@@ -86,7 +86,7 @@ static int __init setup_smt_snooze_delay(char *str)
         unsigned int cpu;
         int snooze;
-        if (!cur_cpu_spec->cpu_features & CPU_FTR_SMT)
+        if (!cpu_has_feature(CPU_FTR_SMT))
                 return 1;
         smt_snooze_cmdline = 1;
@@ -167,7 +167,7 @@ void ppc64_enable_pmcs(void)
          * On SMT machines we have to set the run latch in the ctrl register
          * in order to make PMC6 spin.
          */
-        if (cur_cpu_spec->cpu_features & CPU_FTR_SMT) {
+        if (cpu_has_feature(CPU_FTR_SMT)) {
                 ctrl = mfspr(CTRLF);
                 ctrl |= RUNLATCH;
                 mtspr(CTRLT, ctrl);
@@ -266,7 +266,7 @@ static void register_cpu_online(unsigned int cpu)
         struct sys_device *s = &c->sysdev;
 #ifndef CONFIG_PPC_ISERIES
-        if (cur_cpu_spec->cpu_features & CPU_FTR_SMT)
+        if (cpu_has_feature(CPU_FTR_SMT))
                 sysdev_create_file(s, &attr_smt_snooze_delay);
 #endif
@@ -275,7 +275,7 @@ static void register_cpu_online(unsigned int cpu)
         sysdev_create_file(s, &attr_mmcr0);
         sysdev_create_file(s, &attr_mmcr1);
-        if (cur_cpu_spec->cpu_features & CPU_FTR_MMCRA)
+        if (cpu_has_feature(CPU_FTR_MMCRA))
                 sysdev_create_file(s, &attr_mmcra);
         sysdev_create_file(s, &attr_pmc1);
@@ -285,12 +285,12 @@ static void register_cpu_online(unsigned int cpu)
         sysdev_create_file(s, &attr_pmc5);
         sysdev_create_file(s, &attr_pmc6);
-        if (cur_cpu_spec->cpu_features & CPU_FTR_PMC8) {
+        if (cpu_has_feature(CPU_FTR_PMC8)) {
                 sysdev_create_file(s, &attr_pmc7);
                 sysdev_create_file(s, &attr_pmc8);
         }
-        if (cur_cpu_spec->cpu_features & CPU_FTR_SMT)
+        if (cpu_has_feature(CPU_FTR_SMT))
                 sysdev_create_file(s, &attr_purr);
 }
@@ -303,7 +303,7 @@ static void unregister_cpu_online(unsigned int cpu)
         BUG_ON(c->no_control);
 #ifndef CONFIG_PPC_ISERIES
-        if (cur_cpu_spec->cpu_features & CPU_FTR_SMT)
+        if (cpu_has_feature(CPU_FTR_SMT))
                 sysdev_remove_file(s, &attr_smt_snooze_delay);
 #endif
@@ -312,7 +312,7 @@ static void unregister_cpu_online(unsigned int cpu)
         sysdev_remove_file(s, &attr_mmcr0);
         sysdev_remove_file(s, &attr_mmcr1);
-        if (cur_cpu_spec->cpu_features & CPU_FTR_MMCRA)
+        if (cpu_has_feature(CPU_FTR_MMCRA))
                 sysdev_remove_file(s, &attr_mmcra);
         sysdev_remove_file(s, &attr_pmc1);
@@ -322,12 +322,12 @@ static void unregister_cpu_online(unsigned int cpu)
         sysdev_remove_file(s, &attr_pmc5);
         sysdev_remove_file(s, &attr_pmc6);
-        if (cur_cpu_spec->cpu_features & CPU_FTR_PMC8) {
+        if (cpu_has_feature(CPU_FTR_PMC8)) {
                 sysdev_remove_file(s, &attr_pmc7);
                 sysdev_remove_file(s, &attr_pmc8);
         }
-        if (cur_cpu_spec->cpu_features & CPU_FTR_SMT)
+        if (cpu_has_feature(CPU_FTR_SMT))
                 sysdev_remove_file(s, &attr_purr);
 }
 #endif /* CONFIG_HOTPLUG_CPU */
...
@@ -217,10 +217,10 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
         }
         /* Ensure it is out of the tlb too */
-        if ((cur_cpu_spec->cpu_features & CPU_FTR_TLBIEL) && !large && local) {
+        if (cpu_has_feature(CPU_FTR_TLBIEL) && !large && local) {
                 tlbiel(va);
         } else {
-                int lock_tlbie = !(cur_cpu_spec->cpu_features & CPU_FTR_LOCKLESS_TLBIE);
+                int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);
                 if (lock_tlbie)
                         spin_lock(&native_tlbie_lock);
@@ -245,7 +245,7 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea)
         unsigned long vsid, va, vpn, flags = 0;
         long slot;
         HPTE *hptep;
-        int lock_tlbie = !(cur_cpu_spec->cpu_features & CPU_FTR_LOCKLESS_TLBIE);
+        int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);
         vsid = get_kernel_vsid(ea);
         va = (vsid << 28) | (ea & 0x0fffffff);
@@ -273,7 +273,7 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va,
         Hpte_dword0 dw0;
         unsigned long avpn = va >> 23;
         unsigned long flags;
-        int lock_tlbie = !(cur_cpu_spec->cpu_features & CPU_FTR_LOCKLESS_TLBIE);
+        int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);
         if (large)
                 avpn &= ~0x1UL;
@@ -292,7 +292,7 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va,
         }
         /* Invalidate the tlb */
-        if ((cur_cpu_spec->cpu_features & CPU_FTR_TLBIEL) && !large && local) {
+        if (cpu_has_feature(CPU_FTR_TLBIEL) && !large && local) {
                 tlbiel(va);
         } else {
                 if (lock_tlbie)
@@ -360,7 +360,7 @@ static void native_flush_hash_range(unsigned long context,
                 j++;
         }
-        if ((cur_cpu_spec->cpu_features & CPU_FTR_TLBIEL) && !large && local) {
+        if (cpu_has_feature(CPU_FTR_TLBIEL) && !large && local) {
                 asm volatile("ptesync":::"memory");
                 for (i = 0; i < j; i++)
@@ -368,7 +368,7 @@ static void native_flush_hash_range(unsigned long context,
                 asm volatile("ptesync":::"memory");
         } else {
-                int lock_tlbie = !(cur_cpu_spec->cpu_features & CPU_FTR_LOCKLESS_TLBIE);
+                int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);
                 if (lock_tlbie)
                         spin_lock(&native_tlbie_lock);
...
@@ -190,7 +190,7 @@ void __init htab_initialize(void)
          * _NOT_ map it to avoid cache paradoxes as it's remapped non
          * cacheable later on
          */
-        if (cur_cpu_spec->cpu_features & CPU_FTR_16M_PAGE)
+        if (cpu_has_feature(CPU_FTR_16M_PAGE))
                 use_largepages = 1;
         /* create bolted the linear mapping in the hash table */
...
@@ -709,7 +709,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
         if (len & ~HPAGE_MASK)
                 return -EINVAL;
-        if (!(cur_cpu_spec->cpu_features & CPU_FTR_16M_PAGE))
+        if (!cpu_has_feature(CPU_FTR_16M_PAGE))
                 return -EINVAL;
         if (test_thread_flag(TIF_32BIT)) {
...
@@ -752,7 +752,7 @@ void __init mem_init(void)
  */
 void flush_dcache_page(struct page *page)
 {
-        if (cur_cpu_spec->cpu_features & CPU_FTR_COHERENT_ICACHE)
+        if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
                 return;
         /* avoid an atomic op if possible */
         if (test_bit(PG_arch_1, &page->flags))
@@ -763,7 +763,7 @@ void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
 {
         clear_page(page);
-        if (cur_cpu_spec->cpu_features & CPU_FTR_COHERENT_ICACHE)
+        if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
                 return;
         /*
          * We shouldnt have to do this, but some versions of glibc
@@ -796,7 +796,7 @@ void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
                 return;
 #endif
-        if (cur_cpu_spec->cpu_features & CPU_FTR_COHERENT_ICACHE)
+        if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
                 return;
         /* avoid an atomic op if possible */
@@ -832,8 +832,8 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long ea,
         unsigned long flags;
         /* handle i-cache coherency */
-        if (!(cur_cpu_spec->cpu_features & CPU_FTR_COHERENT_ICACHE) &&
-            !(cur_cpu_spec->cpu_features & CPU_FTR_NOEXECUTE)) {
+        if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
+            !cpu_has_feature(CPU_FTR_NOEXECUTE)) {
                 unsigned long pfn = pte_pfn(pte);
                 if (pfn_valid(pfn)) {
                         struct page *page = pfn_to_page(pfn);
...
@@ -51,7 +51,7 @@ static void slb_flush_and_rebolt(void)
         WARN_ON(!irqs_disabled());
-        if (cur_cpu_spec->cpu_features & CPU_FTR_16M_PAGE)
+        if (cpu_has_feature(CPU_FTR_16M_PAGE))
                 ksp_flags |= SLB_VSID_L;
         ksp_esid_data = mk_esid_data(get_paca()->kstack, 2);
@@ -139,7 +139,7 @@ void slb_initialize(void)
         unsigned long flags = SLB_VSID_KERNEL;
         /* Invalidate the entire SLB (even slot 0) & all the ERATS */
-        if (cur_cpu_spec->cpu_features & CPU_FTR_16M_PAGE)
+        if (cpu_has_feature(CPU_FTR_16M_PAGE))
                 flags |= SLB_VSID_L;
         asm volatile("isync":::"memory");
...
@@ -227,7 +227,7 @@ void stab_initialize(unsigned long stab)
 {
         unsigned long vsid = get_kernel_vsid(KERNELBASE);
-        if (cur_cpu_spec->cpu_features & CPU_FTR_SLB) {
+        if (cpu_has_feature(CPU_FTR_SLB)) {
                 slb_initialize();
         } else {
                 asm volatile("isync; slbia; isync":::"memory");
...
@@ -54,7 +54,7 @@ static void power4_reg_setup(struct op_counter_config *ctr,
          *
          * It has been verified to work on POWER5 so we enable it there.
          */
-        if (cur_cpu_spec->cpu_features & CPU_FTR_MMCRA_SIHV)
+        if (cpu_has_feature(CPU_FTR_MMCRA_SIHV))
                 mmcra_has_sihv = 1;
         /*
...
@@ -114,7 +114,7 @@ static void rs64_cpu_setup(void *unused)
         /* reset MMCR1, MMCRA */
         mtspr(SPRN_MMCR1, 0);
-        if (cur_cpu_spec->cpu_features & CPU_FTR_MMCRA)
+        if (cpu_has_feature(CPU_FTR_MMCRA))
                 mtspr(SPRN_MMCRA, 0);
         mmcr0 |= MMCR0_FCM1|MMCR0_PMXE|MMCR0_FCECE;
...
@@ -725,7 +725,7 @@ static void insert_cpu_bpts(void)
 {
         if (dabr.enabled)
                 set_controlled_dabr(dabr.address | (dabr.enabled & 7));
-        if (iabr && (cur_cpu_spec->cpu_features & CPU_FTR_IABR))
+        if (iabr && cpu_has_feature(CPU_FTR_IABR))
                 set_iabr(iabr->address
                          | (iabr->enabled & (BP_IABR|BP_IABR_TE)));
 }
@@ -753,7 +753,7 @@ static void remove_bpts(void)
 static void remove_cpu_bpts(void)
 {
         set_controlled_dabr(0);
-        if ((cur_cpu_spec->cpu_features & CPU_FTR_IABR))
+        if (cpu_has_feature(CPU_FTR_IABR))
                 set_iabr(0);
 }
@@ -1100,7 +1100,7 @@ bpt_cmds(void)
                 break;
         case 'i':       /* bi - hardware instr breakpoint */
-                if (!(cur_cpu_spec->cpu_features & CPU_FTR_IABR)) {
+                if (!cpu_has_feature(CPU_FTR_IABR)) {
                         printf("Hardware instruction breakpoint "
                                "not supported on this cpu\n");
                         break;
@@ -2498,7 +2498,7 @@ void xmon_init(void)
 void dump_segments(void)
 {
-        if (cur_cpu_spec->cpu_features & CPU_FTR_SLB)
+        if (cpu_has_feature(CPU_FTR_SLB))
                 dump_slb();
         else
                 dump_stab();
...
@@ -2389,7 +2389,7 @@ pmac_suspend_devices(void)
         enable_kernel_fp();
 #ifdef CONFIG_ALTIVEC
-        if (cur_cpu_spec[0]->cpu_features & CPU_FTR_ALTIVEC)
+        if (cpu_has_feature(CPU_FTR_ALTIVEC))
                 enable_kernel_altivec();
 #endif /* CONFIG_ALTIVEC */
...
@@ -108,7 +108,7 @@ int raid6_have_altivec(void);
 int raid6_have_altivec(void)
 {
         /* This assumes either all CPUs have Altivec or none does */
-        return cur_cpu_spec->cpu_features & CPU_FTR_ALTIVEC;
+        return cpu_has_feature(CPU_FTR_ALTIVEC);
 }
 #endif
...
@@ -61,6 +61,11 @@ struct cpu_spec {
 extern struct cpu_spec cpu_specs[];
 extern struct cpu_spec *cur_cpu_spec[];
+
+static inline unsigned int cpu_has_feature(unsigned int feature)
+{
+        return cur_cpu_spec[0]->cpu_features & feature;
+}
 #endif /* __ASSEMBLY__ */
 /* CPU kernel features */
...
@@ -40,7 +40,7 @@ extern void __flush_dcache_icache(void *page_va);
 static inline void flush_icache_range(unsigned long start, unsigned long stop)
 {
-        if (!(cur_cpu_spec->cpu_features & CPU_FTR_COHERENT_ICACHE))
+        if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
                 __flush_icache_range(start, stop);
 }
...
@@ -66,6 +66,11 @@ struct cpu_spec {
 extern struct cpu_spec cpu_specs[];
 extern struct cpu_spec *cur_cpu_spec;
+
+static inline unsigned long cpu_has_feature(unsigned long feature)
+{
+        return cur_cpu_spec->cpu_features & feature;
+}
 /* firmware feature bitmask values */
 #define FIRMWARE_MAX_FEATURES 63
...
@@ -59,11 +59,11 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                 return;
 #ifdef CONFIG_ALTIVEC
-        if (cur_cpu_spec->cpu_features & CPU_FTR_ALTIVEC)
+        if (cpu_has_feature(CPU_FTR_ALTIVEC))
                 asm volatile ("dssall");
 #endif /* CONFIG_ALTIVEC */
-        if (cur_cpu_spec->cpu_features & CPU_FTR_SLB)
+        if (cpu_has_feature(CPU_FTR_SLB))
                 switch_slb(tsk, next);
         else
                 switch_stab(tsk, next);
...
@@ -67,7 +67,7 @@
 #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
 #define in_hugepage_area(context, addr) \
-        ((cur_cpu_spec->cpu_features & CPU_FTR_16M_PAGE) && \
+        (cpu_has_feature(CPU_FTR_16M_PAGE) && \
         ( (((addr) >= TASK_HPAGE_BASE) && ((addr) < TASK_HPAGE_END)) || \
           ( ((addr) < 0x100000000L) && \
             ((1 << GET_ESID(addr)) & (context).htlb_segs) ) ) )
...