Commit 8b99cfb8 authored by David S. Miller

[SPARC64]: More sensible udelay implementation.

Take a page from the powerpc folks and just calculate the
delay factor directly.

Since frequency scaling chips use a system-tick register,
the value is going to be the same system-wide.
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 27a2ef38
...@@ -442,7 +442,6 @@ static int show_cpuinfo(struct seq_file *m, void *__unused) ...@@ -442,7 +442,6 @@ static int show_cpuinfo(struct seq_file *m, void *__unused)
"D$ parity tl1\t: %u\n" "D$ parity tl1\t: %u\n"
"I$ parity tl1\t: %u\n" "I$ parity tl1\t: %u\n"
#ifndef CONFIG_SMP #ifndef CONFIG_SMP
"Cpu0Bogo\t: %lu.%02lu\n"
"Cpu0ClkTck\t: %016lx\n" "Cpu0ClkTck\t: %016lx\n"
#endif #endif
, ,
...@@ -457,8 +456,6 @@ static int show_cpuinfo(struct seq_file *m, void *__unused) ...@@ -457,8 +456,6 @@ static int show_cpuinfo(struct seq_file *m, void *__unused)
dcache_parity_tl1_occurred, dcache_parity_tl1_occurred,
icache_parity_tl1_occurred icache_parity_tl1_occurred
#ifndef CONFIG_SMP #ifndef CONFIG_SMP
, cpu_data(0).udelay_val/(500000/HZ),
(cpu_data(0).udelay_val/(5000/HZ)) % 100,
cpu_data(0).clock_tick cpu_data(0).clock_tick
#endif #endif
); );
......
...@@ -49,9 +49,6 @@ extern void calibrate_delay(void); ...@@ -49,9 +49,6 @@ extern void calibrate_delay(void);
int sparc64_multi_core __read_mostly; int sparc64_multi_core __read_mostly;
/* Please don't make this stuff initdata!!! --DaveM */
unsigned char boot_cpu_id;
cpumask_t cpu_possible_map __read_mostly = CPU_MASK_NONE; cpumask_t cpu_possible_map __read_mostly = CPU_MASK_NONE;
cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE; cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly = cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly =
...@@ -82,10 +79,7 @@ void smp_bogo(struct seq_file *m) ...@@ -82,10 +79,7 @@ void smp_bogo(struct seq_file *m)
for_each_online_cpu(i) for_each_online_cpu(i)
seq_printf(m, seq_printf(m,
"Cpu%dBogo\t: %lu.%02lu\n"
"Cpu%dClkTck\t: %016lx\n", "Cpu%dClkTck\t: %016lx\n",
i, cpu_data(i).udelay_val / (500000/HZ),
(cpu_data(i).udelay_val / (5000/HZ)) % 100,
i, cpu_data(i).clock_tick); i, cpu_data(i).clock_tick);
} }
...@@ -112,8 +106,6 @@ void __devinit smp_callin(void) ...@@ -112,8 +106,6 @@ void __devinit smp_callin(void)
local_irq_enable(); local_irq_enable();
calibrate_delay();
cpu_data(cpuid).udelay_val = loops_per_jiffy;
callin_flag = 1; callin_flag = 1;
__asm__ __volatile__("membar #Sync\n\t" __asm__ __volatile__("membar #Sync\n\t"
"flush %%g6" : : : "memory"); "flush %%g6" : : : "memory");
...@@ -1231,11 +1223,6 @@ void smp_penguin_jailcell(int irq, struct pt_regs *regs) ...@@ -1231,11 +1223,6 @@ void smp_penguin_jailcell(int irq, struct pt_regs *regs)
preempt_enable(); preempt_enable();
} }
void __init smp_tick_init(void)
{
boot_cpu_id = hard_smp_processor_id();
}
/* /proc/profile writes can call this, don't __init it please. */ /* /proc/profile writes can call this, don't __init it please. */
int setup_profiling_timer(unsigned int multiplier) int setup_profiling_timer(unsigned int multiplier)
{ {
...@@ -1244,7 +1231,6 @@ int setup_profiling_timer(unsigned int multiplier) ...@@ -1244,7 +1231,6 @@ int setup_profiling_timer(unsigned int multiplier)
void __init smp_prepare_cpus(unsigned int max_cpus) void __init smp_prepare_cpus(unsigned int max_cpus)
{ {
cpu_data(boot_cpu_id).udelay_val = loops_per_jiffy;
} }
void __devinit smp_prepare_boot_cpu(void) void __devinit smp_prepare_boot_cpu(void)
...@@ -1323,16 +1309,6 @@ void __cpu_die(unsigned int cpu) ...@@ -1323,16 +1309,6 @@ void __cpu_die(unsigned int cpu)
void __init smp_cpus_done(unsigned int max_cpus) void __init smp_cpus_done(unsigned int max_cpus)
{ {
unsigned long bogosum = 0;
int i;
for_each_online_cpu(i)
bogosum += cpu_data(i).udelay_val;
printk("Total of %ld processors activated "
"(%lu.%02lu BogoMIPS).\n",
(long) num_online_cpus(),
bogosum/(500000/HZ),
(bogosum/(5000/HZ))%100);
} }
void smp_send_reschedule(int cpu) void smp_send_reschedule(int cpu)
......
/* $Id: sparc64_ksyms.c,v 1.121 2002/02/09 19:49:31 davem Exp $ /* arch/sparc64/kernel/sparc64_ksyms.c: Sparc64 specific ksyms support.
* arch/sparc64/kernel/sparc64_ksyms.c: Sparc64 specific ksyms support.
* *
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) * Copyright (C) 1996, 2007 David S. Miller (davem@davemloft.net)
* Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be) * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
* Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz) * Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz)
*/ */
...@@ -28,7 +27,6 @@ ...@@ -28,7 +27,6 @@
#include <net/compat.h> #include <net/compat.h>
#include <asm/oplib.h> #include <asm/oplib.h>
#include <asm/delay.h>
#include <asm/system.h> #include <asm/system.h>
#include <asm/auxio.h> #include <asm/auxio.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
...@@ -326,12 +324,6 @@ EXPORT_SYMBOL(memset); ...@@ -326,12 +324,6 @@ EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memmove); EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(strncmp); EXPORT_SYMBOL(strncmp);
/* Delay routines. */
EXPORT_SYMBOL(__udelay);
EXPORT_SYMBOL(__ndelay);
EXPORT_SYMBOL(__const_udelay);
EXPORT_SYMBOL(__delay);
void VISenter(void); void VISenter(void);
/* RAID code needs this */ /* RAID code needs this */
EXPORT_SYMBOL(VISenter); EXPORT_SYMBOL(VISenter);
......
...@@ -193,7 +193,6 @@ static ssize_t show_##NAME(struct sys_device *dev, char *buf) \ ...@@ -193,7 +193,6 @@ static ssize_t show_##NAME(struct sys_device *dev, char *buf) \
} }
SHOW_CPUDATA_ULONG_NAME(clock_tick, clock_tick); SHOW_CPUDATA_ULONG_NAME(clock_tick, clock_tick);
SHOW_CPUDATA_ULONG_NAME(udelay_val, udelay_val);
SHOW_CPUDATA_UINT_NAME(l1_dcache_size, dcache_size); SHOW_CPUDATA_UINT_NAME(l1_dcache_size, dcache_size);
SHOW_CPUDATA_UINT_NAME(l1_dcache_line_size, dcache_line_size); SHOW_CPUDATA_UINT_NAME(l1_dcache_line_size, dcache_line_size);
SHOW_CPUDATA_UINT_NAME(l1_icache_size, icache_size); SHOW_CPUDATA_UINT_NAME(l1_icache_size, icache_size);
...@@ -203,7 +202,6 @@ SHOW_CPUDATA_UINT_NAME(l2_cache_line_size, ecache_line_size); ...@@ -203,7 +202,6 @@ SHOW_CPUDATA_UINT_NAME(l2_cache_line_size, ecache_line_size);
static struct sysdev_attribute cpu_core_attrs[] = { static struct sysdev_attribute cpu_core_attrs[] = {
_SYSDEV_ATTR(clock_tick, 0444, show_clock_tick, NULL), _SYSDEV_ATTR(clock_tick, 0444, show_clock_tick, NULL),
_SYSDEV_ATTR(udelay_val, 0444, show_udelay_val, NULL),
_SYSDEV_ATTR(l1_dcache_size, 0444, show_l1_dcache_size, NULL), _SYSDEV_ATTR(l1_dcache_size, 0444, show_l1_dcache_size, NULL),
_SYSDEV_ATTR(l1_dcache_line_size, 0444, show_l1_dcache_line_size, NULL), _SYSDEV_ATTR(l1_dcache_line_size, 0444, show_l1_dcache_line_size, NULL),
_SYSDEV_ATTR(l1_icache_size, 0444, show_l1_icache_size, NULL), _SYSDEV_ATTR(l1_icache_size, 0444, show_l1_icache_size, NULL),
......
...@@ -849,9 +849,6 @@ static unsigned long sparc64_init_timers(void) ...@@ -849,9 +849,6 @@ static unsigned long sparc64_init_timers(void)
{ {
struct device_node *dp; struct device_node *dp;
unsigned long clock; unsigned long clock;
#ifdef CONFIG_SMP
extern void smp_tick_init(void);
#endif
dp = of_find_node_by_path("/"); dp = of_find_node_by_path("/");
if (tlb_type == spitfire) { if (tlb_type == spitfire) {
...@@ -874,10 +871,6 @@ static unsigned long sparc64_init_timers(void) ...@@ -874,10 +871,6 @@ static unsigned long sparc64_init_timers(void)
clock = of_getintprop_default(dp, "stick-frequency", 0); clock = of_getintprop_default(dp, "stick-frequency", 0);
} }
#ifdef CONFIG_SMP
smp_tick_init();
#endif
return clock; return clock;
} }
...@@ -1038,10 +1031,31 @@ static void __init setup_clockevent_multiplier(unsigned long hz) ...@@ -1038,10 +1031,31 @@ static void __init setup_clockevent_multiplier(unsigned long hz)
sparc64_clockevent.mult = mult; sparc64_clockevent.mult = mult;
} }
static unsigned long tb_ticks_per_usec __read_mostly;
/* Spin until the cpu tick counter has advanced by 'loops' ticks.
 * tick_ops->get_tick() reads the free-running system tick register,
 * which is synchronized system-wide on sparc64.
 */
void __delay(unsigned long loops)
{
	unsigned long start, elapsed;

	start = tick_ops->get_tick();
	do {
		elapsed = tick_ops->get_tick() - start;
	} while (elapsed < loops);
}
EXPORT_SYMBOL(__delay);
/* Delay for 'usecs' microseconds by converting to system ticks
 * (tb_ticks_per_usec is derived from the tick frequency at boot)
 * and busy-waiting in __delay().
 */
void udelay(unsigned long usecs)
{
	unsigned long ticks = usecs * tb_ticks_per_usec;

	__delay(ticks);
}
EXPORT_SYMBOL(udelay);
void __init time_init(void) void __init time_init(void)
{ {
unsigned long clock = sparc64_init_timers(); unsigned long clock = sparc64_init_timers();
tb_ticks_per_usec = clock / USEC_PER_SEC;
timer_ticks_per_nsec_quotient = timer_ticks_per_nsec_quotient =
clocksource_hz2mult(clock, SPARC64_NSEC_PER_CYC_SHIFT); clocksource_hz2mult(clock, SPARC64_NSEC_PER_CYC_SHIFT);
......
...@@ -14,6 +14,6 @@ lib-y := PeeCeeI.o copy_page.o clear_page.o strlen.o strncmp.o \ ...@@ -14,6 +14,6 @@ lib-y := PeeCeeI.o copy_page.o clear_page.o strlen.o strncmp.o \
NGmemcpy.o NGcopy_from_user.o NGcopy_to_user.o NGpatch.o \ NGmemcpy.o NGcopy_from_user.o NGcopy_to_user.o NGpatch.o \
NGpage.o NGbzero.o \ NGpage.o NGbzero.o \
copy_in_user.o user_fixup.o memmove.o \ copy_in_user.o user_fixup.o memmove.o \
mcount.o ipcsum.o rwsem.o xor.o delay.o mcount.o ipcsum.o rwsem.o xor.o
obj-y += iomap.o obj-y += iomap.o
/* delay.c: Delay loops for sparc64
*
* Copyright (C) 2004, 2006 David S. Miller <davem@davemloft.net>
*
* Based heavily upon x86 variant which is:
* Copyright (C) 1993 Linus Torvalds
* Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
*/
#include <linux/delay.h>
#include <asm/timer.h>
/* Busy-wait until the system tick counter has moved forward by
 * 'loops' ticks.  Unsigned subtraction keeps the comparison correct
 * even if the tick counter wraps between reads.
 */
void __delay(unsigned long loops)
{
	unsigned long start, elapsed;

	start = tick_ops->get_tick();
	do {
		elapsed = tick_ops->get_tick() - start;
	} while (elapsed < loops);
}
/* We used to multiply by HZ after shifting down by 32 bits
 * but that runs into problems for higher values of HZ and
 * slow cpus.
 *
 * Fixed-point conversion: scale the caller's value by this cpu's
 * calibrated udelay_val (loops per jiffy), folding HZ in as
 * (n * 4) * (udelay_val * (HZ / 4)) to avoid intermediate overflow,
 * then take the top 32 bits as the loop count.
 */
void __const_udelay(unsigned long n)
{
	unsigned long loops;

	loops = n * 4;
	loops *= cpu_data(raw_smp_processor_id()).udelay_val * (HZ / 4);
	loops >>= 32;

	/* +1 rounds up so we never delay less than requested. */
	__delay(loops + 1);
}
/* Microsecond delay: 0x10c7 is the 32.32 fixed-point representation
 * of 2**32 / 10**6, i.e. one microsecond in __const_udelay() units.
 */
void __udelay(unsigned long n)
{
	const unsigned long usecs_to_fixed = 0x10c7UL;

	__const_udelay(n * usecs_to_fixed);
}
/* Nanosecond delay: 0x5 approximates 2**32 / 10**9, i.e. one
 * nanosecond in __const_udelay() fixed-point units.
 */
void __ndelay(unsigned long n)
{
	const unsigned long nsecs_to_fixed = 0x5UL;

	__const_udelay(n * nsecs_to_fixed);
}
...@@ -4,12 +4,7 @@ ...@@ -4,12 +4,7 @@
*/ */
#include <asm/sstate.h> #include <asm/sstate.h>
extern unsigned long loops_per_jiffy;
static void __init check_bugs(void) static void __init check_bugs(void)
{ {
#ifndef CONFIG_SMP
cpu_data(0).udelay_val = loops_per_jiffy;
#endif
sstate_running(); sstate_running();
} }
...@@ -19,7 +19,7 @@ typedef struct { ...@@ -19,7 +19,7 @@ typedef struct {
unsigned int __softirq_pending; /* must be 1st, see rtrap.S */ unsigned int __softirq_pending; /* must be 1st, see rtrap.S */
unsigned int __pad0; unsigned int __pad0;
unsigned long clock_tick; /* %tick's per second */ unsigned long clock_tick; /* %tick's per second */
unsigned long udelay_val; unsigned long __pad;
unsigned int __pad1; unsigned int __pad1;
unsigned int __pad2; unsigned int __pad2;
......
/* delay.h: Linux delay routines on sparc64. /* delay.h: Linux delay routines on sparc64.
* *
* Copyright (C) 1996, 2004 David S. Miller (davem@davemloft.net). * Copyright (C) 1996, 2004, 2007 David S. Miller (davem@davemloft.net).
*
* Based heavily upon x86 variant which is:
* Copyright (C) 1993 Linus Torvalds
*
* Delay routines calling functions in arch/sparc64/lib/delay.c
*/ */
#ifndef __SPARC64_DELAY_H #ifndef _SPARC64_DELAY_H
#define __SPARC64_DELAY_H #define _SPARC64_DELAY_H
#include <linux/param.h>
#include <asm/cpudata.h>
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
extern void __bad_udelay(void);
extern void __bad_ndelay(void);
extern void __udelay(unsigned long usecs);
extern void __ndelay(unsigned long nsecs);
extern void __const_udelay(unsigned long usecs);
extern void __delay(unsigned long loops); extern void __delay(unsigned long loops);
extern void udelay(unsigned long usecs);
#define udelay(n) (__builtin_constant_p(n) ? \ #define mdelay(n) udelay((n) * 1000)
((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c7ul)) : \
__udelay(n))
#define ndelay(n) (__builtin_constant_p(n) ? \
((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
__ndelay(n))
#endif /* !__ASSEMBLY__ */ #endif /* !__ASSEMBLY__ */
#endif /* defined(__SPARC64_DELAY_H) */ #endif /* _SPARC64_DELAY_H */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment