Commit fc51299f authored by David S. Miller, committed by David S. Miller

[SPARC64]: Kill SPARC64_USE_STICK and use real timer drivers.

parent b13e3088
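Instead of testing SPARC64_USE_STICK (or branching on tlb_type) at every
timer touch point, each timer flavor now stands behind a table of function
pointers plus the level-14 softint bit it owns, selected once at boot in
sparc64_init_timers(). In outline (a condensed sketch of the pattern, not
the literal patch text):

    /* One table per timer flavor; full definitions are in the time.c hunks below. */
    static struct sparc64_tick_ops tick_operations  = { /* %tick / %tick_cmpr     */ };
    static struct sparc64_tick_ops stick_operations = { /* %asr24 / %asr25 (STICK) */ };

    struct sparc64_tick_ops *tick_ops;

    if (tlb_type == spitfire)
        tick_ops = &tick_operations;   /* pre-UltraSPARC-III: %tick only   */
    else
        tick_ops = &stick_operations;  /* Cheetah and later: system tick   */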
@@ -719,12 +719,8 @@ void handler_irq(int irq, struct pt_regs *regs)
*/
{
unsigned long clr_mask = 1 << irq;
unsigned long tick_mask;
unsigned long tick_mask = tick_ops->softint_mask;
if (SPARC64_USE_STICK)
tick_mask = (1UL << 16);
else
tick_mask = (1UL << 0);
if ((irq == 14) && (get_softint() & tick_mask)) {
irq = 0;
clr_mask = tick_mask;
......
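handler_irq() now folds both timer sources into the same path: on a level-14
trap it tests the softint register against the mask owned by the active
driver (bit 0 for %tick_cmpr, bit 16 for %stick_cmpr) and, on a match,
reroutes to IRQ 0 and acks only that bit. Condensed (same logic as the hunk
above, assuming the usual clear_softint() ack that follows in the kernel):

    unsigned long clr_mask = 1 << irq;
    unsigned long tick_mask = tick_ops->softint_mask;

    if ((irq == 14) && (get_softint() & tick_mask)) {
        irq = 0;                /* route to the timer handler   */
        clr_mask = tick_mask;   /* clear only the timer softint */
    }
    clear_softint(clr_mask);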
@@ -115,7 +115,6 @@ extern void cpu_probe(void);
void __init smp_callin(void)
{
int cpuid = hard_smp_processor_id();
unsigned long pstate;
extern int bigkernel;
extern unsigned long kern_locked_tte_data;
@@ -133,50 +132,6 @@ void __init smp_callin(void)
cpu_probe();
/* Guarantee that the following sequences execute
* uninterrupted.
*/
__asm__ __volatile__("rdpr %%pstate, %0\n\t"
"wrpr %0, %1, %%pstate"
: "=r" (pstate)
: "i" (PSTATE_IE));
/* Set things up so user can access tick register for profiling
* purposes. Also workaround BB_ERRATA_1 by doing a dummy
* read back of %tick after writing it.
*/
__asm__ __volatile__(
"sethi %%hi(0x80000000), %%g1\n\t"
"ba,pt %%xcc, 1f\n\t"
" sllx %%g1, 32, %%g1\n\t"
".align 64\n"
"1: rd %%tick, %%g2\n\t"
"add %%g2, 6, %%g2\n\t"
"andn %%g2, %%g1, %%g2\n\t"
"wrpr %%g2, 0, %%tick\n\t"
"rdpr %%tick, %%g0"
: /* no outputs */
: /* no inputs */
: "g1", "g2");
if (SPARC64_USE_STICK) {
/* Let the user get at STICK too. */
__asm__ __volatile__(
"sethi %%hi(0x80000000), %%g1\n\t"
"sllx %%g1, 32, %%g1\n\t"
"rd %%asr24, %%g2\n\t"
"andn %%g2, %%g1, %%g2\n\t"
"wr %%g2, 0, %%asr24"
: /* no outputs */
: /* no inputs */
: "g1", "g2");
}
/* Restore PSTATE_IE. */
__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
: /* no outputs */
: "r" (pstate));
smp_setup_percpu_timer();
local_irq_enable();
@@ -211,7 +166,7 @@ void cpu_panic(void)
static unsigned long current_tick_offset;
/* This stick register synchronization scheme is taken entirely from
/* This tick register synchronization scheme is taken entirely from
* the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
*
* The only change I've made is to rework it so that the master
@@ -227,16 +182,7 @@ static unsigned long current_tick_offset;
static spinlock_t itc_sync_lock = SPIN_LOCK_UNLOCKED;
static unsigned long go[SLAVE + 1];
#define DEBUG_STICK_SYNC 0
static inline unsigned long get_stick(void)
{
unsigned long val;
__asm__ __volatile__("rd %%asr24, %0"
: "=r" (val));
return val;
}
#define DEBUG_TICK_SYNC 0
static inline long get_delta (long *rt, long *master)
{
@@ -245,14 +191,14 @@ static inline long get_delta (long *rt, long *master)
unsigned long i;
for (i = 0; i < NUM_ITERS; i++) {
t0 = get_stick();
t0 = tick_ops->get_tick();
go[MASTER] = 1;
membar("#StoreLoad");
while (!(tm = go[SLAVE]))
membar("#LoadLoad");
go[SLAVE] = 0;
membar("#StoreStore");
t1 = get_stick();
t1 = tick_ops->get_tick();
if (t1 - t0 < best_t1 - best_t0)
best_t0 = t0, best_t1 = t1, best_tm = tm;
@@ -268,32 +214,11 @@ static inline long get_delta (long *rt, long *master)
return tcenter - best_tm;
}
static void adjust_stick(long adj)
{
unsigned long tmp, pstate;
__asm__ __volatile__(
"rdpr %%pstate, %0\n\t"
"ba,pt %%xcc, 1f\n\t"
" wrpr %0, %4, %%pstate\n\t"
".align 16\n\t"
"1:nop\n\t"
"rd %%asr24, %1\n\t"
"add %1, %2, %1\n\t"
"wr %1, 0x0, %%asr24\n\t"
"add %1, %3, %1\n\t"
"wr %1, 0x0, %%asr25\n\t"
"wrpr %0, 0x0, %%pstate"
: "=&r" (pstate), "=&r" (tmp)
: "r" (adj), "r" (current_tick_offset),
"i" (PSTATE_IE));
}
void smp_synchronize_stick_client(void)
void smp_synchronize_tick_client(void)
{
long i, delta, adj, adjust_latency = 0, done = 0;
unsigned long flags, rt, master_time_stamp, bound;
#if DEBUG_STICK_SYNC
#if DEBUG_TICK_SYNC
struct {
long rt; /* roundtrip time */
long master; /* master's timestamp */
@@ -323,9 +248,9 @@ void smp_synchronize_stick_client(void)
} else
adj = -delta;
adjust_stick(adj);
tick_ops->add_tick(adj, current_tick_offset);
}
#if DEBUG_STICK_SYNC
#if DEBUG_TICK_SYNC
t[i].rt = rt;
t[i].master = master_time_stamp;
t[i].diff = delta;
@@ -335,25 +260,25 @@ void smp_synchronize_stick_client(void)
}
local_irq_restore(flags);
#if DEBUG_STICK_SYNC
#if DEBUG_TICK_SYNC
for (i = 0; i < NUM_ROUNDS; i++)
printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif
printk(KERN_INFO "CPU %d: synchronized STICK with master CPU (last diff %ld cycles,"
printk(KERN_INFO "CPU %d: synchronized TICK with master CPU (last diff %ld cycles,"
"maxerr %lu cycles)\n", smp_processor_id(), delta, rt);
}
static void smp_start_sync_stick_client(int cpu);
static void smp_start_sync_tick_client(int cpu);
static void smp_synchronize_one_stick(int cpu)
static void smp_synchronize_one_tick(int cpu)
{
unsigned long flags, i;
go[MASTER] = 0;
smp_start_sync_stick_client(cpu);
smp_start_sync_tick_client(cpu);
/* wait for client to be ready */
while (!go[MASTER])
@@ -370,7 +295,7 @@ static void smp_synchronize_one_stick(int cpu)
membar("#LoadLoad");
go[MASTER] = 0;
membar("#StoreStore");
go[SLAVE] = get_stick();
go[SLAVE] = tick_ops->get_tick();
membar("#StoreLoad");
}
}
@@ -638,11 +563,11 @@ static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 d
/* NOTE: Caller runs local copy on master. */
}
extern unsigned long xcall_sync_stick;
extern unsigned long xcall_sync_tick;
static void smp_start_sync_stick_client(int cpu)
static void smp_start_sync_tick_client(int cpu)
{
smp_cross_call_masked(&xcall_sync_stick,
smp_cross_call_masked(&xcall_sync_tick,
0, 0, 0,
(1UL << cpu));
}
@@ -1118,12 +1043,7 @@ void smp_percpu_timer_interrupt(struct pt_regs *regs)
* Check for level 14 softint.
*/
{
unsigned long tick_mask;
if (SPARC64_USE_STICK)
tick_mask = (1UL << 16);
else
tick_mask = (1UL << 0);
unsigned long tick_mask = tick_ops->softint_mask;
if (!(get_softint() & tick_mask)) {
extern void handler_irq(int, struct pt_regs *);
@@ -1159,41 +1079,8 @@ void smp_percpu_timer_interrupt(struct pt_regs *regs)
: "=r" (pstate)
: "i" (PSTATE_IE));
/* Workaround for Spitfire Errata (#54 I think??), I discovered
* this via Sun BugID 4008234, mentioned in Solaris-2.5.1 patch
* number 103640.
*
* On Blackbird writes to %tick_cmpr can fail, the
* workaround seems to be to execute the wr instruction
* at the start of an I-cache line, and perform a dummy
* read back from %tick_cmpr right after writing to it. -DaveM
*
* Just to be anal we add a workaround for Spitfire
* Errata 50 by preventing pipeline bypasses on the
* final read of the %tick register into a compare
* instruction. The Errata 50 description states
* that %tick is not prone to this bug, but I am not
* taking any chances.
*/
if (!SPARC64_USE_STICK) {
__asm__ __volatile__("rd %%tick_cmpr, %0\n\t"
"ba,pt %%xcc, 1f\n\t"
" add %0, %2, %0\n\t"
".align 64\n"
"1: wr %0, 0x0, %%tick_cmpr\n\t"
"rd %%tick_cmpr, %%g0\n\t"
"rd %%tick, %1\n\t"
"mov %1, %1"
: "=&r" (compare), "=r" (tick)
: "r" (current_tick_offset));
} else {
__asm__ __volatile__("rd %%asr25, %0\n\t"
"add %0, %2, %0\n\t"
"wr %0, 0x0, %%asr25\n\t"
"rd %%asr24, %1\n\t"
: "=&r" (compare), "=r" (tick)
: "r" (current_tick_offset));
}
compare = tick_ops->add_compare(current_tick_offset);
tick = tick_ops->get_tick();
/* Restore PSTATE_IE. */
__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
@@ -1217,35 +1104,7 @@ static void __init smp_setup_percpu_timer(void)
: "=r" (pstate)
: "i" (PSTATE_IE));
/* Workaround for Spitfire Errata (#54 I think??), I discovered
* this via Sun BugID 4008234, mentioned in Solaris-2.5.1 patch
* number 103640.
*
* On Blackbird writes to %tick_cmpr can fail, the
* workaround seems to be to execute the wr instruction
* at the start of an I-cache line, and perform a dummy
* read back from %tick_cmpr right after writing to it. -DaveM
*/
if (!SPARC64_USE_STICK) {
__asm__ __volatile__(
"rd %%tick, %%g1\n\t"
"ba,pt %%xcc, 1f\n\t"
" add %%g1, %0, %%g1\n\t"
".align 64\n"
"1: wr %%g1, 0x0, %%tick_cmpr\n\t"
"rd %%tick_cmpr, %%g0"
: /* no outputs */
: "r" (current_tick_offset)
: "g1");
} else {
__asm__ __volatile__(
"rd %%asr24, %%g1\n\t"
"add %%g1, %0, %%g1\n\t"
"wr %%g1, 0x0, %%asr25"
: /* no outputs */
: "r" (current_tick_offset)
: "g1");
}
tick_ops->init_tick(current_tick_offset);
/* Restore PSTATE_IE. */
__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
@@ -1314,44 +1173,23 @@ static void __init smp_tune_scheduling(void)
p += (64 / sizeof(unsigned long)))
*((volatile unsigned long *)p);
/* Now the real measurement. */
if (!SPARC64_USE_STICK) {
__asm__ __volatile__("b,pt %%xcc, 1f\n\t"
" rd %%tick, %0\n\t"
".align 64\n"
"1:\tldx [%2 + 0x000], %%g1\n\t"
"ldx [%2 + 0x040], %%g2\n\t"
"ldx [%2 + 0x080], %%g3\n\t"
"ldx [%2 + 0x0c0], %%g5\n\t"
"add %2, 0x100, %2\n\t"
"cmp %2, %4\n\t"
"bne,pt %%xcc, 1b\n\t"
" nop\n\t"
"rd %%tick, %1\n\t"
: "=&r" (tick1), "=&r" (tick2),
"=&r" (flush_base)
: "2" (flush_base),
"r" (flush_base + ecache_size)
: "g1", "g2", "g3", "g5");
} else {
__asm__ __volatile__("b,pt %%xcc, 1f\n\t"
" rd %%asr24, %0\n\t"
".align 64\n"
"1:\tldx [%2 + 0x000], %%g1\n\t"
"ldx [%2 + 0x040], %%g2\n\t"
"ldx [%2 + 0x080], %%g3\n\t"
"ldx [%2 + 0x0c0], %%g5\n\t"
"add %2, 0x100, %2\n\t"
"cmp %2, %4\n\t"
tick1 = tick_ops->get_tick();
__asm__ __volatile__("1:\n\t"
"ldx [%0 + 0x000], %%g1\n\t"
"ldx [%0 + 0x040], %%g2\n\t"
"ldx [%0 + 0x080], %%g3\n\t"
"ldx [%0 + 0x0c0], %%g5\n\t"
"add %0, 0x100, %0\n\t"
"cmp %0, %2\n\t"
"bne,pt %%xcc, 1b\n\t"
" nop\n\t"
"rd %%asr24, %1\n\t"
: "=&r" (tick1), "=&r" (tick2),
"=&r" (flush_base)
: "2" (flush_base),
" nop"
: "=&r" (flush_base)
: "0" (flush_base),
"r" (flush_base + ecache_size)
: "g1", "g2", "g3", "g5");
}
tick2 = tick_ops->get_tick();
local_irq_restore(flags);
@@ -1438,8 +1276,7 @@ int __devinit __cpu_up(unsigned int cpu)
if (!test_bit(cpu, &cpu_online_map)) {
ret = -ENODEV;
} else {
if (SPARC64_USE_STICK)
smp_synchronize_one_stick(cpu);
smp_synchronize_one_tick(cpu);
}
}
return ret;
......
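The synchronization scheme itself is unchanged from the ia64 original: the
slave runs NUM_ITERS handshakes, keeps the iteration with the smallest
round trip, and assumes the master's timestamp landed at the midpoint of
the slave's read pair. A sketch of that estimate (the go[] handshake and
membar ordering are as in the hunks above):

    for (i = 0; i < NUM_ITERS; i++) {
        t0 = tick_ops->get_tick();      /* slave: before handshake   */
        /* ... master samples tm via the go[MASTER]/go[SLAVE] flags ... */
        t1 = tick_ops->get_tick();      /* slave: after handshake    */
        if (t1 - t0 < best_t1 - best_t0)
            best_t0 = t0, best_t1 = t1, best_tm = tm;
    }
    *rt = best_t1 - best_t0;            /* round-trip error bound    */
    tcenter = best_t0/2 + best_t1/2;    /* assumed master read time  */
    return tcenter - best_tm;           /* signed slave-master delta */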
@@ -54,6 +54,203 @@ static unsigned long mstk48t59_regs = 0UL;
static int set_rtc_mmss(unsigned long);
struct sparc64_tick_ops *tick_ops;
static void tick_disable_protection(void)
{
/* Set things up so user can access tick register for profiling
* purposes. Also workaround BB_ERRATA_1 by doing a dummy
* read back of %tick after writing it.
*/
__asm__ __volatile__(
" sethi %%hi(0x80000000), %%g1\n"
" ba,pt %%xcc, 1f\n"
" sllx %%g1, 32, %%g1\n"
" .align 64\n"
"1: rd %%tick, %%g2\n"
" add %%g2, 6, %%g2\n"
" andn %%g2, %%g1, %%g2\n"
" wrpr %%g2, 0, %%tick\n"
" rdpr %%tick, %%g0"
: /* no outputs */
: /* no inputs */
: "g1", "g2");
}
static void tick_init_tick(unsigned long offset)
{
tick_disable_protection();
__asm__ __volatile__(
" rd %%tick, %%g1\n"
" ba,pt %%xcc, 1f\n"
" add %%g1, %0, %%g1\n"
" .align 64\n"
"1: wr %%g1, 0x0, %%tick_cmpr\n"
" rd %%tick_cmpr, %%g0"
: /* no outputs */
: "r" (offset)
: "g1");
}
static unsigned long tick_get_tick(void)
{
unsigned long ret;
__asm__ __volatile__("rd %%tick, %0\n\t"
"mov %0, %0"
: "=r" (ret));
return ret;
}
static unsigned long tick_get_compare(void)
{
unsigned long ret;
__asm__ __volatile__("rd %%tick_cmpr, %0\n\t"
"mov %0, %0"
: "=r" (ret));
return ret;
}
static unsigned long tick_add_compare(unsigned long adj)
{
unsigned long new_compare;
/* Workaround for Spitfire Errata (#54 I think??), I discovered
* this via Sun BugID 4008234, mentioned in Solaris-2.5.1 patch
* number 103640.
*
* On Blackbird writes to %tick_cmpr can fail, the
* workaround seems to be to execute the wr instruction
* at the start of an I-cache line, and perform a dummy
* read back from %tick_cmpr right after writing to it. -DaveM
*/
__asm__ __volatile__("rd %%tick_cmpr, %0\n\t"
"ba,pt %%xcc, 1f\n\t"
" add %0, %1, %0\n\t"
".align 64\n"
"1:\n\t"
"wr %0, 0, %%tick_cmpr\n\t"
"rd %%tick_cmpr, %%g0"
: "=&r" (new_compare)
: "r" (adj));
return new_compare;
}
static unsigned long tick_add_tick(unsigned long adj, unsigned long offset)
{
unsigned long new_tick, tmp;
/* Also need to handle Blackbird bug here too. */
__asm__ __volatile__("rd %%tick, %0\n\t"
"add %0, %2, %0\n\t"
"wrpr %0, 0, %%tick\n\t"
"ba,pt %%xcc, 1f\n\t"
" add %0, %3, %1\n\t"
".align 64\n"
"1:\n\t"
"wr %1, 0, %%tick_cmpr\n\t"
"rd %%tick_cmpr, %%g0"
: "=&r" (new_tick), "=&r" (tmp)
: "r" (adj), "r" (offset));
return new_tick;
}
static struct sparc64_tick_ops tick_operations = {
.init_tick = tick_init_tick,
.get_tick = tick_get_tick,
.get_compare = tick_get_compare,
.add_tick = tick_add_tick,
.add_compare = tick_add_compare,
.softint_mask = 1UL << 0,
};
static void stick_init_tick(unsigned long offset)
{
tick_disable_protection();
/* Let the user get at STICK too. */
__asm__ __volatile__(
" sethi %%hi(0x80000000), %%g1\n"
" sllx %%g1, 32, %%g1\n"
" rd %%asr24, %%g2\n"
" andn %%g2, %%g1, %%g2\n"
" wr %%g2, 0, %%asr24"
: /* no outputs */
: /* no inputs */
: "g1", "g2");
__asm__ __volatile__(
" rd %%asr24, %%g1\n"
" add %%g1, %0, %%g1\n"
" wr %%g1, 0x0, %%asr25"
: /* no outputs */
: "r" (offset)
: "g1");
}
static unsigned long stick_get_tick(void)
{
unsigned long ret;
__asm__ __volatile__("rd %%asr24, %0"
: "=r" (ret));
return ret;
}
static unsigned long stick_get_compare(void)
{
unsigned long ret;
__asm__ __volatile__("rd %%asr25, %0"
: "=r" (ret));
return ret;
}
static unsigned long stick_add_tick(unsigned long adj, unsigned long offset)
{
unsigned long new_tick, tmp;
__asm__ __volatile__("rd %%asr24, %0\n\t"
"add %0, %2, %0\n\t"
"wr %0, 0, %%asr24\n\t"
"add %0, %3, %1\n\t"
"wr %1, 0, %%asr25"
: "=&r" (new_tick), "=&r" (tmp)
: "r" (adj), "r" (offset));
return new_tick;
}
static unsigned long stick_add_compare(unsigned long adj)
{
unsigned long new_compare;
__asm__ __volatile__("rd %%asr25, %0\n\t"
"add %0, %1, %0\n\t"
"wr %0, 0, %%asr25"
: "=&r" (new_compare)
: "r" (adj));
return new_compare;
}
static struct sparc64_tick_ops stick_operations = {
.init_tick = stick_init_tick,
.get_tick = stick_get_tick,
.get_compare = stick_get_compare,
.add_tick = stick_add_tick,
.add_compare = stick_add_compare,
.softint_mask = 1UL << 16,
};
/* timer_interrupt() needs to keep up the real-time clock,
* as well as call the "do_timer()" routine every clocktick
*
@@ -146,43 +343,8 @@ static void timer_interrupt(int irq, void *dev_id, struct pt_regs * regs)
: "=r" (pstate)
: "i" (PSTATE_IE));
/* Workaround for Spitfire Errata (#54 I think??), I discovered
* this via Sun BugID 4008234, mentioned in Solaris-2.5.1 patch
* number 103640.
*
* On Blackbird writes to %tick_cmpr can fail, the
* workaround seems to be to execute the wr instruction
* at the start of an I-cache line, and perform a dummy
* read back from %tick_cmpr right after writing to it. -DaveM
*
* Just to be anal we add a workaround for Spitfire
* Errata 50 by preventing pipeline bypasses on the
* final read of the %tick register into a compare
* instruction. The Errata 50 description states
* that %tick is not prone to this bug, but I am not
* taking any chances.
*/
if (!SPARC64_USE_STICK) {
__asm__ __volatile__(
" rd %%tick_cmpr, %0\n"
" ba,pt %%xcc, 1f\n"
" add %0, %2, %0\n"
" .align 64\n"
"1: wr %0, 0, %%tick_cmpr\n"
" rd %%tick_cmpr, %%g0\n"
" rd %%tick, %1\n"
" mov %1, %1"
: "=&r" (timer_tick_compare), "=r" (ticks)
: "r" (timer_tick_offset));
} else {
__asm__ __volatile__(
" rd %%asr25, %0\n"
" add %0, %2, %0\n"
" wr %0, 0, %%asr25\n"
" rd %%asr24, %1"
: "=&r" (timer_tick_compare), "=r" (ticks)
: "r" (timer_tick_offset));
}
timer_tick_compare = tick_ops->add_compare(timer_tick_offset);
ticks = tick_ops->get_tick();
/* Restore PSTATE_IE. */
__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
@@ -205,19 +367,7 @@ void timer_tick_interrupt(struct pt_regs *regs)
/*
* Only keep timer_tick_offset uptodate, but don't set TICK_CMPR.
*/
if (!SPARC64_USE_STICK) {
__asm__ __volatile__(
" rd %%tick_cmpr, %0\n"
" add %0, %1, %0"
: "=&r" (timer_tick_compare)
: "r" (timer_tick_offset));
} else {
__asm__ __volatile__(
" rd %%asr25, %0\n"
" add %0, %1, %0"
: "=&r" (timer_tick_compare)
: "r" (timer_tick_offset));
}
timer_tick_compare = tick_ops->get_compare() + timer_tick_offset;
timer_check_rtc();
@@ -621,24 +771,25 @@ void __init clock_probe(void)
}
/* This gets the master TICK_INT timer going. */
static void sparc64_init_timers(void (*cfunc)(int, void *, struct pt_regs *),
unsigned long *clock)
static unsigned long sparc64_init_timers(void (*cfunc)(int, void *, struct pt_regs *))
{
unsigned long pstate;
extern unsigned long timer_tick_offset;
unsigned long pstate, clock;
int node, err;
#ifdef CONFIG_SMP
extern void smp_tick_init(void);
#endif
if (!SPARC64_USE_STICK) {
if (tlb_type == spitfire) {
tick_ops = &tick_operations;
node = linux_cpus[0].prom_node;
*clock = prom_getint(node, "clock-frequency");
clock = prom_getint(node, "clock-frequency");
} else {
tick_ops = &stick_operations;
node = prom_root_node;
*clock = prom_getint(node, "stick-frequency");
clock = prom_getint(node, "stick-frequency");
}
timer_tick_offset = *clock / HZ;
timer_tick_offset = clock / HZ;
#ifdef CONFIG_SMP
smp_tick_init();
#endif
@@ -660,64 +811,7 @@ static void sparc64_init_timers(void (*cfunc)(int, void *, struct pt_regs *),
: "=r" (pstate)
: "i" (PSTATE_IE));
/* Set things up so user can access tick register for profiling
* purposes. Also workaround BB_ERRATA_1 by doing a dummy
* read back of %tick after writing it.
*/
__asm__ __volatile__(
" sethi %%hi(0x80000000), %%g1\n"
" ba,pt %%xcc, 1f\n"
" sllx %%g1, 32, %%g1\n"
" .align 64\n"
"1: rd %%tick, %%g2\n"
" add %%g2, 6, %%g2\n"
" andn %%g2, %%g1, %%g2\n"
" wrpr %%g2, 0, %%tick\n"
" rdpr %%tick, %%g0"
: /* no outputs */
: /* no inputs */
: "g1", "g2");
/* Workaround for Spitfire Errata (#54 I think??), I discovered
* this via Sun BugID 4008234, mentioned in Solaris-2.5.1 patch
* number 103640.
*
* On Blackbird writes to %tick_cmpr can fail, the
* workaround seems to be to execute the wr instruction
* at the start of an I-cache line, and perform a dummy
* read back from %tick_cmpr right after writing to it. -DaveM
*/
if (!SPARC64_USE_STICK) {
__asm__ __volatile__(
" rd %%tick, %%g1\n"
" ba,pt %%xcc, 1f\n"
" add %%g1, %0, %%g1\n"
" .align 64\n"
"1: wr %%g1, 0x0, %%tick_cmpr\n"
" rd %%tick_cmpr, %%g0"
: /* no outputs */
: "r" (timer_tick_offset)
: "g1");
} else {
/* Let the user get at STICK too. */
__asm__ __volatile__(
" sethi %%hi(0x80000000), %%g1\n"
" sllx %%g1, 32, %%g1\n"
" rd %%asr24, %%g2\n"
" andn %%g2, %%g1, %%g2\n"
" wr %%g2, 0, %%asr24"
: /* no outputs */
: /* no inputs */
: "g1", "g2");
__asm__ __volatile__(
" rd %%asr24, %%g1\n"
" add %%g1, %0, %%g1\n"
" wr %%g1, 0x0, %%asr25"
: /* no outputs */
: "r" (timer_tick_offset)
: "g1");
}
tick_ops->init_tick(timer_tick_offset);
/* Restore PSTATE_IE. */
__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
@@ -725,40 +819,23 @@ static void sparc64_init_timers(void (*cfunc)(int, void *, struct pt_regs *),
: "r" (pstate));
local_irq_enable();
return clock;
}
void __init time_init(void)
{
/* clock_probe() is now done at end of [se]bus_init on sparc64
* so that sbus, fhc and ebus bus information is probed and
* available.
*/
unsigned long clock;
unsigned long clock = sparc64_init_timers(timer_interrupt);
sparc64_init_timers(timer_interrupt, &clock);
timer_ticks_per_usec_quotient = ((1UL<<32) / (clock / 1000020));
}
static __inline__ unsigned long do_gettimeoffset(void)
{
unsigned long ticks;
unsigned long ticks = tick_ops->get_tick();
if (!SPARC64_USE_STICK) {
__asm__ __volatile__(
" rd %%tick, %%g1\n"
" add %1, %%g1, %0\n"
" sub %0, %2, %0\n"
: "=r" (ticks)
: "r" (timer_tick_offset), "r" (timer_tick_compare)
: "g1", "g2");
} else {
__asm__ __volatile__("rd %%asr24, %%g1\n\t"
"add %1, %%g1, %0\n\t"
"sub %0, %2, %0\n\t"
: "=&r" (ticks)
: "r" (timer_tick_offset), "r" (timer_tick_compare)
: "g1");
}
ticks += timer_tick_offset;
ticks -= timer_tick_compare;
return (ticks * timer_ticks_per_usec_quotient) >> 32UL;
}
......
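time_init() precomputes a 32.32 fixed-point reciprocal of the tick rate so
that do_gettimeoffset() can turn a tick count into microseconds with one
multiply and one shift: quotient = 2^32 / ticks_per_usec, then
usec = (ticks * quotient) >> 32. Worked through with illustrative numbers
(not values from the patch):

    unsigned long clock = 400000000UL;                  /* 400 MHz tick rate  */
    unsigned long q = (1UL << 32) / (clock / 1000020);  /* approx 2^32 / 399  */
    unsigned long ticks = 4000;                         /* about 10 usec      */
    unsigned long usec = (ticks * q) >> 32;             /* == 10              */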
@@ -560,8 +560,8 @@ xcall_flush_tlb_kernel_range:
/* This runs in a very controlled environment, so we do
* not need to worry about BH races etc.
*/
.globl xcall_sync_stick
xcall_sync_stick:
.globl xcall_sync_tick
xcall_sync_tick:
rdpr %pstate, %g2
wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate
rdpr %pil, %g2
@@ -569,7 +569,7 @@ xcall_sync_stick:
sethi %hi(109f), %g7
b,pt %xcc, etrap_irq
109: or %g7, %lo(109b), %g7
call smp_synchronize_stick_client
call smp_synchronize_tick_client
nop
clr %l6
b rtrap_xcall
......
@@ -45,8 +45,6 @@ enum ultra_tlb_layout {
extern enum ultra_tlb_layout tlb_type;
#define SPARC64_USE_STICK (tlb_type != spitfire)
#define CHEETAH_HIGHEST_LOCKED_TLBENT (16 - 1)
#define L1DCACHE_SIZE 0x4000
......
@@ -50,6 +50,17 @@ struct sun5_timer {
*/
#define SUN5_HZ_TO_LIMIT(__hz) (1000000/(__hz))
struct sparc64_tick_ops {
void (*init_tick)(unsigned long);
unsigned long (*get_tick)(void);
unsigned long (*get_compare)(void);
unsigned long (*add_tick)(unsigned long, unsigned long);
unsigned long (*add_compare)(unsigned long);
unsigned long softint_mask;
};
extern struct sparc64_tick_ops *tick_ops;
#ifdef CONFIG_SMP
extern unsigned long timer_tick_offset;
extern void timer_tick_interrupt(struct pt_regs *);
......
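With the ops table exported alongside struct sun5_timer in the timer
header, new call sites stay hardware-agnostic. For instance, reprogramming
the next timer event could look like this (a minimal illustrative sketch,
not code from this patch; rearm_timer and the header path are assumptions):

    #include <asm/timer.h>    /* assumed location of sparc64_tick_ops */

    static void rearm_timer(unsigned long offset)
    {
        unsigned long next, now;

        next = tick_ops->add_compare(offset);   /* arm next compare value */
        now  = tick_ops->get_tick();            /* read free-running tick */
        if ((long)(now - next) >= 0)            /* missed it? arm again   */
            tick_ops->add_compare(offset);
    }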