Commit 1bfaf6af authored by David S. Miller

SPARC64: Kill more cli/sti use.

parent 7475d10b
@@ -27,17 +27,9 @@
 		.text
 		.align		64
 		.globl		etrap, etrap_irq, etraptl1
-#ifdef CONFIG_PREEMPT
-etrap_irq:	ldsw	[%g6 + TI_PRE_COUNT], %g1
-		add	%g1, 1, %g1
-		ba,pt	%xcc, etrap_irq2
-		 stw	%g1, [%g6 + TI_PRE_COUNT]
-#endif
 etrap:		rdpr	%pil, %g2		! Single Group
-#ifndef CONFIG_PREEMPT
 etrap_irq:
-#endif
-etrap_irq2:	rdpr	%tstate, %g1		! Single Group
+		rdpr	%tstate, %g1		! Single Group
 		sllx	%g2, 20, %g3		! IEU0 Group
 		andcc	%g1, TSTATE_PRIV, %g0	! IEU1
 		or	%g1, %g3, %g1		! IEU0 Group
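With this hunk the trap entry path no longer maintains the preempt count by hand under CONFIG_PREEMPT; etrap_irq simply falls through into the common path. As a reading aid only, here is a hedged C sketch of what the removed assembly was doing, where current_thread_info() stands in for %g6 and the preempt_count field for the TI_PRE_COUNT offset; the helper name is made up.

/* Sketch only, not part of the patch: the removed CONFIG_PREEMPT block
 * bumped the current thread's preempt count on interrupt trap entry.
 */
#include <linux/sched.h>
#include <asm/thread_info.h>

static inline void etrap_irq_old_behaviour(void)
{
	/* ldsw  [%g6 + TI_PRE_COUNT], %g1
	 * add   %g1, 1, %g1
	 * stw   %g1, [%g6 + TI_PRE_COUNT]    (%g6 == current_thread_info())
	 */
	current_thread_info()->preempt_count++;
}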
@@ -1055,7 +1055,7 @@ void init_timers(void (*cfunc)(int, void *, struct pt_regs *),
 			     : /* no outputs */
 			     : "r" (pstate));
 
-	__sti();
+	local_irq_enable();
 }
 
 #ifdef CONFIG_SMP
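This is the direct substitution the commit message describes: the old unconditional/global-style flag helpers give way to the per-cpu local_irq_*() family. A hedged sketch of the mapping used throughout this patch; the surrounding function name is illustrative only.

/* Sketch: 2.4-style IRQ helpers and their per-cpu replacements. */
#include <linux/spinlock.h>	/* pulls in the local_irq_*() helpers */

static void irq_helper_mapping_example(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* was: save_and_cli(flags)  */
	/* ... short critical section on this cpu ... */
	local_irq_restore(flags);	/* was: restore_flags(flags) */

	local_irq_enable();		/* was: __sti()              */
}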
@@ -107,18 +107,6 @@ int cpu_idle(void)
 
 #endif
 
-#ifdef CONFIG_PREEMPT
-void kpreempt_maybe(void)
-{
-	int cpu = smp_processor_id();
-
-	if (local_irq_count(cpu) == 0 &&
-	    local_bh_count(cpu) == 0 &&
-	    test_thread_flag(TIF_NEED_RESCHED))
-		schedule();
-}
-#endif
-
 extern char reboot_command [];
 
 #ifdef CONFIG_SUN_CONSOLE
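kpreempt_maybe() existed only to guard schedule() against being entered while nested inside a hard or soft interrupt, using the 2.4-era per-cpu counters. With interrupt nesting folded into the thread_info preempt count, the return-from-trap code can make that decision itself (see the rtrap.S hunk below), so the wrapper goes away. A hedged sketch, not from the patch, of the equivalent test in the newer scheme:

/* Sketch: the same "safe to schedule?" test once hardirq/softirq nesting
 * lives in preempt_count.
 */
#include <linux/hardirq.h>
#include <linux/sched.h>

static void kpreempt_maybe_equivalent(void)
{
	if (!in_interrupt() && test_thread_flag(TIF_NEED_RESCHED))
		schedule();
}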
@@ -151,11 +151,6 @@ __handle_signal:
 		.align			64
 		.globl			rtrap_irq, rtrap_clr_l6, rtrap, irqsz_patchme, rtrap_xcall
 rtrap_irq:
-#ifdef CONFIG_PREEMPT
-		ldsw			[%g6 + TI_PRE_COUNT], %l0
-		sub			%l0, 1, %l0
-		stw			%l0, [%g6 + TI_PRE_COUNT]
-#endif
 rtrap_clr_l6:	clr			%l6
 rtrap:		ldub			[%g6 + TI_CPU], %l0
 		sethi			%hi(irq_stat), %l2	! &softirq_active
@@ -276,7 +271,7 @@ to_kernel:
 		brnz			%l5, kern_fpucheck
 		 sethi			%hi(PREEMPT_ACTIVE), %l6
 		stw			%l6, [%g6 + TI_PRE_COUNT]
-		call			kpreempt_maybe
+		call			schedule
 		 nop
 		ba,pt			%xcc, rtrap
 		 stw			%g0, [%g6 + TI_PRE_COUNT]
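The kernel-preemption path in rtrap.S now parks PREEMPT_ACTIVE in TI_PRE_COUNT, calls schedule() directly, clears the count, and branches back to rtrap to redo the return-from-trap checks. A hedged C rendering of that sequence; the function name is made up, the real code is the assembly above.

/* Sketch only: the logic of the rtrap.S preemption path after this patch. */
#include <linux/sched.h>
#include <asm/thread_info.h>

static void rtrap_kernel_preempt_sketch(void)
{
	struct thread_info *ti = current_thread_info();

	ti->preempt_count = PREEMPT_ACTIVE;	/* stw %l6, [%g6 + TI_PRE_COUNT] */
	schedule();				/* call schedule                 */
	ti->preempt_count = 0;			/* stw %g0, [%g6 + TI_PRE_COUNT] */
	/* ... then branch back to rtrap and retry the return path */
}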
@@ -108,7 +108,12 @@ int prom_callback(long *args)
 	 * case, the cpu is marked as in-interrupt. Drop IRQ locks.
 	 */
 	irq_exit();
-	save_and_cli(flags);
+
+	/* XXX Revisit the locking here someday.  This is a debugging
+	 * XXX feature so it isnt all that critical.  -DaveM
+	 */
+	local_irq_save(flags);
+
 	spin_unlock(&prom_entry_lock);
 	cons = console_drivers;
 	while (cons) {
@@ -301,7 +306,8 @@ int prom_callback(long *args)
 			register_console(cons);
 	}
 	spin_lock(&prom_entry_lock);
-	restore_flags(flags);
+	local_irq_restore(flags);
+
 	/*
 	 * Restore in-interrupt status for a resume from obp.
 	 */
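In prom_callback() the save_and_cli()/restore_flags() pair becomes local_irq_save()/local_irq_restore(), still wrapped around temporarily dropping prom_entry_lock while the console drivers are re-registered. A hedged sketch of that shape; the lock name matches the diff, everything else is illustrative and the console walk is elided.

/* Sketch only: locking shape of the console fixup after this patch. */
#include <linux/spinlock.h>

static spinlock_t prom_entry_lock = SPIN_LOCK_UNLOCKED;	/* 2.5-era initializer */

static void prom_console_fixup_sketch(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* was: save_and_cli(flags)        */
	spin_unlock(&prom_entry_lock);	/* drop the PROM lock for the walk */

	/* ... unregister_console()/register_console() over console_drivers ... */

	spin_lock(&prom_entry_lock);	/* retake it before returning      */
	local_irq_restore(flags);	/* was: restore_flags(flags)       */
}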
@@ -1294,6 +1294,8 @@ static void __init smp_tune_scheduling(void)
 }
 
 /* /proc/profile writes can call this, don't __init it please. */
+static spinlock_t prof_setup_lock = SPIN_LOCK_UNLOCKED;
+
 int setup_profiling_timer(unsigned int multiplier)
 {
 	unsigned long flags;
@@ -1302,11 +1304,11 @@ int setup_profiling_timer(unsigned int multiplier)
 	if ((!multiplier) || (timer_tick_offset / multiplier) < 1000)
 		return -EINVAL;
 
-	save_and_cli(flags);
+	spin_lock_irqsave(&prof_setup_lock, flags);
 	for (i = 0; i < NR_CPUS; i++)
 		prof_multiplier(i) = multiplier;
 	current_tick_offset = (timer_tick_offset / multiplier);
-	restore_flags(flags);
+	spin_unlock_irqrestore(&prof_setup_lock, flags);
 
 	return 0;
 }
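Here the bare IRQ disable becomes a dedicated lock: a new file-scope prof_setup_lock taken with spin_lock_irqsave(), so concurrent /proc/profile writers serialize against each other without touching global interrupt state. A hedged sketch of the idiom; the shadow variable and function name are illustrative, the lock pattern is the point.

/* Sketch of the spin_lock_irqsave() pattern introduced here. */
#include <linux/errno.h>
#include <linux/spinlock.h>

static spinlock_t prof_setup_lock = SPIN_LOCK_UNLOCKED;
static unsigned int current_multiplier;

static int set_profiling_multiplier(unsigned int multiplier)
{
	unsigned long flags;

	if (!multiplier)
		return -EINVAL;				/* reject nonsense input */

	spin_lock_irqsave(&prof_setup_lock, flags);	/* lock + local IRQs off */
	current_multiplier = multiplier;
	spin_unlock_irqrestore(&prof_setup_lock, flags);

	return 0;
}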
@@ -320,15 +320,19 @@ __asm__ __volatile__ ( \
 	store_common(dst_addr, size, src_val, asi, errh); \
 })
 
-/* XXX Need to capture/release other cpu's for SMP around this. */
+extern void smp_capture(void);
+extern void smp_release(void);
+
 #define do_atomic(srcdest_reg, mem, errh) ({ \
 	unsigned long flags, tmp; \
 \
-	save_and_cli(flags); \
+	smp_capture(); \
+	local_irq_save(flags); \
 	tmp = *srcdest_reg; \
 	do_integer_load(srcdest_reg, 4, mem, 0, errh); \
 	store_common(mem, 4, &tmp, errh); \
-	restore_flags(flags); \
+	local_irq_restore(flags); \
+	smp_release(); \
 })
 
 static inline void advance(struct pt_regs *regs)
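This hunk answers the old XXX comment: smp_capture()/smp_release() (the sparc64 cross-call that parks the other cpus) now bracket the local IRQ disable, so the load-plus-two-stores emulation of an unaligned atomic cannot race another processor. A hedged sketch of that ordering; helper names other than smp_capture/smp_release and the local_irq_*() pair are illustrative, and the unaligned access emulation itself is elided.

/* Sketch only: the ordering do_atomic() uses after this patch. */
#include <linux/spinlock.h>	/* local_irq_save()/local_irq_restore() */

extern void smp_capture(void);	/* sparc64: spin the other cpus ...     */
extern void smp_release(void);	/* ... and let them go again            */

static void emulate_atomic_swap_sketch(unsigned int *srcdest_reg, void *mem)
{
	unsigned long flags;
	unsigned int tmp;

	smp_capture();			/* quiesce other processors first */
	local_irq_save(flags);		/* then this cpu's interrupts     */

	tmp = *srcdest_reg;
	/* emulated unaligned load of *mem into *srcdest_reg,
	 * followed by an emulated unaligned store of tmp into *mem
	 */

	local_irq_restore(flags);	/* undo in reverse order          */
	smp_release();
}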