Commit eaa5a84d authored by Keith M. Wesolowski

Merge.

parents 8fb7045e bedd4ac0
@@ -350,21 +350,15 @@ smp4m_ticker:
 	 * for cross calls. That has a separate entry point below.
 	 */
 maybe_smp4m_msg:
-	GET_PROCESSOR_MID(o3)
+	GET_PROCESSOR4M_ID(o3)
 	set	sun4m_interrupts, %l5
 	ld	[%l5], %o5
-	sethi	%hi(0x60000000), %o4
+	sethi	%hi(0x40000000), %o2
 	sll	%o3, 12, %o3
 	ld	[%o5 + %o3], %o1
-	andcc	%o1, %o4, %g0
+	andcc	%o1, %o2, %g0
 	be,a	smp4m_ticker
 	 cmp	%l7, 14
-	sethi	%hi(0x40000000), %o2
-	add	%o5, %o3, %o5
-	andcc	%o1, %o2, %g0
-	be,a	1f
-	 sethi	%hi(0x20000000), %o2
-1:
 	st	%o2, [%o5 + 0x4]
 	WRITE_PAUSE
 	ld	[%o5], %g0
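Read as pseudo-C, the rewritten path tests a single message bit (0x40000000) instead of demultiplexing several. The sketch below paraphrases the new control flow; the register layout, the SUN4M_SOFTINT_RESCHED name, and the helper name are inferred from the assembly, not taken from the kernel source:

    /* Stubs so the sketch stands alone; in the kernel these are real symbols. */
    extern unsigned int *sun4m_interrupts;
    extern void smp_reschedule_irq(void);

    #define SUN4M_SOFTINT_RESCHED 0x40000000u   /* the one message bit kept */

    /* Illustrative paraphrase of the new maybe_smp4m_msg flow: the per-CPU
     * interrupt registers sit at sun4m_interrupts + (mid << 12). */
    static void maybe_smp4m_msg_sketch(unsigned int mid)
    {
            volatile unsigned int *regs = (volatile unsigned int *)
                    ((char *)sun4m_interrupts + (mid << 12));

            if (!(regs[0] & SUN4M_SOFTINT_RESCHED))
                    return;                      /* no message: timer path instead */

            regs[1] = SUN4M_SOFTINT_RESCHED;     /* ack via the register at +0x4 */
            smp_reschedule_irq();                /* the only remaining soft IPI */
    }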
@@ -374,15 +368,9 @@ maybe_smp4m_msg:
 	WRITE_PAUSE
 	wr	%l4, PSR_ET, %psr
 	WRITE_PAUSE
-	srl	%o2, (16+14), %o2
-	tst	%o2
-	bne	2f
-	 nop
 	call	smp_reschedule_irq
-	 add	%o7, 8, %o7
-2:
-	call	smp_stop_cpu_irq
 	 nop
 	RESTORE_ALL

 	.align	4
@@ -390,7 +378,7 @@ maybe_smp4m_msg:
 linux_trap_ipi15_sun4m:
 	SAVE_ALL
 	sethi	%hi(0x80000000), %o2
-	GET_PROCESSOR_MID(o0)
+	GET_PROCESSOR4M_ID(o0)
 	set	sun4m_interrupts, %l5
 	ld	[%l5], %o5
 	sll	%o0, 12, %o0
...
@@ -746,9 +746,6 @@ go_to_highmem:
 	jmpl	%g1, %g0
 	 nop

-/* This is to align init_thread_union properly, be careful. -DaveM */
-	.align	8192
-
 /* The code above should be at beginning and we have to take care about
  * short jumps, as branching to .text.init section from .text is usually
  * impossible */
...
@@ -21,5 +21,7 @@ EXPORT_SYMBOL(init_task);
  * If this is not aligned on a 8k boundry, then you should change code
  * in etrap.S which assumes it.
  */
-__asm__(".section \".text\",#alloc\n");
-union thread_union init_thread_union = { INIT_THREAD_INFO(init_task) };
+union thread_union init_thread_union
+	__attribute__((section (".text")))
+	__attribute__((aligned (THREAD_SIZE)))
+	= { INIT_THREAD_INFO(init_task) };
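This hunk replaces a raw __asm__ section directive (plus the .align 8192 removed from head.S above) with GCC attributes, so the placement and alignment stay attached to the declaration the compiler sees. A minimal standalone sketch of the idiom; the names and the 8 KiB size are assumptions for illustration, matching the 8k boundary the comment above mentions:

    #include <stdio.h>

    #define STACK_ALIGN 8192   /* assumption: 8 KiB, like sparc32 THREAD_SIZE */

    /* One object placed in a named section and aligned so that masking any
     * address inside it with ~(STACK_ALIGN - 1) recovers its base, which is
     * the property etrap.S relies on for init_thread_union. */
    static char demo_union[STACK_ALIGN]
            __attribute__((section(".demo"), aligned(STACK_ALIGN)));

    int main(void)
    {
            unsigned long base   = (unsigned long)demo_union;
            unsigned long inside = (unsigned long)&demo_union[1234];

            printf("aligned: %d\n", base % STACK_ALIGN == 0);
            printf("base recovered: %d\n",
                   (inside & ~(unsigned long)(STACK_ALIGN - 1)) == base);
            return 0;
    }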
@@ -183,7 +183,7 @@ EXPORT_SYMBOL(io_remap_page_range);
 #ifndef CONFIG_SMP
 EXPORT_SYMBOL(BTFIXUP_CALL(___xchg32));
 #else
-EXPORT_SYMBOL(BTFIXUP_CALL(__smp_processor_id));
+EXPORT_SYMBOL(BTFIXUP_CALL(__hard_smp_processor_id));
 #endif
 EXPORT_SYMBOL(BTFIXUP_CALL(enable_irq));
 EXPORT_SYMBOL(BTFIXUP_CALL(disable_irq));
...
@@ -7,5 +7,7 @@ EXTRA_AFLAGS := -ansi -DST_DIV0=0x02
 lib-y := mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o memcpy.o memset.o \
 	 strlen.o checksum.o blockops.o memscan.o memcmp.o strncmp.o \
 	 strncpy_from_user.o divdi3.o udivdi3.o strlen_user.o \
-	 copy_user.o locks.o atomic.o atomic32.o bitops.o debuglocks.o \
+	 copy_user.o locks.o atomic.o atomic32.o bitops.o \
 	 lshrdi3.o ashldi3.o rwsem.o muldi3.o bitext.o
+
+lib-$(CONFIG_DEBUG_SPINLOCK) += debuglocks.o
@@ -12,8 +12,7 @@
 #include <asm/psr.h>
 #include <asm/system.h>

-/* To enable this code, just define SPIN_LOCK_DEBUG in asm/spinlock.h */
-#ifdef SPIN_LOCK_DEBUG
+#ifdef CONFIG_SMP

 /* Some notes on how these debugging routines work. When a lock is acquired
  * an extra debugging member lock->owner_pc is set to the caller of the lock
@@ -200,4 +199,4 @@ void _do_write_unlock(rwlock_t *rw)
 	rw->lock = 0;
 }

-#endif /* SPIN_LOCK_DEBUG */
+#endif /* SMP */
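The comment in the previous hunk describes the scheme: each lock carries an extra owner_pc member recording the caller that took it. A minimal user-space sketch of that idea; the type, field, and function names here are mine, not the kernel's:

    #include <stdio.h>

    /* Toy spinlock with the extra debugging member the comment describes. */
    struct dbg_lock {
            volatile int  lock;
            unsigned long owner_pc;    /* caller that currently holds the lock */
    };

    /* Record our caller's return address on acquire, clear it on release;
     * a zero owner_pc means "not held". */
    static void dbg_lock_acquire(struct dbg_lock *l)
    {
            while (__sync_lock_test_and_set(&l->lock, 1))
                    ;                                    /* spin */
            l->owner_pc = (unsigned long)__builtin_return_address(0);
    }

    static void dbg_lock_release(struct dbg_lock *l)
    {
            l->owner_pc = 0;
            __sync_lock_release(&l->lock);
    }

    int main(void)
    {
            struct dbg_lock l = { 0, 0 };
            dbg_lock_acquire(&l);
            printf("held by caller at %#lx\n", l.owner_pc);
            dbg_lock_release(&l);
            return 0;
    }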
@@ -18,17 +18,6 @@
 #define GET_PROCESSOR4D_ID(reg) \
 	lda	[%g0] ASI_M_VIKING_TMP1, %reg;

-/* Blackbox */
-#define GET_PROCESSOR_ID(reg) \
-	sethi	%hi(___b_smp_processor_id), %reg; \
-	sethi	%hi(boot_cpu_id), %reg; \
-	ldub	[%reg + %lo(boot_cpu_id)], %reg;
-
-#define GET_PROCESSOR_MID(reg) \
-	rd	%tbr, %reg; \
-	srl	%reg, 12, %reg; \
-	and	%reg, 3, %reg;
-
 /* All trap entry points _must_ begin with this macro or else you
  * lose. It makes sure the kernel has a proper window so that
  * c-code can be called.
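The deleted GET_PROCESSOR_MID pulled the CPU number out of bits 13:12 of the trap base register, which is also why it could only distinguish four CPUs. Expressed in C, with a hypothetical helper name, the removed computation is simply:

    /* Equivalent of the removed macro: rd %tbr; srl %reg, 12; and %reg, 3. */
    static inline unsigned int tbr_to_mid(unsigned long tbr)
    {
            return (tbr >> 12) & 3;
    }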
...
@@ -53,8 +53,8 @@ void smp_info(struct seq_file *);
 BTFIXUPDEF_CALL(void, smp_cross_call, smpfunc_t, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long)
 BTFIXUPDEF_CALL(void, smp_message_pass, int, int, unsigned long, int)
-BTFIXUPDEF_CALL(int, __smp_processor_id, void)
-BTFIXUPDEF_BLACKBOX(smp_processor_id)
+BTFIXUPDEF_CALL(int, __hard_smp_processor_id, void)
+BTFIXUPDEF_BLACKBOX(hard_smp_processor_id)
 BTFIXUPDEF_BLACKBOX(load_current)

 #define smp_cross_call(func,arg1,arg2,arg3,arg4,arg5) BTFIXUP_CALL(smp_cross_call)(func,arg1,arg2,arg3,arg4,arg5)
...@@ -129,7 +129,7 @@ extern __inline__ int hard_smp_processor_id(void) ...@@ -129,7 +129,7 @@ extern __inline__ int hard_smp_processor_id(void)
"=&r" (cpuid)); "=&r" (cpuid));
See btfixup.h and btfixupprep.c to understand how a blackbox works. See btfixup.h and btfixupprep.c to understand how a blackbox works.
*/ */
__asm__ __volatile__("sethi %%hi(___b_smp_processor_id), %0\n\t" __asm__ __volatile__("sethi %%hi(___b_hard_smp_processor_id), %0\n\t"
"sethi %%hi(boot_cpu_id), %0\n\t" "sethi %%hi(boot_cpu_id), %0\n\t"
"ldub [%0 + %%lo(boot_cpu_id)], %0\n\t" : "ldub [%0 + %%lo(boot_cpu_id)], %0\n\t" :
"=&r" (cpuid)); "=&r" (cpuid));
@@ -141,7 +141,7 @@ extern __inline__ int hard_smp_processor_id(void)
 	int cpuid;

 	__asm__ __volatile__("mov %%o7, %%g1\n\t"
-			     "call ___f___smp_processor_id\n\t"
+			     "call ___f___hard_smp_processor_id\n\t"
 			     " nop\n\t"
 			     "mov %%g2, %0\n\t" : "=r"(cpuid) : : "g1", "g2");
 	return cpuid;
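The ___f___hard_smp_processor_id target is a btfixup stub: at boot the kernel patches the call site for the platform it detected (Viking reads ASI_M_VIKING_TMP1; everything else falls back to boot_cpu_id, as the hunk above shows). A loose user-space analogy only, with made-up names; the real mechanism rewrites the instructions in place rather than binding a pointer:

    /* Analogy: a function pointer bound once during init gives the same
     * late, per-platform selection that btfixup achieves by patching code. */
    static int read_viking_mid(void)  { return 0; /* would read ASI_M_VIKING_TMP1 */ }
    static int read_boot_cpu_id(void) { return 0; /* would load boot_cpu_id */ }

    static int (*hard_cpu_id)(void) = read_boot_cpu_id;

    static void fixup_cpu_id(int is_viking)
    {
            hard_cpu_id = is_viking ? read_viking_mid : read_boot_cpu_id;
    }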
...