Commit de5f503c authored by Linus Torvalds
parents d904ffd6 2fd628fe
@@ -161,60 +161,6 @@ asmlinkage int sys32_execve(nabi_no_regargs struct pt_regs regs)
 	return error;
 }
 
-struct dirent32 {
-	unsigned int	d_ino;
-	unsigned int	d_off;
-	unsigned short	d_reclen;
-	char		d_name[NAME_MAX + 1];
-};
-
-static void
-xlate_dirent(void *dirent64, void *dirent32, long n)
-{
-	long off;
-	struct dirent *dirp;
-	struct dirent32 *dirp32;
-
-	off = 0;
-	while (off < n) {
-		dirp = (struct dirent *)(dirent64 + off);
-		dirp32 = (struct dirent32 *)(dirent32 + off);
-		off += dirp->d_reclen;
-		dirp32->d_ino = dirp->d_ino;
-		dirp32->d_off = (unsigned int)dirp->d_off;
-		dirp32->d_reclen = dirp->d_reclen;
-		strncpy(dirp32->d_name, dirp->d_name, dirp->d_reclen - ((3 * 4) + 2));
-	}
-	return;
-}
-
-asmlinkage long
-sys32_getdents(unsigned int fd, void * dirent32, unsigned int count)
-{
-	long n;
-	void *dirent64;
-
-	dirent64 = (void *)((unsigned long)(dirent32 + (sizeof(long) - 1)) & ~(sizeof(long) - 1));
-	if ((n = sys_getdents(fd, dirent64, count - (dirent64 - dirent32))) < 0)
-		return(n);
-	xlate_dirent(dirent64, dirent32, n);
-	return(n);
-}
-
-asmlinkage int old_readdir(unsigned int fd, void * dirent, unsigned int count);
-
-asmlinkage int
-sys32_readdir(unsigned int fd, void * dirent32, unsigned int count)
-{
-	int n;
-	struct dirent dirent64;
-
-	if ((n = old_readdir(fd, &dirent64, count)) < 0)
-		return(n);
-	xlate_dirent(&dirent64, dirent32, dirent64.d_reclen);
-	return(n);
-}
-
 asmlinkage int
 sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr, int options)
 {
...
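Note: the sys32_getdents()/sys32_readdir() wrappers removed above called the native syscall into an aligned scratch area and then translated the 64-bit dirent records in place; the generic compat_sys_getdents()/compat_sys_old_readdir() that the syscall tables below now point at instead emit 32-bit records directly through a filldir callback. A rough, hypothetical sketch of that record-filling step (dirent32_sketch and fill_dirent32 are illustrative names, not kernel symbols):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Same shape as the struct dirent32 removed above. */
struct dirent32_sketch {
	uint32_t d_ino;
	uint32_t d_off;
	uint16_t d_reclen;
	char     d_name[256];			/* NAME_MAX + 1 */
};

/*
 * Emit one 32-bit record.  The real compat callback also checks the space
 * left in the user buffer and pads d_reclen for alignment; both are omitted
 * here for brevity.
 */
static void fill_dirent32(struct dirent32_sketch *out,
			  uint64_t ino, int64_t off, const char *name)
{
	size_t len = strlen(name);

	if (len > sizeof(out->d_name) - 1)	/* clamp overly long names */
		len = sizeof(out->d_name) - 1;

	out->d_ino = (uint32_t)ino;		/* truncated to the 32-bit ABI */
	out->d_off = (uint32_t)off;
	memcpy(out->d_name, name, len);
	out->d_name[len] = '\0';
	out->d_reclen = (uint16_t)(offsetof(struct dirent32_sketch, d_name) + len + 1);
}

Filling records in the compat layout up front avoids the buffer alignment fixup and the fragile reclen arithmetic that the removed xlate_dirent() relied on.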
@@ -195,7 +195,7 @@ EXPORT(sysn32_call_table)
 	PTR	sys_fdatasync
 	PTR	sys_truncate
 	PTR	sys_ftruncate			/* 6075 */
-	PTR	sys32_getdents
+	PTR	compat_sys_getdents
 	PTR	sys_getcwd
 	PTR	sys_chdir
 	PTR	sys_fchdir
...
@@ -293,7 +293,7 @@ sys_call_table:
 	PTR	sys_uselib
 	PTR	sys_swapon
 	PTR	sys_reboot
-	PTR	sys32_readdir
+	PTR	compat_sys_old_readdir
 	PTR	old_mmap			/* 4090 */
 	PTR	sys_munmap
 	PTR	sys_truncate
@@ -345,7 +345,7 @@ sys_call_table:
 	PTR	sys_setfsuid
 	PTR	sys_setfsgid
 	PTR	sys32_llseek			/* 4140 */
-	PTR	sys32_getdents
+	PTR	compat_sys_getdents
 	PTR	compat_sys_select
 	PTR	sys_flock
 	PTR	sys_msync
...
@@ -540,6 +540,9 @@ void __init setup_arch(char **cmdline_p)
 	sparse_init();
 	paging_init();
 	resource_init();
+#ifdef CONFIG_SMP
+	plat_smp_setup();
+#endif
 }
 
 int __init fpu_disable(char *s)
...
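The hunk above is the core of the SMP bootstrap split in this merge: CPU discovery now runs from setup_arch() via plat_smp_setup(), while the IRQ-dependent pieces stay in plat_prepare_cpus(), called later from smp_prepare_cpus() (see the smp.c and smp.h hunks below). A sketch of the resulting ordering; boot_order_sketch() is a made-up name and the calls are really made from start_kernel(), not from a single function:

/* Declarations as introduced by this merge (see the platform hunks below). */
extern void plat_smp_setup(void);
extern void plat_prepare_cpus(unsigned int max_cpus);

static void boot_order_sketch(unsigned int max_cpus)
{
	plat_smp_setup();		/* from setup_arch(): probe CPUs and fill
					 * the physical CPU present map */

	/* ... init_IRQ() and the rest of early boot run here ... */

	plat_prepare_cpus(max_cpus);	/* from smp_prepare_cpus(): e.g. wire up
					 * the IPI interrupt lines */

	/* ... __cpu_up() then brings each secondary CPU online ... */
}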
@@ -236,7 +236,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	init_new_context(current, &init_mm);
 	current_thread_info()->cpu = 0;
 	smp_tune_scheduling();
-	prom_prepare_cpus(max_cpus);
+	plat_prepare_cpus(max_cpus);
 }
 
 /* preload SMP state for boot cpu */
...
@@ -143,7 +143,7 @@ static struct irqaction irq_call = {
  * Make sure all CPU's are in a sensible state before we boot any of the
  * secondarys
  */
-void prom_prepare_cpus(unsigned int max_cpus)
+void plat_smp_setup(void)
 {
 	unsigned long val;
 	int i, num;
@@ -179,12 +179,10 @@ void prom_prepare_cpus(unsigned int max_cpus)
 			write_vpe_c0_vpeconf0(tmp);
 
 			/* Record this as available CPU */
-			if (i < max_cpus) {
-				cpu_set(i, phys_cpu_present_map);
-				__cpu_number_map[i]	= ++num;
-				__cpu_logical_map[num]	= i;
-			}
+			cpu_set(i, phys_cpu_present_map);
+			__cpu_number_map[i]	= ++num;
+			__cpu_logical_map[num]	= i;
 		}
 
 		/* disable multi-threading with TC's */
 		write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE);
@@ -241,7 +239,10 @@ void prom_prepare_cpus(unsigned int max_cpus)
 		set_vi_handler (MIPS_CPU_IPI_RESCHED_IRQ, ipi_resched_dispatch);
 		set_vi_handler (MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch);
 	}
+}
+
+void __init plat_prepare_cpus(unsigned int max_cpus)
+{
 	cpu_ipi_resched_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_RESCHED_IRQ;
 	cpu_ipi_call_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_CALL_IRQ;
...
@@ -50,37 +50,25 @@ void __init prom_grab_secondary(void)
  * We don't want to start the secondary CPU yet nor do we have a nice probing
  * feature in PMON so we just assume presence of the secondary core.
  */
-static char maxcpus_string[] __initdata =
-	KERN_WARNING "max_cpus set to 0; using 1 instead\n";
-
-void __init prom_prepare_cpus(unsigned int max_cpus)
+void __init plat_smp_setup(void)
 {
-	int enabled = 0, i;
-
-	if (max_cpus == 0) {
-		printk(maxcpus_string);
-		max_cpus = 1;
-	}
+	int i;
 
 	cpus_clear(phys_cpu_present_map);
 
 	for (i = 0; i < 2; i++) {
-		if (i == max_cpus)
-			break;
-
-		/*
-		 * The boot CPU
-		 */
 		cpu_set(i, phys_cpu_present_map);
 		__cpu_number_map[i]	= i;
 		__cpu_logical_map[i]	= i;
-
-		enabled++;
 	}
+}
 
+void __init plat_prepare_cpus(unsigned int max_cpus)
+{
 	/*
 	 * Be paranoid.  Enable the IPI only if we're really about to go SMP.
 	 */
-	if (enabled > 1)
+	if (cpus_weight(cpu_possible_map))
 		set_c0_status(STATUSF_IP5);
 }
...
@@ -140,7 +140,7 @@ static __init void intr_clear_all(nasid_t nasid)
 		REMOTE_HUB_CLR_INTR(nasid, i);
 }
 
-void __init prom_prepare_cpus(unsigned int max_cpus)
+void __init plat_smp_setup(void)
 {
 	cnodeid_t	cnode;
 
@@ -161,6 +161,11 @@ void __init prom_prepare_cpus(unsigned int max_cpus)
 	alloc_cpupda(0, 0);
 }
 
+void __init plat_prepare_cpus(unsigned int max_cpus)
+{
+	/* We already did everything necessary earlier */
+}
+
 /*
  * Launch a slave into smp_bootstrap().  It doesn't take an argument, and we
  * set sp to the kernel stack of the newly created idle process, gp to the proc
...
@@ -31,7 +31,7 @@
  *
  * Common setup before any secondaries are started
  */
-void __init prom_prepare_cpus(unsigned int max_cpus)
+void __init plat_smp_setup(void)
 {
 	int i, num;
 
@@ -40,14 +40,18 @@ void __init prom_prepare_cpus(unsigned int max_cpus)
 	__cpu_number_map[0] = 0;
 	__cpu_logical_map[0] = 0;
 
-	for (i=1, num=0; i<NR_CPUS; i++) {
+	for (i = 1, num = 0; i < NR_CPUS; i++) {
 		if (cfe_cpu_stop(i) == 0) {
 			cpu_set(i, phys_cpu_present_map);
 			__cpu_number_map[i] = ++num;
 			__cpu_logical_map[num] = i;
 		}
 	}
-	printk("Detected %i available secondary CPU(s)\n", num);
+	printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num);
+}
+
+void __init plat_prepare_cpus(unsigned int max_cpus)
+{
 }
 
 /*
...
@@ -250,7 +250,10 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 		"	subu	%0, %1, %3			\n"
 		"	bltz	%0, 1f				\n"
 		"	sc	%0, %2				\n"
+		"	.set	noreorder			\n"
 		"	beqzl	%0, 1b				\n"
+		"	subu	%0, %1, %3			\n"
+		"	.set	reorder				\n"
 		"	sync					\n"
 		"1:						\n"
 		"	.set	mips0				\n"
@@ -266,7 +269,10 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 		"	subu	%0, %1, %3			\n"
 		"	bltz	%0, 1f				\n"
 		"	sc	%0, %2				\n"
+		"	.set	noreorder			\n"
 		"	beqz	%0, 1b				\n"
+		"	subu	%0, %1, %3			\n"
+		"	.set	reorder				\n"
 		"	sync					\n"
 		"1:						\n"
 		"	.set	mips0				\n"
@@ -598,7 +604,10 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 		"	dsubu	%0, %1, %3			\n"
 		"	bltz	%0, 1f				\n"
 		"	scd	%0, %2				\n"
+		"	.set	noreorder			\n"
 		"	beqzl	%0, 1b				\n"
+		"	dsubu	%0, %1, %3			\n"
+		"	.set	reorder				\n"
 		"	sync					\n"
 		"1:						\n"
 		"	.set	mips0				\n"
@@ -614,7 +623,10 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 		"	dsubu	%0, %1, %3			\n"
 		"	bltz	%0, 1f				\n"
 		"	scd	%0, %2				\n"
+		"	.set	noreorder			\n"
 		"	beqz	%0, 1b				\n"
+		"	dsubu	%0, %1, %3			\n"
+		"	.set	reorder				\n"
 		"	sync					\n"
 		"1:						\n"
 		"	.set	mips0				\n"
...
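In the four atomic_sub_if_positive()/atomic64_sub_if_positive() hunks above, sc/scd overwrites %0 with its success flag, so the subtraction result has to be re-established before the routine falls out of the retry loop; the patch re-issues the subu/dsubu in the branch delay slot, under .set noreorder so the assembler neither reorders the sequence nor pads the slot itself. For reference, the operation these loops implement, as a plain C11 sketch (illustrative only, not kernel code):

#include <stdatomic.h>

/*
 * Sketch of the atomic_sub_if_positive() semantics: subtract i from *v only
 * if the result stays non-negative, and return the result of the subtraction
 * either way.  The kernel does this with the ll/sc loops patched above; the
 * compare-exchange retry below plays the role of a failed sc.
 */
static int atomic_sub_if_positive_sketch(atomic_int *v, int i)
{
	int old = atomic_load(v);
	int result;

	do {
		result = old - i;
		if (result < 0)		/* would go negative: do not store */
			return result;
	} while (!atomic_compare_exchange_weak(v, &old, result));

	return result;
}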
@@ -58,7 +58,9 @@ static inline int num_booting_cpus(void)
 	return cpus_weight(cpu_callout_map);
 }
 
-/* These are defined by the board-specific code. */
+/*
+ * These are defined by the board-specific code.
+ */
 
 /*
  * Cause the function described by call_data to be executed on the passed
@@ -79,7 +81,12 @@ extern void prom_boot_secondary(int cpu, struct task_struct *idle);
 extern void prom_init_secondary(void);
 
 /*
- * Detect available CPUs, populate phys_cpu_present_map before smp_init
+ * Populate cpu_possible_map before smp_init, called from setup_arch.
+ */
+extern void plat_smp_setup(void);
+
+/*
+ * Called after init_IRQ but before __cpu_up.
  */
 extern void prom_prepare_cpus(unsigned int max_cpus);
...
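The header now splits the old prom_prepare_cpus() responsibilities across two hooks: plat_smp_setup() fills in the CPU maps from setup_arch(), before smp_init, and a second, later hook (plat_prepare_cpus() in the platform hunks above) runs after init_IRQ but before __cpu_up. A hypothetical minimal implementation for a simple two-CPU board, modelled on the PMON and Sibyte versions changed above; board_cpu_present() and board_setup_ipi() are assumed helpers, not kernel APIs:

void __init plat_smp_setup(void)
{
	int i;

	cpus_clear(phys_cpu_present_map);
	for (i = 0; i < 2; i++) {
		if (!board_cpu_present(i))	/* assumed board-specific probe */
			continue;
		cpu_set(i, phys_cpu_present_map);
		__cpu_number_map[i] = i;
		__cpu_logical_map[i] = i;
	}
}

void __init plat_prepare_cpus(unsigned int max_cpus)
{
	/* IRQs are set up by this point, so the IPI lines can be enabled. */
	board_setup_ipi();			/* assumed board-specific helper */
}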
@@ -322,7 +322,7 @@ static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
 #endif
 		"2:						\n"
 		"	.set	pop				\n"
-		: "=&r" (retval), "=m" (*m)
+		: "=&r" (retval), "=R" (*m)
 		: "R" (*m), "Jr" (old), "Jr" (new)
 		: "memory");
 	} else if (cpu_has_llsc) {
@@ -342,7 +342,7 @@ static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
 #endif
 		"2:						\n"
 		"	.set	pop				\n"
-		: "=&r" (retval), "=m" (*m)
+		: "=&r" (retval), "=R" (*m)
 		: "R" (*m), "Jr" (old), "Jr" (new)
 		: "memory");
 	} else {
@@ -379,7 +379,7 @@ static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
 #endif
 		"2:						\n"
 		"	.set	pop				\n"
-		: "=&r" (retval), "=m" (*m)
+		: "=&r" (retval), "=R" (*m)
 		: "R" (*m), "Jr" (old), "Jr" (new)
 		: "memory");
 	} else if (cpu_has_llsc) {
@@ -397,7 +397,7 @@ static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
 #endif
 		"2:						\n"
 		"	.set	pop				\n"
-		: "=&r" (retval), "=m" (*m)
+		: "=&r" (retval), "=R" (*m)
 		: "R" (*m), "Jr" (old), "Jr" (new)
 		: "memory");
 	} else {
...
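Note on the last four hunks: the output operand for the memory cell changes from "=m" to "=R" so it matches the input constraint; on MIPS the "R" constraint restricts the operand to an address usable by a single non-macro load/store, which is what the ll/sc sequence inside the asm needs, whereas a plain "m" operand may be given an addressing mode that sequence cannot encode. For context, a typical compare-and-swap retry loop built on the cmpxchg() this header provides (sketch only; bounded_inc() is a made-up example, not kernel code):

/*
 * Increment *counter atomically, but never above limit.  Returns 1 if the
 * increment was performed, 0 if the counter was already at the limit.
 * cmpxchg(ptr, old, new) returns the value that was previously in *ptr.
 */
static inline int bounded_inc(volatile int *counter, int limit)
{
	int old, new;

	do {
		old = *counter;
		if (old >= limit)
			return 0;
		new = old + 1;
	} while (cmpxchg(counter, old, new) != old);

	return 1;
}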