Commit 8a05d490 authored by Paul Mackerras

Merge samba.org:/home/paulus/kernel/linux-2.5

into samba.org:/home/paulus/kernel/for-linus-ppc
parents 35aa61ec fe6d20fc
@@ -1296,6 +1296,7 @@ _GLOBAL(sys_call_table)
.long sys_futex
.long sys_sched_setaffinity
.long sys_sched_getaffinity
.long sys_security
.rept NR_syscalls-(.-sys_call_table)/4
.long sys_ni_syscall
.endr
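The .rept NR_syscalls-(.-sys_call_table)/4 directive counts how many 4-byte entries have been emitted so far and pads the remainder of the table with sys_ni_syscall, so a syscall number with no entry fails cleanly with -ENOSYS. A minimal user-space sketch of the same padding idea (all names and values here are stand-ins, not the kernel's):

#include <stdio.h>

#define NR_SYSCALLS 8                              /* stand-in for NR_syscalls */

static long sys_ni_syscall(void) { return -38; }  /* -ENOSYS */
static long sys_security(void)   { return 0; }    /* stand-in entry point */

/* Slots filled explicitly mirror the .long entries above; C
 * zero-initializes the remaining slots to NULL. */
static long (*sys_call_table[NR_SYSCALLS])(void) = {
	sys_security,
};

int main(void)
{
	/* Pad the unused slots, as .rept/.endr does at assembly time. */
	for (int i = 0; i < NR_SYSCALLS; i++)
		if (!sys_call_table[i])
			sys_call_table[i] = sys_ni_syscall;

	printf("slot 5 returns %ld\n", sys_call_table[5]());
	return 0;
}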
@@ -28,6 +28,7 @@
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <asm/uaccess.h>
#include <asm/page.h>
@@ -168,6 +169,9 @@ int sys_ptrace(long request, long pid, long addr, long data)
/* are we already being traced? */
if (current->ptrace & PT_PTRACED)
goto out;
ret = security_ops->ptrace(current->parent, current);
if (ret)
goto out;
/* set the ptrace bit in the process flags. */
current->ptrace |= PT_PTRACED;
ret = 0;
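The new check calls the ptrace hook of the global security_ops table (the 2.5-era LSM interface) and aborts the attach if the hook returns nonzero. A minimal sketch of the hook shape this hunk relies on, with simplified stand-in types rather than the kernel's:

#include <stdio.h>

struct task;                        /* opaque stand-in for struct task_struct */

struct security_operations {
	int (*ptrace)(struct task *parent, struct task *child);
};

/* A permissive default: returning 0 allows the attach, nonzero
 * (typically -EPERM) denies it, matching the `if (ret) goto out;`
 * pattern in the hunk above. */
static int dummy_ptrace(struct task *parent, struct task *child)
{
	(void)parent;
	(void)child;
	return 0;
}

static struct security_operations dummy_ops = { .ptrace = dummy_ptrace };
static struct security_operations *security_ops = &dummy_ops;

int main(void)
{
	int ret = security_ops->ptrace(NULL, NULL);
	printf("ptrace hook: %s\n", ret ? "denied" : "allowed");
	return 0;
}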
@@ -289,273 +289,6 @@ void smp_call_function_interrupt(void)
atomic_inc(&call_data->finished);
}
#if 0 /* Old boot code. */
void __init smp_boot_cpus(void)
{
int i, cpu_nr;
struct task_struct *p;
printk("Entering SMP Mode...\n");
smp_store_cpu_info(0);
cpu_online_map = 1UL;
/*
* assume for now that the first cpu booted is
* cpu 0, the master -- Cort
*/
cpu_callin_map[0] = 1;
for (i = 0; i < NR_CPUS; i++) {
prof_counter[i] = 1;
prof_multiplier[i] = 1;
}
/*
* XXX very rough.
*/
cache_decay_ticks = HZ/100;
smp_ops = ppc_md.smp_ops;
if (smp_ops == NULL) {
printk("SMP not supported on this machine.\n");
return;
}
/* Probe platform for CPUs */
cpu_nr = smp_ops->probe();
/*
* only check for cpus we know exist. We keep the callin map
* with cpus at the bottom -- Cort
*/
if (cpu_nr > max_cpus)
cpu_nr = max_cpus;
#ifdef CONFIG_PPC_ISERIES
smp_iSeries_space_timers( cpu_nr );
#endif
for (i = 1; i < cpu_nr; i++) {
int c;
struct pt_regs regs;
/* create a process for the processor */
/* only regs.msr is actually used, and 0 is OK for it */
memset(&regs, 0, sizeof(struct pt_regs));
p = do_fork(CLONE_VM|CLONE_IDLETASK, 0, &regs, 0);
if (IS_ERR(p))
panic("failed fork for CPU %d", i);
init_idle(p, i);
unhash_process(p);
secondary_ti = p->thread_info;
p->thread_info->cpu = i;
/*
* There was a cache flush loop here to flush the cache
* to memory for the first 8MB of RAM. The cache flush
* has been pushed into the kick_cpu function for those
* platforms that need it.
*/
/* wake up cpus */
smp_ops->kick_cpu(i);
/*
* wait to see if the cpu made a callin (is actually up).
* use this value that I found through experimentation.
* -- Cort
*/
for (c = 1000; c && !cpu_callin_map[i]; c--)
udelay(100);
if (cpu_callin_map[i]) {
char buf[32];
sprintf(buf, "found cpu %d", i);
if (ppc_md.progress) ppc_md.progress(buf, 0x350+i);
printk("Processor %d found.\n", i);
} else {
char buf[32];
sprintf(buf, "didn't find cpu %d", i);
if (ppc_md.progress) ppc_md.progress(buf, 0x360+i);
printk("Processor %d is stuck.\n", i);
}
}
/* Setup CPU 0 last (important) */
smp_ops->setup_cpu(0);
/* FIXME: Not with hotplug CPUS --RR */
if (num_online_cpus() < 2)
smp_tb_synchronized = 1;
}
void __init smp_software_tb_sync(int cpu)
{
#define PASSES 4 /* 4 passes.. */
int pass;
int i, j;
/* stop - start will be the number of timebase ticks it takes for cpu0
* to send a message to all others and the first response to show up.
*
* ASSUMPTION: this time is similar for all cpus
* ASSUMPTION: the time to send a one-way message is ping/2
*/
register unsigned long start = 0;
register unsigned long stop = 0;
register unsigned long temp = 0;
set_tb(0, 0);
/* multiple passes to get in l1 cache.. */
for (pass = 2; pass < 2+PASSES; pass++){
if (cpu == 0){
mb();
for (i = j = 1; i < NR_CPUS; i++, j++){
/* skip stuck cpus */
while (!cpu_callin_map[j])
++j;
while (cpu_callin_map[j] != pass)
barrier();
}
mb();
tb_sync_flag = pass;
start = get_tbl(); /* start timing */
while (tb_sync_flag)
mb();
stop = get_tbl(); /* end timing */
/* theoretically, the divisor should be 2, but
* I get better results on my dual mtx. someone
* please report results on other smp machines..
*/
tb_offset = (stop-start)/4;
mb();
tb_sync_flag = pass;
udelay(10);
mb();
tb_sync_flag = 0;
mb();
set_tb(0,0);
mb();
} else {
cpu_callin_map[cpu] = pass;
mb();
while (!tb_sync_flag)
mb(); /* wait for cpu0 */
mb();
tb_sync_flag = 0; /* send response for timing */
mb();
while (!tb_sync_flag)
mb();
temp = tb_offset; /* make sure offset is loaded */
while (tb_sync_flag)
mb();
set_tb(0,temp); /* now, set the timebase */
mb();
}
}
if (cpu == 0) {
smp_tb_synchronized = 1;
printk("smp_software_tb_sync: %d passes, final offset: %ld\n",
PASSES, tb_offset);
}
/* so time.c doesn't get confused */
set_dec(tb_ticks_per_jiffy);
last_jiffy_stamp(cpu) = 0;
}
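The comment above spells out the assumption behind this (now removed) software sync: the one-way latency is roughly half the measured round trip, so in theory the timebase offset to apply is (stop - start)/2; the code uses /4, tuned empirically on a dual MTX. A back-of-the-envelope sketch of that arithmetic, with made-up tick values:

#include <stdio.h>

int main(void)
{
	/* Hypothetical timebase readings on cpu0, in ticks. */
	unsigned long start = 100000;   /* cpu0 signals the other cpus  */
	unsigned long stop  = 100840;   /* first response observed      */

	unsigned long round_trip = stop - start;
	/* ASSUMPTION from the comment: one-way latency = ping/2. */
	unsigned long offset_theory = round_trip / 2;
	/* Divisor the code actually used, found empirically. */
	unsigned long offset_used = round_trip / 4;

	printf("round trip %lu ticks, theoretical offset %lu, used %lu\n",
	       round_trip, offset_theory, offset_used);
	return 0;
}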
void __init smp_commence(void)
{
/*
* Lets the callins below out of their loop.
*/
if (ppc_md.progress) ppc_md.progress("smp_commence", 0x370);
wmb();
smp_commenced = 1;
/* if the smp_ops->setup_cpu function has not already synched the
* timebases with a nicer hardware-based method, do so now
*
* I am open to suggestions for improvements to this method
* -- Troy <hozer@drgw.net>
*
* NOTE: if you are debugging, set smp_tb_synchronized for now,
* since this code runs pretty early and needs all cpus that
* reported in smp_callin_map to be working
*
* NOTE2: this code doesn't seem to work on > 2 cpus. -- paulus/BenH
*/
/* FIXME: This doesn't work with hotplug CPUs --RR */
if (!smp_tb_synchronized && num_online_cpus() == 2) {
unsigned long flags;
local_irq_save(flags);
smp_software_tb_sync(0);
local_irq_restore(flags);
}
}
void __init smp_callin(void)
{
int cpu = smp_processor_id();
smp_store_cpu_info(cpu);
set_dec(tb_ticks_per_jiffy);
/* Set online before we acknowledge. */
set_bit(cpu, &cpu_online_map);
wmb();
cpu_callin_map[cpu] = 1;
smp_ops->setup_cpu(cpu);
while (!smp_commenced)
barrier();
/* see smp_commence for more info */
if (!smp_tb_synchronized && num_online_cpus() == 2) {
smp_software_tb_sync(cpu);
}
local_irq_enable();
}
/* intel needs this */
void __init initialize_secondary(void)
{
}
/* Activate a secondary processor. */
int __init start_secondary(void *unused)
{
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
smp_callin();
return cpu_idle(NULL);
}
void __init smp_setup(char *str, int *ints)
{
}
int __init setup_profiling_timer(unsigned int multiplier)
{
return 0;
}
void __init smp_store_cpu_info(int id)
{
struct cpuinfo_PPC *c = &cpu_data[id];
/* assume bogomips are same for everything */
c->loops_per_jiffy = loops_per_jiffy;
c->pvr = mfspr(PVR);
}
static int __init maxcpus(char *str)
{
get_option(&str, &max_cpus);
return 1;
}
__setup("maxcpus=", maxcpus);
#else /* New boot code */
/* FIXME: Do this properly for all archs --RR */
static spinlock_t timebase_lock = SPIN_LOCK_UNLOCKED;
static unsigned int timebase_upper = 0, timebase_lower = 0;
@@ -643,6 +376,7 @@ int __devinit start_secondary(void *unused)
printk("CPU %i done callin...\n", cpu);
smp_ops->setup_cpu(cpu);
printk("CPU %i done setup...\n", cpu);
local_irq_enable();
smp_ops->take_timebase();
printk("CPU %i done timebase take...\n", cpu);
@@ -707,4 +441,3 @@ void smp_cpus_done(unsigned int max_cpus)
{
smp_ops->setup_cpu(0);
}
#endif
@@ -77,25 +77,20 @@ smp_chrp_give_timebase(void)
spin_unlock(&timebase_lock);
while (timebase_upper || timebase_lower)
rmb();
barrier();
call_rtas("thaw-time-base", 0, 1, NULL);
}
void __devinit
smp_chrp_take_timebase(void)
{
int done = 0;
while (!done) {
while (!(timebase_upper || timebase_lower))
barrier();
spin_lock(&timebase_lock);
if (timebase_upper || timebase_lower) {
set_tb(timebase_upper, timebase_lower);
timebase_upper = 0;
timebase_lower = 0;
done = 1;
}
spin_unlock(&timebase_lock);
}
printk("CPU %i taken timebase\n", smp_processor_id());
}
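This hunk simplifies the taker's side of the timebase handshake: the giver publishes (upper, lower) under timebase_lock, the taker copies them into its timebase and zeroes the pair as an acknowledgement, and the giver spins until that ack before thawing the timebase via RTAS. A user-space sketch of the protocol, with a pthread mutex standing in for the spinlock and plain busy-waits standing in for rmb()/barrier():

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t timebase_lock = PTHREAD_MUTEX_INITIALIZER;
static volatile unsigned int timebase_upper, timebase_lower;

static void *give_timebase(void *unused)
{
	(void)unused;
	pthread_mutex_lock(&timebase_lock);
	timebase_upper = 0x1234;          /* stand-in for reading the TB */
	timebase_lower = 0x5678;
	pthread_mutex_unlock(&timebase_lock);
	while (timebase_upper || timebase_lower)
		;                         /* spin until the taker acks */
	return NULL;
}

static void take_timebase(void)
{
	unsigned int hi, lo;

	while (!(timebase_upper || timebase_lower))
		;                         /* wait for the giver to publish */
	pthread_mutex_lock(&timebase_lock);
	hi = timebase_upper;
	lo = timebase_lower;
	timebase_upper = timebase_lower = 0;   /* ack */
	pthread_mutex_unlock(&timebase_lock);
	printf("took timebase %x:%x\n", hi, lo); /* stand-in for set_tb() */
}

int main(void)
{
	pthread_t giver;

	pthread_create(&giver, NULL, give_timebase, NULL);
	take_timebase();
	pthread_join(giver, NULL);
	return 0;
}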
@@ -807,6 +807,7 @@ prep_map_io(void)
static int __init
prep_request_io(void)
{
if (_machine == _MACH_prep) {
#ifdef CONFIG_NVRAM
request_region(PREP_NVRAM_AS0, 0x8, "nvram");
#endif
@@ -814,6 +815,7 @@ prep_request_io(void)
request_region(0x40,0x20,"timer");
request_region(0x80,0x10,"dma page reg");
request_region(0xc0,0x20,"dma2");
}
return 0;
}
@@ -26,10 +26,6 @@
#define SMP_MB
#endif /* CONFIG_SMP */
/*
* These used to be if'd out here because using : "cc" as a constraint
* resulted in errors from egcs. Things appear to be OK with gcc-2.95.
*/
static __inline__ void set_bit(int nr, volatile unsigned long * addr)
{
unsigned long old;
@@ -224,9 +220,7 @@ static __inline__ int __test_and_change_bit(int nr, volatile unsigned long *addr
static __inline__ int test_bit(int nr, __const__ volatile unsigned long *addr)
{
-	__const__ unsigned int *p = (__const__ unsigned int *) addr;
-	return ((p[nr >> 5] >> (nr & 0x1f)) & 1) != 0;
+	return ((addr[nr >> 5] >> (nr & 0x1f)) & 1) != 0;
}
/* Return the bit position of the most significant 1 bit in a word */
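After this change test_bit() indexes the bitmap directly: bit nr lives in 32-bit word nr >> 5, at bit position nr & 0x1f (ppc32 longs are 32 bits wide). A quick stand-alone check of that addressing:

#include <stdio.h>

/* Mirrors the ppc32 addressing: 32-bit words, so word = nr >> 5 and
 * bit = nr & 0x1f.  Uses unsigned int to keep words 32-bit on any host. */
static int test_bit_sketch(int nr, const volatile unsigned int *addr)
{
	return ((addr[nr >> 5] >> (nr & 0x1f)) & 1) != 0;
}

int main(void)
{
	unsigned int map[2] = { 0, 1u << 3 };   /* sets bit 35 */

	printf("bit 35 = %d, bit 3 = %d\n",
	       test_bit_sketch(35, map), test_bit_sketch(3, map));
	return 0;
}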
@@ -72,12 +72,26 @@ static inline void init_rwsem(struct rw_semaphore *sem)
*/
static inline void __down_read(struct rw_semaphore *sem)
{
-	if (atomic_inc_return((atomic_t *)(&sem->count)) >= 0)
+	if (atomic_inc_return((atomic_t *)(&sem->count)) > 0)
smp_wmb();
else
rwsem_down_read_failed(sem);
}
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
int tmp;
while ((tmp = sem->count) >= 0) {
if (tmp == cmpxchg(&sem->count, tmp,
tmp + RWSEM_ACTIVE_READ_BIAS)) {
smp_wmb();
return 1;
}
}
return 0;
}
/*
* lock for writing
*/
@@ -93,6 +107,16 @@ static inline void __down_write(struct rw_semaphore *sem)
rwsem_down_write_failed(sem);
}
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
int tmp;
tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
RWSEM_ACTIVE_WRITE_BIAS);
smp_wmb();
return tmp == RWSEM_UNLOCKED_VALUE;
}
/*
* unlock after reading
*/
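Both new trylock paths hinge on cmpxchg: snapshot the count, compute the new value, and install it only if no other CPU changed the count in between, retrying on the read side and failing outright on the write side. A user-space sketch of the two patterns, using GCC's __sync builtin in place of the kernel's cmpxchg and illustrative bias values (not the ppc header's exact constants):

#include <stdio.h>

#define RWSEM_UNLOCKED_VALUE     0
#define RWSEM_ACTIVE_READ_BIAS   1          /* illustrative values only */
#define RWSEM_ACTIVE_WRITE_BIAS  (-0xffff)

static int count = RWSEM_UNLOCKED_VALUE;

static int down_read_trylock_sketch(void)
{
	int tmp;

	/* Retry while no writer is active (count >= 0); give up the
	 * moment a writer appears. */
	while ((tmp = count) >= 0) {
		if (tmp == __sync_val_compare_and_swap(&count, tmp,
				tmp + RWSEM_ACTIVE_READ_BIAS))
			return 1;
	}
	return 0;
}

static int down_write_trylock_sketch(void)
{
	/* One shot: succeed only if the semaphore was completely idle. */
	int tmp = __sync_val_compare_and_swap(&count, RWSEM_UNLOCKED_VALUE,
			RWSEM_ACTIVE_WRITE_BIAS);
	return tmp == RWSEM_UNLOCKED_VALUE;
}

int main(void)
{
	printf("read trylock:  %d (count %d)\n", down_read_trylock_sketch(), count);
	printf("write trylock: %d (count %d)\n", down_write_trylock_sketch(), count);
	return 0;
}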
@@ -24,7 +24,7 @@ extern spinlock_t kernel_flag
/*
* Release global kernel lock and global interrupt lock
*/
-#define release_kernel_lock(task, cpu) \
+#define release_kernel_lock(task) \
do { \
if (unlikely(task->lock_depth >= 0)) \
spin_unlock(&kernel_flag); \