Commit a756dff5 authored by Paul Mackerras

Merge samba.org:/home/paulus/kernel/linux-2.5

into samba.org:/home/paulus/kernel/for-linus-ppc
parents c4265b8b 58ee5199
@@ -130,7 +130,8 @@ static void i8259_unmask_irq(unsigned int irq_nr)
static void i8259_end_irq(unsigned int irq)
{
if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))
&& irq_desc[irq].action)
i8259_unmask_irq(irq);
}
......
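
Several interrupt controllers in this merge get the same fix: an ->end() handler must not re-enable an IRQ line that has no registered handler, otherwise a spurious interrupt that nobody will acknowledge can fire forever. A condensed sketch of the shared pattern (my_end_irq/my_unmask_irq are hypothetical stand-ins for the controller-specific routines):

static void my_end_irq(unsigned int irq)
{
	irq_desc_t *desc = irq_desc + irq;

	/* Re-enable the source only if the line is neither disabled nor
	 * still being serviced AND a handler is actually registered;
	 * unmasking a handler-less line would let a spurious interrupt
	 * storm the CPU. */
	if (!(desc->status & (IRQ_DISABLED | IRQ_INPROGRESS)) && desc->action)
		my_unmask_irq(irq);
}
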
@@ -443,7 +443,7 @@ void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq)
* use the action we have.
*/
action = NULL;
if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
action = desc->action;
if (!action || !action->handler) {
ppc_spurious_interrupts++;
@@ -468,7 +468,7 @@ void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq)
a different instance of this same irq, the other processor
will take care of it.
*/
if (!action)
if (unlikely(!action))
goto out;
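
The dispatch hunks annotate their tests with likely()/unlikely(), which expand to gcc's __builtin_expect so the compiler lays the expected path out as straight-line code. A self-contained sketch of what the annotations buy (the macro definitions match the kernel's):

#include <stdio.h>

/* Tell gcc which way each branch usually goes; the hot path then falls
 * through without a taken jump, and the cold path is moved out of line. */
#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

static int dispatch(int status, int have_action)
{
	if (likely(status == 0)) {		/* common case falls through */
		if (unlikely(!have_action))	/* rare: no handler bound */
			return -1;
		return 0;
	}
	return -1;				/* rare: disabled/in-progress */
}

int main(void)
{
	printf("%d\n", dispatch(0, 1));
	return 0;
}
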
@@ -487,12 +487,12 @@ void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq)
handle_irq_event(irq, regs, action);
spin_lock(&desc->lock);
if (!(desc->status & IRQ_PENDING))
if (likely(!(desc->status & IRQ_PENDING)))
break;
desc->status &= ~IRQ_PENDING;
}
desc->status &= ~IRQ_INPROGRESS;
out:
desc->status &= ~IRQ_INPROGRESS;
/*
* The ->end() handler has to deal with interrupts which got
* disabled while the handler was running.
@@ -561,10 +561,6 @@ void __init init_IRQ(void)
#ifdef CONFIG_SMP
void synchronize_irq(unsigned int irq)
{
/* is there anything to synchronize with? */
if (!irq_desc[irq].action)
return;
while (irq_desc[irq].status & IRQ_INPROGRESS)
barrier();
}
......
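
The early "is there anything to synchronize with?" return is dropped, and this works because the hunk above moved the clearing of IRQ_INPROGRESS below the out: label, so the flag is cleared on every exit path; spinning on it alone is now sufficient, even during the action-less window of a teardown. The classic caller looks roughly like this (a sketch; unlink_action/free_action are hypothetical stand-ins for the free_irq internals):

static void teardown_irq_handler(unsigned int irq)
{
	unlink_action(irq);	/* hypothetical: detach from irq_desc[irq].action */
	synchronize_irq(irq);	/* busy-waits while IRQ_INPROGRESS is set, so a
				 * handler still running on another CPU finishes
				 * before its data goes away */
	free_action(irq);	/* hypothetical: now safe to release */
}
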
@@ -1296,6 +1296,9 @@ _GLOBAL(sys_call_table)
.long sys_futex
.long sys_sched_setaffinity
.long sys_sched_getaffinity
.long sys_security
.long sys_ni_syscall /* 225 - reserved for Tux */
.rept NR_syscalls-(.-sys_call_table)/4
.long sys_ni_syscall
.endr
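
Slots 224 and 225 here line up with __NR_security and __NR_tuxcall added to unistd.h at the end of this merge, and the .rept directive pads the table out to NR_syscalls entries of sys_ni_syscall ((.-sys_call_table)/4 counts the 4-byte slots emitted so far). A C analogue of that fill, assuming a hypothetical NR_syscalls of 256 for the sketch:

typedef long (*syscall_fn)(void);

#define NR_syscalls 256				/* assumed size for the sketch */

static long sys_ni_syscall(void) { return -38; }	/* -ENOSYS */
static long sys_security(void)   { return -38; }	/* placeholder body */

static syscall_fn sys_call_table[NR_syscalls] = {
	/* gcc range initializer: every slot defaults to sys_ni_syscall,
	 * which is exactly what the .rept/.endr padding achieves in the
	 * .S file, so unknown syscall numbers fail cleanly with -ENOSYS. */
	[0 ... NR_syscalls - 1] = sys_ni_syscall,
	[224] = sys_security,
	/* 225 keeps sys_ni_syscall: reserved for Tux */
};
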
@@ -25,6 +25,7 @@
#include <asm/sections.h>
#include <asm/open_pic.h>
#include <asm/i8259.h>
#include <asm/hardirq.h>
#include "open_pic_defs.h"
@@ -148,16 +149,15 @@ struct hw_interrupt_type open_pic_ipi = {
* data has probably been corrupted and we're going to panic or deadlock later
* anyway --Troy
*/
extern unsigned long* _get_SP(void);
#define check_arg_irq(irq) \
if (irq < open_pic_irq_offset || irq >= NumSources+open_pic_irq_offset \
|| ISR[irq - open_pic_irq_offset] == 0) { \
printk("open_pic.c:%d: illegal irq %d\n", __LINE__, irq); \
print_backtrace(_get_SP()); }
show_stack(NULL); }
#define check_arg_cpu(cpu) \
if (cpu < 0 || cpu >= NumProcessors){ \
printk("open_pic.c:%d: illegal cpu %d\n", __LINE__, cpu); \
print_backtrace(_get_SP()); }
show_stack(NULL); }
#else
#define check_arg_ipi(ipi) do {} while (0)
#define check_arg_timer(timer) do {} while (0)
@@ -798,7 +798,8 @@ static void openpic_ack_irq(unsigned int irq_nr)
static void openpic_end_irq(unsigned int irq_nr)
{
if (!(irq_desc[irq_nr].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
if (!(irq_desc[irq_nr].status & (IRQ_DISABLED|IRQ_INPROGRESS))
&& irq_desc[irq_nr].action)
openpic_enable_irq(irq_nr);
}
......
@@ -236,7 +236,8 @@ ppc405_uic_end(unsigned int irq)
}
}
if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))
&& irq_desc[irq].action) {
ppc_cached_irq_mask[word] |= 1 << (31 - bit);
switch (word){
case 0:
......
@@ -90,7 +90,8 @@ static void m8260_end_irq(unsigned int irq_nr)
int bit, word;
volatile uint *simr;
if (!(irq_desc[irq_nr].status & (IRQ_DISABLED|IRQ_INPROGRESS))) {
if (!(irq_desc[irq_nr].status & (IRQ_DISABLED|IRQ_INPROGRESS))
&& irq_desc[irq_nr].action) {
bit = irq_to_siubit[irq_nr];
word = irq_to_siureg[irq_nr];
......
@@ -46,7 +46,8 @@ static void m8xx_unmask_irq(unsigned int irq_nr)
static void m8xx_end_irq(unsigned int irq_nr)
{
if (!(irq_desc[irq_nr].status & (IRQ_DISABLED|IRQ_INPROGRESS))) {
if (!(irq_desc[irq_nr].status & (IRQ_DISABLED|IRQ_INPROGRESS))
&& irq_desc[irq_nr].action) {
int bit, word;
bit = irq_nr & 0x1f;
......
@@ -44,6 +44,7 @@
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/prom.h>
#include <asm/hardirq.h>
#ifdef CONFIG_PPC_ISERIES
#include <asm/iSeries/Paca.h>
#endif
@@ -293,7 +294,7 @@ void show_regs(struct pt_regs * regs)
break;
}
printk("\n");
print_backtrace((unsigned long *)regs->gpr[1]);
show_stack((unsigned long *)regs->gpr[1]);
}
void exit_thread(void)
@@ -418,7 +419,6 @@ void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp)
#endif /* CONFIG_ALTIVEC */
}
#if 0
int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
{
struct pt_regs *regs = tsk->thread.regs;
@@ -431,7 +431,14 @@ int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
| tsk->thread.fpexc_mode;
return 0;
}
#endif
int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
{
unsigned int val;
val = __unpack_fe01(tsk->thread.fpexc_mode);
return put_user(val, (unsigned int *) adr);
}
int sys_clone(int p1, int p2, int p3, int p4, int p5, int p6,
struct pt_regs *regs)
@@ -486,20 +493,25 @@ int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
}
void
print_backtrace(unsigned long *sp)
show_stack(unsigned long *sp)
{
int cnt = 0;
unsigned long i;
if (sp == NULL)
sp = (unsigned long *)_get_SP();
printk("Call backtrace: ");
while (sp) {
if (__get_user( i, &sp[1] ))
for (;;) {
if (__get_user(sp, (unsigned long **)sp))
break;
if (sp == NULL)
break;
if (__get_user(i, &sp[1]))
break;
if (cnt++ % 7 == 0)
printk("\n");
printk("%08lX ", i);
if (cnt > 32) break;
if (__get_user(sp, (unsigned long **)sp))
if (cnt > 32)
break;
}
printk("\n");
......
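
The rewritten walker leans on the PPC32 stack convention: the word at the stack pointer is the back chain to the caller's frame, and the word at offset 4 (sp[1]) is the saved LR. Each load goes through __get_user so a corrupt chain simply ends the walk instead of faulting. A standalone sketch of the same loop under those assumptions:

#include <stdio.h>

struct frame {
	struct frame *back_chain;	/* sp[0]: caller's frame */
	unsigned long saved_lr;		/* sp[1]: return address */
};

static void walk_stack(struct frame *sp)
{
	int cnt = 0;

	printf("Call backtrace: ");
	for (;;) {
		sp = sp ? sp->back_chain : NULL;	/* follow the chain first */
		if (sp == NULL)
			break;
		if (cnt++ % 7 == 0)
			printf("\n");
		printf("%08lX ", sp->saved_lr);
		if (cnt > 32)			/* same sanity cap as the kernel's */
			break;
	}
	printf("\n");
}

int main(void)
{
	struct frame f1 = { NULL, 0x100004F0UL };	/* fabricated frames */
	struct frame f0 = { &f1, 0x10000230UL };

	walk_stack(&f0);
	return 0;
}
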
@@ -28,6 +28,7 @@
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <asm/uaccess.h>
#include <asm/page.h>
@@ -168,6 +169,9 @@ int sys_ptrace(long request, long pid, long addr, long data)
/* are we already being traced? */
if (current->ptrace & PT_PTRACED)
goto out;
ret = security_ops->ptrace(current->parent, current);
if (ret)
goto out;
/* set the ptrace bit in the process flags. */
current->ptrace |= PT_PTRACED;
ret = 0;
......
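
This wires the PTRACE_TRACEME path into the 2.5 security-module hook table: any registered module can veto the request by returning nonzero. A minimal module-side sketch, assuming the security_operations layout of that era (the ptrace hook took the tracing and the traced task):

/* Hedged sketch of a module-side hook; field name and argument order
 * assume the 2.5-era <linux/security.h>. */
static int my_ptrace(struct task_struct *parent, struct task_struct *child)
{
	/* Example policy: never let an unprivileged parent trace root. */
	if (child->uid == 0 && parent->uid != 0)
		return -EPERM;
	return 0;	/* 0 = allow; sys_ptrace above bails out on nonzero */
}

static struct security_operations my_security_ops = {
	.ptrace = my_ptrace,
	/* other hooks left at their defaults */
};
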
@@ -66,7 +66,7 @@ unsigned long sysmap_size;
/* Used with the BI_MEMSIZE bootinfo parameter to store the memory
size value reported by the boot loader. */
unsigned int boot_mem_size;
unsigned long boot_mem_size;
unsigned long ISA_DMA_THRESHOLD;
unsigned long DMA_MODE_READ, DMA_MODE_WRITE;
......
@@ -289,273 +289,6 @@ void smp_call_function_interrupt(void)
atomic_inc(&call_data->finished);
}
#if 0 /* Old boot code. */
void __init smp_boot_cpus(void)
{
int i, cpu_nr;
struct task_struct *p;
printk("Entering SMP Mode...\n");
smp_store_cpu_info(0);
cpu_online_map = 1UL;
/*
* assume for now that the first cpu booted is
* cpu 0, the master -- Cort
*/
cpu_callin_map[0] = 1;
for (i = 0; i < NR_CPUS; i++) {
prof_counter[i] = 1;
prof_multiplier[i] = 1;
}
/*
* XXX very rough.
*/
cache_decay_ticks = HZ/100;
smp_ops = ppc_md.smp_ops;
if (smp_ops == NULL) {
printk("SMP not supported on this machine.\n");
return;
}
/* Probe platform for CPUs */
cpu_nr = smp_ops->probe();
/*
* only check for cpus we know exist. We keep the callin map
* with cpus at the bottom -- Cort
*/
if (cpu_nr > max_cpus)
cpu_nr = max_cpus;
#ifdef CONFIG_PPC_ISERIES
smp_iSeries_space_timers( cpu_nr );
#endif
for (i = 1; i < cpu_nr; i++) {
int c;
struct pt_regs regs;
/* create a process for the processor */
/* only regs.msr is actually used, and 0 is OK for it */
memset(&regs, 0, sizeof(struct pt_regs));
p = do_fork(CLONE_VM|CLONE_IDLETASK, 0, &regs, 0);
if (IS_ERR(p))
panic("failed fork for CPU %d", i);
init_idle(p, i);
unhash_process(p);
secondary_ti = p->thread_info;
p->thread_info->cpu = i;
/*
* There was a cache flush loop here to flush the cache
* to memory for the first 8MB of RAM. The cache flush
* has been pushed into the kick_cpu function for those
* platforms that need it.
*/
/* wake up cpus */
smp_ops->kick_cpu(i);
/*
* wait to see if the cpu made a callin (is actually up).
* use this value that I found through experimentation.
* -- Cort
*/
for (c = 1000; c && !cpu_callin_map[i]; c--)
udelay(100);
if (cpu_callin_map[i]) {
char buf[32];
sprintf(buf, "found cpu %d", i);
if (ppc_md.progress) ppc_md.progress(buf, 0x350+i);
printk("Processor %d found.\n", i);
} else {
char buf[32];
sprintf(buf, "didn't find cpu %d", i);
if (ppc_md.progress) ppc_md.progress(buf, 0x360+i);
printk("Processor %d is stuck.\n", i);
}
}
/* Setup CPU 0 last (important) */
smp_ops->setup_cpu(0);
/* FIXME: Not with hotplug CPUS --RR */
if (num_online_cpus() < 2)
smp_tb_synchronized = 1;
}
void __init smp_software_tb_sync(int cpu)
{
#define PASSES 4 /* 4 passes.. */
int pass;
int i, j;
/* stop - start will be the number of timebase ticks it takes for cpu0
* to send a message to all others and the first response to show up.
*
* ASSUMPTION: this time is similar for all cpus
* ASSUMPTION: the time to send a one-way message is ping/2
*/
register unsigned long start = 0;
register unsigned long stop = 0;
register unsigned long temp = 0;
set_tb(0, 0);
/* multiple passes to get in l1 cache.. */
for (pass = 2; pass < 2+PASSES; pass++){
if (cpu == 0){
mb();
for (i = j = 1; i < NR_CPUS; i++, j++){
/* skip stuck cpus */
while (!cpu_callin_map[j])
++j;
while (cpu_callin_map[j] != pass)
barrier();
}
mb();
tb_sync_flag = pass;
start = get_tbl(); /* start timing */
while (tb_sync_flag)
mb();
stop = get_tbl(); /* end timing */
/* theoretically, the divisor should be 2, but
* I get better results on my dual mtx. someone
* please report results on other smp machines..
*/
tb_offset = (stop-start)/4;
mb();
tb_sync_flag = pass;
udelay(10);
mb();
tb_sync_flag = 0;
mb();
set_tb(0,0);
mb();
} else {
cpu_callin_map[cpu] = pass;
mb();
while (!tb_sync_flag)
mb(); /* wait for cpu0 */
mb();
tb_sync_flag = 0; /* send response for timing */
mb();
while (!tb_sync_flag)
mb();
temp = tb_offset; /* make sure offset is loaded */
while (tb_sync_flag)
mb();
set_tb(0,temp); /* now, set the timebase */
mb();
}
}
if (cpu == 0) {
smp_tb_synchronized = 1;
printk("smp_software_tb_sync: %d passes, final offset: %ld\n",
PASSES, tb_offset);
}
/* so time.c doesn't get confused */
set_dec(tb_ticks_per_jiffy);
last_jiffy_stamp(cpu) = 0;
}
void __init smp_commence(void)
{
/*
* Lets the callin's below out of their loop.
*/
if (ppc_md.progress) ppc_md.progress("smp_commence", 0x370);
wmb();
smp_commenced = 1;
/* if the smp_ops->setup_cpu function has not already synched the
* timebases with a nicer hardware-based method, do so now
*
* I am open to suggestions for improvements to this method
* -- Troy <hozer@drgw.net>
*
* NOTE: if you are debugging, set smp_tb_synchronized for now,
* since this code runs pretty early and needs all cpus that
* reported in to smp_callin_map to be working
*
* NOTE2: this code doesn't seem to work on > 2 cpus. -- paulus/BenH
*/
/* FIXME: This doesn't work with hotplug CPUs --RR */
if (!smp_tb_synchronized && num_online_cpus() == 2) {
unsigned long flags;
local_irq_save(flags);
smp_software_tb_sync(0);
local_irq_restore(flags);
}
}
void __init smp_callin(void)
{
int cpu = smp_processor_id();
smp_store_cpu_info(cpu);
set_dec(tb_ticks_per_jiffy);
/* Set online before we acknowledge. */
set_bit(cpu, &cpu_online_map);
wmb();
cpu_callin_map[cpu] = 1;
smp_ops->setup_cpu(cpu);
while (!smp_commenced)
barrier();
/* see smp_commence for more info */
if (!smp_tb_synchronized && num_online_cpus() == 2) {
smp_software_tb_sync(cpu);
}
local_irq_enable();
}
/* intel needs this */
void __init initialize_secondary(void)
{
}
/* Activate a secondary processor. */
int __init start_secondary(void *unused)
{
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
smp_callin();
return cpu_idle(NULL);
}
void __init smp_setup(char *str, int *ints)
{
}
int __init setup_profiling_timer(unsigned int multiplier)
{
return 0;
}
void __init smp_store_cpu_info(int id)
{
struct cpuinfo_PPC *c = &cpu_data[id];
/* assume bogomips are same for everything */
c->loops_per_jiffy = loops_per_jiffy;
c->pvr = mfspr(PVR);
}
static int __init maxcpus(char *str)
{
get_option(&str, &max_cpus);
return 1;
}
__setup("maxcpus=", maxcpus);
#else /* New boot code */
/* FIXME: Do this properly for all archs --RR */
static spinlock_t timebase_lock = SPIN_LOCK_UNLOCKED;
static unsigned int timebase_upper = 0, timebase_lower = 0;
@@ -643,6 +376,7 @@ int __devinit start_secondary(void *unused)
printk("CPU %i done callin...\n", cpu);
smp_ops->setup_cpu(cpu);
printk("CPU %i done setup...\n", cpu);
local_irq_enable();
smp_ops->take_timebase();
printk("CPU %i done timebase take...\n", cpu);
@@ -707,4 +441,3 @@ void smp_cpus_done(unsigned int max_cpus)
{
smp_ops->setup_cpu(0);
}
#endif
@@ -45,6 +45,7 @@
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/bootinfo.h>
#include "mem_pieces.h"
#include "mmu_decl.h"
@@ -242,8 +243,13 @@ void __init MMU_init(void)
/*
* Figure out how much memory we have, how much
* is lowmem, and how much is highmem.
* is lowmem, and how much is highmem. If we were
* passed the total memory size from the bootloader,
* just use it.
*/
if (boot_mem_size)
total_memory = boot_mem_size;
else
total_memory = ppc_md.find_end_of_memory();
if (__max_memory && total_memory > __max_memory)
......
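
boot_mem_size is filled in from the BI_MEMSIZE bootinfo record handed over by the boot loader (see the bootinfo.h hunk above); when present it saves MMU_init() a platform-specific memory probe. Roughly how the record gets picked up, as a hedged sketch condensed from the 2.5 bootinfo walk (the exact loop in the kernel may differ in detail):

void parse_bootinfo(struct bi_record *rec)
{
	/* Walk the boot loader's tag list until the terminator. */
	for (; rec && rec->tag != BI_LAST;
	     rec = (struct bi_record *)((unsigned long)rec + rec->size)) {
		unsigned long *data = rec->data;

		if (rec->tag == BI_MEMSIZE)
			boot_mem_size = data[0];	/* consumed by MMU_init() */
	}
}
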
@@ -602,8 +602,7 @@ int __debug_serinit( void )
{
unsigned long flags;
save_flags (flags);
cli();
local_irq_save(flags);
/* turn off Rx and Tx interrupts */
custom.intena = IF_RBF | IF_TBE;
@@ -611,7 +610,7 @@ int __debug_serinit( void )
/* clear any pending interrupt */
custom.intreq = IF_RBF | IF_TBE;
restore_flags (flags);
local_irq_restore(flags);
/*
* set the appropriate directions for the modem control flags,
......
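
The __debug_serinit hunks show the 2.5 interrupt-API migration in miniature: the global cli()/restore_flags() primitives give way to local_irq_save()/local_irq_restore(), which record state and disable interrupts on the local CPU in one step. The general pattern:

static void touch_irq_sensitive_hw(void)
{
	unsigned long flags;

	/* One call records the current interrupt state AND disables
	 * interrupts locally -- replacing save_flags(flags); cli(); */
	local_irq_save(flags);

	/* ... program the device registers ... */

	/* Restores exactly the saved state -- replacing restore_flags(flags); */
	local_irq_restore(flags);
}
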
@@ -77,25 +77,20 @@ smp_chrp_give_timebase(void)
spin_unlock(&timebase_lock);
while (timebase_upper || timebase_lower)
rmb();
barrier();
call_rtas("thaw-time-base", 0, 1, NULL);
}
void __devinit
smp_chrp_take_timebase(void)
{
int done = 0;
while (!done) {
while (!(timebase_upper || timebase_lower))
barrier();
spin_lock(&timebase_lock);
if (timebase_upper || timebase_lower) {
set_tb(timebase_upper, timebase_lower);
timebase_upper = 0;
timebase_lower = 0;
done = 1;
}
spin_unlock(&timebase_lock);
}
printk("CPU %i taken timebase\n", smp_processor_id());
}
......
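
The give/take pair is a small two-CPU rendezvous: the boot CPU freezes the timebase through RTAS, publishes its 64-bit value as (upper, lower) under timebase_lock, and spins until the secondary zeroes both words as an acknowledgement. The rewritten taker can drop the done flag because the zeroing itself is the handshake. Both sides condensed (a sketch, not the literal code):

static void give_timebase(void)	/* boot CPU */
{
	/* call_rtas("freeze-time-base", ...) first, so the value can't drift */
	spin_lock(&timebase_lock);
	timebase_upper = get_tbu();
	timebase_lower = get_tbl();
	spin_unlock(&timebase_lock);

	while (timebase_upper || timebase_lower)
		barrier();		/* wait for the taker's ack */
	/* call_rtas("thaw-time-base", ...) */
}

static void take_timebase(void)	/* secondary CPU */
{
	while (!(timebase_upper || timebase_lower))
		barrier();		/* wait for the publication */
	spin_lock(&timebase_lock);
	set_tb(timebase_upper, timebase_lower);
	timebase_upper = 0;		/* ack: releases the giver */
	timebase_lower = 0;
	spin_unlock(&timebase_lock);
}
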
@@ -301,12 +301,6 @@ lopec_progress(char *s, unsigned short hex)
}
#endif /* CONFIG_SERIAL_TEXT_DEBUG */
static unsigned long __init
lopec_find_end_of_memory(void)
{
return mpc10x_get_mem_size(MPC10X_MEM_MAP_B);
}
TODC_ALLOC();
static void __init
@@ -376,7 +370,6 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
ppc_md.power_off = lopec_power_off;
ppc_md.halt = lopec_halt;
ppc_md.find_end_of_memory = lopec_find_end_of_memory;
ppc_md.setup_io_mappings = lopec_map_io;
ppc_md.time_init = todc_time_init;
......
@@ -149,7 +149,8 @@ static void __pmac pmac_unmask_irq(unsigned int irq_nr)
static void __pmac pmac_end_irq(unsigned int irq_nr)
{
if (!(irq_desc[irq_nr].status & (IRQ_DISABLED|IRQ_INPROGRESS))) {
if (!(irq_desc[irq_nr].status & (IRQ_DISABLED|IRQ_INPROGRESS))
&& irq_desc[irq_nr].action) {
set_bit(irq_nr, ppc_cached_irq_mask);
pmac_set_irq_mask(irq_nr, 1);
}
......
@@ -191,11 +191,8 @@ pplus_restart(char *cmd)
static void
pplus_halt(void)
{
unsigned long flags;
local_irq_disable();
/* set exception prefix high - to the prom */
save_flags( flags );
restore_flags( flags|MSR_IP );
_nmask_and_or_msr(MSR_EE, MSR_IP);
/* make sure bit 0 (reset) is a 0 */
outb( inb(0x92) & ~1L , 0x92 );
......
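
pplus_halt() above (and the prep halt/power-off paths further down) stop abusing restore_flags(flags | MSR_IP) and call _nmask_and_or_msr(MSR_EE, MSR_IP) instead: clear MSR_EE (external interrupts off) and set MSR_IP (take exceptions at the high/ROM vectors) in a single read-modify-write of the MSR. The semantics, sketched in C (the real helper is a few assembly instructions):

void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val)
{
	unsigned long msr = mfmsr();	/* read the Machine State Register */

	msr &= ~nmask;			/* e.g. drop MSR_EE */
	msr |= or_val;			/* e.g. raise MSR_IP */
	mtmsr(msr);			/* single write back; plenty atomic
					 * for these shutdown paths */
}
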
@@ -458,10 +458,6 @@ prep_calibrate_decr(void)
/* If we didn't get it from the residual data, try this. */
if ( res ) {
unsigned long flags;
save_flags(flags);
#define TIMER0_COUNT 0x40
#define TIMER_CONTROL 0x43
/* set timer to periodic mode */
@@ -476,7 +472,7 @@ prep_calibrate_decr(void)
/* wait for calibrate */
while ( calibrate_steps )
;
restore_flags(flags);
local_irq_disable();
free_irq( 0, NULL);
}
}
@@ -581,11 +577,8 @@ prep_restart(char *cmd)
static void __prep
prep_halt(void)
{
unsigned long flags;
local_irq_disable();
/* set exception prefix high - to the prom */
save_flags( flags );
restore_flags( flags|MSR_IP );
_nmask_and_or_msr(MSR_EE, MSR_IP);
/* make sure bit 0 (reset) is a 0 */
outb( inb(0x92) & ~1L , 0x92 );
@@ -648,11 +641,8 @@ static void __prep
prep_power_off(void)
{
if ( _prep_type == _PREP_IBM) {
unsigned long flags;
local_irq_disable();
/* set exception prefix high - to the prom */
save_flags( flags );
restore_flags( flags|MSR_IP );
_nmask_and_or_msr(MSR_EE, MSR_IP);
utah_sig87c750_setbit(21, 5, 1); /* set bit 21.5, "PMEXEC_OFF" */
@@ -761,37 +751,6 @@ static struct smp_ops_t prep_smp_ops __prepdata = {
};
#endif /* CONFIG_SMP */
/*
* This finds the amount of physical ram and does necessary
* setup for prep. This is pretty architecture specific so
* this will likely stay separate from the pmac.
* -- Cort
*/
static unsigned long __init
prep_find_end_of_memory(void)
{
unsigned long total = 0;
extern unsigned int boot_mem_size;
#ifdef CONFIG_PREP_RESIDUAL
total = res->TotalMemory;
#endif
if (total == 0 && boot_mem_size != 0)
total = boot_mem_size;
else if (total == 0) {
/*
* I need a way to probe the amount of memory if the residual
* data doesn't contain it. -- Cort
*/
total = 0x02000000;
printk(KERN_INFO "Ramsize from residual data was 0"
" -- defaulting to %ldM\n", total>>20);
}
return (total);
}
/*
* Setup the bat mappings we're going to load that cover
* the io areas. RAM was mapped by mapin_ram().
@@ -807,6 +766,7 @@ prep_map_io(void)
static int __init
prep_request_io(void)
{
if (_machine == _MACH_prep) {
#ifdef CONFIG_NVRAM
request_region(PREP_NVRAM_AS0, 0x8, "nvram");
#endif
@@ -814,6 +774,7 @@ prep_request_io(void)
request_region(0x40,0x20,"timer");
request_region(0x80,0x10,"dma page reg");
request_region(0xc0,0x20,"dma2");
}
return 0;
}
@@ -879,7 +840,6 @@ prep_init(unsigned long r3, unsigned long r4, unsigned long r5,
ppc_md.time_init = mk48t59_init;
}
ppc_md.find_end_of_memory = prep_find_end_of_memory;
ppc_md.setup_io_mappings = prep_map_io;
#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE)
......
@@ -187,12 +187,6 @@ spruce_halt(void)
extern int boot_mem_size;
static unsigned long __init
spruce_find_end_of_memory(void)
{
return boot_mem_size;
}
static void __init
spruce_map_io(void)
{
@@ -214,7 +208,6 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
ppc_md.init_IRQ = cpc700_init_IRQ;
ppc_md.get_irq = cpc700_get_irq;
ppc_md.find_end_of_memory = spruce_find_end_of_memory;
ppc_md.setup_io_mappings = spruce_map_io;
ppc_md.restart = spruce_restart;
......
@@ -26,10 +26,6 @@
#define SMP_MB
#endif /* CONFIG_SMP */
/*
* These used to be if'd out here because using : "cc" as a constraint
* resulted in errors from egcs. Things appear to be OK with gcc-2.95.
*/
static __inline__ void set_bit(int nr, volatile unsigned long * addr)
{
unsigned long old;
@@ -224,9 +220,7 @@ static __inline__ int __test_and_change_bit(int nr, volatile unsigned long *addr
static __inline__ int test_bit(int nr, __const__ volatile unsigned long *addr)
{
__const__ unsigned int *p = (__const__ unsigned int *) addr;
return ((p[nr >> 5] >> (nr & 0x1f)) & 1) != 0;
return ((addr[nr >> 5] >> (nr & 0x1f)) & 1) != 0;
}
/* Return the bit position of the most significant 1 bit in a word */
......
@@ -35,6 +35,7 @@ struct bi_record {
extern struct bi_record *find_bootinfo(void);
extern void parse_bootinfo(struct bi_record *rec);
extern unsigned long boot_mem_size;
#endif /* CONFIG_APUS */
......
@@ -196,10 +196,11 @@
#define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
#define SPRN_EAR 0x11A /* External Address Register */
#define SPRN_ESR 0x3D4 /* Exception Syndrome Register */
#define ESR_IMCP 0x80000000 /* Instr. Machine Check - Protection */
#define ESR_IMCN 0x40000000 /* Instr. Machine Check - Non-config */
#define ESR_IMCB 0x20000000 /* Instr. Machine Check - Bus error */
#define ESR_IMCT 0x10000000 /* Instr. Machine Check - Timeout */
#define ESR_MCI 0x80000000 /* 405 Machine Check - Instruction */
#define ESR_IMCP 0x80000000 /* 403 Inst. Mach. Check - Protection */
#define ESR_IMCN 0x40000000 /* 403 Inst. Mach. Check - Non-config */
#define ESR_IMCB 0x20000000 /* 403 Inst. Mach. Check - Bus error */
#define ESR_IMCT 0x10000000 /* 403 Inst. Mach. Check - Timeout */
#define ESR_PIL 0x08000000 /* Program Exception - Illegal */
#define ESR_PPR 0x04000000 /* Program Exception - Privileged */
#define ESR_PTR 0x02000000 /* Program Exception - Trap */
@@ -747,9 +748,10 @@ unsigned long get_wchan(struct task_struct *p);
#define KSTK_ESP(tsk) ((tsk)->thread.regs? (tsk)->thread.regs->gpr[1]: 0)
/* Get/set floating-point exception mode */
#define GET_FP_EXC_MODE(tsk) __unpack_fe01((tsk)->thread.fpexc_mode)
#define SET_FP_EXC_MODE(tsk, val) set_fpexc_mode((tsk), (val))
#define GET_FPEXC_CTL(tsk, adr) get_fpexc_mode((tsk), (adr))
#define SET_FPEXC_CTL(tsk, val) set_fpexc_mode((tsk), (val))
extern int get_fpexc_mode(struct task_struct *tsk, unsigned long adr);
extern int set_fpexc_mode(struct task_struct *tsk, unsigned int val);
static inline unsigned int __unpack_fe01(unsigned int msr_bits)
......
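
GET_FPEXC_CTL/SET_FPEXC_CTL are the arch back-ends behind the generic PR_GET_FPEXC/PR_SET_FPEXC prctl pair, which is why get_fpexc_mode() in the process.c hunk writes its result with put_user: the argument is a user-space address. A hedged user-space sketch, assuming the constants from <linux/prctl.h>:

#include <stdio.h>
#include <sys/prctl.h>		/* PR_GET_FPEXC, PR_SET_FPEXC, PR_FP_EXC_* */

int main(void)
{
	unsigned int mode;

	/* The kernel side lands in get_fpexc_mode(), which put_user()s the
	 * unpacked MSR_FE0/FE1 bits into &mode. */
	if (prctl(PR_GET_FPEXC, &mode) == 0)
		printf("FP exception mode: %#x\n", mode);

	/* And this reaches set_fpexc_mode() via SET_FPEXC_CTL. */
	if (prctl(PR_SET_FPEXC, PR_FP_EXC_DISABLED) != 0)
		perror("PR_SET_FPEXC");
	return 0;
}
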
@@ -72,12 +72,26 @@ static inline void init_rwsem(struct rw_semaphore *sem)
*/
static inline void __down_read(struct rw_semaphore *sem)
{
if (atomic_inc_return((atomic_t *)(&sem->count)) >= 0)
if (atomic_inc_return((atomic_t *)(&sem->count)) > 0)
smp_wmb();
else
rwsem_down_read_failed(sem);
}
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
int tmp;
while ((tmp = sem->count) >= 0) {
if (tmp == cmpxchg(&sem->count, tmp,
tmp + RWSEM_ACTIVE_READ_BIAS)) {
smp_wmb();
return 1;
}
}
return 0;
}
/*
* lock for writing
*/
@@ -93,6 +107,16 @@ static inline void __down_write(struct rw_semaphore *sem)
rwsem_down_write_failed(sem);
}
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
int tmp;
tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
RWSEM_ACTIVE_WRITE_BIAS);
smp_wmb();
return tmp == RWSEM_UNLOCKED_VALUE;
}
/*
* unlock after reading
*/
......
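
Two related changes in the rwsem hunks. First, __down_read() now requires a strictly positive result from atomic_inc_return(): the new count is positive only when no writer or waiter bias is present, and a result of exactly 0 means a race with a writer, so it must take the slow path. Second, the trylock variants build on the count encoding: 0 is unlocked, each reader adds RWSEM_ACTIVE_READ_BIAS (+1), and a writer adds RWSEM_ACTIVE_WRITE_BIAS, which sets the sign bit. A user-space sketch of the same trylock logic, with gcc's __sync builtin standing in for the kernel's cmpxchg:

#include <stdio.h>

#define RWSEM_UNLOCKED_VALUE	0x00000000
#define RWSEM_ACTIVE_READ_BIAS	0x00000001
#define RWSEM_ACTIVE_WRITE_BIAS	((int)0xffff0001)	/* sign bit => writer */

static int down_read_trylock(volatile int *count)
{
	int tmp;

	/* Loop because other readers may race us; any negative count
	 * means a writer is active or queued, so give up immediately. */
	while ((tmp = *count) >= 0) {
		if (tmp == __sync_val_compare_and_swap(count, tmp,
				tmp + RWSEM_ACTIVE_READ_BIAS))
			return 1;
	}
	return 0;
}

static int down_write_trylock(volatile int *count)
{
	/* A writer only succeeds from the fully unlocked state, so a
	 * single compare-and-swap decides it. */
	return __sync_val_compare_and_swap(count, RWSEM_UNLOCKED_VALUE,
			RWSEM_ACTIVE_WRITE_BIAS) == RWSEM_UNLOCKED_VALUE;
}

int main(void)
{
	volatile int count = RWSEM_UNLOCKED_VALUE;

	printf("read trylock:  %d\n", down_read_trylock(&count));	/* 1 */
	printf("write trylock: %d\n", down_write_trylock(&count));	/* 0 */
	return 0;
}
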
@@ -24,7 +24,7 @@ extern spinlock_t kernel_flag;
/*
* Release global kernel lock and global interrupt lock
*/
#define release_kernel_lock(task, cpu) \
#define release_kernel_lock(task) \
do { \
if (unlikely(task->lock_depth >= 0)) \
spin_unlock(&kernel_flag); \
......
@@ -231,6 +231,8 @@
#define __NR_futex 221
#define __NR_sched_setaffinity 222
#define __NR_sched_getaffinity 223
#define __NR_security 224
#define __NR_tuxcall 225
#define __NR(n) #n
......