Commit 92cc7d38 authored by Linus Torvalds

Import 2.1.34

parent 7a88bde7
......@@ -257,14 +257,12 @@ S: Australia
N: Alan Cox
E: alan@lxorguk.ukuu.org.uk (linux related - except big patches)
E: iialan@www.linux.org.uk (linux.org.uk/big patches)
E: iialan@www.uk.linux.org (linux.org.uk/big patches)
E: alan@cymru.net (commercial CymruNET stuff)
E: gw4pts@gw4pts.ampr.org (amateur radio stuff)
E: GW4PTS@GB7SWN (packet radio)
E: Please don't use iialan@iifeak.swan.ac.uk for Linux stuff
S: c/o 3Com/I^2IT Limited
E: Alan.Cox@linux.org (if others fail)
S: CymruNet Limited
S: The Innovation Centre
S: University Of Wales
S: Singleton Park
S: Swansea, SA2 8PP
S: Wales, UK
D: NET2Debugged/NET3 author
......
......@@ -539,6 +539,12 @@ CONFIG_NET_SECURITY
any security protocols currently and that this option only really supports
security on IPv4 links at the moment.
Socket Security API Support (EXPERIMENTAL)
CONFIG_NET_SECURITY
Enable use of the socket security API. Note that Linux does not include
any security protocols currently and that this option only really supports
security on IPv4 links at the moment.
Sun floppy controller support
CONFIG_BLK_DEV_SUNFD
This is support for floppy drives on Sun Sparc workstations. Say Y
......@@ -1530,9 +1536,10 @@ CONFIG_BLK_DEV_SR
Enable vendor-specific extensions (for SCSI CDROM)
CONFIG_BLK_DEV_SR_VENDOR
This enables the usage of vendor specific SCSI commands. This is
required for some stuff which is newer than the SCSI-II standard,
most important is the MultiSession CD support. You'll probably want
to say Y here, unless you have a _real old_ CD-ROM drive.
required for some stuff which is newer than the SCSI-II standard:
MultiSession CD support and some ioctls for reading Mode 2 Form 2
sectors. You'll probably want to say Y here, unless you have a
_real old_ CD-ROM drive.
SCSI generic support
CONFIG_CHR_DEV_SG
......
VERSION = 2
PATCHLEVEL = 1
SUBLEVEL = 33
SUBLEVEL = 34
ARCH = i386
ARCH := $(shell uname -m | sed s/i.86/i386/)
#
# For SMP kernels, set this. We don't want to have this in the config file
......@@ -14,7 +14,7 @@ ARCH = i386
SMP = 1
#
# SMP profiling options
SMP_PROF = 1
# SMP_PROF = 1
.EXPORT_ALL_VARIABLES:
......
......@@ -197,7 +197,7 @@ $(TOPDIR)/include/linux/modversions.h:
endif # CONFIG_MODVERSIONS
ifneq "$(strip $(SYMTAB_OBJS))" ""
$(SYMTAB_OBJS): $(TOPDIR)/include/linux/modversions.h
$(SYMTAB_OBJS): $(TOPDIR)/include/linux/modversions.h $(SYMTAB_OBJS:.o=.c)
$(CC) $(CFLAGS) -DEXPORT_SYMTAB -c $(@:.o=.c)
endif
......
......@@ -19,7 +19,7 @@
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/checksum.h>
#include <asm/softirq.h>
extern void bcopy (const char *src, char *dst, int len);
extern struct hwrpb_struct *hwrpb;
......@@ -37,6 +37,7 @@ extern void __remlu (void);
extern void __divqu (void);
extern void __remqu (void);
EXPORT_SYMBOL(__alpha_bh_counter);
/* platform dependent support */
EXPORT_SYMBOL(_inb);
......
......@@ -53,7 +53,6 @@ extern void timer_interrupt(struct pt_regs * regs);
*/
static unsigned long irq_mask = ~0UL;
/*
* Update the hardware with the irq mask passed in MASK. The function
* exploits the fact that it is known that only bit IRQ has changed.
......@@ -274,6 +273,16 @@ static inline void handle_nmi(struct pt_regs * regs)
printk("61=%02x, 461=%02x\n", inb(0x61), inb(0x461));
}
unsigned int local_irq_count[NR_CPUS];
atomic_t __alpha_bh_counter;
#ifdef __SMP__
#error Me no hablo Alpha SMP
#else
#define irq_enter(cpu, irq) (++local_irq_count[cpu])
#define irq_exit(cpu, irq) (--local_irq_count[cpu])
#endif
static void unexpected_irq(int irq, struct pt_regs * regs)
{
struct irqaction *action;
......@@ -302,16 +311,19 @@ static void unexpected_irq(int irq, struct pt_regs * regs)
static inline void handle_irq(int irq, struct pt_regs * regs)
{
struct irqaction * action = irq_action[irq];
int cpu = smp_processor_id();
irq_enter(cpu, irq);
kstat.interrupts[irq]++;
if (!action) {
unexpected_irq(irq, regs);
return;
} else {
do {
action->handler(irq, action->dev_id, regs);
action = action->next;
} while (action);
}
do {
action->handler(irq, action->dev_id, regs);
action = action->next;
} while (action);
irq_exit(cpu, irq);
}
static inline void device_interrupt(int irq, int ack, struct pt_regs * regs)
......@@ -323,6 +335,7 @@ static inline void device_interrupt(int irq, int ack, struct pt_regs * regs)
return;
}
irq_enter(cpu, irq);
kstat.interrupts[irq]++;
action = irq_action[irq];
/*
......@@ -336,15 +349,16 @@ static inline void device_interrupt(int irq, int ack, struct pt_regs * regs)
*/
mask_irq(ack);
ack_irq(ack);
if (!action)
return;
if (action->flags & SA_SAMPLE_RANDOM)
add_interrupt_randomness(irq);
do {
action->handler(irq, action->dev_id, regs);
action = action->next;
} while (action);
unmask_irq(ack);
if (action) {
if (action->flags & SA_SAMPLE_RANDOM)
add_interrupt_randomness(irq);
do {
action->handler(irq, action->dev_id, regs);
action = action->next;
} while (action);
unmask_irq(ack);
}
irq_exit(cpu, irq);
}
#ifdef CONFIG_PCI
......
......@@ -13,7 +13,7 @@
#define ldq_u(x,y) \
__asm__ __volatile__("ldq_u %0,%1":"=r" (x):"m" (*(unsigned long *)(y)))
__asm__ __volatile__("ldq_u %0,%1":"=r" (x):"m" (*(const unsigned long *)(y)))
#define stq_u(x,y) \
__asm__ __volatile__("stq_u %1,%0":"=m" (*(unsigned long *)(y)):"r" (x))
......@@ -72,7 +72,7 @@ __asm__ __volatile__("insqh %1,%2,%0":"=r" (z):"r" (x),"r" (y))
* Ok. This isn't fun, but this is the EASY case.
*/
static inline unsigned long
csum_partial_cfu_aligned(unsigned long *src, unsigned long *dst,
csum_partial_cfu_aligned(const unsigned long *src, unsigned long *dst,
long len, unsigned long checksum,
int *errp)
{
......@@ -165,7 +165,7 @@ csum_partial_cfu_dest_aligned(unsigned long *src, unsigned long *dst,
* This is slightly less fun than the above..
*/
static inline unsigned long
csum_partial_cfu_src_aligned(unsigned long *src, unsigned long *dst,
csum_partial_cfu_src_aligned(const unsigned long *src, unsigned long *dst,
unsigned long doff,
long len, unsigned long checksum,
unsigned long partial_dest,
......@@ -227,7 +227,7 @@ csum_partial_cfu_src_aligned(unsigned long *src, unsigned long *dst,
* look at this too closely, you'll go blind.
*/
static inline unsigned long
csum_partial_cfu_unaligned(unsigned long * src, unsigned long * dst,
csum_partial_cfu_unaligned(const unsigned long * src, unsigned long * dst,
unsigned long soff, unsigned long doff,
long len, unsigned long checksum,
unsigned long partial_dest,
......@@ -305,7 +305,7 @@ csum_partial_cfu_unaligned(unsigned long * src, unsigned long * dst,
}
static unsigned int
do_csum_partial_copy_from_user(char *src, char *dst, int len,
do_csum_partial_copy_from_user(const char *src, char *dst, int len,
unsigned int sum, int *errp)
{
unsigned long checksum = (unsigned) sum;
......@@ -316,12 +316,12 @@ do_csum_partial_copy_from_user(char *src, char *dst, int len,
if (!doff) {
if (!soff)
checksum = csum_partial_cfu_aligned(
(unsigned long *) src,
(const unsigned long *) src,
(unsigned long *) dst,
len-8, checksum, errp);
else
checksum = csum_partial_cfu_dest_aligned(
(unsigned long *) src,
(const unsigned long *) src,
(unsigned long *) dst,
soff, len-8, checksum, errp);
} else {
......@@ -329,13 +329,13 @@ do_csum_partial_copy_from_user(char *src, char *dst, int len,
ldq_u(partial_dest, dst);
if (!soff)
checksum = csum_partial_cfu_src_aligned(
(unsigned long *) src,
(const unsigned long *) src,
(unsigned long *) dst,
doff, len-8, checksum,
partial_dest, errp);
else
checksum = csum_partial_cfu_unaligned(
(unsigned long *) src,
(const unsigned long *) src,
(unsigned long *) dst,
soff, doff, len-8, checksum,
partial_dest, errp);
......@@ -352,7 +352,7 @@ do_csum_partial_copy_from_user(char *src, char *dst, int len,
}
unsigned int
csum_partial_copy_from_user(char *src, char *dst, int len,
csum_partial_copy_from_user(const char *src, char *dst, int len,
unsigned int sum, int *errp)
{
if (!access_ok(src, len, VERIFY_READ)) {
......
......@@ -64,10 +64,10 @@ void show_mem(void)
total++;
if (PageReserved(mem_map+i))
reserved++;
else if (!mem_map[i].count)
else if (!atomic_read(&mem_map[i].count))
free++;
else
shared += mem_map[i].count-1;
shared += atomic_read(&mem_map[i].count) - 1;
}
printk("%d pages of RAM\n",total);
printk("%d free pages\n",free);
......@@ -163,7 +163,7 @@ void mem_init(unsigned long start_mem, unsigned long end_mem)
clear_bit(PG_DMA, &mem_map[MAP_NR(tmp)].flags);
if (PageReserved(mem_map+MAP_NR(tmp)))
continue;
mem_map[MAP_NR(tmp)].count = 1;
atomic_set(&mem_map[MAP_NR(tmp)].count, 1);
free_page(tmp);
}
tmp = nr_free_pages << PAGE_SHIFT;
......@@ -189,9 +189,9 @@ void si_meminfo(struct sysinfo *val)
if (PageReserved(mem_map+i))
continue;
val->totalram++;
if (!mem_map[i].count)
if (!atomic_read(&mem_map[i].count))
continue;
val->sharedram += mem_map[i].count-1;
val->sharedram += atomic_read(&mem_map[i].count) - 1;
}
val->totalram <<= PAGE_SHIFT;
val->sharedram <<= PAGE_SHIFT;
......
......@@ -30,6 +30,12 @@ CONFIG_BINFMT_ELF=y
# CONFIG_M586 is not set
CONFIG_M686=y
# CONFIG_VIDEO_SELECT is not set
# CONFIG_PNP_PARPORT is not set
#
# Plug and Play support
#
# CONFIG_PNP is not set
#
# Floppy, IDE, and other block devices
......@@ -131,7 +137,6 @@ CONFIG_SCSI_OMIT_FLASHPOINT=y
# CONFIG_SCSI_NCR53C406A is not set
# CONFIG_SCSI_NCR53C7xx is not set
# CONFIG_SCSI_NCR53C8XX is not set
# CONFIG_SCSI_PPA is not set
# CONFIG_SCSI_PAS16 is not set
# CONFIG_SCSI_QLOGIC_FAS is not set
# CONFIG_SCSI_QLOGIC_ISP is not set
......@@ -163,7 +168,6 @@ CONFIG_EEXPRESS_PRO100=y
# CONFIG_NET_POCKET is not set
# CONFIG_FDDI is not set
# CONFIG_DLCI is not set
# CONFIG_PLIP is not set
# CONFIG_PPP is not set
# CONFIG_NET_RADIO is not set
# CONFIG_SLIP is not set
......@@ -214,7 +218,6 @@ CONFIG_VT_CONSOLE=y
CONFIG_SERIAL=y
# CONFIG_SERIAL_EXTENDED is not set
# CONFIG_SERIAL_NONSTANDARD is not set
# CONFIG_PRINTER is not set
CONFIG_MOUSE=y
# CONFIG_ATIXL_BUSMOUSE is not set
# CONFIG_BUSMOUSE is not set
......
......@@ -36,7 +36,7 @@
#include <asm/pgtable.h>
#ifdef __SMP_PROF__
extern volatile unsigned long smp_apic_timer_ticks[1+NR_CPUS];
extern volatile unsigned long smp_local_timer_ticks[1+NR_CPUS];
#endif
#define CR0_NE 32
......@@ -45,6 +45,11 @@ static unsigned char cache_21 = 0xff;
static unsigned char cache_A1 = 0xff;
unsigned int local_irq_count[NR_CPUS];
#ifdef __SMP__
atomic_t __intel_bh_counter;
#else
int __intel_bh_counter;
#endif
#ifdef __SMP_PROF__
static unsigned int int_count[NR_CPUS][NR_IRQS] = {{0},};
......@@ -252,6 +257,9 @@ int get_irq_list(char *buf)
#ifdef __SMP_PROF__
extern unsigned int prof_multiplier[NR_CPUS];
extern unsigned int prof_counter[NR_CPUS];
int get_smp_prof_list(char *buf) {
int i,j, len = 0;
struct irqaction * action;
......@@ -259,7 +267,7 @@ int get_smp_prof_list(char *buf) {
unsigned long sum_spins_syscall = 0;
unsigned long sum_spins_sys_idle = 0;
unsigned long sum_smp_idle_count = 0;
unsigned long sum_apic_timer_ticks = 0;
unsigned long sum_local_timer_ticks = 0;
for (i=0;i<smp_num_cpus;i++) {
int cpunum = cpu_logical_map[i];
......@@ -267,7 +275,7 @@ int get_smp_prof_list(char *buf) {
sum_spins_syscall+=smp_spins_syscall[cpunum];
sum_spins_sys_idle+=smp_spins_sys_idle[cpunum];
sum_smp_idle_count+=smp_idle_count[cpunum];
sum_apic_timer_ticks+=smp_apic_timer_ticks[cpunum];
sum_local_timer_ticks+=smp_local_timer_ticks[cpunum];
}
len += sprintf(buf+len,"CPUS: %10i \n", smp_num_cpus);
......@@ -324,12 +332,23 @@ int get_smp_prof_list(char *buf) {
len +=sprintf(buf+len," idle ticks\n");
len+=sprintf(buf+len,"TICK %10lu",sum_apic_timer_ticks);
len+=sprintf(buf+len,"TICK %10lu",sum_local_timer_ticks);
for (i=0;i<smp_num_cpus;i++)
len+=sprintf(buf+len," %10lu",smp_apic_timer_ticks[cpu_logical_map[i]]);
len+=sprintf(buf+len," %10lu",smp_local_timer_ticks[cpu_logical_map[i]]);
len +=sprintf(buf+len," local APIC timer ticks\n");
len+=sprintf(buf+len,"MULT: ");
for (i=0;i<smp_num_cpus;i++)
len+=sprintf(buf+len," %10u",prof_multiplier[cpu_logical_map[i]]);
len +=sprintf(buf+len," profiling multiplier\n");
len+=sprintf(buf+len,"COUNT: ");
for (i=0;i<smp_num_cpus;i++)
len+=sprintf(buf+len," %10u",prof_counter[cpu_logical_map[i]]);
len +=sprintf(buf+len," profiling counter\n");
len+=sprintf(buf+len, "IPI: %10lu received\n",
ipi_count);
......@@ -345,7 +364,7 @@ int get_smp_prof_list(char *buf) {
#ifdef __SMP__
unsigned char global_irq_holder = NO_PROC_ID;
unsigned volatile int global_irq_lock;
unsigned volatile int global_irq_count;
atomic_t global_irq_count;
#define irq_active(cpu) \
(global_irq_count != local_irq_count[cpu])
......@@ -373,7 +392,7 @@ static unsigned long previous_irqholder;
#undef STUCK
#define STUCK \
if (!--stuck) {printk("wait_on_irq CPU#%d stuck at %08lx, waiting for %08lx (local=%d, global=%d)\n", cpu, where, previous_irqholder, local_count, global_irq_count); stuck = INIT_STUCK; }
if (!--stuck) {printk("wait_on_irq CPU#%d stuck at %08lx, waiting for %08lx (local=%d, global=%d)\n", cpu, where, previous_irqholder, local_count, atomic_read(&global_irq_count)); stuck = INIT_STUCK; }
static inline void wait_on_irq(int cpu, unsigned long where)
{
......@@ -381,7 +400,7 @@ static inline void wait_on_irq(int cpu, unsigned long where)
int local_count = local_irq_count[cpu];
/* Are we the only one in an interrupt context? */
while (local_count != global_irq_count) {
while (local_count != atomic_read(&global_irq_count)) {
/*
* No such luck. Now we need to release the lock,
* _and_ release our interrupt context, because
......@@ -398,7 +417,7 @@ static inline void wait_on_irq(int cpu, unsigned long where)
for (;;) {
STUCK;
check_smp_invalidate(cpu);
if (global_irq_count)
if (atomic_read(&global_irq_count))
continue;
if (global_irq_lock)
continue;
......@@ -424,7 +443,7 @@ void synchronize_irq(void)
int local_count = local_irq_count[cpu];
/* Do we need to wait? */
if (local_count != global_irq_count) {
if (local_count != atomic_read(&global_irq_count)) {
/* The stupid way to do this */
cli();
sti();
......@@ -512,7 +531,7 @@ void __global_restore_flags(unsigned long flags)
#define STUCK \
if (!--stuck) {printk("irq_enter stuck (irq=%d, cpu=%d, global=%d)\n",irq,cpu,global_irq_holder); stuck = INIT_STUCK;}
static inline void irq_enter(int cpu, int irq)
inline void irq_enter(int cpu, int irq)
{
int stuck = INIT_STUCK;
......@@ -525,21 +544,19 @@ static inline void irq_enter(int cpu, int irq)
STUCK;
/* nothing */;
}
atomic_inc(&intr_count);
}
static inline void irq_exit(int cpu, int irq)
inline void irq_exit(int cpu, int irq)
{
__cli();
atomic_dec(&intr_count);
hardirq_exit(cpu);
release_irqlock(cpu);
}
#else
#define irq_enter(cpu, irq) (++intr_count)
#define irq_exit(cpu, irq) (--intr_count)
#define irq_enter(cpu, irq) (++local_irq_count[cpu])
#define irq_exit(cpu, irq) (--local_irq_count[cpu])
#endif
......
......@@ -364,6 +364,29 @@ static int set_rtc_mmss(unsigned long nowtime)
/* last time the cmos clock got updated */
static long last_rtc_update = 0;
/*
* Move this to a header file - right now it shows
* up both here and in smp.c
*/
inline void x86_do_profile (unsigned long eip)
{
if (prof_buffer && current->pid) {
extern int _stext;
eip -= (unsigned long) &_stext;
eip >>= prof_shift;
if (eip < prof_len)
atomic_inc(&prof_buffer[eip]);
else
/*
Don't ignore out-of-bounds EIP values silently;
put them into the last histogram slot, so that
if present, they show up as a sharp peak.
*/
atomic_inc(&prof_buffer[prof_len-1]);
}
}
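For concreteness, a tiny stand-alone sketch of the bucket arithmetic above; stext, prof_shift and prof_len are made-up illustrative values here, not the kernel's real configuration:
#include <stdio.h>

int main(void)
{
	unsigned long stext = 0xc0100000UL;	/* assumed kernel text base */
	unsigned long prof_shift = 2;		/* 4 bytes of text per slot */
	unsigned long prof_len = 0x4000;	/* assumed histogram length */
	unsigned long eip = 0xc010042cUL;	/* sampled instruction pointer */

	eip -= stext;			/* offset into kernel text: 0x42c */
	eip >>= prof_shift;		/* histogram slot: 0x10b */
	if (eip >= prof_len)		/* out-of-range samples land in the last slot */
		eip = prof_len - 1;
	printf("slot %lu\n", eip);
	return 0;
}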
/*
* timer_interrupt() needs to keep up the real-time clock,
* as well as call the "do_timer()" routine every clocktick
......@@ -371,6 +394,14 @@ static long last_rtc_update = 0;
static inline void timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
do_timer(regs);
/*
* In the SMP case we use the local APIC timer interrupt to do the
* profiling.
*/
#ifndef __SMP__
if (!user_mode(regs))
x86_do_profile(regs->eip);
#endif
/*
* If we have an externally synchronized Linux clock, then update
......@@ -496,6 +527,7 @@ unsigned long get_cmos_time(void)
static struct irqaction irq0 = { timer_interrupt, 0, 0, "timer", NULL, NULL};
void time_init(void)
{
xtime.tv_sec = get_cmos_time();
......
......@@ -79,10 +79,10 @@ void show_mem(void)
total++;
if (PageReserved(mem_map+i))
reserved++;
else if (!mem_map[i].count)
else if (!atomic_read(&mem_map[i].count))
free++;
else
shared += mem_map[i].count-1;
shared += atomic_read(&mem_map[i].count) - 1;
}
printk("%d pages of RAM\n",total);
printk("%d free pages\n",free);
......@@ -259,7 +259,7 @@ void mem_init(unsigned long start_mem, unsigned long end_mem)
datapages++;
continue;
}
mem_map[MAP_NR(tmp)].count = 1;
atomic_set(&mem_map[MAP_NR(tmp)].count, 1);
#ifdef CONFIG_BLK_DEV_INITRD
if (!initrd_start || (tmp < initrd_start || tmp >=
initrd_end))
......@@ -316,9 +316,9 @@ void si_meminfo(struct sysinfo *val)
if (PageReserved(mem_map+i))
continue;
val->totalram++;
if (!mem_map[i].count)
if (!atomic_read(&mem_map[i].count))
continue;
val->sharedram += mem_map[i].count-1;
val->sharedram += atomic_read(&mem_map[i].count) - 1;
}
val->totalram <<= PAGE_SHIFT;
val->sharedram <<= PAGE_SHIFT;
......
......@@ -71,12 +71,12 @@ void show_mem(void)
total++;
if (PageReserved(mem_map+i))
reserved++;
else if (!mem_map[i].count)
else if (!atomic_read(&mem_map[i].count))
free++;
else if (mem_map[i].count == 1)
else if (atomic_read(&mem_map[i].count) == 1)
nonshared++;
else
shared += mem_map[i].count-1;
shared += atomic_read(&mem_map[i].count) - 1;
}
printk("%d pages of RAM\n",total);
printk("%d free pages\n",free);
......@@ -463,7 +463,7 @@ void mem_init(unsigned long start_mem, unsigned long end_mem)
datapages++;
continue;
}
mem_map[MAP_NR(tmp)].count = 1;
atomic_set(&mem_map[MAP_NR(tmp)].count, 1);
#ifdef CONFIG_BLK_DEV_INITRD
if (!initrd_start ||
(tmp < (initrd_start & PAGE_MASK) || tmp >= initrd_end))
......@@ -495,9 +495,9 @@ void si_meminfo(struct sysinfo *val)
if (PageReserved(mem_map+i))
continue;
val->totalram++;
if (!mem_map[i].count)
if (!atomic_read(&mem_map[i].count))
continue;
val->sharedram += mem_map[i].count-1;
val->sharedram += atomic_read(&mem_map[i].count) - 1;
}
val->totalram <<= PAGE_SHIFT;
val->sharedram <<= PAGE_SHIFT;
......
......@@ -397,5 +397,5 @@ void init_IRQ(void)
bh_base[i].data = NULL;
}
bh_active = 0;
intr_count = 0;
atomic_set(&intr_count, 0);
}
......@@ -208,10 +208,10 @@ void show_mem(void)
total++;
if (mem_map[i].reserved)
reserved++;
else if (!mem_map[i].count)
else if (!atomic_read(&mem_map[i].count))
free++;
else
shared += mem_map[i].count-1;
shared += atomic_read(&mem_map[i].count) - 1;
}
printk("%d pages of RAM\n", total);
printk("%d free pages\n", free);
......@@ -268,7 +268,7 @@ void mem_init(unsigned long start_mem, unsigned long end_mem)
datapages++;
continue;
}
mem_map[MAP_NR(tmp)].count = 1;
atomic_set(&mem_map[MAP_NR(tmp)].count, 1);
free_page(tmp);
}
tmp = nr_free_pages << PAGE_SHIFT;
......@@ -299,9 +299,9 @@ void si_meminfo(struct sysinfo *val)
if (mem_map[i].reserved)
continue;
val->totalram++;
if (!mem_map[i].count)
if (!atomic_read(&mem_map[i].count))
continue;
val->sharedram += mem_map[i].count-1;
val->sharedram += atomic_read(&mem_map[i].count) - 1;
}
val->totalram <<= PAGE_SHIFT;
val->sharedram <<= PAGE_SHIFT;
......
......@@ -161,7 +161,7 @@ inline void
process_IRQ(int irq, int _irq, struct pt_regs *regs)
{
struct irq_action *action;
intr_count++;
atomic_inc(&intr_count);
if (irq < 16)
{
/* Mask interrupt */
......@@ -242,7 +242,7 @@ process_IRQ(int irq, int _irq, struct pt_regs *regs)
{
BeBox_enable_irq(irq);
}
intr_count--;
atomic_dec(&intr_count);
}
asmlinkage inline void handle_IRQ(struct pt_regs *regs)
......
......@@ -72,10 +72,10 @@ void show_mem(void)
total++;
if (PageReserved(mem_map+i))
reserved++;
else if (!mem_map[i].count)
else if (!atomic_read(&mem_map[i].count))
free++;
else
shared += mem_map[i].count-1;
shared += atomic_read(&mem_map[i].count) - 1;
}
printk("%lu pages of RAM\n",total);
printk("%lu free pages\n",free);
......@@ -161,7 +161,7 @@ void mem_init(unsigned long start_mem, unsigned long end_mem)
continue;
}
clear_bit(PG_reserved, &mem_map[MAP_NR(tmp)].flags);
mem_map[MAP_NR(tmp)].count = 1;
atomic_set(&mem_map[MAP_NR(tmp)].count, 1);
free_page(tmp);
}
tmp = nr_free_pages << PAGE_SHIFT;
......@@ -192,9 +192,9 @@ void si_meminfo(struct sysinfo *val)
if (PageReserved(mem_map+i))
continue;
val->totalram++;
if (!mem_map[i].count)
if (!atomic_read(&mem_map[i].count))
continue;
val->sharedram += mem_map[i].count-1;
val->sharedram += atomic_read(&mem_map[i].count) - 1;
}
val->totalram <<= PAGE_SHIFT;
val->sharedram <<= PAGE_SHIFT;
......
# $Id: Makefile,v 1.26 1997/03/04 16:26:50 jj Exp $
# $Id: Makefile,v 1.27 1997/04/07 06:54:08 davem Exp $
# sparc/Makefile
#
# Makefile for the architecture dependent flags and dependencies on the
......@@ -33,7 +33,8 @@ LIBS := $(TOPDIR)/lib/lib.a $(LIBS) $(TOPDIR)/arch/sparc/prom/promlib.a \
ifdef CONFIG_AP1000
SUBDIRS := $(SUBDIRS) arch/sparc/ap1000 mpp
CORE_FILES := $(TOPDIR)/arch/sparc/ap1000/ap1000lib.o $(TOPDIR)/mpp/mpplib.o $(CORE_FILES)
CORE_FILES := $(TOPDIR)/arch/sparc/ap1000/ap1000lib.o \
$(TOPDIR)/mpp/mpplib.o $(CORE_FILES)
DRIVERS := $(DRIVERS) drivers/ap1000/ap1000.a
CFLAGS := $(CFLAGS) -D__MPP__=1
endif
......
# $Id: config.in,v 1.33 1997/02/05 14:25:01 tdyas Exp $
# $Id: config.in,v 1.35 1997/04/07 06:54:09 davem Exp $
# For a description of the syntax of this configuration file,
# see the Configure script.
#
......
......@@ -49,6 +49,7 @@ TADPOLE_FB_WEITEK=y
CONFIG_SUN_OPENPROMIO=m
CONFIG_SUN_MOSTEK_RTC=y
# CONFIG_SUN_BPP is not set
# CONFIG_SUN_VIDEOPIX is not set
#
# Linux/SPARC audio subsystem (EXPERIMENTAL)
......@@ -187,6 +188,9 @@ CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
CONFIG_RNFS_BOOTP=y
CONFIG_RNFS_RARP=y
CONFIG_NFSD=m
CONFIG_SUNRPC=y
CONFIG_LOCKD=y
CONFIG_SMB_FS=m
CONFIG_SMB_WIN95=y
CONFIG_NCP_FS=m
......@@ -195,6 +199,7 @@ CONFIG_HPFS_FS=m
CONFIG_SYSV_FS=m
CONFIG_AFFS_FS=m
CONFIG_ROMFS_FS=m
CONFIG_AUTOFS_FS=m
CONFIG_AMIGA_PARTITION=y
CONFIG_UFS_FS=y
CONFIG_BSD_DISKLABEL=y
......
# $Id: Makefile,v 1.38 1997/03/04 16:26:29 jj Exp $
# $Id: Makefile,v 1.39 1997/04/01 02:21:44 davem Exp $
# Makefile for the linux kernel.
#
# Note! Dependencies are done automagically by 'make dep', which also
......@@ -15,6 +15,7 @@ ifdef SMP
.S.o:
$(CC) -D__ASSEMBLY__ $(AFLAGS) -ansi -c $< -o $*.o
CHECKASM_CC = $(CC) -D__SMP__
else
......@@ -24,7 +25,7 @@ else
.S.o:
$(CC) -D__ASSEMBLY__ -ansi -c $< -o $*.o
CHECKASM_CC = $(CC)
endif
all: kernel.o head.o
......@@ -62,14 +63,14 @@ endif
check_asm: dummy
@echo "#include <linux/sched.h>" > tmp.c
$(CC) -E tmp.c -o tmp.i
$(CHECKASM_CC) -E tmp.c -o tmp.i
@echo "/* Automatically generated. Do not edit. */" > check_asm.c; echo "#include <linux/sched.h>" >> check_asm.c; echo 'struct task_struct _task; struct mm_struct _mm; struct thread_struct _thread; int main(void) { printf ("/* Automatically generated. Do not edit. */\n#ifndef __ASM_OFFSETS_H__\n#define __ASM_OFFSETS_H__\n\n");' >> check_asm.c
$(SH) ./check_asm.sh task tmp.i check_asm.c
$(SH) ./check_asm.sh mm tmp.i check_asm.c
$(SH) ./check_asm.sh thread tmp.i check_asm.c
@echo 'printf ("\n#endif /* __ASM_OFFSETS_H__ */\n"); return 0; }' >> check_asm.c
@rm -f tmp.[ci]
$(CC) -o check_asm check_asm.c
$(CHECKASM_CC) -o check_asm check_asm.c
./check_asm > asm_offsets.h
@if test -r $(HPATH)/asm/asm_offsets.h; then if cmp -s asm_offsets.h $(HPATH)/asm/asm_offsets.h; then echo $(HPATH)/asm/asm_offsets.h is unchanged; rm -f asm_offsets.h; else mv -f asm_offsets.h $(HPATH)/asm/asm_offsets.h; fi; else mv -f asm_offsets.h $(HPATH)/asm/asm_offsets.h; fi
@rm -f check_asm check_asm.c
......
/* $Id: entry.S,v 1.133 1997/03/04 16:26:22 jj Exp $
/* $Id: entry.S,v 1.137 1997/04/14 05:38:17 davem Exp $
* arch/sparc/kernel/entry.S: Sparc trap low-level entry points.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
......@@ -288,28 +288,11 @@ real_irq_entry:
SAVE_ALL
#ifdef __SMP__
cmp %l7, 13
bne 1f
nop
/* This is where we catch the level 13 reschedule soft-IRQ. */
GET_PROCESSOR_MID(o3, o2)
set C_LABEL(sun4m_interrupts), %l5
ld [%l5], %o5
sethi %hi(0x20000000), %o4
sll %o3, 12, %o3
add %o5, %o3, %o5
ld [%o5], %o1 ! read processor irq pending reg
andcc %o1, %o4, %g0
be 1f
cmp %l7, 12
bgu maybe_smp_msg
nop
b,a linux_trap_ipi13_sun4m
1:
#endif
real_irq_continue:
or %l0, PSR_PIL, %g2
wr %g2, 0x0, %psr
WRITE_PAUSE
......@@ -318,11 +301,103 @@ real_irq_entry:
mov %l7, %o0 ! irq level
call C_LABEL(handler_irq)
add %sp, REGWIN_SZ, %o1 ! pt_regs ptr
#if 1 /* ndef __SMP__ */ /* You don't want to know... -DaveM */
wr %l0, PSR_ET, %psr
WRITE_PAUSE
#endif
RESTORE_ALL
#ifdef __SMP__
/* Here is where we check for possible SMP IPI passed to us
* on some level other than 15, which is the NMI and is only used
* for cross calls. That has a separate entry point below.
*/
maybe_smp_msg:
GET_PROCESSOR_MID(o3, o2)
set C_LABEL(sun4m_interrupts), %l5
ld [%l5], %o5
sethi %hi(0x60000000), %o4
sll %o3, 12, %o3
ld [%o5 + %o3], %o1
andcc %o1, %o4, %g0
be real_irq_continue
cmp %l7, 13
add %o5, %o3, %o5
bne,a 1f
sethi %hi(0x40000000), %o2
sethi %hi(0x20000000), %o2
1:
st %o2, [%o5 + 0x4]
WRITE_PAUSE
ld [%o5], %g0
WRITE_PAUSE
or %l0, PSR_PIL, %l4
wr %l4, 0x0, %psr
WRITE_PAUSE
wr %l4, PSR_ET, %psr
WRITE_PAUSE
cmp %l7, 13
bne 2f
nop
call C_LABEL(smp_reschedule_irq)
add %o7, 8, %o7
2:
call C_LABEL(smp_stop_cpu_irq)
nop
RESTORE_ALL
.align 4
.globl linux_trap_ipi15_sun4m
linux_trap_ipi15_sun4m:
SAVE_ALL
sethi %hi(0x80000000), %o2
GET_PROCESSOR_MID(o0, o1)
set C_LABEL(sun4m_interrupts), %l5
ld [%l5], %o5
sll %o0, 12, %o0
add %o5, %o0, %o5
ld [%o5], %o3
andcc %o3, %o2, %g0
be 1f ! Must be an NMI async memory error
st %o2, [%o5 + 4]
WRITE_PAUSE
ld [%o5], %g0
WRITE_PAUSE
or %l0, PSR_PIL, %l4
wr %l4, 0x0, %psr
WRITE_PAUSE
wr %l4, PSR_ET, %psr
WRITE_PAUSE
call C_LABEL(smp_cross_call_irq)
nop
b ret_trap_lockless_ipi
clr %l6
1:
/* NMI async memory error handling. */
sethi %hi(0x80000000), %l4
sethi %hi(0x4000), %o3
sub %o5, %o0, %o5
add %o5, %o3, %l5
st %l4, [%l5 + 0xc]
WRITE_PAUSE
ld [%l5], %g0
WRITE_PAUSE
or %l0, PSR_PIL, %l4
wr %l4, 0x0, %psr
WRITE_PAUSE
wr %l4, PSR_ET, %psr
WRITE_PAUSE
call C_LABEL(sun4m_nmi)
nop
st %l4, [%l5 + 0x8]
WRITE_PAUSE
ld [%l5], %g0
WRITE_PAUSE
RESTORE_ALL
#endif /* __SMP__ */
/* This routine handles illegal instructions and privileged
* instruction attempts from user code.
*/
......@@ -671,107 +746,6 @@ linux_trap_nmi_sun4c:
RESTORE_ALL
#ifdef __SMP__
.align 4
.globl linux_trap_ipi13_sun4m
linux_trap_ipi13_sun4m:
sethi %hi(0x20000000), %o2
GET_PROCESSOR_MID(o0, o1)
set C_LABEL(sun4m_interrupts), %l5
ld [%l5], %o5
sll %o0, 12, %o0
add %o5, %o0, %o5
st %o2, [%o5 + 4]
WRITE_PAUSE
ld [%o5], %g0
WRITE_PAUSE
/* IRQ's off else we deadlock. */
or %l0, PSR_PIL, %l4
wr %l4, 0x0, %psr
WRITE_PAUSE
wr %l4, PSR_ET, %psr
WRITE_PAUSE
call C_LABEL(smp_reschedule_irq)
nop
RESTORE_ALL
.align 4
.globl linux_trap_ipi15_sun4m
linux_trap_ipi15_sun4m:
SAVE_ALL
/* First check for hard NMI memory error. */
sethi %hi(0xf0000000), %o2
set C_LABEL(sun4m_interrupts), %l5
set 0x4000, %o3
ld [%l5], %l5
add %l5, %o3, %l5
ld [%l5], %l6
andcc %o2, %l6, %o2
be 1f
nop
/* Asynchronous fault, why you little ?!#&%@... */
sethi %hi(0x80000000), %o2
st %o2, [%l5 + 0xc]
WRITE_PAUSE
ld [%l5], %g0
WRITE_PAUSE
/* All interrupts are off... now safe to enable traps
* and call C-code.
*/
or %l0, PSR_PIL, %l4 ! I am very paranoid...
wr %l4, 0x0, %psr
WRITE_PAUSE
wr %l4, PSR_ET, %psr
WRITE_PAUSE
call C_LABEL(sun4m_nmi)
nop
sethi %hi(0x80000000), %o2
st %o2, [%l5 + 0x8]
WRITE_PAUSE
ld [%l5], %g0
WRITE_PAUSE
RESTORE_ALL
1:
sethi %hi(0x80000000), %o2
GET_PROCESSOR_MID(o0, o1)
set C_LABEL(sun4m_interrupts), %l5
ld [%l5], %o5
sll %o0, 12, %o0
add %o5, %o0, %o5
st %o2, [%o5 + 4]
WRITE_PAUSE
ld [%o5], %g0
WRITE_PAUSE
/* IRQ's off else we deadlock. */
or %l0, PSR_PIL, %l4
wr %l4, 0x0, %psr
WRITE_PAUSE
wr %l4, PSR_ET, %psr
WRITE_PAUSE
call C_LABEL(smp_message_irq)
nop
b ret_trap_lockless_ipi
clr %l6
#endif
.align 4
.globl C_LABEL(invalid_segment_patch1_ff)
.globl C_LABEL(invalid_segment_patch2_ff)
......@@ -839,7 +813,22 @@ sun4c_fault:
be sun4c_fault_fromuser
and %l5, %l4, %l5
lduba [%l5] ASI_SEGMAP, %l4
/* Test for NULL pte_t * in vmalloc area. */
sethi %hi(SUN4C_VMALLOC_START), %l4
cmp %l5, %l4
blu,a C_LABEL(invalid_segment_patch1)
lduba [%l5] ASI_SEGMAP, %l4
srl %l5, SUN4C_PGDIR_SHIFT, %l6
sethi %hi(C_LABEL(swapper_pg_dir)), %l4
or %l4, %lo(C_LABEL(swapper_pg_dir)), %l4
sll %l6, 2, %l6
ld [%l4 + %l6], %l4
andcc %l4, PAGE_MASK, %g0
be sun4c_fault_fromuser
lduba [%l5] ASI_SEGMAP, %l4
C_LABEL(invalid_segment_patch1):
cmp %l4, 0x7f
bne 1f
......@@ -1268,11 +1257,9 @@ C_LABEL(ret_from_syscall):
#ifdef __SMP__
.globl C_LABEL(ret_from_smpfork)
C_LABEL(ret_from_smpfork):
mov NO_PROC_ID, %o5
sethi %hi(C_LABEL(klock_info)), %o4
or %o4, %lo(C_LABEL(klock_info)), %o4
stb %o5, [%o4 + 1]
stb %g0, [%o4 + 0]
/* Nowadays all we need to do is drop the scheduler lock. */
sethi %hi(C_LABEL(scheduler_lock)), %o4
stb %g0, [%o4 + %lo(C_LABEL(scheduler_lock))]
wr %l0, PSR_ET, %psr
WRITE_PAUSE
b C_LABEL(ret_sys_call)
......@@ -1605,6 +1592,7 @@ C_LABEL(udelay):
#else
GET_PROCESSOR_OFFSET(o4)
set C_LABEL(cpu_data), %o3
sll %o4, 1, %o4
call .umul
ld [%o3 + %o4], %o1
#endif
......
/* $Id: ioport.c,v 1.22 1996/10/11 00:59:46 davem Exp $
/* $Id: ioport.c,v 1.24 1997/04/10 03:02:32 davem Exp $
* ioport.c: Simple io mapping allocator.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
......@@ -45,8 +45,8 @@ unsigned long sparc_iobase_vaddr = IOBASE_VADDR;
* The virtual address where the mapping actually took place.
*/
void *sparc_alloc_io (void *address, void *virtual, int len, char *name,
int bus_type, int rdonly)
void *sparc_alloc_io (u32 address, void *virtual, int len, char *name,
u32 bus_type, int rdonly)
{
unsigned long vaddr, base_address;
unsigned long addr = (unsigned long) address;
......@@ -110,7 +110,7 @@ void sparc_free_io (void *virtual, int len)
* now have to know the peculiarities of where to read the Lance data
* from. (for example)
*/
void *sparc_dvma_malloc (int len, char *name)
void *_sparc_dvma_malloc (int len, char *name)
{
unsigned long vaddr, base_address;
......
/* $Id: irq.c,v 1.59 1997/01/06 06:52:21 davem Exp $
/* $Id: irq.c,v 1.66 1997/04/14 05:38:21 davem Exp $
* arch/sparc/kernel/irq.c: Interrupt request handling routines. On the
* Sparc the IRQ's are basically 'cast in stone'
* and you are supposed to probe the prom's device
......@@ -37,6 +37,9 @@
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/spinlock.h>
#include <asm/hardirq.h>
#include <asm/softirq.h>
/*
* Dave Redman (djhr@tadpole.co.uk)
......@@ -66,6 +69,8 @@ static void irq_panic(void)
void (*enable_irq)(unsigned int) = (void (*)(unsigned int)) irq_panic;
void (*disable_irq)(unsigned int) = (void (*)(unsigned int)) irq_panic;
void (*enable_pil_irq)(unsigned int) = (void (*)(unsigned int)) irq_panic;
void (*disable_pil_irq)(unsigned int) = (void (*)(unsigned int)) irq_panic;
void (*clear_clock_irq)( void ) = irq_panic;
void (*clear_profile_irq)( void ) = irq_panic;
void (*load_profile_irq)( unsigned int ) = (void (*)(unsigned int)) irq_panic;
......@@ -178,6 +183,201 @@ void free_irq(unsigned int irq, void *dev_id)
restore_flags(flags);
}
/* Per-processor IRQ locking depth, both SMP and non-SMP code use this. */
unsigned int local_irq_count[NR_CPUS];
atomic_t __sparc_bh_counter;
#ifdef __SMP__
/* SMP interrupt locking on Sparc. */
/* Who has global_irq_lock. */
unsigned char global_irq_holder = NO_PROC_ID;
/* This protects IRQ's. */
spinlock_t global_irq_lock = SPIN_LOCK_UNLOCKED;
/* This protects BH software state (masks, things like that). */
spinlock_t global_bh_lock = SPIN_LOCK_UNLOCKED;
/* Global IRQ locking depth. */
atomic_t global_irq_count = ATOMIC_INIT;
#define irq_active(cpu) \
(atomic_read(&global_irq_count) != local_irq_count[cpu])
static unsigned long previous_irqholder;
#define INIT_STUCK 10000000
#define STUCK \
if (!--stuck) { \
printk("wait_on_irq CPU#%d stuck at %08lx, waiting for [%08lx:%x] " \
"(local=[%d(%x:%x:%x:%x)], global=[%d:%x]) ", \
cpu, where, previous_irqholder, global_irq_holder, \
local_count, local_irq_count[0], local_irq_count[1], \
local_irq_count[2], local_irq_count[3], \
atomic_read(&global_irq_count), global_irq_lock); \
printk("g[%d:%x]\n", atomic_read(&global_irq_count), global_irq_lock); \
stuck = INIT_STUCK; \
}
static inline void wait_on_irq(int cpu, unsigned long where)
{
int stuck = INIT_STUCK;
int local_count = local_irq_count[cpu];
while(local_count != atomic_read(&global_irq_count)) {
atomic_sub(local_count, &global_irq_count);
spin_unlock(&global_irq_lock);
while(1) {
STUCK;
if(atomic_read(&global_irq_count))
continue;
if(global_irq_lock)
continue;
if(spin_trylock(&global_irq_lock))
break;
}
atomic_add(local_count, &global_irq_count);
}
}
/* There has to be a better way. */
void synchronize_irq(void)
{
int cpu = smp_processor_id();
int local_count = local_irq_count[cpu];
if(local_count != atomic_read(&global_irq_count)) {
unsigned long flags;
/* See comment below at __global_save_flags to understand
* why we must do it this way on Sparc.
*/
save_and_cli(flags);
restore_flags(flags);
}
}
#undef INIT_STUCK
#define INIT_STUCK 10000000
#undef STUCK
#define STUCK \
if (!--stuck) {printk("get_irqlock stuck at %08lx, waiting for %08lx\n", where, previous_irqholder); stuck = INIT_STUCK;}
static inline void get_irqlock(int cpu, unsigned long where)
{
int stuck = INIT_STUCK;
if(!spin_trylock(&global_irq_lock)) {
if((unsigned char) cpu == global_irq_holder)
return;
do {
do {
STUCK;
barrier();
} while(global_irq_lock);
} while(!spin_trylock(&global_irq_lock));
}
wait_on_irq(cpu, where);
global_irq_holder = cpu;
previous_irqholder = where;
}
void __global_cli(void)
{
int cpu = smp_processor_id();
unsigned long where;
__asm__ __volatile__("mov %%i7, %0\n\t" : "=r" (where));
__cli();
get_irqlock(cpu, where);
}
void __global_sti(void)
{
release_irqlock(smp_processor_id());
__sti();
}
/* Yes, I know this is broken, but for the time being...
*
* On Sparc we must differentiate between real local processor
* interrupts being disabled and global interrupt locking; this
* is so that interrupt handlers which call this stuff don't get
* interrupts turned back on when restore_flags() runs, because
* our current drivers would be very surprised by that. Yes, I
* know they need to be fixed... -DaveM
*/
unsigned long __global_save_flags(void)
{
unsigned long flags, retval = 0;
__save_flags(flags);
if(global_irq_holder == (unsigned char) smp_processor_id())
retval |= 1;
if(flags & PSR_PIL)
retval |= 2;
return retval;
}
void __global_restore_flags(unsigned long flags)
{
if(flags & 1) {
__global_cli();
} else {
release_irqlock(smp_processor_id());
if(flags & 2)
__cli();
else
__sti();
}
}
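To illustrate the two-bit flag encoding used by __global_save_flags() and __global_restore_flags(), here is a hedged user-space model; the FLAG_* names and the model function are ours, not the kernel's:
#include <stdio.h>

#define FLAG_GLOBAL_LOCK_HELD	1	/* bit 0: this cpu held global_irq_lock */
#define FLAG_LOCAL_IRQS_OFF	2	/* bit 1: PSR_PIL had local irqs masked */

static void model_restore_flags(unsigned long flags)
{
	if (flags & FLAG_GLOBAL_LOCK_HELD)
		printf("__global_cli(): re-take the global irq lock\n");
	else if (flags & FLAG_LOCAL_IRQS_OFF)
		printf("__cli(): drop any irqlock, mask local interrupts\n");
	else
		printf("__sti(): drop any irqlock, enable local interrupts\n");
}

int main(void)
{
	model_restore_flags(FLAG_LOCAL_IRQS_OFF);
	return 0;
}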
#undef INIT_STUCK
#define INIT_STUCK 200000000
#undef STUCK
#define STUCK \
if (!--stuck) { \
printk("irq_enter stuck (irq=%d, cpu=%d, global=%d)\n", \
irq, cpu, global_irq_holder); \
stuck = INIT_STUCK; \
}
static void irq_enter(int cpu, int irq)
{
extern void smp_irq_rotate(int cpu);
int stuck = INIT_STUCK;
smp_irq_rotate(cpu);
hardirq_enter(cpu);
while(global_irq_lock) {
if((unsigned char) cpu == global_irq_holder) {
printk("YEEEE Local interrupts enabled, global disabled\n");
break;
}
STUCK;
barrier();
}
}
static void irq_exit(int cpu, int irq)
{
__cli();
hardirq_exit(cpu);
release_irqlock(cpu);
}
#else /* !__SMP__ */
#define irq_enter(cpu, irq) (local_irq_count[cpu]++)
#define irq_exit(cpu, irq) (local_irq_count[cpu]--)
#endif /* __SMP__ */
void unexpected_irq(int irq, void *dev_id, struct pt_regs * regs)
{
int i;
......@@ -204,11 +404,11 @@ void unexpected_irq(int irq, void *dev_id, struct pt_regs * regs)
void handler_irq(int irq, struct pt_regs * regs)
{
struct irqaction * action;
unsigned int cpu_irq;
unsigned int cpu_irq = irq & NR_IRQS;
int cpu = smp_processor_id();
lock_kernel();
intr_count++;
cpu_irq = irq & NR_IRQS;
disable_pil_irq(cpu_irq);
irq_enter(cpu, irq);
action = *(cpu_irq + irq_action);
kstat.interrupts[cpu_irq]++;
do {
......@@ -217,8 +417,8 @@ void handler_irq(int irq, struct pt_regs * regs)
action->handler(irq, action->dev_id, regs);
action = action->next;
} while (action);
intr_count--;
unlock_kernel();
irq_exit(cpu, irq);
enable_pil_irq(cpu_irq);
}
#ifdef CONFIG_BLK_DEV_FD
......@@ -226,11 +426,13 @@ extern void floppy_interrupt(int irq, void *dev_id, struct pt_regs *regs);
void sparc_floppy_irq(int irq, void *dev_id, struct pt_regs *regs)
{
lock_kernel();
intr_count++;
int cpu = smp_processor_id();
disable_pil_irq(irq);
irq_enter(cpu, irq);
floppy_interrupt(irq, dev_id, regs);
intr_count--;
unlock_kernel();
irq_exit(cpu, irq);
disable_pil_irq(irq);
}
#endif
......
/* $Id: muldiv.c,v 1.3 1996/11/26 10:00:28 jj Exp $
/* $Id: muldiv.c,v 1.4 1997/04/11 00:42:08 davem Exp $
* muldiv.c: Hardware multiply/division illegal instruction trap
* for sun4c/sun4 (which do not have those instructions)
*
......@@ -120,7 +120,10 @@ int do_user_muldiv(struct pt_regs *regs, unsigned long pc)
"call .umul\n\t"
" mov %1, %%o1\n\t"
"mov %%o0, %0\n\t"
"mov %%o1, %1\n\t" : "=r" (rs1), "=r" (rs2) : : "o0", "o1", "o2", "o3", "o4", "o5", "o7");
"mov %%o1, %1\n\t"
: "=r" (rs1), "=r" (rs2)
:
: "o0", "o1", "o2", "o3", "o4", "o5", "o7", "cc");
#ifdef DEBUG_MULDIV
printk ("0x%x%08x\n", rs2, rs1);
#endif
......@@ -138,7 +141,10 @@ int do_user_muldiv(struct pt_regs *regs, unsigned long pc)
"call .mul\n\t"
" mov %1, %%o1\n\t"
"mov %%o0, %0\n\t"
"mov %%o1, %1\n\t" : "=r" (rs1), "=r" (rs2) : : "o0", "o1", "o2", "o3", "o4", "o5", "o7");
"mov %%o1, %1\n\t"
: "=r" (rs1), "=r" (rs2)
:
: "o0", "o1", "o2", "o3", "o4", "o5", "o7", "cc");
#ifdef DEBUG_MULDIV
printk ("0x%x%08x\n", rs2, rs1);
#endif
......@@ -165,7 +171,11 @@ int do_user_muldiv(struct pt_regs *regs, unsigned long pc)
"call __udivdi3\n\t"
" mov %1, %%o3\n\t"
"mov %%o1, %0\n\t"
"mov %%o0, %1\n\t" : "=r" (rs1), "=r" (rs2) : "r" (regs->y) : "o0", "o1", "o2", "o3", "o4", "o5", "o7", "g1", "g2", "g3");
"mov %%o0, %1\n\t"
: "=r" (rs1), "=r" (rs2)
: "r" (regs->y)
: "o0", "o1", "o2", "o3", "o4", "o5", "o7",
"g1", "g2", "g3", "cc");
#ifdef DEBUG_MULDIV
printk ("0x%x\n", rs1);
#endif
......@@ -190,7 +200,11 @@ int do_user_muldiv(struct pt_regs *regs, unsigned long pc)
"call __divdi3\n\t"
" mov %1, %%o3\n\t"
"mov %%o1, %0\n\t"
"mov %%o0, %1\n\t" : "=r" (rs1), "=r" (rs2) : "r" (regs->y) : "o0", "o1", "o2", "o3", "o4", "o5", "o7", "g1", "g2", "g3");
"mov %%o0, %1\n\t"
: "=r" (rs1), "=r" (rs2)
: "r" (regs->y)
: "o0", "o1", "o2", "o3", "o4", "o5", "o7",
"g1", "g2", "g3", "cc");
#ifdef DEBUG_MULDIV
printk ("0x%x\n", rs1);
#endif
......
/* $Id: process.c,v 1.90 1997/01/31 23:26:16 tdyas Exp $
/* $Id: process.c,v 1.93 1997/04/11 08:55:40 davem Exp $
* linux/arch/sparc/kernel/process.c
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
......@@ -57,6 +57,7 @@ asmlinkage int sys_idle(void)
goto out;
/* endless idle loop with no priority at all */
current->priority = -100;
current->counter = -100;
for (;;) {
if (sparc_cpu_model == sun4c) {
......@@ -99,53 +100,35 @@ asmlinkage int sys_idle(void)
#else
/*
* the idle loop on a SparcMultiPenguin...
*/
asmlinkage int sys_idle(void)
{
int ret = -EPERM;
lock_kernel();
if (current->pid != 0)
goto out;
/* endless idle loop with no priority at all */
current->counter = -100;
schedule();
ret = 0;
out:
unlock_kernel();
return ret;
}
/* This is being executed in task 0 'user space'. */
int cpu_idle(void *unused)
{
volatile int *spap = &smp_process_available;
volatile int cval;
current->priority = -100;
while(1) {
if(0==*spap)
continue;
cli();
/* Acquire exclusive access. */
while((cval = smp_swap(spap, -1)) == -1)
while(*spap == -1)
;
if (0==cval) {
/* ho hum, release it. */
*spap = 0;
sti();
continue;
}
/* Something interesting happened, whee... */
*spap = (cval - 1);
sti();
idle();
/*
* tq_scheduler currently assumes we're running in a process
* context (i.e., that we hold the kernel lock)
*/
if (tq_scheduler) {
lock_kernel();
run_task_queue(&tq_scheduler);
unlock_kernel();
}
/* endless idle loop with no priority at all */
current->counter = -100;
schedule();
}
}
asmlinkage int sys_idle(void)
{
if(current->pid != 0)
return -EPERM;
cpu_idle(NULL);
return 0;
}
#endif
extern char reboot_command [];
......
......@@ -592,6 +592,9 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
unsigned long tmp;
int res;
/* XXX Find out what is really going on. */
flush_cache_all();
/* Non-word alignment _not_ allowed on Sparc. */
if(addr & (sizeof(unsigned long) - 1)) {
pt_error_return(regs, EINVAL);
......
/* $Id: rtrap.S,v 1.45 1997/03/04 16:26:27 jj Exp $
/* $Id: rtrap.S,v 1.46 1997/04/01 02:21:48 davem Exp $
* rtrap.S: Return from Sparc trap low-level code.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
......@@ -48,13 +48,8 @@ rtrap_7win_patch5: and %g1, 0x7f, %g1
.globl rtrap_patch3, rtrap_patch4, rtrap_patch5
.globl C_LABEL(ret_trap_lockless_ipi)
ret_trap_entry:
sethi %hi(C_LABEL(intr_count)), %g4
ld [%g4 + %lo(C_LABEL(intr_count))], %g5
orcc %g5, 0x0, %g0
sethi %hi(C_LABEL(bh_active)), %l3
bne C_LABEL(ret_trap_lockless_ipi)
sethi %hi(C_LABEL(bh_mask)), %l4
9:
sethi %hi(C_LABEL(bh_mask)), %l4
ld [%l4 + %lo(C_LABEL(bh_mask))], %g5
ld [%l3 + %lo(C_LABEL(bh_active))], %g4
andcc %g4, %g5, %g0
......@@ -62,7 +57,6 @@ ret_trap_entry:
nop
call C_LABEL(do_bottom_half)
nop
b,a 9b
C_LABEL(ret_trap_lockless_ipi):
andcc %t_psr, PSR_PS, %g0
......
/* $Id: setup.c,v 1.82 1997/03/08 08:27:04 ecd Exp $
/* $Id: setup.c,v 1.83 1997/04/01 02:21:49 davem Exp $
* linux/arch/sparc/kernel/setup.c
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
......@@ -24,6 +24,7 @@
#include <linux/string.h>
#include <linux/blk.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <asm/segment.h>
#include <asm/system.h>
......@@ -38,6 +39,9 @@
#include <asm/kdebug.h>
#include <asm/mbus.h>
#include <asm/idprom.h>
#include <asm/spinlock.h>
#include <asm/softirq.h>
#include <asm/hardirq.h>
struct screen_info screen_info = {
0, 0, /* orig-x, orig-y */
......@@ -75,8 +79,13 @@ asmlinkage void sys_sync(void); /* it's really int */
void prom_sync_me(void)
{
unsigned long prom_tbr, flags;
int cpu = smp_processor_id();
save_and_cli(flags);
#ifdef __SMP__
global_irq_holder = NO_PROC_ID;
global_irq_lock = global_bh_lock = 0;
#endif
__save_and_cli(flags);
__asm__ __volatile__("rd %%tbr, %0\n\t" : "=r" (prom_tbr));
__asm__ __volatile__("wr %0, 0x0, %%tbr\n\t"
"nop\n\t"
......@@ -89,9 +98,9 @@ void prom_sync_me(void)
prom_printf("PROM SYNC COMMAND...\n");
show_free_areas();
if(current->pid != 0) {
sti();
__sti();
sys_sync();
cli();
__cli();
}
prom_printf("Returning to prom\n");
......@@ -99,7 +108,7 @@ void prom_sync_me(void)
"nop\n\t"
"nop\n\t"
"nop\n\t" : : "r" (prom_tbr));
restore_flags(flags);
__restore_flags(flags);
return;
}
......
/* $Id: sparc_ksyms.c,v 1.49 1997/03/15 07:47:45 davem Exp $
/* $Id: sparc_ksyms.c,v 1.54 1997/04/14 05:38:25 davem Exp $
* arch/sparc/kernel/ksyms.c: Sparc specific ksyms support.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
......@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/interrupt.h>
#include <linux/in6.h>
#include <asm/oplib.h>
......@@ -26,6 +27,9 @@
#include <asm/smp.h>
#include <asm/mostek.h>
#include <asm/ptrace.h>
#include <asm/spinlock.h>
#include <asm/softirq.h>
#include <asm/hardirq.h>
#include <asm/user.h>
#include <asm/uaccess.h>
#include <asm/checksum.h>
......@@ -82,6 +86,8 @@ EXPORT_SYMBOL(klock_info);
#endif
EXPORT_SYMBOL_PRIVATE(_lock_kernel);
EXPORT_SYMBOL_PRIVATE(_unlock_kernel);
EXPORT_SYMBOL_PRIVATE(_spinlock_waitfor);
EXPORT_SYMBOL(__sparc_bh_counter);
EXPORT_SYMBOL(page_offset);
EXPORT_SYMBOL(stack_top);
......@@ -97,6 +103,20 @@ EXPORT_SYMBOL_PRIVATE(_change_bit);
EXPORT_SYMBOL_PRIVATE(_set_le_bit);
EXPORT_SYMBOL_PRIVATE(_clear_le_bit);
/* IRQ implementation. */
EXPORT_SYMBOL(local_irq_count);
#ifdef __SMP__
EXPORT_SYMBOL(global_irq_holder);
EXPORT_SYMBOL(global_irq_lock);
EXPORT_SYMBOL(global_bh_lock);
EXPORT_SYMBOL(global_irq_count);
EXPORT_SYMBOL(__global_cli);
EXPORT_SYMBOL(__global_sti);
EXPORT_SYMBOL(__global_save_flags);
EXPORT_SYMBOL(__global_restore_flags);
EXPORT_SYMBOL(synchronize_irq);
#endif
EXPORT_SYMBOL(udelay);
EXPORT_SYMBOL(mstk48t02_regs);
#if CONFIG_SUN_AUXIO
......@@ -113,7 +133,7 @@ EXPORT_SYMBOL(mmu_get_scsi_sgl);
EXPORT_SYMBOL(mmu_get_scsi_one);
EXPORT_SYMBOL(mmu_release_scsi_sgl);
EXPORT_SYMBOL(mmu_release_scsi_one);
EXPORT_SYMBOL(sparc_dvma_malloc);
EXPORT_SYMBOL(_sparc_dvma_malloc);
EXPORT_SYMBOL(sun4c_unmapioaddr);
EXPORT_SYMBOL(srmmu_unmapioaddr);
#if CONFIG_SBUS
......
......@@ -128,7 +128,7 @@ __initfunc(static void sun4c_init_timers(void (*counter_fn)(int, void *, struct
/* Map the Timer chip; this is implemented in hardware inside
* the cache chip on the sun4c.
*/
sun4c_timers = sparc_alloc_io ((void *) SUN4C_TIMER_PHYSADDR, 0,
sun4c_timers = sparc_alloc_io (SUN4C_TIMER_PHYSADDR, 0,
sizeof(struct sun4c_timer_info),
"timer", 0x0, 0x0);
......@@ -174,6 +174,8 @@ __initfunc(void sun4c_init_IRQ(void))
int_regs[0].which_io, 0x0);
enable_irq = sun4c_enable_irq;
disable_irq = sun4c_disable_irq;
enable_pil_irq = sun4c_enable_irq;
disable_pil_irq = sun4c_disable_irq;
clear_clock_irq = sun4c_clear_clock_irq;
clear_profile_irq = sun4c_clear_profile_irq;
load_profile_irq = sun4c_load_profile_irq;
......@@ -184,5 +186,5 @@ __initfunc(void sun4c_init_IRQ(void))
set_irq_udt = (void (*) (int))sun4c_nop;
#endif
*interrupt_enable = (SUN4C_INT_ENABLE);
sti();
/* Cannot enable interrupts until OBP ticker is disabled. */
}
......@@ -130,6 +130,38 @@ static void sun4m_enable_irq(unsigned int irq_nr)
}
}
static unsigned long cpu_pil_to_imask[16] = {
/*0*/ 0x00000000,
/*1*/ 0x00000000,
/*2*/ SUN4M_INT_SBUS(0) | SUN4M_INT_VME(0),
/*3*/ SUN4M_INT_SBUS(1) | SUN4M_INT_VME(1),
/*4*/ SUN4M_INT_SCSI,
/*5*/ SUN4M_INT_SBUS(2) | SUN4M_INT_VME(2),
/*6*/ SUN4M_INT_ETHERNET,
/*7*/ SUN4M_INT_SBUS(3) | SUN4M_INT_VME(3),
/*8*/ SUN4M_INT_VIDEO,
/*9*/ SUN4M_INT_SBUS(4) | SUN4M_INT_VME(4) | SUN4M_INT_MODULE_ERR,
/*10*/ SUN4M_INT_REALTIME,
/*11*/ SUN4M_INT_SBUS(5) | SUN4M_INT_VME(5) | SUN4M_INT_FLOPPY,
/*12*/ SUN4M_INT_SERIAL | SUN4M_INT_KBDMS,
/*13*/ SUN4M_INT_AUDIO,
/*14*/ 0x00000000,
/*15*/ 0x00000000
};
/* We assume the caller is local cli()'d when these are called, or else
* very bizarre behavior will result.
*/
static void sun4m_disable_pil_irq(unsigned int pil)
{
sun4m_interrupts->set = cpu_pil_to_imask[pil];
}
static void sun4m_enable_pil_irq(unsigned int pil)
{
sun4m_interrupts->clear = cpu_pil_to_imask[pil];
}
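A minimal user-space model of the set/clear mask-register scheme the two helpers above rely on; the bit value is an illustrative stand-in, not the real SUN4M_INT_SCSI constant:
#include <stdio.h>

static unsigned long imask;	/* models the mask the set/clear registers drive */
static const unsigned long pil_to_imask[16] = {
	[4] = 0x00040000UL,	/* assumed stand-in for SUN4M_INT_SCSI */
};

static void model_disable_pil_irq(unsigned int pil) { imask |= pil_to_imask[pil]; }
static void model_enable_pil_irq(unsigned int pil) { imask &= ~pil_to_imask[pil]; }

int main(void)
{
	model_disable_pil_irq(4);	/* mask every source at PIL 4 */
	printf("mask=%08lx\n", imask);
	model_enable_pil_irq(4);	/* and unmask them again */
	printf("mask=%08lx\n", imask);
	return 0;
}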
void sun4m_send_ipi(int cpu, int level)
{
unsigned long mask;
......@@ -272,7 +304,7 @@ __initfunc(void sun4m_init_IRQ(void))
struct linux_prom_registers int_regs[PROMREG_MAX];
int num_regs;
cli();
__cli();
if((ie_node = prom_searchsiblings(prom_getchild(prom_root_node), "obio")) == 0 ||
(ie_node = prom_getchild (ie_node)) == 0 ||
(ie_node = prom_searchsiblings (ie_node, "interrupt")) == 0) {
......@@ -327,6 +359,8 @@ __initfunc(void sun4m_init_IRQ(void))
}
enable_irq = sun4m_enable_irq;
disable_irq = sun4m_disable_irq;
enable_pil_irq = sun4m_enable_pil_irq;
disable_pil_irq = sun4m_disable_pil_irq;
clear_clock_irq = sun4m_clear_clock_irq;
clear_profile_irq = sun4m_clear_profile_irq;
load_profile_irq = sun4m_load_profile_irq;
......@@ -336,5 +370,5 @@ __initfunc(void sun4m_init_IRQ(void))
clear_cpu_int = (void (*) (int, int))sun4m_clear_ipi;
set_irq_udt = (void (*) (int))sun4m_set_udt;
#endif
sti();
/* Cannot enable interrupts until OBP ticker is disabled. */
}
/* $Id: time.c,v 1.23 1997/01/26 04:28:34 davem Exp $
/* $Id: time.c,v 1.27 1997/04/14 05:38:31 davem Exp $
* linux/arch/sparc/kernel/time.c
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
......@@ -54,6 +54,11 @@ void timer_interrupt(int irq, void *dev_id, struct pt_regs * regs)
last_rtc_update = xtime.tv_sec;
else
last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */
#ifdef __SMP__
/* I really think it should not be done this way... -DaveM */
smp_message_pass(MSG_ALL_BUT_SELF, MSG_RESCHEDULE, 0L, 0);
#endif
}
/* Converts Gregorian date to seconds since 1970-01-01 00:00:00.
......@@ -198,7 +203,7 @@ __initfunc(static void clock_probe(void))
prom_apply_obio_ranges(clk_reg, 1);
/* Map the clock register io area read-only */
mstk48t02_regs = (struct mostek48t02 *)
sparc_alloc_io((void *) clk_reg[0].phys_addr,
sparc_alloc_io(clk_reg[0].phys_addr,
(void *) 0, sizeof(*mstk48t02_regs),
"clock", clk_reg[0].which_io, 0x0);
mstk48t08_regs = 0; /* To catch weirdness */
......@@ -215,7 +220,7 @@ __initfunc(static void clock_probe(void))
prom_apply_obio_ranges(clk_reg, 1);
/* Map the clock register io area read-only */
mstk48t08_regs = (struct mostek48t08 *)
sparc_alloc_io((void *) clk_reg[0].phys_addr,
sparc_alloc_io(clk_reg[0].phys_addr,
(void *) 0, sizeof(*mstk48t08_regs),
"clock", clk_reg[0].which_io, 0x0);
......@@ -265,7 +270,9 @@ __initfunc(void time_init(void))
xtime.tv_sec = mktime(year, mon, day, hour, min, sec);
xtime.tv_usec = 0;
mregs->creg &= ~MSTK_CREG_READ;
return;
/* Now that OBP ticker has been silenced, it is safe to enable IRQ. */
__sti();
}
static __inline__ unsigned long do_gettimeoffset(void)
......@@ -283,21 +290,49 @@ static __inline__ unsigned long do_gettimeoffset(void)
void do_gettimeofday(struct timeval *tv)
{
#if CONFIG_AP1000
unsigned long flags;
save_and_cli(flags);
#if CONFIG_AP1000
ap_gettimeofday(&xtime);
#endif
*tv = xtime;
#if !CONFIG_AP1000
tv->tv_usec += do_gettimeoffset();
if(tv->tv_usec >= 1000000) {
tv->tv_usec -= 1000000;
tv->tv_sec++;
}
#endif
restore_flags(flags);
#else /* !(CONFIG_AP1000) */
/* Load doubles must be used on xtime so that what we get
* is guaranteed to be atomic; this is why we can run this
* with interrupts on full blast. Don't touch this... -DaveM
*/
__asm__ __volatile__("
sethi %hi(master_l10_counter), %o1
ld [%o1 + %lo(master_l10_counter)], %g3
sethi %hi(xtime), %g2
1: ldd [%g2 + %lo(xtime)], %o4
ld [%g3], %o1
ldd [%g2 + %lo(xtime)], %o2
xor %o4, %o2, %o2
xor %o5, %o3, %o3
orcc %o2, %o3, %g0
bne 1b
subcc %o1, 0x0, %g0
bpos 1f
srl %o1, 0xa, %o1
sethi %hi(0x2710), %o3
or %o3, %lo(0x2710), %o3
sethi %hi(0x1fffff), %o2
or %o2, %lo(0x1fffff), %o2
add %o5, %o3, %o5
and %o1, %o2, %o1
1: add %o5, %o1, %o5
sethi %hi(1000000), %o2
or %o2, %lo(1000000), %o2
cmp %o5, %o2
bl,a 1f
st %o4, [%o0 + 0x0]
add %o4, 0x1, %o4
sub %o5, %o2, %o5
st %o4, [%o0 + 0x0]
1: st %o5, [%o0 + 0x4]");
#endif
}
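Roughly what the assembly does, as a hedged C sketch: the real atomicity comes from the 64-bit ldd of xtime (not expressible in plain C), and the counter layout below is our reading of the code, not a documented ABI:
#include <stdio.h>

static volatile struct { long sec, usec; } model_xtime = { 1000, 990000 };
static volatile unsigned int l10_counter = 0x80004c00u;	/* assumed sample */

int main(void)
{
	long sec, usec;
	int cnt;

	do {	/* the ldd/xor retry loop: reread until xtime is stable */
		sec = model_xtime.sec;
		usec = model_xtime.usec;
		cnt = (int) l10_counter;
	} while (sec != model_xtime.sec || usec != model_xtime.usec);

	if (cnt < 0)	/* limit bit set: add one full 10000 usec period */
		usec += 10000;
	usec += (cnt >> 10) & 0x1fffff;	/* remaining ticks, roughly 1 usec each */
	if (usec >= 1000000) {	/* carry into seconds */
		usec -= 1000000;
		sec++;
	}
	printf("%ld.%06ld\n", sec, usec);
	return 0;
}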
void do_settimeofday(struct timeval *tv)
......
/* $Id: trampoline.S,v 1.5 1996/09/22 06:43:10 davem Exp $
* mp.S: Multiprocessor low-level routines on the Sparc.
/* $Id: trampoline.S,v 1.6 1997/04/14 05:38:33 davem Exp $
* trampoline.S: SMP cpu boot-up trampoline code.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*/
......
/* $Id: unaligned.c,v 1.16 1997/03/18 17:53:44 jj Exp $
/* $Id: unaligned.c,v 1.17 1997/04/11 00:42:08 davem Exp $
* unaligned.c: Unaligned load/store trap handling with special
* cases for the kernel to do them more quickly.
*
......@@ -204,7 +204,7 @@ __asm__ __volatile__ ( \
".word 16b, " #errh "\n\n\t" \
".previous\n\t" \
: : "r" (dest_reg), "r" (size), "r" (saddr), "r" (is_signed) \
: "l1", "l2", "g7", "g1"); \
: "l1", "l2", "g7", "g1", "cc"); \
})
#define store_common(dst_addr, size, src_val, errh) ({ \
......@@ -258,7 +258,7 @@ __asm__ __volatile__ ( \
".word 17b, " #errh "\n\n\t" \
".previous\n\t" \
: : "r" (dst_addr), "r" (size), "r" (src_val) \
: "l1", "l2", "g7", "g1"); \
: "l1", "l2", "g7", "g1", "cc"); \
})
#define do_integer_store(reg_num, size, dst_addr, regs, errh) ({ \
......@@ -343,8 +343,10 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
"mov %0, %%o0\n\t"
"call kernel_mna_trap_fault\n\t"
" mov %1, %%o1\n\t"
: : "r" (regs), "r" (insn)
: "o0", "o1", "o2", "o3", "o4", "o5", "o7", "g1", "g2", "g3", "g4", "g5", "g7");
:
: "r" (regs), "r" (insn)
: "o0", "o1", "o2", "o3", "o4", "o5", "o7",
"g1", "g2", "g3", "g4", "g5", "g7", "cc");
} else {
unsigned long addr = compute_effective_address(regs, insn);
......@@ -476,8 +478,10 @@ asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn)
"mov %0, %%o0\n\t"
"call user_mna_trap_fault\n\t"
" mov %1, %%o1\n\t"
: : "r" (regs), "r" (insn)
: "o0", "o1", "o2", "o3", "o4", "o5", "o7", "g1", "g2", "g3", "g4", "g5", "g7");
:
: "r" (regs), "r" (insn)
: "o0", "o1", "o2", "o3", "o4", "o5", "o7",
"g1", "g2", "g3", "g4", "g5", "g7", "cc");
goto out;
}
advance(regs);
......
......@@ -33,7 +33,7 @@ void flush_user_windows(void)
: "=&r" (ctr)
: "0" (ctr),
"i" ((const unsigned long)(&(((struct task_struct *)0)->tss.uwinmask)))
: "g4");
: "g4", "cc");
}
static inline void shift_window_buffer(int first_win, int last_win, struct thread_struct *tp)
......
......@@ -10,10 +10,6 @@
.text
.align 4
/* XXX At boot time patch this with swap [x], y; retl; if
* XXX processor is found to have that instruction.
*/
.globl ___xchg32
___xchg32:
rd %psr, %g3
......@@ -34,51 +30,47 @@ ___xchg32:
jmpl %o7, %g0 /* Note, not + 0x8, see call in system.h */
mov %g4, %o7
.globl ___xchg32_hw
___xchg32_hw:
swap [%g1], %g2
jmpl %o7, %g0 /* Note, not + 0x8, see call in system.h */
mov %g4, %o7
/* Atomic add/sub routines. Returns the final value whether you
* want it or not for even _better_ cache hit rates.
/* Read asm-sparc/atomic.h carefully to understand how this works for SMP.
* Really, some things here for SMP are overly clever; go read the header.
*/
.globl ___atomic_add
___atomic_add:
rd %psr, %g3
andcc %g3, PSR_PIL, %g0
bne 1f
nop
wr %g3, PSR_PIL, %psr
nop; nop; nop;
1:
ld [%g1], %g7
andcc %g3, PSR_PIL, %g0
add %g7, %g2, %g2
bne 1f
st %g2, [%g1]
wr %g3, 0x0, %psr
nop; nop; nop;
1:
jmpl %o7, %g0 /* NOTE: not + 8, see callers in atomic.h */
mov %g4, %o7
rd %psr, %g3 ! Keep the code small, old way was stupid
or %g3, PSR_PIL, %g7 ! Disable interrupts
wr %g7, 0x0, %psr ! Set %psr
nop; nop; nop; ! Let the bits set
#ifdef __SMP__
1: ldstub [%g1 + 3], %g7 ! Spin on the byte lock for SMP.
orcc %g7, 0x0, %g0 ! Did we get it?
bne 1b ! Nope...
#endif
ld [%g1], %g7 ! Load locked atomic_t
sra %g7, 8, %g7 ! Get signed 24-bit integer
add %g7, %g2, %g2 ! Add in argument
sll %g2, 8, %g7 ! Transpose back to atomic_t
st %g7, [%g1] ! Clever: This releases the lock as well.
wr %g3, 0x0, %psr ! Restore original PSR_PIL
nop; nop; nop; ! Let the bits set
jmpl %o7, %g0 ! NOTE: not + 8, see callers in atomic.h
mov %g4, %o7 ! Restore %o7
.globl ___atomic_sub
___atomic_sub:
rd %psr, %g3
andcc %g3, PSR_PIL, %g0
bne 1f
nop
wr %g3, PSR_PIL, %psr
nop; nop; nop;
1:
ld [%g1], %g7
andcc %g3, PSR_PIL, %g0
sub %g7, %g2, %g2
bne 1f
st %g2, [%g1]
wr %g3, 0x0, %psr
nop; nop; nop;
1:
jmpl %o7, %g0 /* NOTE: not + 8, see callers in atomic.h */
mov %g4, %o7
rd %psr, %g3 ! Keep the code small, old way was stupid
or %g3, PSR_PIL, %g7 ! Disable interrupts
wr %g7, 0x0, %psr ! Set %psr
nop; nop; nop; ! Let the bits set
#ifdef __SMP__
1: ldstub [%g1 + 3], %g7 ! Spin on the byte lock for SMP.
orcc %g7, 0x0, %g0 ! Did we get it?
bne 1b ! Nope...
#endif
ld [%g1], %g7 ! Load locked atomic_t
sra %g7, 8, %g7 ! Get signed 24-bit integer
sub %g7, %g2, %g2 ! Subtract argument
sll %g2, 8, %g7 ! Transpose back to atomic_t
st %g7, [%g1] ! Clever: This releases the lock as well
wr %g3, 0x0, %psr ! Restore original PSR_PIL
nop; nop; nop; ! Let the bits set
jmpl %o7, %g0 ! NOTE: not + 8, see callers in atomic.h
mov %g4, %o7 ! Restore %o7
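The layout trick both routines above depend on can be modelled in C (a sketch under big-endian 32-bit assumptions; the helper name is ours): the counter lives in the upper 24 bits, and the low byte doubles as the ldstub spin-lock byte, so the final store releases the lock for free.
#include <stdio.h>

static int atomic24_add(int *v, int delta)
{
	/* SMP would first ldstub ((unsigned char *)v)[3] -- the low byte on
	 * big-endian Sparc -- and spin until it reads back zero. */
	int counter = *v >> 8;		/* sra: recover the signed 24-bit counter */
	counter += delta;
	*v = (int)((unsigned int)counter << 8);	/* low byte 0: releases the lock */
	return counter;
}

int main(void)
{
	int v = 5 << 8;		/* an atomic_t holding 5 */
	printf("%d\n", atomic24_add(&v, 3));	/* prints 8 */
	printf("%d\n", atomic24_add(&v, -10));	/* prints -2 */
	return 0;
}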
/* $Id: locks.S,v 1.4 1997/03/04 16:26:41 jj Exp $
/* $Id: locks.S,v 1.9 1997/04/14 05:38:41 davem Exp $
* locks.S: SMP low-level lock primitives on Sparc.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
......@@ -7,6 +7,7 @@
#include <asm/cprefix.h>
#include <asm/ptrace.h>
#include <asm/psr.h>
#include <asm/spinlock.h>
.text
.align 4
......@@ -24,38 +25,15 @@
.globl ___spinlock_waitfor
___spinlock_waitfor:
1: orcc %g2, 0x0, %g0
bne 1b
bne,a 1b
ldub [%g1], %g2
ldstub [%g1], %g2
jmpl %o7 - 12, %g0
mov %g4, %o7
/* This is called when the kernel master lock holder changes,
* caller's PC is in %o7, %o7 must be restored to the value
* in %g4 when returning. The interrupt receiver cpu is to
* change to the new kernel lock holder before returning.
* The current implementation assumes that irq_rcvreg is a
* pointer to a word sized register which can be written with
* the MID value of the cpu to receive undirected interrupts.
* CPUID is in %g5, and mid_xlate is a byte table which translates
* CPUID values into the corresponding MID.
*/
.globl ___become_idt
___become_idt:
#ifdef __SMP__
sethi %hi(C_LABEL(mid_xlate)), %g2
or %g2, %lo(C_LABEL(mid_xlate)), %g2
ldub [%g5 + %g2], %g7
sethi %hi(C_LABEL(irq_rcvreg)), %g2
ld [%g2 + %lo(C_LABEL(irq_rcvreg))], %g2
st %g7, [%g2]
#endif
jmpl %o7 + 8, %g0
mov %g4, %o7
___lk_busy_spin:
orcc %g2, 0, %g0
bne ___lk_busy_spin
bne,a ___lk_busy_spin
ldub [%g1 + 0], %g2
b 1f
ldstub [%g1 + 0], %g2
......@@ -73,25 +51,36 @@ ___lock_kernel:
1: orcc %g2, 0, %g0
bne,a ___lk_busy_spin
ldub [%g1 + 0], %g2
ldub [%g1 + 2], %g2
cmp %g2, %g5
be 2f
stb %g5, [%g1 + 1]
stb %g5, [%g1 + 2]
#ifdef __SMP__
set C_LABEL(mid_xlate), %g2
ldub [%g2 + %g5], %g7
sethi %hi(C_LABEL(irq_rcvreg)), %g2
ld [%g2 + %lo(C_LABEL(irq_rcvreg))], %g2
st %g7, [%g2]
#endif
2: mov -1, %g2
stb %g5, [%g1 + 1]
mov -1, %g2
st %g2, [%g6 + AOFF_task_lock_depth]
wr %g3, 0x0, %psr
nop; nop; nop
9: jmpl %o7 + 0x8, %g0
mov %g4, %o7
.globl ___lock_reaquire_kernel
___lock_reaquire_kernel:
rd %psr, %g3
or %g3, PSR_PIL, %g7
wr %g7, 0x0, %psr
nop; nop; nop
st %g2, [%g6 + AOFF_task_lock_depth]
ldstub [%g1 + 0], %g2
1: orcc %g2, 0, %g0
be 3f
ldub [%g1 + 0], %g2
2: orcc %g2, 0, %g0
bne,a 2b
ldub [%g1 + 0], %g2
b 1b
ldstub [%g1 + 0], %g2
3: stb %g5, [%g1 + 1]
wr %g3, 0x0, %psr
nop; nop; nop
jmpl %o7 + 0x8, %g0
mov %g4, %o7
#undef NO_PROC_ID
#define NO_PROC_ID 0xff
......
/* $Id: hypersparc.S,v 1.1 1997/03/10 09:16:52 davem Exp $
/* $Id: hypersparc.S,v 1.3 1997/04/13 06:38:13 davem Exp $
* hypersparc.S: High speed Hypersparc mmu/cache operations.
*
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
......@@ -31,7 +31,6 @@
.globl hypersparc_flush_tlb_all, hypersparc_flush_tlb_mm
.globl hypersparc_flush_tlb_range, hypersparc_flush_tlb_page
/* Verified... */
hypersparc_flush_cache_all:
WINDOW_FLUSH(%g4, %g5)
sethi %hi(vac_cache_size), %g4
......@@ -46,7 +45,6 @@ hypersparc_flush_cache_all:
sta %g0, [%g0] ASI_M_FLUSH_IWHOLE ! hyper_flush_whole_icache
/* We expand the window flush to get maximum performance. */
/* Verified... */
hypersparc_flush_cache_mm:
#ifndef __SMP__
ld [%o0 + AOFF_mm_context], %g1
......@@ -84,7 +82,6 @@ hypersparc_flush_cache_mm_out:
sta %g0, [%g0 + %g0] ASI_M_FLUSH_IWHOLE ! hyper_flush_whole_icache
/* The things we do for performance... */
/* Verified... */
hypersparc_flush_cache_range:
#ifndef __SMP__
ld [%o0 + AOFF_mm_context], %g1
......@@ -174,12 +171,12 @@ hypersparc_flush_cache_range_out:
/* HyperSparc requires a valid mapping where we are about to flush
* in order to check for a physical tag match during the flush.
*/
/* Verified... */
/* Verified, my ass... */
hypersparc_flush_cache_page:
ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */
ld [%o0 + AOFF_mm_context], %g2
#ifndef __SMP__
ld [%o0 + AOFF_mm_context], %g1
cmp %g1, -1
cmp %g2, -1
be hypersparc_flush_cache_page_out
#endif
WINDOW_FLUSH(%g4, %g5)
......@@ -189,7 +186,7 @@ hypersparc_flush_cache_page:
mov SRMMU_CTX_REG, %o3
andn %o1, (PAGE_SIZE - 1), %o1
lda [%o3] ASI_M_MMUREGS, %o2
sta %g1, [%o3] ASI_M_MMUREGS
sta %g2, [%o3] ASI_M_MMUREGS
or %o1, 0x400, %o5
lda [%o5] ASI_M_FLUSH_PROBE, %g1
orcc %g0, %g1, %g0
......@@ -223,12 +220,15 @@ hypersparc_flush_cache_page:
lda [%g7] ASI_M_MMUREGS, %g0
sta %o2, [%g4] ASI_M_MMUREGS
hypersparc_flush_cache_page_out:
hypersparc_flush_sig_insns: /* This is "neat"... */
retl
sta %g0, [%g0 + %g0] ASI_M_FLUSH_IWHOLE
hypersparc_flush_sig_insns:
flush %o2
retl
flush %o2 + 4
/* HyperSparc is copy-back. */
/* Verified... */
hypersparc_flush_page_to_ram:
hypersparc_flush_chunk:
sethi %hi(vac_line_size), %g1
......@@ -271,13 +271,11 @@ hypersparc_flush_page_for_dma:
retl
nop
/* Verified... */
hypersparc_flush_tlb_all:
mov 0x400, %g1
retl
sta %g0, [%g1] ASI_M_FLUSH_PROBE
/* Verified... */
hypersparc_flush_tlb_mm:
mov SRMMU_CTX_REG, %g1
ld [%o0 + AOFF_mm_context], %o1
......@@ -293,7 +291,6 @@ hypersparc_flush_tlb_mm_out:
retl
sta %g5, [%g1] ASI_M_MMUREGS
/* Verified... */
hypersparc_flush_tlb_range:
mov SRMMU_CTX_REG, %g1
ld [%o0 + AOFF_mm_context], %o3
......@@ -317,7 +314,6 @@ hypersparc_flush_tlb_range_out:
retl
sta %g5, [%g1] ASI_M_MMUREGS
/* Verified... */
hypersparc_flush_tlb_page:
ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
mov SRMMU_CTX_REG, %g1
......
/* $Id: init.c,v 1.47 1997/01/02 14:14:28 jj Exp $
/* $Id: init.c,v 1.48 1997/04/12 04:28:37 davem Exp $
* linux/arch/sparc/mm/init.c
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
......@@ -72,10 +72,10 @@ void show_mem(void)
total++;
if (PageReserved(mem_map + i))
reserved++;
else if (!mem_map[i].count)
else if (!atomic_read(&mem_map[i].count))
free++;
else
shared += mem_map[i].count-1;
shared += atomic_read(&mem_map[i].count) - 1;
}
printk("%d pages of RAM\n",total);
printk("%d free pages\n",free);
......@@ -241,7 +241,7 @@ __initfunc(void mem_init(unsigned long start_mem, unsigned long end_mem))
datapages++;
continue;
}
mem_map[MAP_NR(addr)].count = 1;
atomic_set(&mem_map[MAP_NR(addr)].count, 1);
num_physpages++;
#ifdef CONFIG_BLK_DEV_INITRD
if (!initrd_start ||
......@@ -272,7 +272,7 @@ void free_initmem (void)
addr = (unsigned long)(&__init_begin);
for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
mem_map[MAP_NR(addr)].flags &= ~(1 << PG_reserved);
mem_map[MAP_NR(addr)].count = 1;
atomic_set(&mem_map[MAP_NR(addr)].count, 1);
free_page(addr);
}
printk ("Freeing unused kernel memory: %dk freed\n", (&__init_end - &__init_begin) >> 10);
......@@ -291,9 +291,9 @@ void si_meminfo(struct sysinfo *val)
if (PageReserved(mem_map + i))
continue;
val->totalram++;
if (!mem_map[i].count)
if (!atomic_read(&mem_map[i].count))
continue;
val->sharedram += mem_map[i].count-1;
val->sharedram += atomic_read(&mem_map[i].count) - 1;
}
val->totalram <<= PAGE_SHIFT;
val->sharedram <<= PAGE_SHIFT;
......
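
The init.c hunks above all make the same change: the page reference count is now an atomic_t, so direct reads and writes of mem_map[i].count become atomic_read() and atomic_set(). The pattern in a minimal sketch (2.1-era field names; the helper is illustrative):

	#include <linux/mm.h>
	#include <asm/atomic.h>

	/* Sketch of the conversion: count is an atomic_t and must go
	 * through the atomic accessors, never plain integer access. */
	static inline int page_shared_sketch(struct page *page)
	{
		int count = atomic_read(&page->count);
		return count ? count - 1 : 0;	/* users beyond the first */
	}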
/* $Id: loadmmu.c,v 1.45 1996/12/30 06:16:28 davem Exp $
/* $Id: loadmmu.c,v 1.46 1997/04/10 05:12:51 davem Exp $
* loadmmu.c: This code loads up all the mm function pointers once the
* machine type has been determined. It also sets the static
* mmu values such as PAGE_NONE, etc.
......@@ -41,9 +41,9 @@ unsigned long (*mmu_p2v)(unsigned long);
char *(*mmu_lockarea)(char *, unsigned long);
void (*mmu_unlockarea)(char *, unsigned long);
char *(*mmu_get_scsi_one)(char *, unsigned long, struct linux_sbus *sbus);
__u32 (*mmu_get_scsi_one)(char *, unsigned long, struct linux_sbus *sbus);
void (*mmu_get_scsi_sgl)(struct mmu_sglist *, int, struct linux_sbus *sbus);
void (*mmu_release_scsi_one)(char *, unsigned long, struct linux_sbus *sbus);
void (*mmu_release_scsi_one)(__u32, unsigned long, struct linux_sbus *sbus);
void (*mmu_release_scsi_sgl)(struct mmu_sglist *, int, struct linux_sbus *sbus);
void (*mmu_map_dma_area)(unsigned long addr, int len);
......
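
The signature change above turns the SCSI DVMA handle from a kernel pointer (char *) into a 32-bit bus address (__u32), which is what actually gets programmed into the device. A hypothetical call site after the change:

	/* Hypothetical driver fragment: the returned handle is a __u32 bus
	 * address for the device, not a dereferenceable kernel pointer. */
	static void map_for_dma_sketch(char *buffer, unsigned long len,
				       struct linux_sbus *sbus)
	{
		__u32 dvma = mmu_get_scsi_one(buffer, len, sbus);
		/* ... hand 'dvma' to the SBUS device, run the transfer ... */
		mmu_release_scsi_one(dvma, len, sbus);
	}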
/* $Id: srmmu.c,v 1.132 1997/03/18 17:56:47 jj Exp $
/* $Id: srmmu.c,v 1.135 1997/04/14 05:38:49 davem Exp $
* srmmu.c: SRMMU specific routines for memory management.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
......@@ -674,7 +674,7 @@ static void srmmu_set_pte_nocache_hyper(pte_t *ptep, pte_t pteval)
: "r" (page | 0x400), "r" (page), "i" (ASI_M_FLUSH_PROBE),
"i" (ASI_M_FLUSH_PAGE), "r" (SRMMU_FAULT_STATUS), "i" (ASI_M_MMUREGS),
"r" (vac_line_size), "i" (PAGE_SIZE)
: "g4", "g5");
: "g4", "g5", "cc");
}
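
The recurring change through the rest of this file adds "cc" to the inline-asm clobber lists: these statements execute flag-setting instructions (subcc, orcc, andcc), so gcc must be told the condition codes are destroyed or it may reuse stale flags across the asm. A minimal illustration, not taken from the kernel:

	/* subcc writes the integer condition codes, so "cc" must appear
	 * in the clobber list. */
	static inline int dec_sketch(int n)
	{
		__asm__ __volatile__(
			"subcc	%0, 1, %0"
			: "=r" (n)
			: "0" (n)
			: "cc");
		return n;
	}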
static void srmmu_set_pte_nocache_cypress(pte_t *ptep, pte_t pteval)
......@@ -1155,7 +1155,7 @@ static void viking_mxcc_flush_page(unsigned long page)
"r" (MXCC_SRCSTREAM),
"r" (MXCC_DESSTREAM),
"r" (MXCC_STREAM_SIZE),
"i" (ASI_M_MXCC) : "g2", "g3");
"i" (ASI_M_MXCC) : "g2", "g3", "cc");
/* This was handcoded after a look at the gcc output from
*
......@@ -1234,7 +1234,7 @@ static void viking_flush_tlb_all(void)
2: subcc %0, 1, %0
bne 2b
restore %%g0, %%g0, %%g0"
: "=&r" (ctr) : "0" (ctr), "i" (UWINMASK_OFFSET) : "g4");
: "=&r" (ctr) : "0" (ctr), "i" (UWINMASK_OFFSET) : "g4", "cc");
srmmu_flush_whole_tlb();
module_stats.invall++;
}
......@@ -1262,7 +1262,7 @@ static void viking_flush_tlb_mm(struct mm_struct *mm)
: "r" (SRMMU_CTX_REG), "r" (0x300), "r" (mm->context),
"i" (ASI_M_MMUREGS), "i" (ASI_M_FLUSH_PROBE), "0" (ctr),
"i" (UWINMASK_OFFSET)
: "g4");
: "g4", "cc");
module_stats.invmm++;
FLUSH_END
}
......@@ -1283,7 +1283,7 @@ static void viking_flush_tlb_range(struct mm_struct *mm, unsigned long start, un
2: subcc %0, 1, %0
bne 2b
restore %%g0, %%g0, %%g0"
: "=&r" (ctr) : "0" (ctr), "i" (UWINMASK_OFFSET) : "g4");
: "=&r" (ctr) : "0" (ctr), "i" (UWINMASK_OFFSET) : "g4", "cc");
start &= SRMMU_PGDIR_MASK;
size = SRMMU_PGDIR_ALIGN(end) - start;
__asm__ __volatile__("
......@@ -1297,7 +1297,7 @@ static void viking_flush_tlb_range(struct mm_struct *mm, unsigned long start, un
: "r" (SRMMU_CTX_REG), "r" (mm->context), "r" (start | 0x200),
"r" (size), "r" (SRMMU_PGDIR_SIZE), "i" (ASI_M_MMUREGS),
"i" (ASI_M_FLUSH_PROBE)
: "g5");
: "g5", "cc");
module_stats.invrnge++;
FLUSH_END
}
......@@ -1318,7 +1318,7 @@ static void viking_flush_tlb_page(struct vm_area_struct *vma, unsigned long page
2: subcc %0, 1, %0
bne 2b
restore %%g0, %%g0, %%g0"
: "=&r" (ctr) : "0" (ctr), "i" (UWINMASK_OFFSET) : "g4");
: "=&r" (ctr) : "0" (ctr), "i" (UWINMASK_OFFSET) : "g4", "cc");
__asm__ __volatile__("
lda [%0] %3, %%g5
sta %1, [%0] %3
......@@ -1553,7 +1553,7 @@ static void cypress_flush_tlb_range(struct mm_struct *mm, unsigned long start, u
: "r" (SRMMU_CTX_REG), "r" (mm->context), "r" (start | 0x200),
"r" (size), "r" (SRMMU_PGDIR_SIZE), "i" (ASI_M_MMUREGS),
"i" (ASI_M_FLUSH_PROBE)
: "g5");
: "g5", "cc");
module_stats.invrnge++;
FLUSH_END
}
......@@ -1822,7 +1822,7 @@ void iommu_sun4d_init(int sbi_node, struct linux_sbus *sbus)
sbus->iommu = (struct iommu_struct *)iommu;
}
static char *srmmu_get_scsi_one(char *vaddr, unsigned long len, struct linux_sbus *sbus)
static __u32 srmmu_get_scsi_one(char *vaddr, unsigned long len, struct linux_sbus *sbus)
{
unsigned long page = ((unsigned long) vaddr) & PAGE_MASK;
......@@ -1830,7 +1830,7 @@ static char *srmmu_get_scsi_one(char *vaddr, unsigned long len, struct linux_sbu
flush_page_for_dma(page);
page += PAGE_SIZE;
}
return vaddr;
return (__u32)vaddr;
}
static void srmmu_get_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus)
......@@ -1843,12 +1843,12 @@ static void srmmu_get_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_sbus
flush_page_for_dma(page);
page += PAGE_SIZE;
}
sg[sz].dvma_addr = (char *) (sg[sz].addr);
sg[sz].dvma_addr = (__u32) (sg[sz].addr);
sz--;
}
}
static void srmmu_release_scsi_one(char *vaddr, unsigned long len, struct linux_sbus *sbus)
static void srmmu_release_scsi_one(__u32 vaddr, unsigned long len, struct linux_sbus *sbus)
{
}
......@@ -2666,7 +2666,10 @@ static void poke_hypersparc(void)
mreg |= (HYPERSPARC_CMODE);
srmmu_set_mmureg(mreg);
#if 0 /* I think this is bad news... -DaveM */
hyper_clear_all_tags();
#endif
put_ross_icr(HYPERSPARC_ICCR_FTD | HYPERSPARC_ICCR_ICE);
hyper_flush_whole_icache();
......
......@@ -1676,7 +1676,7 @@ void floppy_interrupt(int irq, void *dev_id, struct pt_regs * regs)
} while ((ST0 & 0x83) != UNIT(current_drive) && inr == 2);
}
if (handler) {
if(intr_count >= 2)
if (in_interrupt())
schedule_bh( (void *)(void *) handler);
else
handler();
......
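
The floppy fix replaces the old global intr_count test with in_interrupt(), which in the 2.1 SMP rework is derived from the per-CPU interrupt counters. The shape of the test is roughly the following; the exact definition is per-architecture and this form is an assumption:

	/* Assumed shape of the 2.1-era test: non-zero when the current
	 * CPU is nested inside interrupt handling. */
	#define in_interrupt()	(local_irq_count[smp_processor_id()] != 0)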
torvalds@penguin.transmeta.com
\ No newline at end of file