Commit 3985d67e authored by Anton Blanchard

Merge samba.org:/scratch/anton/linux-2.5

into samba.org:/scratch/anton/linux-2.5_ppc64
parents f2e659fa 7fb7f2ac
......@@ -110,7 +110,7 @@ put_inode: no
delete_inode: no
clear_inode: no
put_super: yes yes maybe (see below)
write_super: yes yes maybe (see below)
write_super: no yes maybe (see below)
statfs: yes no no
remount_fs: yes yes maybe (see below)
umount_begin: yes no maybe (see below)
......
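The write_super row above changes from needing the BKL to not needing it, and most of the filesystem hunks later in this commit (affs, bfs, ext2, ext3, hfs, jffs, jffs2, qnx4, sysv, udf, ufs) apply the matching change: each filesystem now takes the BKL itself. A minimal sketch of the resulting pattern, for illustration only (example_sync_super is a hypothetical helper):

static void example_write_super(struct super_block *sb)
{
	/* ->write_super is now called without the BKL held, so take it
	 * here if the filesystem still depends on it. */
	lock_kernel();
	if (!(sb->s_flags & MS_RDONLY))
		example_sync_super(sb);	/* hypothetical helper */
	sb->s_dirt = 0;
	unlock_kernel();
}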
......@@ -153,7 +153,7 @@ a number of operations registered into it at compile time, but is mutable,
and 4 functions are exported for interface to it: __sysrq_lock_table,
__sysrq_unlock_table, __sysrq_get_key_op, and __sysrq_put_key_op. The
functions __sysrq_swap_key_ops and __sysrq_swap_key_ops_nolock are defined
in the header itself, and the REGISTER and UNREGISTER macros are built fromi
in the header itself, and the REGISTER and UNREGISTER macros are built from
these. More complex (and dangerous!) manipulations of the table are possible
using these functions, but you must be careful to always lock the table before
you read or write from it, and to unlock it again when you are done. (And of
......
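For illustration only, the lock/get/put/unlock discipline described above could be used as follows; the prototypes are assumed from the function names listed in the text, not quoted from this commit:

static void swap_sysrq_handler(int key, struct sysrq_key_op *new_op,
			       struct sysrq_key_op **old_op)
{
	/* Always lock the table before reading or writing it, and
	 * unlock it again when done, as the text above requires. */
	__sysrq_lock_table();
	*old_op = __sysrq_get_key_op(key);
	__sysrq_put_key_op(key, new_op);
	__sysrq_unlock_table();
}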
VERSION = 2
PATCHLEVEL = 5
SUBLEVEL = 9
SUBLEVEL = 10
EXTRAVERSION =
KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
......
......@@ -791,7 +791,7 @@ flush_instr:
#
# but we yet haven't reloaded the CS register, so the default size
# of the target offset still is 16 bit.
# However, using an operant prefix (0x66), the CPU will properly
# However, using an operand prefix (0x66), the CPU will properly
# take our 48 bit far pointer. (INTeL 80386 Programmer's Reference
# Manual, Mixing 16-bit and 32-bit code, page 16-6)
......
......@@ -15,6 +15,8 @@ define_bool CONFIG_RWSEM_GENERIC_SPINLOCK y
define_bool CONFIG_RWSEM_XCHGADD_ALGORITHM n
define_bool CONFIG_X86_CMPXCHG y
define_bool CONFIG_EARLY_PRINTK y
source init/Config.in
mainmenu_option next_comment
......@@ -210,12 +212,8 @@ if [ "$CONFIG_DEBUG_KERNEL" != "n" ]; then
# bool ' Memory mapped I/O debugging' CONFIG_DEBUG_IOVIRT
bool ' Magic SysRq key' CONFIG_MAGIC_SYSRQ
bool ' Spinlock debugging' CONFIG_DEBUG_SPINLOCK
bool ' Early printk' CONFIG_EARLY_PRINTK
bool ' Additional run-time checks' CONFIG_CHECKING
bool ' Debug __init statements' CONFIG_INIT_DEBUG
#if [ "$CONFIG_SERIAL_CONSOLE" = "y" ]; then
# bool 'Early serial console (ttyS0)' CONFIG_EARLY_SERIAL_CONSOLE
#fi
fi
endmenu
......
......@@ -9,6 +9,7 @@ CONFIG_UID16=y
CONFIG_RWSEM_GENERIC_SPINLOCK=y
# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
CONFIG_X86_CMPXCHG=y
CONFIG_EARLY_PRINTK=y
#
# Code maturity level options
......@@ -282,7 +283,7 @@ CONFIG_IDEDMA_AUTO=y
#
# ISDN subsystem
#
# CONFIG_ISDN is not set
# CONFIG_ISDN_BOOL is not set
#
# Old CD-ROM drivers (not SCSI, not IDE)
......@@ -430,6 +431,7 @@ CONFIG_EXT2_FS=y
# CONFIG_NFSD_TCP is not set
# CONFIG_SUNRPC is not set
# CONFIG_LOCKD is not set
# CONFIG_EXPORTFS is not set
# CONFIG_SMB_FS is not set
# CONFIG_NCP_FS is not set
# CONFIG_NCPFS_PACKET_SIGNING is not set
......@@ -484,7 +486,6 @@ CONFIG_DEBUG_KERNEL=y
# CONFIG_DEBUG_SLAB is not set
# CONFIG_MAGIC_SYSRQ is not set
# CONFIG_DEBUG_SPINLOCK is not set
# CONFIG_EARLY_PRINTK is not set
# CONFIG_CHECKING is not set
# CONFIG_INIT_DEBUG is not set
......
......@@ -3182,7 +3182,6 @@ COMPATIBLE_IOCTL(FIGETBSZ)
*/
COMPATIBLE_IOCTL(HDIO_GET_IDENTITY)
COMPATIBLE_IOCTL(HDIO_SET_DMA)
COMPATIBLE_IOCTL(HDIO_SET_KEEPSETTINGS)
COMPATIBLE_IOCTL(HDIO_SET_UNMASKINTR)
COMPATIBLE_IOCTL(HDIO_SET_NOWERR)
COMPATIBLE_IOCTL(HDIO_SET_32BIT)
......@@ -3742,7 +3741,6 @@ HANDLE_IOCTL(BLKSECTGET, w_long)
HANDLE_IOCTL(BLKPG, blkpg_ioctl_trans)
HANDLE_IOCTL(FBIOGETCMAP, fb_ioctl_trans)
HANDLE_IOCTL(FBIOPUTCMAP, fb_ioctl_trans)
HANDLE_IOCTL(HDIO_GET_KEEPSETTINGS, hdio_ioctl_trans)
HANDLE_IOCTL(HDIO_GET_UNMASKINTR, hdio_ioctl_trans)
HANDLE_IOCTL(HDIO_GET_DMA, hdio_ioctl_trans)
HANDLE_IOCTL(HDIO_GET_32BIT, hdio_ioctl_trans)
......
#define printk real_printk
#include <linux/console.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/string.h>
#include <asm/io.h>
#undef printk
/* Simple VGA output */
#define VGABASE 0xffffffff800b8000ul /* This is "wrong" address to access it, we should access it using 0xffff8000000b8000ul; but 0xffff8000000b8000ul is not available early at boot. */
#define VGABASE 0xffffffff800b8000UL
#define MAX_YPOS 25
#define MAX_XPOS 80
static int current_ypos = 1, current_xpos = 0; /* We want to print before clearing BSS */
static int current_ypos = 1, current_xpos = 0;
void
early_clear (void)
{
int k, i;
for(k = 0; k < MAX_YPOS; k++)
for(i = 0; i < MAX_XPOS; i++)
writew(0, VGABASE + 2*(MAX_XPOS*k + i));
current_ypos = 0;
}
void
early_puts (const char *str)
static void early_vga_write(struct console *con, const char *str, unsigned n)
{
char c;
int i, k, j;
while ((c = *str++) != '\0') {
while ((c = *str++) != '\0' && n-- > 0) {
if (current_ypos >= MAX_YPOS) {
#if 1
/* scroll 1 line up */
for(k = 1, j = 0; k < MAX_YPOS; k++, j++) {
for(i = 0; i < MAX_XPOS; i++) {
......@@ -40,11 +31,6 @@ early_puts (const char *str)
writew(0x720, VGABASE + 2*(MAX_XPOS*j + i));
}
current_ypos = MAX_YPOS-1;
#else
/* MUCH faster */
early_clear();
current_ypos = 0;
#endif
}
if (c == '\n') {
current_xpos = 0;
......@@ -60,20 +46,144 @@ early_puts (const char *str)
}
}
static char buf[1024];
static struct console early_vga_console = {
name: "earlyvga",
write: early_vga_write,
flags: CON_PRINTBUFFER,
index: -1,
};
/* Serial functions loosely based on a similar package from Klaus P. Gerlicher */
int early_serial_base = 0x3f8; /* ttyS0 */
#define XMTRDY 0x20
#define DLAB 0x80
#define TXR 0 /* Transmit register (WRITE) */
#define RXR 0 /* Receive register (READ) */
#define IER 1 /* Interrupt Enable */
#define IIR 2 /* Interrupt ID */
#define FCR 2 /* FIFO control */
#define LCR 3 /* Line control */
#define MCR 4 /* Modem control */
#define LSR 5 /* Line Status */
#define MSR 6 /* Modem Status */
#define DLL 0 /* Divisor Latch Low */
#define DLH 1 /* Divisor latch High */
int printk(const char *fmt, ...) __attribute__((alias("early_printk")));
static int early_serial_putc(unsigned char ch)
{
unsigned timeout = 0xffff;
while ((inb(early_serial_base + LSR) & XMTRDY) == 0 && --timeout)
rep_nop();
outb(ch, early_serial_base + TXR);
return timeout ? 0 : -1;
}
static void early_serial_write(struct console *con, const char *s, unsigned n)
{
while (*s && n-- > 0) {
early_serial_putc(*s);
if (*s == '\n')
early_serial_putc('\r');
s++;
}
}
int early_printk(const char *fmt, ...)
static __init void early_serial_init(char *opt)
{
va_list args;
int i;
static int bases[] = { 0x3f8, 0x2f8 };
unsigned char c;
unsigned divisor, baud = 38400;
char *s, *e;
va_start(args, fmt);
i = vsprintf(buf, fmt, args); /* hopefully i < sizeof(buf)-4 */
va_end(args);
s = strsep(&opt, ",");
if (s != NULL) {
unsigned port;
++s;
if (!strncmp(s,"ttyS",4))
s+=4;
port = simple_strtoul(s, &e, 10);
if (port > 1 || s == e)
port = 0;
early_serial_base = bases[port];
}
early_puts(buf);
c = inb(early_serial_base + LCR);
outb(c & ~DLAB, early_serial_base + LCR);
outb(0, early_serial_base + IER); /* no interrupt */
outb(0, early_serial_base + FCR); /* no fifo */
outb(0x3, early_serial_base + LCR); /* 8n1 */
outb(0x3, early_serial_base + MCR); /* DTR + RTS */
return i;
s = strsep(&opt, ",");
if (s != NULL) {
baud = simple_strtoul(s, &e, 0);
if (baud == 0 || s == e)
baud = 38400;
}
divisor = 115200 / baud;
c = inb(early_serial_base + LCR);
outb(c | DLAB, early_serial_base + LCR);
outb(divisor & 0xff, early_serial_base + DLL);
outb((divisor >> 8) & 0xff, early_serial_base + DLH);
outb(c & ~DLAB, early_serial_base + LCR);
}
static struct console early_serial_console = {
name: "earlyser",
write: early_serial_write,
flags: CON_PRINTBUFFER,
index: -1,
};
/* Direct interface for emergencies */
struct console *early_console = &early_vga_console;
static int early_console_initialized = 0;
void early_printk(const char *fmt, ...)
{
char buf[512];
int n;
va_list ap;
va_start(ap,fmt);
n = vsnprintf(buf,512,fmt,ap);
early_console->write(early_console,buf,n);
va_end(ap);
}
int __init setup_early_printk(char *opt)
{
if (early_console_initialized)
return;
early_console_initialized = 1;
if (!strncmp(opt, "serial", 6)) {
early_serial_init(opt+7);
early_console = &early_serial_console;
} else if (!strncmp(opt, "vga", 3))
early_console = &early_vga_console;
else
return -1;
register_console(early_console);
return 0;
}
void __init disable_early_printk(void)
{
if (early_console_initialized) {
unregister_console(early_console);
early_console_initialized = 0;
}
}
/* syntax: earlyprintk=vga
earlyprintk=serial[,ttySn[,baudrate]]
Only vga or serial at a time, not both.
Currently only ttyS0 and ttyS1 are supported.
Interaction with the standard serial driver is not very good.
The VGA output is eventually overwritten by the real console. */
__setup("earlyprintk=", setup_early_printk);
......@@ -42,40 +42,6 @@ void __init fpu_init(void)
write_cr0(oldcr0 & ~((1UL<<3)|(1UL<<2))); /* clear TS and EM */
asm("fninit");
load_mxcsr(0x1f80);
/* initialize MMX state. normally this will be covered by fninit, but the
architecture doesn't guarantee it so do it explicitely. */
asm volatile("movq %0,%%mm0\n\t"
"movq %%mm0,%%mm1\n\t"
"movq %%mm0,%%mm2\n\t"
"movq %%mm0,%%mm3\n\t"
"movq %%mm0,%%mm4\n\t"
"movq %%mm0,%%mm5\n\t"
"movq %%mm0,%%mm6\n\t"
"movq %%mm0,%%mm7\n\t" :: "m" (0ULL));
asm("emms");
/* initialize XMM state */
asm("xorpd %xmm0,%xmm0");
asm("xorpd %xmm1,%xmm1");
asm("xorpd %xmm2,%xmm2");
asm("xorpd %xmm3,%xmm3");
asm("xorpd %xmm4,%xmm4");
asm("xorpd %xmm5,%xmm5");
asm("xorpd %xmm6,%xmm6");
asm("xorpd %xmm7,%xmm7");
asm("xorpd %xmm8,%xmm8");
asm("xorpd %xmm9,%xmm9");
asm("xorpd %xmm10,%xmm10");
asm("xorpd %xmm11,%xmm11");
asm("xorpd %xmm12,%xmm12");
asm("xorpd %xmm13,%xmm13");
asm("xorpd %xmm14,%xmm14");
asm("xorpd %xmm15,%xmm15");
load_mxcsr(0x1f80);
asm volatile("fxsave %0" : "=m" (init_fpu_env));
/* clean state in init */
stts();
clear_thread_flag(TIF_USEDFPU);
......@@ -89,13 +55,11 @@ void __init fpu_init(void)
*/
void init_fpu(void)
{
#if 0
asm("fninit");
load_mxcsr(0x1f80);
#else
asm volatile("fxrstor %0" :: "m" (init_fpu_env));
#endif
current->used_math = 1;
struct task_struct *me = current;
memset(&me->thread.i387.fxsave, 0, sizeof(struct i387_fxsave_struct));
me->thread.i387.fxsave.cwd = 0x37f;
me->thread.i387.fxsave.mxcsr = 0x1f80;
me->used_math = 1;
}
/*
......
......@@ -114,19 +114,8 @@ asmlinkage long sys_pause(void)
return -ERESTARTNOHAND;
}
asmlinkage long wrap_sys_shmat(int shmid, char *shmaddr, int shmflg,
unsigned long *raddr_user)
asmlinkage long wrap_sys_shmat(int shmid, char *shmaddr, int shmflg)
{
unsigned long raddr;
return sys_shmat(shmid,shmaddr,shmflg,&raddr) ?: put_user(raddr,raddr_user);
}
asmlinkage long wrap_sys_semctl(int semid, int semnum, int cmd, unsigned long *ptr)
{
unsigned long val;
/* XXX: for cmd==SETVAL the manpage says ptr is the value directly. i386
seems to always get it via a pointer. Follow i386 here. Check this. */
if (get_user(val, ptr))
return -EFAULT;
return sys_semctl(semid, semnum, cmd, (union semun)(void *)val);
return sys_shmat(shmid,shmaddr,shmflg,&raddr) ?: raddr;
}
......@@ -736,11 +736,9 @@ asmlinkage void math_state_restore(void)
struct task_struct *me = current;
clts(); /* Allow maths ops (or we recurse) */
if (me->used_math) {
restore_fpu_checking(&me->thread.i387.fxsave);
} else {
if (!me->used_math)
init_fpu();
}
restore_fpu_checking(&me->thread.i387.fxsave);
set_thread_flag(TIF_USEDFPU);
}
......
......@@ -85,7 +85,7 @@ void dump_pagetable(unsigned long address)
}
int page_fault_trace;
int exception_trace = 1;
int exception_trace;
/*
* This routine handles page faults. It determines the address,
......
......@@ -220,6 +220,7 @@
5145 Radeon QE
5146 Radeon QF
5147 Radeon QG
514c Radeon 8500 QL
5159 Radeon VE QY
515a Radeon VE QZ
5245 Rage 128 RE
......
......@@ -8,7 +8,7 @@ MOD_TARGET = aic7xxx.o
obj-$(CONFIG_SCSI_AIC7XXX) += aic7xxx_mod.o
# Core files
aix7xxx_mod-objs += aic7xxx.o aic7xxx_93cx6.o aic7770.o
aic7xxx_mod-objs += aic7xxx.o aic7xxx_93cx6.o aic7770.o
# Platform Specific Files
aic7xxx_mod-objs += aic7xxx_linux.o aic7xxx_proc.o aic7770_linux.o
......
......@@ -526,6 +526,7 @@
#include "sd.h"
#include "scsi.h"
#include "hosts.h"
#include <scsi/scsicam.h>
#include "megaraid.h"
......@@ -1107,10 +1108,11 @@ static void mega_cmd_done (mega_host_config * megaCfg, mega_scb * pScb, int stat
status = 0xF0;
}
#endif
if (SCpnt->cmnd[0] == INQUIRY && !islogical) {
if ( SCpnt->use_sg ) {
sgList = (struct scatterlist *)SCpnt->request_buffer;
memcpy(&c, sgList[0].address, 0x1);
memcpy(&c, cpu_to_le32(sg_dma_address(&sgList[0])), 0x1);
} else {
memcpy(&c, SCpnt->request_buffer, 0x1);
}
......@@ -4557,9 +4559,11 @@ static int megadev_ioctl (struct inode *inode, struct file *filep,
#endif
IO_LOCK_T;
if (!inode || !(dev = inode->i_rdev))
if (!inode)
return -EINVAL;
dev = inode->i_rdev;
if (_IOC_TYPE (cmd) != MEGAIOC_MAGIC)
return (-EINVAL);
......@@ -5104,16 +5108,16 @@ mega_del_logdrv(mega_host_config *this_hba, int logdrv)
* Stop sending commands to the controller, queue them internally.
* When deletion is complete, ISR will flush the queue.
*/
IO_LOCK;
IO_LOCK(this_hba->host);
this_hba->quiescent = 1;
IO_UNLOCK;
IO_UNLOCK(this_hba->host);
while( this_hba->qPcnt ) {
sleep_on_timeout( &wq, 1*HZ ); /* sleep for 1s */
}
rval = mega_do_del_logdrv(this_hba, logdrv);
IO_LOCK;
IO_LOCK(this_hba->host);
/*
* Attach the internal queue to the pending queue
*/
......@@ -5158,7 +5162,7 @@ mega_del_logdrv(mega_host_config *this_hba, int logdrv)
}
this_hba->quiescent = 0;
IO_UNLOCK;
IO_UNLOCK(this_hba->host);
return rval;
}
......
......@@ -26,6 +26,7 @@
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
#include <asm/system.h>
#include <asm/uaccess.h>
......@@ -38,7 +39,7 @@ static void
affs_put_super(struct super_block *sb)
{
struct affs_sb_info *sbi = AFFS_SB(sb);
lock_kernel();
pr_debug("AFFS: put_super()\n");
if (!(sb->s_flags & MS_RDONLY)) {
......@@ -56,7 +57,7 @@ affs_put_super(struct super_block *sb)
affs_brelse(sbi->s_root_bh);
kfree(sbi);
sb->u.generic_sbp = NULL;
unlock_kernel();
return;
}
......@@ -65,7 +66,7 @@ affs_write_super(struct super_block *sb)
{
int clean = 2;
struct affs_sb_info *sbi = AFFS_SB(sb);
lock_kernel();
if (!(sb->s_flags & MS_RDONLY)) {
// if (sbi->s_bitmap[i].bm_bh) {
// if (buffer_dirty(sbi->s_bitmap[i].bm_bh)) {
......@@ -80,6 +81,7 @@ affs_write_super(struct super_block *sb)
sb->s_dirt = 0;
pr_debug("AFFS: write_super() at %lu, clean=%d\n", CURRENT_TIME, clean);
unlock_kernel();
}
static kmem_cache_t * affs_inode_cachep;
......
......@@ -206,9 +206,11 @@ static int bfs_statfs(struct super_block *s, struct statfs *buf)
static void bfs_write_super(struct super_block *s)
{
lock_kernel();
if (!(s->s_flags & MS_RDONLY))
mark_buffer_dirty(BFS_SB(s)->si_sbh);
s->s_dirt = 0;
unlock_kernel();
}
static kmem_cache_t * bfs_inode_cachep;
......
......@@ -2612,10 +2612,8 @@ void __init buffer_init(unsigned long mempages)
static void sync_old_buffers(unsigned long dummy)
{
lock_kernel();
sync_unlocked_inodes();
sync_supers();
unlock_kernel();
for (;;) {
struct buffer_head *bh;
......
......@@ -25,6 +25,7 @@
#include <linux/locks.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/smp_lock.h>
#include <asm/uaccess.h>
......@@ -754,7 +755,7 @@ static void ext2_sync_super(struct super_block *sb, struct ext2_super_block *es)
void ext2_write_super (struct super_block * sb)
{
struct ext2_super_block * es;
lock_kernel();
if (!(sb->s_flags & MS_RDONLY)) {
es = EXT2_SB(sb)->s_es;
......@@ -768,6 +769,7 @@ void ext2_write_super (struct super_block * sb)
ext2_commit_super (sb, es);
}
sb->s_dirt = 0;
unlock_kernel();
}
int ext2_remount (struct super_block * sb, int * flags, char * data)
......
......@@ -501,7 +501,7 @@ static struct super_operations ext3_sops = {
put_inode: ext3_put_inode, /* BKL not held. Don't need */
delete_inode: ext3_delete_inode, /* BKL not held. We take it */
put_super: ext3_put_super, /* BKL held */
write_super: ext3_write_super, /* BKL held */
write_super: ext3_write_super, /* BKL not held. We take it. Needed? */
write_super_lockfs: ext3_write_super_lockfs, /* BKL not held. Take it */
unlockfs: ext3_unlockfs, /* BKL not held. We take it */
statfs: ext3_statfs, /* BKL held */
......@@ -1599,7 +1599,7 @@ MODULE_PARM_DESC(do_sync_supers, "Write superblocks synchronously");
void ext3_write_super (struct super_block * sb)
{
tid_t target;
lock_kernel();
if (down_trylock(&sb->s_lock) == 0)
BUG(); /* aviro detector */
sb->s_dirt = 0;
......@@ -1610,6 +1610,7 @@ void ext3_write_super (struct super_block * sb)
log_wait_commit(EXT3_SB(sb)->s_journal, target);
lock_super(sb);
}
unlock_kernel();
}
/*
......
......@@ -31,6 +31,7 @@
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
MODULE_LICENSE("GPL");
......@@ -146,9 +147,10 @@ static void hfs_read_inode(struct inode *inode)
static void hfs_write_super(struct super_block *sb)
{
struct hfs_mdb *mdb = HFS_SB(sb)->s_mdb;
lock_kernel();
/* is this a valid hfs superblock? */
if (!sb || sb->s_magic != HFS_SUPER_MAGIC) {
unlock_kernel();
return;
}
......@@ -157,6 +159,7 @@ static void hfs_write_super(struct super_block *sb)
hfs_mdb_commit(mdb, 0);
}
sb->s_dirt = 0;
unlock_kernel();
}
/*
......
......@@ -1746,8 +1746,9 @@ void
jffs_write_super(struct super_block *sb)
{
struct jffs_control *c = (struct jffs_control *)sb->u.generic_sbp;
lock_kernel();
jffs_garbage_collect_trigger(c);
unlock_kernel();
}
static struct super_operations jffs_ops =
......
......@@ -44,6 +44,7 @@
#include <linux/mtd/mtd.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include "nodelist.h"
int jffs2_statfs(struct super_block *sb, struct statfs *buf)
......@@ -320,16 +321,21 @@ int jffs2_remount_fs (struct super_block *sb, int *flags, char *data)
void jffs2_write_super (struct super_block *sb)
{
struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
lock_kernel();
sb->s_dirt = 0;
if (sb->s_flags & MS_RDONLY)
if (sb->s_flags & MS_RDONLY) {
unlock_kernel();
return;
}
D1(printk("jffs2_write_super(): flush_wbuf before gc-trigger\n"));
jffs2_flush_wbuf(c, 2);
jffs2_garbage_collect_trigger(c);
jffs2_erase_pending_blocks(c);
jffs2_mark_erased_blocks(c);
unlock_kernel();
}
......
......@@ -72,8 +72,10 @@ static void qnx4_delete_inode(struct inode *inode)
static void qnx4_write_super(struct super_block *sb)
{
lock_kernel();
QNX4DEBUG(("qnx4: write_super\n"));
sb->s_dirt = 0;
unlock_kernel();
}
static void qnx4_write_inode(struct inode *inode, int unused)
......
......@@ -390,7 +390,7 @@ static int clear_prepared_bits(struct buffer_head *bh) {
/* buffer is in current transaction */
inline int buffer_journaled(const struct buffer_head *bh) {
if (bh)
return test_bit(BH_JDirty, ( struct buffer_head * ) &bh->b_state) ;
return test_bit(BH_JDirty, &bh->b_state) ;
else
return 0 ;
}
......@@ -400,7 +400,7 @@ inline int buffer_journaled(const struct buffer_head *bh) {
*/
inline int buffer_journal_new(const struct buffer_head *bh) {
if (bh)
return test_bit(BH_JNew, ( struct buffer_head * )&bh->b_state) ;
return test_bit(BH_JNew, &bh->b_state) ;
else
return 0 ;
}
......
......@@ -33,6 +33,7 @@
/* This is only called on sync() and umount(), when s_dirt=1. */
static void sysv_write_super(struct super_block *sb)
{
lock_kernel();
if (!(sb->s_flags & MS_RDONLY)) {
/* If we are going to write out the super block,
then attach current time stamp.
......@@ -46,6 +47,7 @@ static void sysv_write_super(struct super_block *sb)
mark_buffer_dirty(sb->sv_bh2);
}
sb->s_dirt = 0;
unlock_kernel();
}
static void sysv_put_super(struct super_block *sb)
......
......@@ -55,6 +55,7 @@
#include <linux/stat.h>
#include <linux/cdrom.h>
#include <linux/nls.h>
#include <linux/smp_lock.h>
#include <asm/byteorder.h>
#include <linux/udf_fs.h>
......@@ -359,9 +360,11 @@ udf_parse_options(char *options, struct udf_options *uopt)
void
udf_write_super(struct super_block *sb)
{
lock_kernel();
if (!(sb->s_flags & MS_RDONLY))
udf_open_lvid(sb);
sb->s_dirt = 0;
unlock_kernel();
}
static int
......
......@@ -80,6 +80,7 @@
#include <linux/locks.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
#include "swab.h"
#include "util.h"
......@@ -822,6 +823,8 @@ void ufs_write_super (struct super_block * sb) {
struct ufs_super_block_third * usb3;
unsigned flags;
lock_kernel();
UFSD(("ENTER\n"))
flags = sb->u.ufs_sb.s_flags;
uspi = sb->u.ufs_sb.s_uspi;
......@@ -838,6 +841,7 @@ void ufs_write_super (struct super_block * sb) {
}
sb->s_dirt = 0;
UFSD(("EXIT\n"))
unlock_kernel();
}
void ufs_put_super (struct super_block * sb)
......
......@@ -244,7 +244,7 @@ static __inline__ int constant_test_bit(int nr, const volatile unsigned long * a
return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}
static __inline__ int variable_test_bit(int nr, volatile unsigned long * addr)
static __inline__ int variable_test_bit(int nr, const volatile unsigned long * addr)
{
int oldbit;
......
......@@ -153,7 +153,7 @@ __SYSCALL(__NR_semget, sys_semget)
#define __NR_semop 65
__SYSCALL(__NR_semop, sys_semop)
#define __NR_semctl 66
__SYSCALL(__NR_semctl, wrap_sys_semctl)
__SYSCALL(__NR_semctl, sys_semctl)
#define __NR_shmdt 67
__SYSCALL(__NR_shmdt, sys_shmdt)
#define __NR_msgget 68
......
......@@ -1681,7 +1681,7 @@ int reiserfs_allocate_list_bitmaps(struct super_block *s, struct reiserfs_list_b
/* why is this kerplunked right here? */
static inline int reiserfs_buffer_prepared(const struct buffer_head *bh) {
if (bh && test_bit(BH_JPrepared, ( struct buffer_head * ) &bh->b_state))
if (bh && test_bit(BH_JPrepared, &bh->b_state))
return 1 ;
else
return 0 ;
......@@ -1690,7 +1690,7 @@ static inline int reiserfs_buffer_prepared(const struct buffer_head *bh) {
/* buffer was journaled, waiting to get to disk */
static inline int buffer_journal_dirty(const struct buffer_head *bh) {
if (bh)
return test_bit(BH_JDirty_wait, ( struct buffer_head * ) &bh->b_state) ;
return test_bit(BH_JDirty_wait, &bh->b_state) ;
else
return 0 ;
}
......
......@@ -45,6 +45,7 @@ struct exec_domain;
#define CLONE_PARENT 0x00008000 /* set if we want to have the same parent as the cloner */
#define CLONE_THREAD 0x00010000 /* Same thread group? */
#define CLONE_NEWNS 0x00020000 /* New namespace group? */
#define CLONE_SYSVSEM 0x00040000 /* share system V SEM_UNDO semantics */
#define CLONE_SIGNAL (CLONE_SIGHAND | CLONE_THREAD)
......@@ -315,8 +316,7 @@ struct task_struct {
struct tty_struct *tty; /* NULL if no tty */
unsigned int locks; /* How many file locks are being held */
/* ipc stuff */
struct sem_undo *semundo;
struct sem_queue *semsleeping;
struct sysv_sem sysvsem;
/* CPU-specific state of this task */
struct thread_struct thread;
/* filesystem information */
......
......@@ -121,6 +121,21 @@ struct sem_undo {
short * semadj; /* array of adjustments, one per semaphore */
};
/* sem_undo_list controls shared access to the list of sem_undo structures
* that may be shared among all the tasks in a CLONE_SYSVSEM task group.
*/
struct sem_undo_list {
atomic_t refcnt;
spinlock_t lock;
volatile unsigned long add_count;
struct sem_undo *proc_list;
};
struct sysv_sem {
struct sem_undo_list *undo_list;
struct sem_queue *sleep_list;
};
asmlinkage long sys_semget (key_t key, int nsems, int semflg);
asmlinkage long sys_semop (int semid, struct sembuf *sops, unsigned nsops);
asmlinkage long sys_semctl (int semid, int semnum, int cmd, union semun arg);
......
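A hedged userspace sketch of what the new CLONE_SYSVSEM flag enables (illustrative only: the flag value matches the sched.h hunk above, the glibc clone() wrapper is assumed, and the worker body is left empty):

#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdlib.h>

#ifndef CLONE_SYSVSEM
#define CLONE_SYSVSEM	0x00040000	/* value added by this patch */
#endif

static int worker(void *arg)
{
	/* semop() calls made here with SEM_UNDO adjust the same undo list
	 * as the parent, because the list is shared via CLONE_SYSVSEM. */
	return 0;
}

static int spawn_sem_sharing_task(void)
{
	const size_t stack_size = 64 * 1024;
	char *stack = malloc(stack_size);

	if (!stack)
		return -1;
	/* The stack grows down on x86, so pass the top of the allocation. */
	return clone(worker, stack + stack_size,
		     CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SYSVSEM | SIGCHLD,
		     NULL);
}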
......@@ -67,7 +67,7 @@ struct _snd_mpu401 {
int irq; /* IRQ number of MPU-401 chip (-1 = poll) */
int irq_flags;
unsigned int mode; /* MPU401_MODE_XXXX */
unsigned long mode; /* MPU401_MODE_XXXX */
int timer_invoked;
int (*open_input) (mpu401_t * mpu);
......
......@@ -789,12 +789,75 @@ asmlinkage long sys_semctl (int semid, int semnum, int cmd, union semun arg)
}
}
static struct sem_undo* freeundos(struct sem_array *sma, struct sem_undo* un)
static inline void lock_semundo(void)
{
struct sem_undo_list *undo_list;
undo_list = current->sysvsem.undo_list;
if ((undo_list != NULL) && (atomic_read(&undo_list->refcnt) != 1))
spin_lock(&undo_list->lock);
}
/* This code has an interaction with copy_semundo().
* Consider: two tasks are sharing the undo_list. task1
* acquires the undo_list lock in lock_semundo(). If task2 now
* exits before task1 releases the lock (by calling
* unlock_semundo()), then task1 will never call spin_unlock().
* This leaves the sem_undo_list in a locked state. If task1 now creates task3
* and once again shares the sem_undo_list, the sem_undo_list will still be
* locked, and future SEM_UNDO operations will deadlock. This case is
* dealt with in copy_semundo() by having it reinitialize the spin lock when
* the refcnt goes from 1 to 2.
*/
static inline void unlock_semundo(void)
{
struct sem_undo_list *undo_list;
undo_list = current->sysvsem.undo_list;
if ((undo_list != NULL) && (atomic_read(&undo_list->refcnt) != 1))
spin_unlock(&undo_list->lock);
}
/* If the task doesn't already have an undo_list, then allocate one
* here. We guarantee there is only one thread using this undo list,
* and current is THE ONE
*
* If this allocation and assignment succeeds, but later
* portions of this code fail, there is no need to free the sem_undo_list.
* Just let it stay associated with the task, and it'll be freed later
* at exit time.
*
* This can block, so callers must hold no locks.
*/
static inline int get_undo_list(struct sem_undo_list **undo_listp)
{
struct sem_undo_list *undo_list;
int size;
undo_list = current->sysvsem.undo_list;
if (!undo_list) {
size = sizeof(struct sem_undo_list);
undo_list = (struct sem_undo_list *) kmalloc(size, GFP_KERNEL);
if (undo_list == NULL)
return -ENOMEM;
memset(undo_list, 0, size);
/* don't initialize undo_list->lock here. It's done
* in copy_semundo() instead.
*/
atomic_set(&undo_list->refcnt, 1);
current->sysvsem.undo_list = undo_list;
}
*undo_listp = undo_list;
return 0;
}
static struct sem_undo* freeundos(struct sem_undo* un)
{
struct sem_undo* u;
struct sem_undo** up;
for(up = &current->semundo;(u=*up);up=&u->proc_next) {
for(up = &current->sysvsem.undo_list->proc_list;(u=*up);up=&u->proc_next) {
if(un==u) {
un=u->proc_next;
*up=un;
......@@ -806,33 +869,87 @@ static struct sem_undo* freeundos(struct sem_array *sma, struct sem_undo* un)
return un->proc_next;
}
/* returns without sem_lock on error! */
static inline struct sem_undo *find_undo(int semid)
{
struct sem_undo *un;
un = NULL;
if (current->sysvsem.undo_list != NULL) {
un = current->sysvsem.undo_list->proc_list;
}
while(un != NULL) {
if(un->semid==semid)
break;
if(un->semid==-1)
un=freeundos(un);
else
un=un->proc_next;
}
return un;
}
/* returns without sem_lock and semundo list locks on error! */
static int alloc_undo(struct sem_array *sma, struct sem_undo** unp, int semid, int alter)
{
int size, nsems, error;
struct sem_undo *un;
struct sem_undo *un, *new_un;
struct sem_undo_list *undo_list;
unsigned long saved_add_count;
nsems = sma->sem_nsems;
size = sizeof(struct sem_undo) + sizeof(short)*nsems;
saved_add_count = 0;
if (current->sysvsem.undo_list != NULL)
saved_add_count = current->sysvsem.undo_list->add_count;
sem_unlock(semid);
unlock_semundo();
error = get_undo_list(&undo_list);
if (error)
return error;
size = sizeof(struct sem_undo) + sizeof(short)*nsems;
un = (struct sem_undo *) kmalloc(size, GFP_KERNEL);
if (!un)
return -ENOMEM;
memset(un, 0, size);
lock_semundo();
error = sem_revalidate(semid, sma, nsems, alter ? S_IWUGO : S_IRUGO);
if(error) {
unlock_semundo();
kfree(un);
return error;
}
/* alloc_undo has just
* released all locks and reacquired them.
* But, another thread may have
* added the semundo we were looking for
* during that time.
* So, we check for it again.
* only initialize and add the new one
* if we don't discover one.
*/
new_un = NULL;
if (current->sysvsem.undo_list->add_count != saved_add_count)
new_un = find_undo(semid);
if (new_un != NULL) {
if (sma->undo != new_un)
BUG();
kfree(un);
un = new_un;
} else {
current->sysvsem.undo_list->add_count++;
un->semadj = (short *) &un[1];
un->semid = semid;
un->proc_next = current->semundo;
current->semundo = un;
un->proc_next = undo_list->proc_list;
undo_list->proc_list = un;
un->id_next = sma->undo;
sma->undo = un;
}
*unp = un;
return 0;
}
......@@ -847,6 +964,7 @@ asmlinkage long sys_semop (int semid, struct sembuf *tsops, unsigned nsops)
int undos = 0, decrease = 0, alter = 0;
struct sem_queue queue;
if (nsops < 1 || semid < 0)
return -EINVAL;
if (nsops > sc_semopm)
......@@ -860,17 +978,18 @@ asmlinkage long sys_semop (int semid, struct sembuf *tsops, unsigned nsops)
error=-EFAULT;
goto out_free;
}
lock_semundo();
sma = sem_lock(semid);
error=-EINVAL;
if(sma==NULL)
goto out_free;
goto out_semundo_free;
error = -EIDRM;
if (sem_checkid(sma,semid))
goto out_unlock_free;
goto out_unlock_semundo_free;
error = -EFBIG;
for (sop = sops; sop < sops + nsops; sop++) {
if (sop->sem_num >= sma->sem_nsems)
goto out_unlock_free;
goto out_unlock_semundo_free;
if (sop->sem_flg & SEM_UNDO)
undos++;
if (sop->sem_op < 0)
......@@ -882,24 +1001,18 @@ asmlinkage long sys_semop (int semid, struct sembuf *tsops, unsigned nsops)
error = -EACCES;
if (ipcperms(&sma->sem_perm, alter ? S_IWUGO : S_IRUGO))
goto out_unlock_free;
goto out_unlock_semundo_free;
if (undos) {
/* Make sure we have an undo structure
* for this process and this semaphore set.
*/
un=current->semundo;
while(un != NULL) {
if(un->semid==semid)
break;
if(un->semid==-1)
un=freeundos(sma,un);
else
un=un->proc_next;
}
un = find_undo(semid);
if (!un) {
error = alloc_undo(sma,&un,semid,alter);
if(error)
if (error)
goto out_free;
}
} else
un = NULL;
......@@ -923,7 +1036,7 @@ asmlinkage long sys_semop (int semid, struct sembuf *tsops, unsigned nsops)
append_to_queue(sma ,&queue);
else
prepend_to_queue(sma ,&queue);
current->semsleeping = &queue;
current->sysvsem.sleep_list = &queue;
for (;;) {
struct sem_array* tmp;
......@@ -931,16 +1044,18 @@ asmlinkage long sys_semop (int semid, struct sembuf *tsops, unsigned nsops)
queue.sleeper = current;
current->state = TASK_INTERRUPTIBLE;
sem_unlock(semid);
unlock_semundo();
schedule();
lock_semundo();
tmp = sem_lock(semid);
if(tmp==NULL) {
if(queue.prev != NULL)
BUG();
current->semsleeping = NULL;
current->sysvsem.sleep_list = NULL;
error = -EIDRM;
goto out_free;
goto out_semundo_free;
}
/*
* If queue.status == 1 we were woken up and
......@@ -960,23 +1075,67 @@ asmlinkage long sys_semop (int semid, struct sembuf *tsops, unsigned nsops)
if (queue.prev) /* got Interrupt */
break;
/* Everything done by update_queue */
current->semsleeping = NULL;
goto out_unlock_free;
current->sysvsem.sleep_list = NULL;
goto out_unlock_semundo_free;
}
}
current->semsleeping = NULL;
current->sysvsem.sleep_list = NULL;
remove_from_queue(sma,&queue);
update:
if (alter)
update_queue (sma);
out_unlock_free:
out_unlock_semundo_free:
sem_unlock(semid);
out_semundo_free:
unlock_semundo();
out_free:
if(sops != fast_sops)
kfree(sops);
return error;
}
/* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
* parent and child tasks.
*
* See the notes above unlock_semundo() regarding the spin_lock_init()
* in this code. Initialize the undo_list->lock here instead of get_undo_list()
* because of the reasoning in the comment above unlock_semundo.
*/
int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
{
struct sem_undo_list *undo_list;
int error;
if (clone_flags & CLONE_SYSVSEM) {
error = get_undo_list(&undo_list);
if (error)
return error;
if (atomic_read(&undo_list->refcnt) == 1)
spin_lock_init(&undo_list->lock);
atomic_inc(&undo_list->refcnt);
tsk->sysvsem.undo_list = undo_list;
} else
tsk->sysvsem.undo_list = NULL;
return 0;
}
static inline void __exit_semundo(struct task_struct *tsk)
{
struct sem_undo_list *undo_list;
undo_list = tsk->sysvsem.undo_list;
if (!atomic_dec_and_test(&undo_list->refcnt))
kfree(undo_list);
}
void exit_semundo(struct task_struct *tsk)
{
if (tsk->sysvsem.undo_list != NULL)
__exit_semundo(tsk);
}
/*
* add semadj values to semaphores, free undo structures.
* undo structures are not freed when semaphore arrays are destroyed
......@@ -994,6 +1153,7 @@ void sem_exit (void)
struct sem_queue *q;
struct sem_undo *u, *un = NULL, **up, **unp;
struct sem_array *sma;
struct sem_undo_list *undo_list;
int nsems, i;
lock_kernel();
......@@ -1001,10 +1161,10 @@ void sem_exit (void)
/* If the current process was sleeping for a semaphore,
* remove it from the queue.
*/
if ((q = current->semsleeping)) {
if ((q = current->sysvsem.sleep_list)) {
int semid = q->id;
sma = sem_lock(semid);
current->semsleeping = NULL;
current->sysvsem.sleep_list = NULL;
if (q->prev) {
if(sma==NULL)
......@@ -1015,7 +1175,14 @@ void sem_exit (void)
sem_unlock(semid);
}
for (up = &current->semundo; (u = *up); *up = u->proc_next, kfree(u)) {
undo_list = current->sysvsem.undo_list;
if ((undo_list == NULL) || (atomic_read(&undo_list->refcnt) != 1))
return;
/* There's no need to hold the semundo list lock, as current
* is the last task exiting for this undo list.
*/
for (up = &undo_list->proc_list; (u = *up); *up = u->proc_next, kfree(u)) {
int semid = u->semid;
if(semid == -1)
continue;
......@@ -1053,7 +1220,7 @@ void sem_exit (void)
next_entry:
sem_unlock(semid);
}
current->semundo = NULL;
__exit_semundo(current);
unlock_kernel();
}
......
......@@ -340,6 +340,17 @@ int ipc_parse_version (int *cmd)
* Dummy functions when SYSV IPC isn't configured
*/
int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
{
return 0;
}
void exit_semundo(struct task_struct *tsk)
{
return;
}
void sem_exit (void)
{
return;
......
......@@ -34,6 +34,9 @@
static kmem_cache_t *task_struct_cachep;
extern int copy_semundo(unsigned long clone_flags, struct task_struct *tsk);
extern void exit_semundo(struct task_struct *tsk);
/* The idle threads do not count.. */
int nr_threads;
......@@ -710,8 +713,10 @@ int do_fork(unsigned long clone_flags, unsigned long stack_start,
retval = -ENOMEM;
/* copy all the process information */
if (copy_files(clone_flags, p))
if (copy_semundo(clone_flags, p))
goto bad_fork_cleanup;
if (copy_files(clone_flags, p))
goto bad_fork_cleanup_semundo;
if (copy_fs(clone_flags, p))
goto bad_fork_cleanup_files;
if (copy_sighand(clone_flags, p))
......@@ -723,7 +728,6 @@ int do_fork(unsigned long clone_flags, unsigned long stack_start,
retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
if (retval)
goto bad_fork_cleanup_namespace;
p->semundo = NULL;
/* Our parent execution domain becomes current domain
These must match for thread signalling to apply */
......@@ -815,6 +819,8 @@ int do_fork(unsigned long clone_flags, unsigned long stack_start,
exit_fs(p); /* blocking */
bad_fork_cleanup_files:
exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
exit_semundo(p);
bad_fork_cleanup:
put_exec_domain(p->thread_info->exec_domain);
if (p->binfmt && p->binfmt->module)
......