Commit 6490b110 authored by Linus Torvalds

Merge bk://gkernel.bkbits.net/libata-2.5

into home.osdl.org:/home/torvalds/v2.5/linux
parents ef38b911 9ef63c5e
@@ -497,18 +497,21 @@ handle_signal(unsigned long sig, siginfo_t *info, sigset_t *oldset,
 	 */
 	ret |= !valid_user_regs(regs);
 
+	/*
+	 * Block the signal if we were unsuccessful.
+	 */
+	if (ret != 0 || !(ka->sa.sa_flags & SA_NODEFER)) {
+		spin_lock_irq(&tsk->sighand->siglock);
+		sigorsets(&tsk->blocked, &tsk->blocked,
+			  &ka->sa.sa_mask);
+		sigaddset(&tsk->blocked, sig);
+		recalc_sigpending();
+		spin_unlock_irq(&tsk->sighand->siglock);
+	}
+
 	if (ret == 0) {
 		if (ka->sa.sa_flags & SA_ONESHOT)
 			ka->sa.sa_handler = SIG_DFL;
 
-		if (!(ka->sa.sa_flags & SA_NODEFER)) {
-			spin_lock_irq(&tsk->sighand->siglock);
-			sigorsets(&tsk->blocked, &tsk->blocked,
-				  &ka->sa.sa_mask);
-			sigaddset(&tsk->blocked, sig);
-			recalc_sigpending();
-			spin_unlock_irq(&tsk->sighand->siglock);
-		}
 		return;
 	}
...
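
Note on the hunk above: it changes when the handled signal is added to the blocked set. Blocking now also happens when handler setup failed (ret != 0), instead of only on the success path. A standalone sketch of the new predicate (illustrative only, not the kernel code itself):

#include <stdio.h>

static int should_block(int ret, int sa_nodefer)
{
	/* block on setup failure, or whenever SA_NODEFER is not set */
	return ret != 0 || !sa_nodefer;
}

int main(void)
{
	printf("%d\n", should_block(1, 1));	/* 1: failed setup now blocks */
	printf("%d\n", should_block(0, 1));	/* 0: success + SA_NODEFER */
	return 0;
}
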
@@ -245,8 +245,8 @@ static __u32 cpu_booted_map;
 static cpumask_t smp_commenced_mask = CPU_MASK_NONE;
 
 /* This is for the new dynamic CPU boot code */
-volatile cpumask_t cpu_callin_map = CPU_MASK_NONE;
-volatile cpumask_t cpu_callout_map = CPU_MASK_NONE;
+cpumask_t cpu_callin_map = CPU_MASK_NONE;
+cpumask_t cpu_callout_map = CPU_MASK_NONE;
 
 /* The per processor IRQ masks (these are usually kept in sync) */
 static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned;
...
@@ -47,13 +47,17 @@ SECTIONS
   .fixup : { *(.fixup) }
 
-  __start___ex_table = .;
-  __ex_table : { *(__ex_table) }
-  __stop___ex_table = .;
-
-  __start___bug_table = .;
-  __bug_table : { *(__bug_table) }
-  __stop___bug_table = .;
+  __ex_table : {
+	__start___ex_table = .;
+	*(__ex_table)
+	__stop___ex_table = .;
+  }
+
+  __bug_table : {
+	__start___bug_table = .;
+	*(__bug_table)
+	__stop___bug_table = .;
+  }
 
   /* Read-write section, merged into data segment: */
   . = ALIGN(4096);
...
@@ -219,8 +219,8 @@ tracesys:
 	movq %r10,%rcx	/* fixup for C */
 	call *sys_call_table(,%rax,8)
 	movq %rax,RAX-ARGOFFSET(%rsp)
-	SAVE_REST
-1:	movq %rsp,%rdi
+1:	SAVE_REST
+	movq %rsp,%rdi
 	call syscall_trace
 	RESTORE_TOP_OF_STACK %rbx
 	RESTORE_REST
...
@@ -395,7 +395,7 @@ static int __pci_map_cont(struct scatterlist *sg, int start, int stopat,
 	for (i = start; i < stopat; i++) {
 		struct scatterlist *s = &sg[i];
 		unsigned long start_addr = s->dma_address;
-		BUG_ON(i > 0 && s->offset);
+		BUG_ON(i > start && s->offset);
 		if (i == start) {
 			*sout = *s;
 			sout->dma_address = iommu_bus_base;
@@ -410,7 +410,6 @@ static int __pci_map_cont(struct scatterlist *sg, int start, int stopat,
 			addr += PAGE_SIZE;
 			iommu_page++;
 		}
-		BUG_ON(i > 0 && addr % PAGE_SIZE);
 	}
 	BUG_ON(iommu_page - iommu_start != pages);
 	return 0;
...
@@ -126,12 +126,6 @@ static int is_prefetch(struct pt_regs *regs, unsigned long addr)
 			break;
 		}
 	}
-
-#if 1
-	if (prefetch)
-		printk("%s: prefetch caused page fault at %lx/%lx\n", current->comm,
-		       regs->rip, addr);
-#endif
 	return prefetch;
 }
@@ -241,6 +235,15 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	if (unlikely(in_atomic() || !mm))
 		goto bad_area_nosemaphore;
 
+	/* Work around K8 erratum #100
+	   K8 in compat mode occasionally jumps to illegal addresses >4GB.
+	   We catch this here in the page fault handler because these
+	   addresses are not reachable. Just detect this case and return.
+	   Any code segment in LDT is compatibility mode. */
+	if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) &&
+	    (address >> 32))
+		return;
+
 again:
 	down_read(&mm->mmap_sem);
...
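
Note on the K8 erratum #100 hunk above: bit 2 of an x86 segment selector is the Table Indicator (1 = the segment comes from the LDT), and any LDT code segment is compat mode here, so the test catches both the fixed 32-bit user code segment and LDT segments. A standalone sketch of the predicate; the selector value used for the 32-bit user %cs is an assumption for the demo:

#include <stdio.h>

#define DEMO_USER32_CS	0x23	/* hypothetical 32-bit user %cs selector */

static int k8_erratum100_fault(unsigned int cs, unsigned long long address)
{
	/* selector bit 2 is the Table Indicator: 1 = LDT entry */
	int compat = (cs == DEMO_USER32_CS) || (cs & (1 << 2));
	return compat && (address >> 32) != 0;
}

int main(void)
{
	printf("%d\n", k8_erratum100_fault(0x23, 0x100000000ULL));	/* 1 */
	printf("%d\n", k8_erratum100_fault(0x33, 0x100000000ULL));	/* 0: 64-bit CS */
	return 0;
}
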
@@ -255,6 +255,7 @@ static decl_subsys(class_obj, &ktype_class_device, &class_hotplug_ops);
 
 void class_device_initialize(struct class_device *class_dev)
 {
+	kobj_set_kset_s(class_dev, class_obj_subsys);
 	kobject_init(&class_dev->kobj);
 	INIT_LIST_HEAD(&class_dev->node);
 }
@@ -277,7 +278,6 @@ int class_device_add(struct class_device *class_dev)
 
 	/* first, register with generic layer. */
 	kobject_set_name(&class_dev->kobj, class_dev->class_id);
-	kobj_set_kset_s(class_dev, class_obj_subsys);
 	if (parent)
 		class_dev->kobj.parent = &parent->subsys.kset.kobj;
...
@@ -915,9 +915,10 @@ static void as_completed_request(request_queue_t *q, struct request *rq)
 	if (unlikely(arq->state != AS_RQ_DISPATCHED))
 		return;
 
+	if (!blk_fs_request(rq))
+		return;
+
 	if (ad->changed_batch && ad->nr_dispatched == 1) {
-		WARN_ON(ad->batch_data_dir == arq->is_sync);
-
 		kblockd_schedule_work(&ad->antic_work);
 		ad->changed_batch = 0;
@@ -933,7 +934,6 @@ static void as_completed_request(request_queue_t *q, struct request *rq)
 	 * and writeback caches
 	 */
 	if (ad->new_batch && ad->batch_data_dir == arq->is_sync) {
-		WARN_ON(ad->batch_data_dir != REQ_SYNC);
 		update_write_batch(ad);
 		ad->current_batch_expires = jiffies +
 			ad->batch_expire[REQ_SYNC];
...
@@ -428,9 +428,10 @@ config BLK_DEV_IDEDMA_PCI
 
 if BLK_DEV_IDEDMA_PCI
 
+# TCQ is disabled for now
 config BLK_DEV_IDE_TCQ
 	bool "ATA tagged command queueing (EXPERIMENTAL)"
-	depends on EXPERIMENTAL
+	depends on EXPERIMENTAL && n
 	help
 	  Support for tagged command queueing on ATA disk drives. This enables
 	  the IDE layer to have multiple in-flight requests on hardware that
...
@@ -36,12 +36,10 @@ MODULE_PARM(psmouse_resetafter, "i");
 MODULE_PARM_DESC(psmouse_resetafter, "Reset Synaptics Touchpad after so many bad packets (0 = never).");
 MODULE_LICENSE("GPL");
 
-#define PSMOUSE_LOGITECH_SMARTSCROLL	1
-
 static int psmouse_noext;
-int psmouse_resolution;
-unsigned int psmouse_rate;
-int psmouse_smartscroll = PSMOUSE_LOGITECH_SMARTSCROLL;
+int psmouse_resolution = 200;
+unsigned int psmouse_rate = 100;
+int psmouse_smartscroll = 1;
 unsigned int psmouse_resetafter;
 static char *psmouse_protocols[] = { "None", "PS/2", "PS2++", "PS2T++", "GenPS/2", "ImPS/2", "ImExPS/2", "SynPS/2"};
@@ -466,22 +464,15 @@ static void psmouse_initialize(struct psmouse *psmouse)
 {
 	unsigned char param[2];
 
 	/*
-	 * We set the mouse report rate.
+	 * We set the mouse report rate, resolution and scaling.
 	 */
 
-	if (psmouse_rate)
-		psmouse_set_rate(psmouse);
-
-	/*
-	 * We also set the resolution and scaling.
-	 */
-
-	if (psmouse_resolution)
-		psmouse_set_resolution(psmouse);
-
-	psmouse_command(psmouse, NULL, PSMOUSE_CMD_SETSCALE11);
+	if (!psmouse_noext) {
+		psmouse_set_rate(psmouse);
+		psmouse_set_resolution(psmouse);
+		psmouse_command(psmouse, NULL, PSMOUSE_CMD_SETSCALE11);
+	}
 
 	/*
 	 * We set the mouse into streaming mode.
...
@@ -841,7 +841,7 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
 	}
 
 	if (atomic_dec_and_test(&r1_bio->remaining)) {
-		md_done_sync(mddev, r1_bio->master_bio->bi_size >> 9, 0);
+		md_done_sync(mddev, r1_bio->master_bio->bi_size >> 9, 1);
 		put_buf(r1_bio);
 	}
 }
...
@@ -74,7 +74,9 @@ static int max_interrupt_work = 10;
 #include <linux/config.h>
 #include <linux/module.h>
 
+#ifdef CONFIG_MCA
 #include <linux/mca.h>
+#endif
 #include <linux/isapnp.h>
 #include <linux/string.h>
 #include <linux/interrupt.h>
...
@@ -646,7 +646,7 @@ static void __init quirk_disable_pxb(struct pci_dev *pdev)
 
 int interrupt_line_quirk;
 
-static void __init quirk_via_bridge(struct pci_dev *pdev)
+static void __devinit quirk_via_bridge(struct pci_dev *pdev)
 {
 	if(pdev->devfn == 0)
 		interrupt_line_quirk = 1;
...
@@ -426,8 +426,11 @@ static int ehci_start (struct usb_hcd *hcd)
 	 */
 	if (HCC_64BIT_ADDR (hcc_params)) {
 		writel (0, &ehci->regs->segment);
+#if 0
+// this is deeply broken on almost all architectures
 		if (!pci_set_dma_mask (ehci->hcd.pdev, 0xffffffffffffffffULL))
 			ehci_info (ehci, "enabled 64bit PCI DMA\n");
+#endif
 	}
 
 	/* help hc dma work well with cachelines */
...
@@ -1120,8 +1120,11 @@ static int kaweth_probe(
 
 	usb_set_intfdata(intf, kaweth);
 
+#if 0
+// dma_supported() is deeply broken on almost all architectures
 	if (dma_supported (&intf->dev, 0xffffffffffffffffULL))
 		kaweth->net->features |= NETIF_F_HIGHDMA;
+#endif
 
 	SET_NETDEV_DEV(netdev, &intf->dev);
 	if (register_netdev(netdev) != 0) {
...
@@ -2972,9 +2972,12 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
 	strcpy (net->name, "usb%d");
 	memcpy (net->dev_addr, node_id, sizeof node_id);
 
+#if 0
+// dma_supported() is deeply broken on almost all architectures
 	// possible with some EHCI controllers
 	if (dma_supported (&udev->dev, 0xffffffffffffffffULL))
 		net->features |= NETIF_F_HIGHDMA;
+#endif
 
 	net->change_mtu = usbnet_change_mtu;
 	net->get_stats = usbnet_get_stats;
...
@@ -417,10 +417,21 @@ static int usb_stor_control_thread(void * __us)
 		scsi_unlock(host);
 	} /* for (;;) */
 
-	/* notify the exit routine that we're actually exiting now */
-	complete(&(us->notify));
-
-	return 0;
+	/* notify the exit routine that we're actually exiting now
+	 *
+	 * complete()/wait_for_completion() is similar to up()/down(),
+	 * except that complete() is safe in the case where the structure
+	 * is getting deleted in a parallel mode of execution (i.e. just
+	 * after the down() -- that's necessary for the thread-shutdown
+	 * case.
+	 *
+	 * complete_and_exit() goes even further than this -- it is safe in
+	 * the case that the thread of the caller is going away (not just
+	 * the structure) -- this is necessary for the module-remove case.
+	 * This is important in preemption kernels, which transfer the flow
+	 * of execution immediately upon a complete().
+	 */
+	complete_and_exit(&(us->notify), 0);
 }
 
/***********************************************************************
...
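
Note on the hunk above: a minimal kernel-style sketch of the shutdown pattern the new comment describes. The struct and names here are hypothetical, only the complete_and_exit() call mirrors the change:

#include <linux/completion.h>
#include <linux/kernel.h>

struct demo_state {			/* hypothetical driver state */
	struct completion notify;
};

static int demo_thread(void *data)
{
	struct demo_state *st = data;

	/* ... main service loop ... */

	/*
	 * complete() followed by return would let the waiter unload the
	 * module while this thread still runs its last instructions;
	 * complete_and_exit() signals the waiter and terminates the
	 * thread in one step, never returning into module text.
	 */
	complete_and_exit(&st->notify, 0);
}

The unload path can then wait_for_completion(&st->notify) and free the module safely.
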
@@ -434,7 +434,7 @@ void cdev_init(struct cdev *cdev, struct file_operations *fops)
 
 static struct kobject *base_probe(dev_t dev, int *part, void *data)
 {
-	request_module("char-major-%d", MAJOR(dev));
+	request_module("char-major-%d-%d", MAJOR(dev), MINOR(dev));
 	return NULL;
 }
...
@@ -18,7 +18,6 @@
 #include <linux/string.h>
 #include <linux/blkdev.h>
 #include <linux/cramfs_fs.h>
-#include <linux/smp_lock.h>
 #include <linux/slab.h>
 #include <linux/cramfs_fs_sb.h>
 #include <linux/buffer_head.h>
@@ -206,10 +205,10 @@ static int cramfs_fill_super(struct super_block *sb, void *data, int silent)
 	sb_set_blocksize(sb, PAGE_CACHE_SIZE);
 
 	/* Invalidate the read buffers on mount: think disk change.. */
+	down(&read_mutex);
 	for (i = 0; i < READ_BUFFERS; i++)
 		buffer_blocknr[i] = -1;
-	down(&read_mutex);
 
 	/* Read the first block and get the superblock from it */
 	memcpy(&super, cramfs_read(sb, 0, sizeof(super)), sizeof(super));
 	up(&read_mutex);
@@ -217,7 +216,9 @@ static int cramfs_fill_super(struct super_block *sb, void *data, int silent)
 	/* Do sanity checks on the superblock */
 	if (super.magic != CRAMFS_MAGIC) {
 		/* check at 512 byte offset */
+		down(&read_mutex);
 		memcpy(&super, cramfs_read(sb, 512, sizeof(super)), sizeof(super));
+		up(&read_mutex);
 		if (super.magic != CRAMFS_MAGIC) {
 			if (!silent)
 				printk(KERN_ERR "cramfs: wrong magic\n");
@@ -288,6 +289,7 @@ static int cramfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
 {
 	struct inode *inode = filp->f_dentry->d_inode;
 	struct super_block *sb = inode->i_sb;
+	char *buf;
 	unsigned int offset;
 	int copied;
@@ -299,18 +301,21 @@ static int cramfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
 	if (offset & 3)
 		return -EINVAL;
 
-	lock_kernel();
+	buf = kmalloc(256, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
 	copied = 0;
 	while (offset < inode->i_size) {
 		struct cramfs_inode *de;
 		unsigned long nextoffset;
 		char *name;
+		ino_t ino;
+		mode_t mode;
 		int namelen, error;
 
 		down(&read_mutex);
 		de = cramfs_read(sb, OFFSET(inode) + offset, sizeof(*de)+256);
-		up(&read_mutex);
 		name = (char *)(de+1);
 
 	/*
@@ -319,17 +324,21 @@ static int cramfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
 	 * with zeroes.
 	 */
 		namelen = de->namelen << 2;
+		memcpy(buf, name, namelen);
+		ino = CRAMINO(de);
+		mode = de->mode;
+		up(&read_mutex);
 		nextoffset = offset + sizeof(*de) + namelen;
 		for (;;) {
 			if (!namelen) {
-				unlock_kernel();
+				kfree(buf);
 				return -EIO;
 			}
-			if (name[namelen-1])
+			if (buf[namelen-1])
 				break;
 			namelen--;
 		}
-		error = filldir(dirent, name, namelen, offset, CRAMINO(de), de->mode >> 12);
+		error = filldir(dirent, buf, namelen, offset, ino, mode >> 12);
 		if (error)
 			break;
@@ -337,7 +346,7 @@ static int cramfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
 		filp->f_pos = offset;
 		copied++;
 	}
-	unlock_kernel();
+	kfree(buf);
 	return 0;
 }
@@ -349,16 +358,14 @@ static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry, s
 	unsigned int offset = 0;
 	int sorted;
 
-	lock_kernel();
+	down(&read_mutex);
 	sorted = CRAMFS_SB(dir->i_sb)->flags & CRAMFS_FLAG_SORTED_DIRS;
 	while (offset < dir->i_size) {
 		struct cramfs_inode *de;
 		char *name;
 		int namelen, retval;
 
-		down(&read_mutex);
 		de = cramfs_read(dir->i_sb, OFFSET(dir) + offset, sizeof(*de)+256);
-		up(&read_mutex);
 		name = (char *)(de+1);
 
 		/* Try to take advantage of sorted directories */
@@ -374,7 +381,7 @@ static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry, s
 		for (;;) {
 			if (!namelen) {
-				unlock_kernel();
+				up(&read_mutex);
 				return ERR_PTR(-EIO);
 			}
 			if (name[namelen-1])
@@ -387,15 +394,16 @@ static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry, s
 			if (retval > 0)
 				continue;
 			if (!retval) {
-				d_add(dentry, get_cramfs_inode(dir->i_sb, de));
-				unlock_kernel();
+				struct cramfs_inode entry = *de;
+				up(&read_mutex);
+				d_add(dentry, get_cramfs_inode(dir->i_sb, &entry));
 				return NULL;
 			}
 			/* else (retval < 0) */
 			if (sorted)
 				break;
 		}
-	unlock_kernel();
+	up(&read_mutex);
 	d_add(dentry, NULL);
 	return NULL;
 }
@@ -452,6 +460,7 @@ static struct address_space_operations cramfs_aops = {
 * A directory can only readdir
 */
 static struct file_operations cramfs_directory_operations = {
+	.llseek		= generic_file_llseek,
 	.read		= generic_read_dir,
 	.readdir	= cramfs_readdir,
 };
...
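
Note on the cramfs hunks above: cramfs_read() returns a pointer into a shared buffer cache that is only stable while read_mutex is held, so readdir now copies the name, inode number and mode into a private buffer before dropping the mutex (and lookup copies the directory entry by value). A standalone sketch of that copy-out-under-lock pattern, using pthreads and hypothetical names:

#include <pthread.h>
#include <string.h>

static pthread_mutex_t cache_mutex = PTHREAD_MUTEX_INITIALIZER;
static char shared_buf[256];	/* stands in for the cramfs read buffers */

void read_entry(char *out, size_t outlen)
{
	pthread_mutex_lock(&cache_mutex);
	/* the shared buffer is only stable while the mutex is held,
	 * so copy everything we need before dropping it */
	strncpy(out, shared_buf, outlen - 1);
	out[outlen - 1] = '\0';
	pthread_mutex_unlock(&cache_mutex);
	/* 'out' stays valid even if shared_buf is recycled now */
}
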
@@ -128,16 +128,17 @@ static struct quota_format_type *find_quota_format(int id)
 	if (!actqf || !try_module_get(actqf->qf_owner)) {
 		int qm;
 
+		spin_unlock(&dq_list_lock);
+
 		for (qm = 0; module_names[qm].qm_fmt_id && module_names[qm].qm_fmt_id != id; qm++);
-		if (!module_names[qm].qm_fmt_id || request_module(module_names[qm].qm_mod_name)) {
-			actqf = NULL;
-			goto out;
-		}
+		if (!module_names[qm].qm_fmt_id || request_module(module_names[qm].qm_mod_name))
+			return NULL;
+
+		spin_lock(&dq_list_lock);
 		for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id; actqf = actqf->qf_next);
 		if (actqf && !try_module_get(actqf->qf_owner))
 			actqf = NULL;
 	}
-out:
 	spin_unlock(&dq_list_lock);
 	return actqf;
 }
...
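
Note on the hunk above: request_module() sleeps while modprobe runs, so it must not be called with the dq_list_lock spinlock held; the fix drops the lock, loads the module, then retakes the lock and re-scans the list because it may have changed in between. A kernel-style sketch of the pattern with hypothetical names:

#include <linux/spinlock.h>
#include <linux/kmod.h>

static spinlock_t list_lock = SPIN_LOCK_UNLOCKED;

struct fmt { int id; struct fmt *next; };
static struct fmt *formats;

static struct fmt *find_fmt(int id)
{
	struct fmt *f;

	spin_lock(&list_lock);
	for (f = formats; f && f->id != id; f = f->next)
		;
	if (!f) {
		spin_unlock(&list_lock);	/* never sleep under a spinlock */
		request_module("fmt-%d", id);	/* may sleep */
		spin_lock(&list_lock);
		for (f = formats; f && f->id != id; f = f->next)
			;			/* re-scan: list may have changed */
	}
	spin_unlock(&list_lock);
	return f;
}
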
@@ -402,6 +402,7 @@ int ext2_new_block(struct inode *inode, unsigned long goal,
 	 * Now search the rest of the groups.  We assume that
 	 * i and desc correctly point to the last group visited.
 	 */
+retry:
 	for (bit = 0; !group_alloc &&
 			bit < sbi->s_groups_count; bit++) {
 		group_no++;
@@ -425,11 +426,12 @@ int ext2_new_block(struct inode *inode, unsigned long goal,
 	ret_block = grab_block(sb_bgl_lock(sbi, group_no), bitmap_bh->b_data,
 				group_size, 0);
 	if (ret_block < 0) {
-		ext2_error (sb, "ext2_new_block",
-			"Free blocks count corrupted for block group %d",
-			group_no);
+		/*
+		 * Someone else grabbed the last free block in this blockgroup
+		 * before us.  Retry the scan.
+		 */
 		group_alloc = 0;
-		goto io_error;
+		goto retry;
 	}
 
 got_block:
...
@@ -1729,8 +1729,18 @@ static void __journal_remove_journal_head(struct buffer_head *bh)
 		J_ASSERT_BH(bh, buffer_jbd(bh));
 		J_ASSERT_BH(bh, jh2bh(jh) == bh);
 		BUFFER_TRACE(bh, "remove journal_head");
-		J_ASSERT_BH(bh, !jh->b_frozen_data);
-		J_ASSERT_BH(bh, !jh->b_committed_data);
+		if (jh->b_frozen_data) {
+			printk(KERN_WARNING "%s: freeing "
+					"b_frozen_data\n",
+					__FUNCTION__);
+			kfree(jh->b_frozen_data);
+		}
+		if (jh->b_committed_data) {
+			printk(KERN_WARNING "%s: freeing "
+					"b_committed_data\n",
+					__FUNCTION__);
+			kfree(jh->b_committed_data);
+		}
 		bh->b_private = NULL;
 		jh->b_bh = NULL;	/* debug, really */
 		clear_buffer_jbd(bh);
...
@@ -1368,7 +1368,6 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
 		nfsd_sync_dir(tdentry);
 		nfsd_sync_dir(fdentry);
 	}
-	dput(ndentry);
 
 out_dput_new:
 	dput(ndentry);
...
@@ -83,7 +83,8 @@ static inline unsigned short ip_fast_csum(unsigned char * iph,
 	   are modified, we must also specify them as outputs, or gcc
 	   will assume they contain their original values. */
 	: "=r" (sum), "=r" (iph), "=r" (ihl)
-	: "1" (iph), "2" (ihl));
+	: "1" (iph), "2" (ihl)
+	: "memory");
 
 	return(sum);
 }
...
@@ -68,7 +68,8 @@ static inline unsigned short ip_fast_csum(unsigned char *iph, unsigned int ihl)
 	   are modified, we must also specify them as outputs, or gcc
 	   will assume they contain their original values. */
 	: "=r" (sum), "=r" (iph), "=r" (ihl)
-	: "1" (iph), "2" (ihl)
+	: "1" (iph), "2" (ihl)
+	: "memory");
 
 	return(sum);
 }
...
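
Note on the two ip_fast_csum hunks above: the asm reads the IP header through the iph pointer, but no memory operand tells gcc that, so without a "memory" clobber the compiler may keep header stores in registers or reorder them past the asm. A minimal standalone illustration (hypothetical names; the empty asm with a "memory" clobber is the classic compiler barrier):

static unsigned short hdr[10];

static inline void barrier_demo(void)
{
	hdr[0] = 0x4500;
	/* the "memory" clobber forces the store above to be emitted
	 * before the asm, and any later reads to reload from memory */
	__asm__ __volatile__("" : : : "memory");
	hdr[0] = 0x4501;	/* cannot be merged with the first store */
}
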
@@ -118,7 +118,7 @@ static inline void set_tssldt_descriptor(void *ptr, unsigned long tss, unsigned
 	d.base1 = PTR_MIDDLE(tss) & 0xFF;
 	d.type = type;
 	d.p = 1;
-	d.limit1 = 0xF;
+	d.limit1 = (size >> 16) & 0xF;
 	d.base2 = (PTR_MIDDLE(tss) >> 8) & 0xFF;
 	d.base3 = PTR_HIGH(tss);
 	memcpy(ptr, &d, 16);
...
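
Note on the hunk above: an x86 segment limit is 20 bits, split across two descriptor fields (bits 0-15 and bits 16-19). The old code hardwired the upper nibble to 0xF, inflating every TSS/LDT limit; the fix derives it from the actual size. A standalone sketch of the split, with field names following the struct in the hunk:

#include <stdio.h>

struct limit_fields { unsigned limit0 : 16, limit1 : 4; };

static struct limit_fields split_limit(unsigned long size)
{
	struct limit_fields d;
	d.limit0 = size & 0xFFFF;
	d.limit1 = (size >> 16) & 0xF;	/* was hardwired to 0xF */
	return d;
}

int main(void)
{
	struct limit_fields d = split_limit(0x2067);	/* e.g. a small TSS limit */
	printf("limit0=%#x limit1=%#x\n", d.limit0, d.limit1);	/* 0x2067, 0 */
	return 0;
}
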
@@ -574,7 +574,11 @@ extern void do_timer(struct pt_regs *);
 
 extern int FASTCALL(wake_up_state(struct task_struct * tsk, unsigned int state));
 extern int FASTCALL(wake_up_process(struct task_struct * tsk));
-extern int FASTCALL(wake_up_process_kick(struct task_struct * tsk));
+#ifdef CONFIG_SMP
+extern void FASTCALL(kick_process(struct task_struct * tsk));
+#else
+static inline void kick_process(struct task_struct *tsk) { }
+#endif
 extern void FASTCALL(wake_up_forked_process(struct task_struct * tsk));
 extern void FASTCALL(sched_exit(task_t * p));
...
@@ -214,7 +214,7 @@ extern int sigprocmask(int, sigset_t *, sigset_t *);
 struct pt_regs;
 extern int get_signal_to_deliver(siginfo_t *info, struct pt_regs *regs, void *cookie);
 #endif
 
-#define FOLD_NANO_SLEEP_INTO_CLOCK_NANO_SLEEP
-
 #endif /* __KERNEL__ */
 
 #endif /* _LINUX_SIGNAL_H */
@@ -4,6 +4,7 @@
 #ifdef __KERNEL__
 #include <asm/div64.h>
 #include <asm/types.h>
+#include <asm/param.h>
 
 #if (HZ % USER_HZ)==0
 # define jiffies_to_clock_t(x) ((x) / (HZ / USER_HZ))
...
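
Note: several networking hunks further down convert timer fields exported to userspace (/proc and rtnetlink) from raw jiffies to USER_HZ clock ticks with jiffies_to_clock_t()/clock_t_to_jiffies(). For the common case shown in this header, where HZ divides evenly by USER_HZ, the conversion is a plain division; a standalone sketch assuming HZ=1000 and USER_HZ=100:

#include <stdio.h>

#define HZ	1000
#define USER_HZ	100

#define jiffies_to_clock_t(x)	((x) / (HZ / USER_HZ))
#define clock_t_to_jiffies(x)	((x) * (HZ / USER_HZ))

int main(void)
{
	long ttd = 2500;	/* timer expiry in jiffies: 2.5 s at HZ=1000 */
	printf("%ld clock ticks\n", jiffies_to_clock_t(ttd));	/* 250 */
	return 0;
}
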
@@ -1104,29 +1104,6 @@ long clock_nanosleep_restart(struct restart_block *restart_block);
 extern long do_clock_nanosleep(clockid_t which_clock, int flags,
 			       struct timespec *t);
 
-#ifdef FOLD_NANO_SLEEP_INTO_CLOCK_NANO_SLEEP
-
-asmlinkage long
-sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp)
-{
-	struct timespec t;
-	long ret;
-
-	if (copy_from_user(&t, rqtp, sizeof (t)))
-		return -EFAULT;
-
-	if ((unsigned) t.tv_nsec >= NSEC_PER_SEC || t.tv_sec < 0)
-		return -EINVAL;
-
-	ret = do_clock_nanosleep(CLOCK_REALTIME, 0, &t);
-
-	if (ret == -ERESTART_RESTARTBLOCK && rmtp &&
-	    copy_to_user(rmtp, &t, sizeof (t)))
-		return -EFAULT;
-	return ret;
-}
-
-#endif	// ! FOLD_NANO_SLEEP_INTO_CLOCK_NANO_SLEEP
-
 asmlinkage long
 sys_clock_nanosleep(clockid_t which_clock, int flags,
 		    const struct timespec __user *rqtp,
@@ -1244,7 +1221,7 @@ do_clock_nanosleep(clockid_t which_clock, int flags, struct timespec *tsave)
 	return 0;
 }
 
 /*
- * This will restart either clock_nanosleep or clock_nanosleep
+ * This will restart clock_nanosleep.  Incorrectly, btw.
 */
 long
 clock_nanosleep_restart(struct restart_block *restart_block)
...
@@ -530,6 +530,15 @@ static inline void resched_task(task_t *p)
 #endif
 }
 
+/**
+ * task_curr - is this task currently executing on a CPU?
+ * @p: the task in question.
+ */
+inline int task_curr(task_t *p)
+{
+	return cpu_curr(task_cpu(p)) == p;
+}
+
 #ifdef CONFIG_SMP
 
 /*
@@ -568,6 +577,27 @@ void wait_task_inactive(task_t * p)
 	task_rq_unlock(rq, &flags);
 	preempt_enable();
 }
+
+/***
+ * kick_process - kick a running thread to enter/exit the kernel
+ * @p: the to-be-kicked thread
+ *
+ * Cause a process which is running on another CPU to enter
+ * kernel-mode, without any delay. (to get signals handled.)
+ */
+void kick_process(task_t *p)
+{
+	int cpu;
+
+	preempt_disable();
+	cpu = task_cpu(p);
+	if ((cpu != smp_processor_id()) && task_curr(p))
+		smp_send_reschedule(cpu);
+	preempt_enable();
+}
+
+EXPORT_SYMBOL_GPL(kick_process);
+
 #endif
 
 /***
@@ -575,7 +605,6 @@ void wait_task_inactive(task_t * p)
 * @p: the to-be-woken-up thread
 * @state: the mask of task states that can be woken
 * @sync: do a synchronous wakeup?
- * @kick: kick the CPU if the task is already running?
 *
 * Put it on the run-queue if it's not already there. The "current"
 * thread is always on the run-queue (except when the actual
@@ -585,7 +614,7 @@ void wait_task_inactive(task_t * p)
 *
 * returns failure only if the task is already active.
 */
-static int try_to_wake_up(task_t * p, unsigned int state, int sync, int kick)
+static int try_to_wake_up(task_t * p, unsigned int state, int sync)
 {
 	unsigned long flags;
 	int success = 0;
@@ -626,33 +655,22 @@ static int try_to_wake_up(task_t * p, unsigned int state, int sync, int kick)
 		}
 		success = 1;
 	}
-#ifdef CONFIG_SMP
-	else
-		if (unlikely(kick) && task_running(rq, p) && (task_cpu(p) != smp_processor_id()))
-			smp_send_reschedule(task_cpu(p));
-#endif
 		p->state = TASK_RUNNING;
 	}
 	task_rq_unlock(rq, &flags);
 
 	return success;
 }
 
 int wake_up_process(task_t * p)
 {
-	return try_to_wake_up(p, TASK_STOPPED | TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0, 0);
+	return try_to_wake_up(p, TASK_STOPPED | TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0);
 }
 
 EXPORT_SYMBOL(wake_up_process);
 
-int wake_up_process_kick(task_t * p)
-{
-	return try_to_wake_up(p, TASK_STOPPED | TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0, 1);
-}
-
 int wake_up_state(task_t *p, unsigned int state)
 {
-	return try_to_wake_up(p, state, 0, 0);
+	return try_to_wake_up(p, state, 0);
 }
 
 /*
@@ -1621,7 +1639,7 @@ EXPORT_SYMBOL(preempt_schedule);
 int default_wake_function(wait_queue_t *curr, unsigned mode, int sync)
 {
 	task_t *p = curr->task;
-	return try_to_wake_up(p, mode, sync, 0);
+	return try_to_wake_up(p, mode, sync);
 }
 
 EXPORT_SYMBOL(default_wake_function);
 
@@ -1941,15 +1959,6 @@ int task_nice(task_t *p)
 EXPORT_SYMBOL(task_nice);
 
-/**
- * task_curr - is this task currently executing on a CPU?
- * @p: the task in question.
- */
-int task_curr(task_t *p)
-{
-	return cpu_curr(task_cpu(p)) == p;
-}
-
 /**
 * idle_cpu - is a given cpu idle currently?
 * @cpu: the processor in question.
...
@@ -538,8 +538,9 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
 inline void signal_wake_up(struct task_struct *t, int resume)
 {
 	unsigned int mask;
+	int woken;
 
-	set_tsk_thread_flag(t,TIF_SIGPENDING);
+	set_tsk_thread_flag(t, TIF_SIGPENDING);
 
 	/*
 	 * If resume is set, we want to wake it up in the TASK_STOPPED case.
@@ -551,10 +552,11 @@ inline void signal_wake_up(struct task_struct *t, int resume)
 	mask = TASK_INTERRUPTIBLE;
 	if (resume)
 		mask |= TASK_STOPPED;
-	if (t->state & mask) {
-		wake_up_process_kick(t);
-		return;
-	}
+	woken = 0;
+	if (t->state & mask)
+		woken = wake_up_state(t, mask);
+	if (!woken)
+		kick_process(t);
 }
 
 /*
...
@@ -1059,7 +1059,6 @@ asmlinkage long sys_gettid(void)
 {
 	return current->pid;
 }
-#ifndef FOLD_NANO_SLEEP_INTO_CLOCK_NANO_SLEEP
 
 static long nanosleep_restart(struct restart_block *restart)
 {
@@ -1118,7 +1117,6 @@ asmlinkage long sys_nanosleep(struct timespec *rqtp, struct timespec *rmtp)
 	}
 	return ret;
 }
-#endif	// ! FOLD_NANO_SLEEP_INTO_CLOCK_NANO_SLEEP
 
 /*
 * sys_sysinfo - fill in sysinfo struct
...
@@ -19,6 +19,7 @@
 #include <linux/bitops.h>
 #include <linux/notifier.h>
 #include <linux/cpu.h>
+#include <linux/cpumask.h>
 #include <net/flow.h>
 #include <asm/atomic.h>
 #include <asm/semaphore.h>
@@ -65,7 +66,7 @@ static struct timer_list flow_hash_rnd_timer;
 
 struct flow_flush_info {
 	atomic_t cpuleft;
-	unsigned long cpumap;
+	cpumask_t cpumap;
 	struct completion completion;
 };
 static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
@@ -73,7 +74,7 @@ static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
 #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
 
 static DECLARE_MUTEX(flow_cache_cpu_sem);
-static unsigned long flow_cache_cpu_map;
+static cpumask_t flow_cache_cpu_map;
 static unsigned int flow_cache_cpu_count;
 
 static void flow_cache_new_hashrnd(unsigned long arg)
@@ -81,7 +82,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
 	int i;
 
 	for (i = 0; i < NR_CPUS; i++)
-		if (test_bit(i, &flow_cache_cpu_map))
+		if (cpu_isset(i, flow_cache_cpu_map))
 			flow_hash_rnd_recalc(i) = 1;
 
 	flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
@@ -178,7 +179,7 @@ void *flow_cache_lookup(struct flowi *key, u16 family, u8 dir,
 	cpu = smp_processor_id();
 
 	fle = NULL;
-	if (!test_bit(cpu, &flow_cache_cpu_map))
+	if (!cpu_isset(cpu, flow_cache_cpu_map))
 		goto nocache;
 
 	if (flow_hash_rnd_recalc(cpu))
@@ -277,7 +278,7 @@ static void flow_cache_flush_per_cpu(void *data)
 	struct tasklet_struct *tasklet;
 
 	cpu = smp_processor_id();
-	if (!test_bit(cpu, &info->cpumap))
+	if (!cpu_isset(cpu, info->cpumap))
 		return;
 
 	tasklet = flow_flush_tasklet(cpu);
@@ -301,7 +302,7 @@ void flow_cache_flush(void)
 	local_bh_disable();
 	smp_call_function(flow_cache_flush_per_cpu, &info, 1, 0);
-	if (test_bit(smp_processor_id(), &info.cpumap))
+	if (cpu_isset(smp_processor_id(), info.cpumap))
 		flow_cache_flush_tasklet((unsigned long)&info);
 	local_bh_enable();
 
@@ -341,7 +342,7 @@ static int __devinit flow_cache_cpu_prepare(int cpu)
 static int __devinit flow_cache_cpu_online(int cpu)
 {
 	down(&flow_cache_cpu_sem);
-	set_bit(cpu, &flow_cache_cpu_map);
+	cpu_set(cpu, flow_cache_cpu_map);
 	flow_cache_cpu_count++;
 	up(&flow_cache_cpu_sem);
...
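
Note on the flow-cache hunk above: a plain `unsigned long` bitmap caps the CPU map at BITS_PER_LONG CPUs, while cpumask_t wraps an array of longs sized by NR_CPUS and is accessed through cpu_set()/cpu_isset(). A standalone imitation of those accessors (the real kernel definitions differ in detail):

#include <stdio.h>

#define NR_CPUS		128
#define BITS_PER_LONG	(8 * (int)sizeof(long))
#define MASK_LONGS	((NR_CPUS + BITS_PER_LONG - 1) / BITS_PER_LONG)

typedef struct { unsigned long bits[MASK_LONGS]; } cpumask_demo_t;

static void cpu_set_demo(int cpu, cpumask_demo_t *m)
{
	m->bits[cpu / BITS_PER_LONG] |= 1UL << (cpu % BITS_PER_LONG);
}

static int cpu_isset_demo(int cpu, const cpumask_demo_t *m)
{
	return (m->bits[cpu / BITS_PER_LONG] >> (cpu % BITS_PER_LONG)) & 1;
}

int main(void)
{
	cpumask_demo_t map = { { 0 } };
	cpu_set_demo(97, &map);			/* would not fit in one long */
	printf("%d\n", cpu_isset_demo(97, &map));	/* 1 */
	return 0;
}
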
@@ -15,6 +15,7 @@
 #include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/if.h>
+#include <net/sock.h>
 #include <linux/rtnetlink.h>
 #include <linux/jiffies.h>
 #include <linux/spinlock.h>
@@ -91,9 +92,11 @@ static void linkwatch_event(void *dummy)
 	linkwatch_nextevent = jiffies + HZ;
 	clear_bit(LW_RUNNING, &linkwatch_flags);
 
-	rtnl_lock();
+	rtnl_shlock();
+	rtnl_exlock();
 	linkwatch_run_queue();
-	rtnl_unlock();
+	rtnl_exunlock();
+	rtnl_shunlock();
 }
...
@@ -595,10 +595,10 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
 	head_copy_len = skb_headroom(skb);
 	head_copy_off = 0;
-	if (newheadroom < head_copy_len) {
-		head_copy_off = head_copy_len - newheadroom;
+	if (newheadroom <= head_copy_len)
 		head_copy_len = newheadroom;
-	}
+	else
+		head_copy_off = newheadroom - head_copy_len;
 
 	/* Copy the linear header and data. */
 	if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
...
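
Note on the hunk above: the copied headroom bytes must end exactly at the new data pointer, i.e. head_copy_off + head_copy_len == newheadroom in both branches; the old code got the offset arithmetic backwards. A standalone sketch of the corrected math:

#include <stdio.h>

static void headroom_copy(unsigned newheadroom, unsigned headroom,
			  unsigned *len, unsigned *off)
{
	*len = headroom;
	*off = 0;
	if (newheadroom <= *len)
		*len = newheadroom;		/* keep only what fits */
	else
		*off = newheadroom - *len;	/* pad before the header */
	printf("off=%u len=%u (off+len=%u, newheadroom=%u)\n",
	       *off, *len, *off + *len, newheadroom);
}

int main(void)
{
	unsigned len, off;
	headroom_copy(16, 32, &len, &off);	/* off=0  len=16 */
	headroom_copy(64, 32, &len, &off);	/* off=32 len=32 */
	return 0;
}
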
@@ -4,8 +4,8 @@
 #include <net/sock.h>
 
 MODULE_LICENSE("GPL");
-MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
-MODULE_DESCRIPTION("arptables mangle table");
+MODULE_AUTHOR("Bart De Schuymer <bdschuym@pandora.be>");
+MODULE_DESCRIPTION("arptables arp payload mangle target");
 
 static unsigned int
 target(struct sk_buff **pskb, unsigned int hooknum, const struct net_device *in,
...
@@ -89,6 +89,7 @@
 #include <linux/random.h>
 #include <linux/jhash.h>
 #include <linux/rcupdate.h>
+#include <linux/times.h>
 #include <net/protocol.h>
 #include <net/ip.h>
 #include <net/route.h>
@@ -2309,7 +2310,7 @@ static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
 	ci.rta_used	= rt->u.dst.__use;
 	ci.rta_clntref	= atomic_read(&rt->u.dst.__refcnt);
 	if (rt->u.dst.expires)
-		ci.rta_expires = rt->u.dst.expires - jiffies;
+		ci.rta_expires = jiffies_to_clock_t(rt->u.dst.expires - jiffies);
 	else
 		ci.rta_expires = 0;
 	ci.rta_error	= rt->u.dst.error;
...
@@ -61,6 +61,7 @@
 #include <linux/cache.h>
 #include <linux/jhash.h>
 #include <linux/init.h>
+#include <linux/times.h>
 
 #include <net/icmp.h>
 #include <net/tcp.h>
@@ -2490,7 +2491,7 @@ static void get_openreq4(struct sock *sk, struct open_request *req,
 		TCP_SYN_RECV,
 		0, 0, /* could print option size, but that is af dependent. */
 		1,    /* timers active (only the expire timer) */
-		ttd,
+		jiffies_to_clock_t(ttd),
 		req->retrans,
 		uid,
 		0,  /* non standard timer */
@@ -2528,7 +2529,8 @@ static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i)
 		"%08X %5d %8d %lu %d %p %u %u %u %u %d",
 		i, src, srcp, dest, destp, sp->sk_state,
 		tp->write_seq - tp->snd_una, tp->rcv_nxt - tp->copied_seq,
-		timer_active, timer_expires - jiffies,
+		timer_active,
+		jiffies_to_clock_t(timer_expires - jiffies),
 		tp->retransmits,
 		sock_i_uid(sp),
 		tp->probes_out,
@@ -2556,7 +2558,7 @@ static void get_timewait4_sock(struct tcp_tw_bucket *tw, char *tmpbuf, int i)
 	sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
 		" %02X %08X:%08X %02X:%08X %08X %5d %8d %d %d %p",
 		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
-		3, ttd, 0, 0, 0, 0,
+		3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
 		atomic_read(&tw->tw_refcnt), tw);
 }
...
@@ -571,15 +571,6 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
 
 	ifp->dead = 1;
 
-#ifdef CONFIG_IPV6_PRIVACY
-	spin_lock_bh(&ifp->lock);
-	if (ifp->ifpub) {
-		__in6_ifa_put(ifp->ifpub);
-		ifp->ifpub = NULL;
-	}
-	spin_unlock_bh(&ifp->lock);
-#endif
-
 	write_lock_bh(&addrconf_hash_lock);
 	for (ifap = &inet6_addr_lst[hash]; (ifa=*ifap) != NULL;
 	     ifap = &ifa->lst_next) {
@@ -600,7 +591,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
 		if (ifa == ifp) {
 			*ifap = ifa->tmp_next;
 			if (ifp->ifpub) {
-				__in6_ifa_put(ifp->ifpub);
+				in6_ifa_put(ifp->ifpub);
 				ifp->ifpub = NULL;
 			}
 			__in6_ifa_put(ifp);
...
@@ -27,6 +27,7 @@
 #include <linux/config.h>
 #include <linux/errno.h>
 #include <linux/types.h>
+#include <linux/times.h>
 #include <linux/socket.h>
 #include <linux/sockios.h>
 #include <linux/net.h>
@@ -717,7 +718,7 @@ int ip6_route_add(struct in6_rtmsg *rtmsg, struct nlmsghdr *nlh, void *_rtattr)
 		return -ENOMEM;
 
 	rt->u.dst.obsolete = -1;
-	rt->rt6i_expires = rtmsg->rtmsg_info;
+	rt->rt6i_expires = clock_t_to_jiffies(rtmsg->rtmsg_info);
 
 	if (nlh && (r = NLMSG_DATA(nlh))) {
 		rt->rt6i_protocol = r->rtm_protocol;
 	} else {
@@ -1535,7 +1536,7 @@ static int rt6_fill_node(struct sk_buff *skb, struct rt6_info *rt,
 	RTA_PUT(skb, RTA_PRIORITY, 4, &rt->rt6i_metric);
 	ci.rta_lastuse = jiffies - rt->u.dst.lastuse;
 	if (rt->rt6i_expires)
-		ci.rta_expires = rt->rt6i_expires - jiffies;
+		ci.rta_expires = jiffies_to_clock_t(rt->rt6i_expires - jiffies);
 	else
 		ci.rta_expires = 0;
 	ci.rta_used = rt->u.dst.__use;
...
@@ -39,6 +39,7 @@
 #include <linux/init.h>
 #include <linux/jhash.h>
 #include <linux/ipsec.h>
+#include <linux/times.h>
 
 #include <linux/ipv6.h>
 #include <linux/icmpv6.h>
@@ -1941,7 +1942,7 @@ static void get_openreq6(struct seq_file *seq,
 		   TCP_SYN_RECV,
 		   0,0, /* could print option size, but that is af dependent. */
 		   1,   /* timers active (only the expire timer) */
-		   ttd,
+		   jiffies_to_clock_t(ttd),
 		   req->retrans,
 		   uid,
 		   0,  /* non standard timer */
@@ -1987,7 +1988,8 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
 		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
 		   sp->sk_state,
 		   tp->write_seq-tp->snd_una, tp->rcv_nxt-tp->copied_seq,
-		   timer_active, timer_expires-jiffies,
+		   timer_active,
+		   jiffies_to_clock_t(timer_expires - jiffies),
 		   tp->retransmits,
 		   sock_i_uid(sp),
 		   tp->probes_out,
@@ -2022,7 +2024,7 @@ static void get_timewait6_sock(struct seq_file *seq,
 		   dest->s6_addr32[0], dest->s6_addr32[1],
 		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
 		   tw->tw_substate, 0, 0,
-		   3, ttd, 0, 0, 0, 0,
+		   3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
 		   atomic_read(&tw->tw_refcnt), tw);
 }
...