Commit 3cdecffe authored by Dave Jones

Merge delerium.codemonkey.org.uk:/mnt/nfs/sepia/bar/src/kernel/2.6/trees/bk-linus

into delerium.codemonkey.org.uk:/mnt/nfs/sepia/bar/src/kernel/2.6/trees/agpgart
parents 2fa72d28 056ffaad
@@ -359,7 +359,8 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
 		return;
 	tsk->thread.cr2 = address;
-	tsk->thread.error_code = error_code;
+	/* Kernel addresses are always protection faults */
+	tsk->thread.error_code = error_code | (address >= TASK_SIZE);
 	tsk->thread.trap_no = 14;
 	info.si_signo = SIGSEGV;
 	info.si_errno = 0;
...
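Note on the error_code change: on i386, bit 0 of the page-fault error code is 0 for a not-present page and 1 for a protection violation. Kernel mappings are always present, so a user-mode fault on an address at or above TASK_SIZE must be a protection fault, and ORing in the boolean comparison encodes that in bit 0 for the SIGSEGV siginfo. A standalone sketch of the idiom (TASK_SIZE_DEMO and all other names here are hypothetical):

    #include <stdio.h>

    #define TASK_SIZE_DEMO 0xC0000000UL  /* i386-style 3GB user/kernel split */

    /* Kernel addresses are always protection faults: the comparison
     * yields 0 or 1, which lands in bit 0 of the error code. */
    static unsigned long fixup_error_code(unsigned long error_code,
                                          unsigned long address)
    {
        return error_code | (address >= TASK_SIZE_DEMO);
    }

    int main(void)
    {
        printf("%lu\n", fixup_error_code(0, 0xC0100000UL)); /* kernel addr -> 1 */
        printf("%lu\n", fixup_error_code(0, 0x08048000UL)); /* user addr   -> 0 */
        return 0;
    }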
@@ -150,6 +150,7 @@ static int sg_io(request_queue_t *q, struct block_device *bdev,
 	struct request *rq;
 	struct bio *bio;
 	char sense[SCSI_SENSE_BUFFERSIZE];
+	unsigned char cdb[BLK_MAX_CDB];
 	void *buffer;
 
 	if (hdr->interface_id != 'S')
@@ -166,6 +167,9 @@ static int sg_io(request_queue_t *q, struct block_device *bdev,
 	if (hdr->dxfer_len > (q->max_sectors << 9))
 		return -EIO;
 
+	if (copy_from_user(cdb, hdr->cmdp, hdr->cmd_len))
+		return -EFAULT;
+
 	reading = writing = 0;
 	buffer = NULL;
 	bio = NULL;
@@ -216,7 +220,7 @@ static int sg_io(request_queue_t *q, struct block_device *bdev,
 	 * fill in request structure
 	 */
 	rq->cmd_len = hdr->cmd_len;
-	memcpy(rq->cmd, hdr->cmdp, hdr->cmd_len);
+	memcpy(rq->cmd, cdb, hdr->cmd_len);
 	if (sizeof(rq->cmd) != hdr->cmd_len)
 		memset(rq->cmd + hdr->cmd_len, 0, sizeof(rq->cmd) - hdr->cmd_len);
...
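Note: the sg_io change is a double-fetch (TOCTOU) fix. The CDB is copied out of user space exactly once, into an on-stack buffer sized BLK_MAX_CDB (added below in blkdev.h), and every later consumer reads that snapshot, so user space can no longer rewrite hdr->cmdp between validation and the memcpy into rq->cmd. The copy-once rule in a user-space sketch (all names hypothetical):

    /* Copy-once pattern: snapshot the externally writable buffer,
     * then validate and use only the snapshot. */
    #include <string.h>
    #include <stdio.h>

    #define MAX_CDB 16

    /* Pretend this buffer is shared with an untrusted writer. */
    static unsigned char shared_cmd[MAX_CDB];

    static int issue_command(size_t cmd_len)
    {
        unsigned char cdb[MAX_CDB];

        if (cmd_len > sizeof(cdb))
            return -1;
        /* One fetch; later reads cannot see concurrent rewrites. */
        memcpy(cdb, shared_cmd, cmd_len);

        /* ... validate cdb[0] and act on the private copy only ... */
        printf("opcode 0x%02x, %zu bytes\n", cdb[0], cmd_len);
        return 0;
    }

    int main(void)
    {
        shared_cmd[0] = 0x12;        /* e.g. SCSI INQUIRY opcode */
        return issue_command(6);
    }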
@@ -147,7 +147,7 @@ static s32 nforce2_access(struct i2c_adapter * adap, u16 addr,
 		case I2C_SMBUS_BYTE:
 			if (read_write == I2C_SMBUS_WRITE)
-				outb_p(data->byte, NVIDIA_SMB_DATA);
+				outb_p(command, NVIDIA_SMB_CMD);
 			protocol |= NVIDIA_SMB_PRTCL_BYTE;
 			break;
...
@@ -864,13 +864,6 @@ static void __devexit netdrv_remove_one (struct pci_dev *pdev)
 	pci_release_regions (pdev);
 
-#ifndef NETDRV_NDEBUG
-	/* poison memory before freeing */
-	memset (dev, 0xBC,
-		sizeof (struct net_device) +
-		sizeof (struct netdrv_private));
-#endif /* NETDRV_NDEBUG */
-
 	free_netdev (dev);
 	pci_set_drvdata (pdev, NULL);
...
@@ -1766,8 +1766,6 @@ static void __exit pcnet32_cleanup_module(void)
 		next_dev = lp->next;
 		unregister_netdev(pcnet32_dev);
 		release_region(pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE);
-		if (lp->pci_dev)
-			pci_unregister_driver(&pcnet32_driver);
 		pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr);
 		free_netdev(pcnet32_dev);
 		pcnet32_dev = next_dev;
...
@@ -642,10 +642,6 @@ rtl8169_remove_one(struct pci_dev *pdev)
 	iounmap(tp->mmio_addr);
 	pci_release_regions(pdev);
 
-	// poison memory before freeing
-	memset(dev, 0xBC,
-	       sizeof (struct net_device) + sizeof (struct rtl8169_private));
-
 	pci_disable_device(pdev);
 	free_netdev(dev);
 	pci_set_drvdata(pdev, NULL);
...
@@ -703,10 +703,6 @@ SiS190_remove_one(struct pci_dev *pdev)
 	iounmap(tp->mmio_addr);
 	pci_release_regions(pdev);
 
-	// poison memory before freeing
-	memset(dev, 0xBC,
-	       sizeof (struct net_device) + sizeof (struct sis190_private));
-
 	free_netdev(dev);
 	pci_set_drvdata(pdev, NULL);
 }
...
@@ -2466,11 +2466,8 @@ static irqreturn_t airo_interrupt ( int irq, void* dev_id, struct pt_regs *regs)
 			OUT4500( apriv, EVACK, EV_MIC );
 #ifdef MICSUPPORT
 			if (test_bit(FLAG_MIC_CAPABLE, &apriv->flags)) {
-				if (down_trylock(&apriv->sem) != 0) {
-					set_bit(JOB_MIC, &apriv->flags);
-					wake_up_interruptible(&apriv->thr_wait);
-				} else
-					micinit (apriv);
+				set_bit(JOB_MIC, &apriv->flags);
+				wake_up_interruptible(&apriv->thr_wait);
 			}
 #endif
 		}
...
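Note: the airo change drops down_trylock() from hard-IRQ context altogether; the handler now only sets JOB_MIC and wakes the driver thread, which runs micinit() where sleeping is legal. The hand-off shape, sketched with POSIX semaphores (sem_post() is async-signal-safe and never blocks, which mirrors what an interrupt handler may do; names are hypothetical):

    #include <semaphore.h>
    #include <pthread.h>
    #include <stdio.h>

    static sem_t job_mic;           /* stands in for JOB_MIC + thr_wait */

    /* "Interrupt handler": flags the job and wakes the thread,
     * never blocks. */
    static void fake_irq(void)
    {
        sem_post(&job_mic);
    }

    /* Driver thread: may sleep, so the real work happens here. */
    static void *driver_thread(void *arg)
    {
        sem_wait(&job_mic);
        puts("micinit() equivalent runs here, not in the IRQ");
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        sem_init(&job_mic, 0, 0);
        pthread_create(&t, NULL, driver_thread, NULL);
        fake_irq();
        pthread_join(t, NULL);
        return 0;
    }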
@@ -517,6 +517,7 @@ static ide_startstop_t idescsi_issue_pc (ide_drive_t *drive, idescsi_pc_t *pc)
 	pc->current_position=pc->buffer;
 	bcount.all = IDE_MIN(pc->request_transfer, 63 * 1024);		/* Request to transfer the entire buffer at once */
 
+	feature.all = 0;
 	if (drive->using_dma && rq->bio) {
 		if (test_bit(PC_WRITING, &pc->flags))
 			feature.b.dma = !HWIF(drive)->ide_dma_write(drive);
...
@@ -3224,8 +3224,6 @@ void ata_pci_remove_one (struct pci_dev *pdev)
 		scsi_host_put(ap->host);	/* FIXME: check return val */
 	}
 
-	kfree(host_set);
-
 	pci_release_regions(pdev);
 
 	for (i = 0; i < host_set->n_ports; i++) {
@@ -3242,6 +3240,7 @@ void ata_pci_remove_one (struct pci_dev *pdev)
 		}
 	}
 
+	kfree(host_set);
 	pci_disable_device(pdev);
 	pci_set_drvdata(pdev, NULL);
 }
...
@@ -65,6 +65,8 @@ int hpfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
 	int c1, c2 = 0;
 	int ret = 0;
 
+	lock_kernel();
+
 	if (hpfs_sb(inode->i_sb)->sb_chk) {
 		if (hpfs_chk_sectors(inode->i_sb, inode->i_ino, 1, "dir_fnode")) {
 			ret = -EFSERROR;
...
@@ -79,6 +79,7 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int origin)
 			loff_t n = file->f_pos - 2;
 
 			spin_lock(&dcache_lock);
+			list_del(&cursor->d_child);
 			p = file->f_dentry->d_subdirs.next;
 			while (n && p != &file->f_dentry->d_subdirs) {
 				struct dentry *next;
@@ -87,7 +88,6 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int origin)
 				n--;
 				p = p->next;
 			}
-			list_del(&cursor->d_child);
 			list_add_tail(&cursor->d_child, p);
 			spin_unlock(&dcache_lock);
 		}
...
@@ -1666,10 +1666,14 @@ static int get_tid_list(int index, unsigned int *tids, struct inode *dir)
 	index -= 2;
 	read_lock(&tasklist_lock);
-	do {
+	/*
+	 * The starting point task (leader_task) might be an already
+	 * unlinked task, which cannot be used to access the task-list
+	 * via next_thread().
+	 */
+	if (pid_alive(task)) do {
 		int tid = task->pid;
 
-		if (!pid_alive(task))
-			continue;
 		if (--index >= 0)
 			continue;
 		tids[nr_tids] = tid;
...
@@ -50,9 +50,9 @@
 		__asm__ __volatile__ ("rdtsc" : "=a" (low) : : "edx")
 
 #define rdtscll(val) do { \
-     unsigned int a,d; \
-     asm volatile("rdtsc" : "=a" (a), "=d" (d)); \
-     (val) = ((unsigned long)a) | (((unsigned long)d)<<32); \
+     unsigned int __a,__d; \
+     asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
+     (val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
 } while(0)
 
 #define rdpmc(counter,low,high) \
...
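Note: the rdtscll() rename is macro hygiene. The macro declares block-local temporaries, so rdtscll(a) called with a caller variable named a assigned the result to the macro's own shadowing a, and the caller saw garbage; the __a/__d spelling makes such a collision unlikely. The bug in miniature (hypothetical macros):

    #include <stdio.h>

    /* The macro-local "tmp" shadows any caller variable of the
     * same name, so (dst) resolves to the dead local. */
    #define BROKEN_SET42(dst) do { \
            int tmp = 42;          \
            (dst) = tmp;           \
    } while (0)

    /* Underscore-prefixed locals dodge the collision. */
    #define FIXED_SET42(dst) do {  \
            int __tmp = 42;        \
            (dst) = __tmp;         \
    } while (0)

    int main(void)
    {
        int tmp = 0;

        BROKEN_SET42(tmp);            /* writes the macro's own tmp */
        printf("broken: %d\n", tmp);  /* prints 0, not 42 */

        FIXED_SET42(tmp);
        printf("fixed:  %d\n", tmp);  /* prints 42 */
        return 0;
    }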
@@ -82,6 +82,8 @@ struct request_list {
 	wait_queue_head_t wait[2];
 };
 
+#define BLK_MAX_CDB	16
+
 /*
  * try to put the fields that are referenced together in the same cacheline
 */
@@ -147,7 +149,7 @@ struct request {
 	 * when request is used as a packet command carrier
 	 */
 	unsigned int cmd_len;
-	unsigned char cmd[16];
+	unsigned char cmd[BLK_MAX_CDB];
 
 	unsigned int data_len;
 	void *data;
...
@@ -208,6 +208,18 @@ static inline int list_empty(const struct list_head *head)
 	return head->next == head;
 }
 
+/**
+ * list_empty_careful - tests whether a list is
+ * empty _and_ checks that no other CPU might be
+ * in the process of still modifying either member
+ * @head: the list to test.
+ */
+static inline int list_empty_careful(const struct list_head *head)
+{
+	struct list_head *next = head->next;
+	return (next == head) && (next == head->prev);
+}
+
 static inline void __list_splice(struct list_head *list,
 				 struct list_head *head)
 {
...
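Note: list_empty_careful() reads both pointers, so a half-finished list_del_init() on another CPU (next already rewritten, prev not yet) is never mistaken for an empty list. As the finish_wait() comment below spells out, the lockless check is only sound when list_del_init() is the sole concurrent mutation and only one other CPU touches the entry. The predicate, restated outside the kernel headers as a runnable sketch:

    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    static int list_empty(const struct list_head *head)
    {
        return head->next == head;
    }

    /* Also checks prev, so a concurrent list_del_init() that has
     * published next but not yet prev does not report "empty". */
    static int list_empty_careful(const struct list_head *head)
    {
        struct list_head *next = head->next;
        return (next == head) && (next == head->prev);
    }

    int main(void)
    {
        /* Simulated half-completed removal: next already points
         * back at the head, prev still points at the old node. */
        struct list_head waiter, half = { &half, &waiter };

        printf("plain:   %d\n", list_empty(&half));          /* 1 */
        printf("careful: %d\n", list_empty_careful(&half));  /* 0 */
        return 0;
    }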
@@ -138,6 +138,7 @@ enum
 #define RTPROT_ZEBRA	11	/* Zebra */
 #define RTPROT_BIRD	12	/* BIRD */
 #define RTPROT_DNROUTED	13	/* DECnet routing daemon */
+#define RTPROT_XORP	14	/* XORP */
 
 /* rtm_scope
...
@@ -49,9 +49,11 @@ static void __unhash_process(struct task_struct *p)
 
 void release_task(struct task_struct * p)
 {
+	int zap_leader;
 	task_t *leader;
 	struct dentry *proc_dentry;
 
+repeat:
 	BUG_ON(p->state < TASK_ZOMBIE);
 
 	atomic_dec(&p->user->processes);
@@ -70,10 +72,21 @@ void release_task(struct task_struct * p)
 	 * group, and the leader is zombie, then notify the
 	 * group leader's parent process. (if it wants notification.)
 	 */
+	zap_leader = 0;
 	leader = p->group_leader;
-	if (leader != p && thread_group_empty(leader) &&
-	    leader->state == TASK_ZOMBIE && leader->exit_signal != -1)
+	if (leader != p && thread_group_empty(leader) && leader->state == TASK_ZOMBIE) {
+		BUG_ON(leader->exit_signal == -1);
 		do_notify_parent(leader, leader->exit_signal);
+		/*
+		 * If we were the last child thread and the leader has
+		 * exited already, and the leader's parent ignores SIGCHLD,
+		 * then we are the one who should release the leader.
+		 *
+		 * do_notify_parent() will have marked it self-reaping in
+		 * that case.
+		 */
+		zap_leader = (leader->exit_signal == -1);
+	}
 
 	p->parent->cutime += p->utime + p->cutime;
 	p->parent->cstime += p->stime + p->cstime;
@@ -88,6 +101,10 @@ void release_task(struct task_struct * p)
 		proc_pid_flush(proc_dentry);
 	release_thread(p);
 	put_task_struct(p);
+
+	p = leader;
+	if (unlikely(zap_leader))
+		goto repeat;
 }
 
 /* we are using it only for SMP init */
...
@@ -125,15 +125,28 @@ void remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
 
 EXPORT_SYMBOL(remove_wait_queue);
 
+/*
+ * Note: we use "set_current_state()" _after_ the wait-queue add,
+ * because we need a memory barrier there on SMP, so that any
+ * wake-function that tests for the wait-queue being active
+ * will be guaranteed to see waitqueue addition _or_ subsequent
+ * tests in this thread will see the wakeup having taken place.
+ *
+ * The spin_unlock() itself is semi-permeable and only protects
+ * one way (it only protects stuff inside the critical region and
+ * stops them from bleeding out - it would still allow subsequent
+ * loads to move into the critical region).
+ */
 void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
 {
 	unsigned long flags;
 
-	__set_current_state(state);
 	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
 	spin_lock_irqsave(&q->lock, flags);
 	if (list_empty(&wait->task_list))
 		__add_wait_queue(q, wait);
+	set_current_state(state);
 	spin_unlock_irqrestore(&q->lock, flags);
 }
 
@@ -144,11 +157,11 @@ prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
 {
 	unsigned long flags;
 
-	__set_current_state(state);
 	wait->flags |= WQ_FLAG_EXCLUSIVE;
 	spin_lock_irqsave(&q->lock, flags);
 	if (list_empty(&wait->task_list))
 		__add_wait_queue_tail(q, wait);
+	set_current_state(state);
 	spin_unlock_irqrestore(&q->lock, flags);
 }
 
@@ -159,7 +172,20 @@ void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
 	unsigned long flags;
 
 	__set_current_state(TASK_RUNNING);
-	if (!list_empty(&wait->task_list)) {
+	/*
+	 * We can check for list emptiness outside the lock
+	 * IFF:
+	 *  - we use the "careful" check that verifies both
+	 *    the next and prev pointers, so that there cannot
+	 *    be any half-pending updates in progress on other
+	 *    CPUs that we haven't seen yet (and that might
+	 *    still change the stack area),
+	 * and
+	 *  - all other users take the lock (ie we can only
+	 *    have _one_ other CPU that looks at or modifies
+	 *    the list).
+	 */
+	if (!list_empty_careful(&wait->task_list)) {
 		spin_lock_irqsave(&q->lock, flags);
 		list_del_init(&wait->task_list);
 		spin_unlock_irqrestore(&q->lock, flags);
...
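Note: the prepare_to_wait() reordering closes a lost-wakeup window: the waiter must be visible on the queue, with its state set under the barrier in set_current_state(), before any waker tests the queue. The caller-side pattern these helpers exist for, rendered with pthreads, where the mutex plays the role of q->lock (hypothetical names; the ordering, register-then-test, is the point):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  q_cond = PTHREAD_COND_INITIALIZER;
    static int condition;

    static void *waiter(void *arg)
    {
        pthread_mutex_lock(&q_lock);    /* "prepare_to_wait": register first */
        while (!condition)              /* test the condition AFTER registering */
            pthread_cond_wait(&q_cond, &q_lock);
        pthread_mutex_unlock(&q_lock);  /* "finish_wait" */
        puts("woken");
        return NULL;
    }

    static void waker(void)
    {
        pthread_mutex_lock(&q_lock);
        condition = 1;
        pthread_cond_signal(&q_cond);   /* cannot slip past a registered waiter */
        pthread_mutex_unlock(&q_lock);
    }

    int main(void)
    {
        pthread_t t;
        pthread_create(&t, NULL, waiter, NULL);
        waker();
        pthread_join(t, NULL);
        return 0;
    }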
@@ -646,7 +646,7 @@ static int try_to_wake_up(task_t * p, unsigned int state, int sync)
 			 */
 			p->activated = -1;
 		}
-		if (sync)
+		if (sync && (task_cpu(p) == smp_processor_id()))
 			__activate_task(p, rq);
 		else {
 			activate_task(p, rq);
...
@@ -19,6 +19,7 @@
 #include <linux/hugetlb.h>
 #include <linux/profile.h>
 #include <linux/module.h>
+#include <linux/mount.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgalloc.h>
@@ -474,9 +475,14 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
 	struct rb_node ** rb_link, * rb_parent;
 	unsigned long charged = 0;
 
-	if (file && (!file->f_op || !file->f_op->mmap))
-		return -ENODEV;
+	if (file) {
+		if (!file->f_op || !file->f_op->mmap)
+			return -ENODEV;
+
+		if ((prot & PROT_EXEC) && (file->f_vfsmnt->mnt_flags & MNT_NOEXEC))
+			return -EPERM;
+	}
 
 	if (!len)
 		return addr;
...
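Note: with the MNT_NOEXEC check, a PROT_EXEC mapping of a file on a noexec mount now fails with EPERM instead of quietly succeeding. The user-visible contract, sketched (/mnt/noexec/payload is a hypothetical path; error handling trimmed):

    #include <sys/mman.h>
    #include <string.h>
    #include <stdio.h>
    #include <fcntl.h>
    #include <errno.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("/mnt/noexec/payload", O_RDONLY);
        if (fd < 0)
            return 1;

        void *p = mmap(NULL, 4096, PROT_READ | PROT_EXEC,
                       MAP_PRIVATE, fd, 0);
        if (p == MAP_FAILED)
            printf("mmap: %s\n", strerror(errno)); /* expect EPERM */

        close(fd);
        return 0;
    }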
@@ -180,7 +180,7 @@ static int br_nf_pre_routing_finish(struct sk_buff *skb)
 		struct rtable *rt;
 		struct flowi fl = { .nl_u =
 			{ .ip4_u = { .daddr = iph->daddr, .saddr = 0,
-				     .tos = iph->tos} }, .proto = 0};
+				     .tos = RT_TOS(iph->tos)} }, .proto = 0};
 
 		if (!ip_route_output_key(&rt, &fl)) {
 			/* Bridged-and-DNAT'ed traffic doesn't
...
@@ -53,7 +53,7 @@ unsigned long ip_ct_tcp_timeout_syn_sent =      2 MINS;
 unsigned long ip_ct_tcp_timeout_syn_recv =     60 SECS;
 unsigned long ip_ct_tcp_timeout_established =   5 DAYS;
 unsigned long ip_ct_tcp_timeout_fin_wait =      2 MINS;
-unsigned long ip_ct_tcp_timeout_close_wait =    3 DAYS;
+unsigned long ip_ct_tcp_timeout_close_wait =   60 SECS;
 unsigned long ip_ct_tcp_timeout_last_ack =     30 SECS;
 unsigned long ip_ct_tcp_timeout_time_wait =     2 MINS;
 unsigned long ip_ct_tcp_timeout_close =        10 SECS;
...
@@ -201,7 +201,8 @@ static unsigned int ip_refrag(unsigned int hooknum,
 	/* Local packets are never produced too large for their
 	   interface.  We defragment them at LOCAL_OUT, however,
 	   so we have to refragment them here. */
-	if ((*pskb)->len > dst_pmtu(&rt->u.dst)) {
+	if ((*pskb)->len > dst_pmtu(&rt->u.dst) &&
+	    !skb_shinfo(*pskb)->tso_size) {
 		/* No hook can be after us, so this should be OK. */
 		ip_fragment(*pskb, okfn);
 		return NF_STOLEN;
...
@@ -2356,6 +2356,7 @@ static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
 {
 	struct tcp_iter_state* st = seq->private;
+	st->state = TCP_SEQ_STATE_LISTENING;
 	st->num = 0;
 	return *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
 }
...
@@ -222,7 +222,7 @@ static __inline__ void __tcp_v6_hash(struct sock *sk)
 		write_lock(lock);
 	}
 
-	sk_add_node(sk, list);
+	__sk_add_node(sk, list);
 	sock_prot_inc_use(sk->sk_prot);
 	write_unlock(lock);
 }
...
@@ -825,7 +825,7 @@ static int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg
 		struct sockaddr_in sin;
 		sin.sin_family = AF_INET;
 		sin.sin_port = sin6 ? sin6->sin6_port : inet->dport;
-		sin.sin_addr.s_addr = daddr->s6_addr[3];
+		sin.sin_addr.s_addr = daddr->s6_addr32[3];
 		msg->msg_name = &sin;
 		msg->msg_namelen = sizeof(sin);
 		do_udp_sendmsg:
...
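Note: s6_addr[3] is a single byte of the IPv6 address, while s6_addr32[3] is the whole low 32-bit word, which for an IPv4-mapped address ::ffff:a.b.c.d is exactly the embedded IPv4 address. The layout in miniature (assumes the glibc extension that exposes s6_addr32 via <netinet/in.h>):

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <stdio.h>

    int main(void)
    {
        struct in6_addr a6;
        struct in_addr  a4;
        char buf[INET_ADDRSTRLEN];

        inet_pton(AF_INET6, "::ffff:192.0.2.1", &a6);

        a4.s_addr = a6.s6_addr32[3];   /* the whole embedded IPv4 address */
        printf("right: %s\n", inet_ntop(AF_INET, &a4, buf, sizeof(buf)));

        a4.s_addr = a6.s6_addr[3];     /* one byte: a garbage address */
        printf("wrong: %s\n", inet_ntop(AF_INET, &a4, buf, sizeof(buf)));

        return 0;
    }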
@@ -74,7 +74,7 @@
 #define HTB_HYSTERESIS 1 /* whether to use mode hysteresis for speedup */
 #define HTB_QLOCK(S) spin_lock_bh(&(S)->dev->queue_lock)
 #define HTB_QUNLOCK(S) spin_unlock_bh(&(S)->dev->queue_lock)
-#define HTB_VER 0x3000d	/* major must be matched with number supplied by TC as version */
+#define HTB_VER 0x3000e	/* major must be matched with number supplied by TC as version */
 
 #if HTB_VER >> 16 != TC_HTB_PROTOVER
 #error "Mismatched sch_htb.c and pkt_sch.h"
@@ -290,6 +290,11 @@ static __inline__ struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
  * then finish and return direct queue.
 */
 #define HTB_DIRECT (struct htb_class*)-1
+static inline u32 htb_classid(struct htb_class *cl)
+{
+	return (cl && cl != HTB_DIRECT) ? cl->classid : TC_H_UNSPEC;
+}
+
 static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch)
 {
 	struct htb_sched *q = (struct htb_sched *)sch->data;
@@ -703,7 +708,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	sch->q.qlen++;
 	sch->stats.packets++; sch->stats.bytes += skb->len;
-	HTB_DBG(1,1,"htb_enq_ok cl=%X skb=%p\n",cl?cl->classid:0,skb);
+	HTB_DBG(1,1,"htb_enq_ok cl=%X skb=%p\n",htb_classid(cl),skb);
 	return NET_XMIT_SUCCESS;
 }
@@ -731,7 +736,7 @@ static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
 	htb_activate (q,cl);
 	sch->q.qlen++;
-	HTB_DBG(1,1,"htb_req_ok cl=%X skb=%p\n",cl?cl->classid:0,skb);
+	HTB_DBG(1,1,"htb_req_ok cl=%X skb=%p\n",htb_classid(cl),skb);
 	return NET_XMIT_SUCCESS;
 }
@@ -1381,11 +1386,16 @@ static void htb_destroy(struct Qdisc* sch)
 #ifdef HTB_RATECM
 	del_timer_sync (&q->rttim);
 #endif
+	/* This line used to be after the htb_destroy_class call below,
+	   and surprisingly it worked in 2.4. But it must precede it,
+	   because a filter needs its target class alive to be able to
+	   call unbind_filter on it (without an Oops). */
+	htb_destroy_filters(&q->filter_list);
 	while (!list_empty(&q->root))
 		htb_destroy_class (sch,list_entry(q->root.next,
 				struct htb_class,sibling));
-	htb_destroy_filters(&q->filter_list);
 	__skb_queue_purge(&q->direct_queue);
 }
...