Commit 39ceafe3 authored by Jeff Garzik

Merge redhat.com:/spare/repo/linux-2.5
into redhat.com:/spare/repo/net-drivers-2.5

parents e90b21e2 e1f936a9
@@ -150,6 +150,7 @@ static int sg_io(request_queue_t *q, struct block_device *bdev,
 	struct request *rq;
 	struct bio *bio;
 	char sense[SCSI_SENSE_BUFFERSIZE];
+	unsigned char cdb[BLK_MAX_CDB];
 	void *buffer;

 	if (hdr->interface_id != 'S')
@@ -166,6 +167,9 @@ static int sg_io(request_queue_t *q, struct block_device *bdev,
 	if (hdr->dxfer_len > (q->max_sectors << 9))
 		return -EIO;

+	if (copy_from_user(cdb, hdr->cmdp, hdr->cmd_len))
+		return -EFAULT;
+
 	reading = writing = 0;
 	buffer = NULL;
 	bio = NULL;
@@ -216,7 +220,7 @@ static int sg_io(request_queue_t *q, struct block_device *bdev,
 	 * fill in request structure
 	 */
 	rq->cmd_len = hdr->cmd_len;
-	memcpy(rq->cmd, hdr->cmdp, hdr->cmd_len);
+	memcpy(rq->cmd, cdb, hdr->cmd_len);
 	if (sizeof(rq->cmd) != hdr->cmd_len)
 		memset(rq->cmd + hdr->cmd_len, 0, sizeof(rq->cmd) - hdr->cmd_len);
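Why this hunk matters: hdr->cmdp is a user-space pointer, and the old code handed it straight to memcpy(). The fix stages the CDB through copy_from_user() into a bounded on-stack buffer and only ever uses the kernel copy. A minimal user-space analogue of that pattern (all names here are illustrative stand-ins, not kernel API):

#include <stdio.h>
#include <string.h>

#define MAX_CDB 16                      /* stands in for BLK_MAX_CDB */

/* Stand-in for copy_from_user(): 0 on success, nonzero on "fault". */
static int copy_from_caller(void *dst, const void *src, size_t len)
{
        if (!src)
                return -1;              /* simulate a faulting user pointer */
        memcpy(dst, src, len);
        return 0;
}

/* Copy the untrusted command block into a bounded local buffer first,
 * then only ever work with the local copy. */
static int submit_cmd(const unsigned char *user_cmd, size_t cmd_len)
{
        unsigned char cdb[MAX_CDB];

        if (cmd_len > sizeof(cdb))
                return -1;              /* reject oversized commands up front */
        if (copy_from_caller(cdb, user_cmd, cmd_len))
                return -1;              /* -EFAULT in the kernel version */

        printf("opcode 0x%02x, %zu byte CDB\n", cdb[0], cmd_len);
        return 0;
}

int main(void)
{
        unsigned char cmd[6] = { 0x12, 0, 0, 0, 36, 0 }; /* INQUIRY-like */
        return submit_cmd(cmd, sizeof(cmd));
}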
...
@@ -40,6 +40,16 @@
 #define stripe_hash(conf, sect) ((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK])

+/* bio's attached to a stripe+device for I/O are linked together in bi_sector
+ * order without overlap.  There may be several bio's per stripe+device, and
+ * a bio could span several devices.
+ * When walking this list for a particular stripe+device, we must never proceed
+ * beyond a bio that extends past this device, as the next bio might no longer
+ * be valid.
+ * This macro is used to determine the 'next' bio in the list, given the sector
+ * of the current stripe+device.
+ */
+#define r5_next_bio(bio, sect) ((bio->bi_sector + (bio->bi_size>>9) < sect + STRIPE_SECTORS) ? bio->bi_next : NULL)
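To see what the macro buys, here is a self-contained walk over such a list (illustrative values; STRIPE_SECTORS and the struct layout are simplified stand-ins for the raid5 ones):

#include <stdio.h>

#define STRIPE_SECTORS 8                /* sectors per stripe per device, illustrative */

struct bio {
        unsigned long long bi_sector;   /* first sector covered */
        unsigned int bi_size;           /* length in bytes */
        struct bio *bi_next;
};

/* Same rule as r5_next_bio: only follow bi_next while the current bio
 * ends inside this stripe+device; otherwise stop. */
#define r5_next_bio(bio, sect) \
        (((bio)->bi_sector + ((bio)->bi_size >> 9) < (sect) + STRIPE_SECTORS) \
                ? (bio)->bi_next : NULL)

int main(void)
{
        struct bio c = { 14, 4 << 9, NULL };    /* sectors 14..17: spills past the stripe */
        struct bio b = { 10, 4 << 9, &c };      /* sectors 10..13 */
        struct bio a = {  8, 2 << 9, &b };      /* sectors  8..9  */
        unsigned long long sect = 8;            /* this stripe+device covers 8..15 */

        for (struct bio *bio = &a;
             bio && bio->bi_sector < sect + STRIPE_SECTORS;
             bio = r5_next_bio(bio, sect))
                printf("bio at %llu, %u sectors\n",
                       bio->bi_sector, bio->bi_size >> 9);
        /* prints a, b and c, then stops: c ends past the stripe, so its
         * bi_next is never trusted */
        return 0;
}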
 /*
  * The following can be used to debug the driver
  */
@@ -613,7 +623,7 @@ static void copy_data(int frombio, struct bio *bio,
 	int i;

 	for (;bio && bio->bi_sector < sector+STRIPE_SECTORS;
-	      bio = bio->bi_next) {
+	      bio = r5_next_bio(bio, sector) ) {
 		int page_offset;
 		if (bio->bi_sector >= sector)
 			page_offset = (signed)(bio->bi_sector - sector) * 512;
@@ -738,7 +748,11 @@ static void compute_parity(struct stripe_head *sh, int method)
 	for (i = disks; i--;)
 		if (sh->dev[i].written) {
 			sector_t sector = sh->dev[i].sector;
-			copy_data(1, sh->dev[i].written, sh->dev[i].page, sector);
+			struct bio *wbi = sh->dev[i].written;
+			while (wbi && wbi->bi_sector < sector + STRIPE_SECTORS) {
+				copy_data(1, wbi, sh->dev[i].page, sector);
+				wbi = r5_next_bio(wbi, sector);
+			}
 			set_bit(R5_LOCKED, &sh->dev[i].flags);
 			set_bit(R5_UPTODATE, &sh->dev[i].flags);
@@ -791,8 +805,10 @@ static void add_stripe_bio (struct stripe_head *sh, struct bio *bi, int dd_idx,
 		bip = &sh->dev[dd_idx].towrite;
 	else
 		bip = &sh->dev[dd_idx].toread;
-	while (*bip && (*bip)->bi_sector < bi->bi_sector)
+	while (*bip && (*bip)->bi_sector < bi->bi_sector) {
+		BUG_ON((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector);
 		bip = & (*bip)->bi_next;
+	}
 	/* FIXME do I need to worry about overlapping bion */
 	if (*bip && bi->bi_next && (*bip) != bi->bi_next)
 		BUG();
@@ -813,7 +829,7 @@ static void add_stripe_bio (struct stripe_head *sh, struct bio *bi, int dd_idx,
 		for (bi=sh->dev[dd_idx].towrite;
 		     sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
			     bi && bi->bi_sector <= sector;
-		     bi = bi->bi_next) {
+		     bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
 			if (bi->bi_sector + (bi->bi_size>>9) >= sector)
 				sector = bi->bi_sector + (bi->bi_size>>9);
 		}
@@ -883,7 +899,7 @@ static void handle_stripe(struct stripe_head *sh)
 			spin_unlock_irq(&conf->device_lock);
 			while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) {
 				copy_data(0, rbi, dev->page, dev->sector);
-				rbi2 = rbi->bi_next;
+				rbi2 = r5_next_bio(rbi, dev->sector);
 				spin_lock_irq(&conf->device_lock);
 				if (--rbi->bi_phys_segments == 0) {
 					rbi->bi_next = return_bi;
@@ -928,7 +944,7 @@ static void handle_stripe(struct stripe_head *sh)
 			if (bi) to_write--;
 			while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){
-				struct bio *nextbi = bi->bi_next;
+				struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
 				clear_bit(BIO_UPTODATE, &bi->bi_flags);
 				if (--bi->bi_phys_segments == 0) {
 					md_write_end(conf->mddev);
@@ -941,7 +957,7 @@ static void handle_stripe(struct stripe_head *sh)
 			bi = sh->dev[i].written;
 			sh->dev[i].written = NULL;
 			while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS) {
-				struct bio *bi2 = bi->bi_next;
+				struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
 				clear_bit(BIO_UPTODATE, &bi->bi_flags);
 				if (--bi->bi_phys_segments == 0) {
 					md_write_end(conf->mddev);
@@ -957,7 +973,7 @@ static void handle_stripe(struct stripe_head *sh)
 			sh->dev[i].toread = NULL;
 			if (bi) to_read--;
 			while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){
-				struct bio *nextbi = bi->bi_next;
+				struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
 				clear_bit(BIO_UPTODATE, &bi->bi_flags);
 				if (--bi->bi_phys_segments == 0) {
 					bi->bi_next = return_bi;
@@ -1000,7 +1016,7 @@ static void handle_stripe(struct stripe_head *sh)
 			wbi = dev->written;
 			dev->written = NULL;
 			while (wbi && wbi->bi_sector < dev->sector + STRIPE_SECTORS) {
-				wbi2 = wbi->bi_next;
+				wbi2 = r5_next_bio(wbi, dev->sector);
 				if (--wbi->bi_phys_segments == 0) {
 					md_write_end(conf->mddev);
 					wbi->bi_next = return_bi;
...
@@ -2466,11 +2466,8 @@ static irqreturn_t airo_interrupt ( int irq, void* dev_id, struct pt_regs *regs)
 			OUT4500( apriv, EVACK, EV_MIC );
 #ifdef MICSUPPORT
 			if (test_bit(FLAG_MIC_CAPABLE, &apriv->flags)) {
-				if (down_trylock(&apriv->sem) != 0) {
-					set_bit(JOB_MIC, &apriv->flags);
-					wake_up_interruptible(&apriv->thr_wait);
-				} else
-					micinit (apriv);
+				set_bit(JOB_MIC, &apriv->flags);
+				wake_up_interruptible(&apriv->thr_wait);
 			}
 #endif
 		}
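The removed branch called micinit() directly from the interrupt handler whenever the semaphore happened to be free; the replacement always defers to the driver's helper thread, which is allowed to sleep. A rough pthread analogue of this "record a job bit and wake the worker" shape (hypothetical names throughout; a mutex plays the role the driver thread's machinery plays in airo):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t thr_wait = PTHREAD_COND_INITIALIZER;
static unsigned long jobs;              /* bit 0 plays the role of JOB_MIC */

/* Called from a context that must not block (the IRQ handler in airo):
 * just record the job and wake the worker. */
static void kick_worker(void)
{
        pthread_mutex_lock(&lock);
        jobs |= 1;
        pthread_cond_signal(&thr_wait);
        pthread_mutex_unlock(&lock);
}

static void *worker(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&lock);
        while (!(jobs & 1))
                pthread_cond_wait(&thr_wait, &lock);
        jobs &= ~1UL;
        pthread_mutex_unlock(&lock);
        printf("worker: doing MIC (re)initialisation\n");
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, worker, NULL);
        kick_worker();                  /* the "interrupt" side */
        pthread_join(t, NULL);
        return 0;
}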
...
@@ -65,6 +65,8 @@ int hpfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
 	int c1, c2 = 0;
 	int ret = 0;

+	lock_kernel();
+
 	if (hpfs_sb(inode->i_sb)->sb_chk) {
 		if (hpfs_chk_sectors(inode->i_sb, inode->i_ino, 1, "dir_fnode")) {
 			ret = -EFSERROR;
...
@@ -79,6 +79,7 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int origin)
 			loff_t n = file->f_pos - 2;

 			spin_lock(&dcache_lock);
+			list_del(&cursor->d_child);
 			p = file->f_dentry->d_subdirs.next;
 			while (n && p != &file->f_dentry->d_subdirs) {
 				struct dentry *next;
@@ -87,7 +88,6 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int origin)
 				n--;
 				p = p->next;
 			}
-			list_del(&cursor->d_child);
 			list_add_tail(&cursor->d_child, p);
 			spin_unlock(&dcache_lock);
 		}
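Moving list_del() above the walk means the cursor can no longer be counted as one of the n entries being skipped. A toy version of the corrected repositioning (simplified list primitives, not the kernel's):

#include <stdio.h>

struct node {
        struct node *prev, *next;
        const char *name;
};

static void unlink_node(struct node *n)                 /* list_del() */
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
}

static void add_before(struct node *n, struct node *pos) /* list_add_tail() */
{
        n->prev = pos->prev;
        n->next = pos;
        pos->prev->next = n;
        pos->prev = n;
}

/* Reposition 'cursor' so that exactly n real entries precede it.
 * Unlinking the cursor *first* means the walk can never count the
 * cursor itself as one of those n entries. */
static void seek_cursor(struct node *head, struct node *cursor, int n)
{
        struct node *p;

        unlink_node(cursor);
        p = head->next;
        while (n && p != head) {
                n--;
                p = p->next;
        }
        add_before(cursor, p);
}

int main(void)
{
        struct node head = { &head, &head, "head" };
        struct node a = {0}, b = {0}, cursor = {0};

        a.name = "a"; b.name = "b"; cursor.name = "cursor";
        add_before(&a, &head);
        add_before(&cursor, &head);     /* cursor sits between a and b */
        add_before(&b, &head);

        seek_cursor(&head, &cursor, 2); /* skip both real entries */
        printf("cursor now before: %s\n",
               cursor.next == &head ? "end of list" : cursor.next->name);
        return 0;
}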
...
@@ -82,6 +82,8 @@ struct request_list {
 	wait_queue_head_t wait[2];
 };

+#define BLK_MAX_CDB	16
+
 /*
  * try to put the fields that are referenced together in the same cacheline
  */
@@ -147,7 +149,7 @@ struct request {
 	 * when request is used as a packet command carrier
 	 */
 	unsigned int cmd_len;
-	unsigned char cmd[16];
+	unsigned char cmd[BLK_MAX_CDB];

 	unsigned int data_len;
 	void *data;
...
@@ -208,6 +208,18 @@ static inline int list_empty(const struct list_head *head)
 	return head->next == head;
 }

+/**
+ * list_empty_careful - tests whether a list is
+ * empty _and_ checks that no other CPU might be
+ * in the process of still modifying either member
+ * @head: the list to test.
+ */
+static inline int list_empty_careful(const struct list_head *head)
+{
+	struct list_head *next = head->next;
+	return (next == head) && (next == head->prev);
+}
+
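The check works because list_del_init() updates next and prev one store at a time, so a torn read can satisfy at most one of the two comparisons and a half-finished delete is reported as "not empty". A compact user-space rendering of the check and the locked slow path it pairs with in finish_wait() further down (pthread stand-ins for the kernel locking):

#include <pthread.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

/* Empty only if BOTH pointers point back at the head; a concurrent
 * half-done delete fails one test and sends us to the locked path. */
static int list_empty_careful(const struct list_head *head)
{
        struct list_head *next = head->next;
        return (next == head) && (next == head->prev);
}

static void list_del_init(struct list_head *e)
{
        e->next->prev = e->prev;
        e->prev->next = e->next;
        e->next = e->prev = e;
}

static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;

/* The finish_wait() shape: a lock-free fast path for the common case
 * where the waker already removed us. Only sound because every other
 * user of the entry takes q_lock. */
static void detach(struct list_head *entry)
{
        if (!list_empty_careful(entry)) {
                pthread_mutex_lock(&q_lock);
                list_del_init(entry);
                pthread_mutex_unlock(&q_lock);
        }
}

int main(void)
{
        struct list_head head = { &head, &head };
        struct list_head entry;

        /* link entry after head, as __add_wait_queue() would */
        entry.next = head.next;
        entry.prev = &head;
        head.next->prev = &entry;
        head.next = &entry;

        detach(&entry);         /* linked: takes the locked slow path */
        detach(&entry);         /* self-pointing now: fast path, no lock */
        printf("empty: %d\n", list_empty_careful(&entry));
        return 0;
}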
 static inline void __list_splice(struct list_head *list,
 				 struct list_head *head)
 {
...
@@ -49,9 +49,11 @@ static void __unhash_process(struct task_struct *p)

 void release_task(struct task_struct * p)
 {
+	int zap_leader;
 	task_t *leader;
 	struct dentry *proc_dentry;

+repeat:
 	BUG_ON(p->state < TASK_ZOMBIE);

 	atomic_dec(&p->user->processes);
@@ -70,10 +72,21 @@ void release_task(struct task_struct * p)
 	 * group, and the leader is zombie, then notify the
 	 * group leader's parent process. (if it wants notification.)
 	 */
+	zap_leader = 0;
 	leader = p->group_leader;
-	if (leader != p && thread_group_empty(leader) &&
-	    leader->state == TASK_ZOMBIE && leader->exit_signal != -1)
-		do_notify_parent(leader, leader->exit_signal);
+	if (leader != p && thread_group_empty(leader) && leader->state == TASK_ZOMBIE) {
+		BUG_ON(leader->exit_signal == -1);
+		do_notify_parent(leader, leader->exit_signal);
+		/*
+		 * If we were the last child thread and the leader has
+		 * exited already, and the leader's parent ignores SIGCHLD,
+		 * then we are the one who should release the leader.
+		 *
+		 * do_notify_parent() will have marked it self-reaping in
+		 * that case.
+		 */
+		zap_leader = (leader->exit_signal == -1);
+	}

 	p->parent->cutime += p->utime + p->cutime;
 	p->parent->cstime += p->stime + p->cstime;
@@ -88,6 +101,10 @@ void release_task(struct task_struct * p)
 	proc_pid_flush(proc_dentry);
 	release_thread(p);
 	put_task_struct(p);
+
+	p = leader;
+	if (unlikely(zap_leader))
+		goto repeat;
 }
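Reaping the last child thread can leave a self-reaping zombie leader with nobody else to release it, hence the "p = leader; goto repeat" tail-loop instead of recursion. The control flow, reduced to a stand-alone toy (all structures hypothetical):

#include <stdio.h>
#include <stdlib.h>

struct task;

struct group {
        struct task *leader;
        int live_members;       /* tasks in the group not yet released */
        int leader_zombie;      /* leader has already exited */
        int self_reaping;       /* leader's parent ignores SIGCHLD */
};

struct task {
        struct group *grp;
        int is_leader;
};

/* Iterative release, shaped like release_task(): reaping the last
 * ordinary member of a group whose leader is a self-reaping zombie
 * makes the leader releasable too, so loop instead of recursing. */
static void release(struct task *p)
{
repeat:;
        struct group *g = p->grp;
        struct task *leader = g->leader;
        int zap_leader = 0;

        if (p != leader && g->live_members == 2 && g->leader_zombie)
                zap_leader = g->self_reaping;   /* we were the last child */

        g->live_members--;
        printf("released %s\n", p->is_leader ? "leader" : "member");
        free(p);

        p = leader;
        if (zap_leader)
                goto repeat;
}

int main(void)
{
        struct group *g = calloc(1, sizeof(*g));
        struct task *leader = calloc(1, sizeof(*leader));
        struct task *member = calloc(1, sizeof(*member));

        leader->grp = member->grp = g;
        leader->is_leader = 1;
        g->leader = leader;
        g->live_members = 2;
        g->leader_zombie = 1;
        g->self_reaping = 1;

        release(member);        /* prints "member", then loops to "leader" */
        free(g);
        return 0;
}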
 /* we are using it only for SMP init */
...
@@ -125,15 +125,28 @@ void remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
 EXPORT_SYMBOL(remove_wait_queue);

+/*
+ * Note: we use "set_current_state()" _after_ the wait-queue add,
+ * because we need a memory barrier there on SMP, so that any
+ * wake-function that tests for the wait-queue being active
+ * will be guaranteed to see the waitqueue addition _or_ subsequent
+ * tests in this thread will see the wakeup having taken place.
+ *
+ * The spin_unlock() itself is semi-permeable and only protects
+ * one way (it only protects stuff inside the critical region and
+ * stops them from bleeding out - it would still allow subsequent
+ * loads to move into the critical region).
+ */
 void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
 {
 	unsigned long flags;

-	__set_current_state(state);
 	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
 	spin_lock_irqsave(&q->lock, flags);
 	if (list_empty(&wait->task_list))
 		__add_wait_queue(q, wait);
+	set_current_state(state);
 	spin_unlock_irqrestore(&q->lock, flags);
 }
@@ -144,11 +157,11 @@ prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
 {
 	unsigned long flags;

-	__set_current_state(state);
 	wait->flags |= WQ_FLAG_EXCLUSIVE;
 	spin_lock_irqsave(&q->lock, flags);
 	if (list_empty(&wait->task_list))
 		__add_wait_queue_tail(q, wait);
+	set_current_state(state);
 	spin_unlock_irqrestore(&q->lock, flags);
 }
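For context, the caller-side idiom these two helpers exist for; ordering the state change after the queue add is what lets the condition test below race safely against a waker that sets the condition first. This is the standard pattern, sketched in kernel style rather than as compilable stand-alone code (q is some wait_queue_head_t, condition is whatever the caller sleeps on):

DEFINE_WAIT(wait);

for (;;) {
        prepare_to_wait(&q, &wait, TASK_INTERRUPTIBLE);
        if (condition)
                break;          /* tested after the add + state change */
        if (signal_pending(current))
                break;          /* we asked for an interruptible sleep */
        schedule();
}
finish_wait(&q, &wait);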
@@ -159,7 +172,20 @@ void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
 	unsigned long flags;

 	__set_current_state(TASK_RUNNING);
-	if (!list_empty(&wait->task_list)) {
+	/*
+	 * We can check for list emptiness outside the lock
+	 * IFF:
+	 *  - we use the "careful" check that verifies both
+	 *    the next and prev pointers, so that there cannot
+	 *    be any half-pending updates in progress on other
+	 *    CPU's that we haven't seen yet (and that might
+	 *    still change the stack area),
+	 * and
+	 *  - all other users take the lock (ie we can only
+	 *    have _one_ other CPU that looks at or modifies
+	 *    the list).
+	 */
+	if (!list_empty_careful(&wait->task_list)) {
 		spin_lock_irqsave(&q->lock, flags);
 		list_del_init(&wait->task_list);
 		spin_unlock_irqrestore(&q->lock, flags);
 	}
...
@@ -222,7 +222,7 @@ static __inline__ void __tcp_v6_hash(struct sock *sk)
 		write_lock(lock);
 	}

-	sk_add_node(sk, list);
+	__sk_add_node(sk, list);
 	sock_prot_inc_use(sk->sk_prot);
 	write_unlock(lock);
 }
...
@@ -74,7 +74,7 @@
 #define HTB_HYSTERESIS 1/* whether to use mode hysteresis for speedup */
 #define HTB_QLOCK(S) spin_lock_bh(&(S)->dev->queue_lock)
 #define HTB_QUNLOCK(S) spin_unlock_bh(&(S)->dev->queue_lock)
-#define HTB_VER 0x3000d /* major must be matched with number supplied by TC as version */
+#define HTB_VER 0x3000e /* major must be matched with number supplied by TC as version */

 #if HTB_VER >> 16 != TC_HTB_PROTOVER
 #error "Mismatched sch_htb.c and pkt_sch.h"
@@ -290,6 +290,11 @@ static __inline__ struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
  * then finish and return direct queue.
  */
 #define HTB_DIRECT (struct htb_class*)-1
+static inline u32 htb_classid(struct htb_class *cl)
+{
+	return (cl && cl != HTB_DIRECT) ? cl->classid : TC_H_UNSPEC;
+}
+
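HTB_DIRECT is a sentinel pointer value (-1), not a real class, so the old "cl ? cl->classid : 0" debug prints below would dereference it; htb_classid() screens out both NULL and the sentinel. The same guard shape, as a small stand-alone program (names hypothetical):

#include <stdio.h>
#include <stdint.h>

struct cls { uint32_t classid; };

#define DIRECT ((struct cls *)-1)       /* sentinel: bypass classification */
#define ID_UNSPEC 0u

/* A sentinel is just as undereferenceable as NULL, so screen out both. */
static uint32_t cls_id(const struct cls *c)
{
        return (c && c != DIRECT) ? c->classid : ID_UNSPEC;
}

int main(void)
{
        struct cls real = { 0x10001 };
        const struct cls *cases[] = { &real, NULL, DIRECT };
        int i;

        for (i = 0; i < 3; i++)
                printf("classid=0x%x\n", (unsigned)cls_id(cases[i]));
        /* printing cases[i]->classid directly would crash on the sentinel */
        return 0;
}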
 static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch)
 {
 	struct htb_sched *q = (struct htb_sched *)sch->data;
@@ -703,7 +708,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	sch->q.qlen++;
 	sch->stats.packets++; sch->stats.bytes += skb->len;
-	HTB_DBG(1,1,"htb_enq_ok cl=%X skb=%p\n",cl?cl->classid:0,skb);
+	HTB_DBG(1,1,"htb_enq_ok cl=%X skb=%p\n",htb_classid(cl),skb);
 	return NET_XMIT_SUCCESS;
 }
@@ -731,7 +736,7 @@ static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
 	htb_activate (q,cl);

 	sch->q.qlen++;
-	HTB_DBG(1,1,"htb_req_ok cl=%X skb=%p\n",cl?cl->classid:0,skb);
+	HTB_DBG(1,1,"htb_req_ok cl=%X skb=%p\n",htb_classid(cl),skb);
 	return NET_XMIT_SUCCESS;
 }
@@ -1381,11 +1386,16 @@ static void htb_destroy(struct Qdisc* sch)
 #ifdef HTB_RATECM
 	del_timer_sync (&q->rttim);
 #endif
+	/* This line used to be after htb_destroy_class call below
+	   and surprisingly it worked in 2.4. But it must precede it
+	   because filter need its target class alive to be able to call
+	   unbind_filter on it (without Oops). */
+	htb_destroy_filters(&q->filter_list);
+
 	while (!list_empty(&q->root))
 		htb_destroy_class (sch,list_entry(q->root.next,
 				struct htb_class,sibling));
-	htb_destroy_filters(&q->filter_list);

 	__skb_queue_purge(&q->direct_queue);
 }
...