Commit cb32fa29 authored by David S. Miller

Merge davem@nuts.davemloft.net:/disk1/BK/net-2.6

into kernel.bkbits.net:/home/davem/net-2.6
parents a46c9835 fb6d17bf
......@@ -715,9 +715,11 @@ accept_redirects - BOOLEAN
disabled if local forwarding is enabled.
autoconf - BOOLEAN
Configure link-local addresses using L2 hardware addresses.
Autoconfigure addresses using Prefix Information in Router
Advertisements.
Default: TRUE
Functional default: enabled if accept_ra is enabled.
disabled if accept_ra is disabled.
dad_transmits - INTEGER
The amount of Duplicate Address Detection probes to send.
......
......@@ -495,6 +495,8 @@ static int eql_g_slave_cfg(struct net_device *dev, slave_config_t __user *scp)
return -EFAULT;
slave_dev = dev_get_by_name(sc.slave_name);
if (!slave_dev)
return -ENODEV;
ret = -EINVAL;
......@@ -527,11 +529,13 @@ static int eql_s_slave_cfg(struct net_device *dev, slave_config_t __user *scp)
if (copy_from_user(&sc, scp, sizeof (slave_config_t)))
return -EFAULT;
eql = dev->priv;
slave_dev = dev_get_by_name(sc.slave_name);
if (!slave_dev)
return -ENODEV;
ret = -EINVAL;
eql = dev->priv;
spin_lock_bh(&eql->queue.lock);
if (eql_is_slave(slave_dev)) {
slave = __eql_find_slave_dev(&eql->queue, slave_dev);
......
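The eql ioctl paths above look up the slave with dev_get_by_name(), which returns the device with its reference count raised (or NULL). A minimal sketch of that lookup/refcount pattern, assuming the 2.6-era dev_get_by_name(name) signature used in the hunk; example_lookup() is a made-up helper, not code from the patch:

#include <linux/netdevice.h>

/* Sketch: look up a device by name, use it, then drop the reference.
 * dev_get_by_name() returns the device with its refcount raised, or NULL. */
static int example_lookup(const char *name)
{
	struct net_device *slave_dev = dev_get_by_name(name);

	if (!slave_dev)
		return -ENODEV;

	/* ... operate on slave_dev under the appropriate locks ... */

	dev_put(slave_dev);	/* balance the reference taken above */
	return 0;
}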
......@@ -33,6 +33,7 @@
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/serial_reg.h>
#include <linux/dma-mapping.h>
#include <asm/io.h>
#include <asm/dma.h>
......@@ -304,16 +305,18 @@ static int ali_ircc_open(int i, chipio_t *info)
self->tx_buff.truesize = 14384;
/* Allocate memory if needed */
self->rx_buff.head = (__u8 *) kmalloc(self->rx_buff.truesize,
GFP_KERNEL |GFP_DMA);
self->rx_buff.head =
dma_alloc_coherent(NULL, self->rx_buff.truesize,
&self->rx_buff_dma, GFP_KERNEL);
if (self->rx_buff.head == NULL) {
err = -ENOMEM;
goto err_out2;
}
memset(self->rx_buff.head, 0, self->rx_buff.truesize);
self->tx_buff.head = (__u8 *) kmalloc(self->tx_buff.truesize,
GFP_KERNEL|GFP_DMA);
self->tx_buff.head =
dma_alloc_coherent(NULL, self->tx_buff.truesize,
&self->tx_buff_dma, GFP_KERNEL);
if (self->tx_buff.head == NULL) {
err = -ENOMEM;
goto err_out3;
......@@ -362,9 +365,11 @@ static int ali_ircc_open(int i, chipio_t *info)
return 0;
err_out4:
kfree(self->tx_buff.head);
dma_free_coherent(NULL, self->tx_buff.truesize,
self->tx_buff.head, self->tx_buff_dma);
err_out3:
kfree(self->rx_buff.head);
dma_free_coherent(NULL, self->rx_buff.truesize,
self->rx_buff.head, self->rx_buff_dma);
err_out2:
release_region(self->io.fir_base, self->io.fir_ext);
err_out1:
......@@ -398,10 +403,12 @@ static int __exit ali_ircc_close(struct ali_ircc_cb *self)
release_region(self->io.fir_base, self->io.fir_ext);
if (self->tx_buff.head)
kfree(self->tx_buff.head);
dma_free_coherent(NULL, self->tx_buff.truesize,
self->tx_buff.head, self->tx_buff_dma);
if (self->rx_buff.head)
kfree(self->rx_buff.head);
dma_free_coherent(NULL, self->rx_buff.truesize,
self->rx_buff.head, self->rx_buff_dma);
dev_self[self->index] = NULL;
free_netdev(self->netdev);
......@@ -1572,7 +1579,8 @@ static void ali_ircc_dma_xmit(struct ali_ircc_cb *self)
self->io.direction = IO_XMIT;
irda_setup_dma(self->io.dma,
self->tx_fifo.queue[self->tx_fifo.ptr].start,
((u8 *)self->tx_fifo.queue[self->tx_fifo.ptr].start -
self->tx_buff.head) + self->tx_buff_dma,
self->tx_fifo.queue[self->tx_fifo.ptr].len,
DMA_TX_MODE);
......@@ -1724,8 +1732,8 @@ static int ali_ircc_dma_receive(struct ali_ircc_cb *self)
self->st_fifo.len = self->st_fifo.pending_bytes = 0;
self->st_fifo.tail = self->st_fifo.head = 0;
irda_setup_dma(self->io.dma, self->rx_buff.data,
self->rx_buff.truesize, DMA_RX_MODE);
irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize,
DMA_RX_MODE);
/* Set Receive Mode,Brick Wall */
//switch_bank(iobase, BANK0);
......
......@@ -26,6 +26,7 @@
#include <linux/spinlock.h>
#include <linux/pm.h>
#include <linux/types.h>
#include <asm/io.h>
/* SIR Register */
......@@ -198,6 +199,8 @@ struct ali_ircc_cb {
chipio_t io; /* IrDA controller information */
iobuff_t tx_buff; /* Transmit buffer */
iobuff_t rx_buff; /* Receive buffer */
dma_addr_t tx_buff_dma;
dma_addr_t rx_buff_dma;
__u8 ier; /* Interrupt enable register */
......
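The ali-ircc hunks above, and the matching nsc-ircc, smsc-ircc2, via-ircc and w83977af hunks below, all follow one pattern: the DMA buffers move from kmalloc(..., GFP_KERNEL|GFP_DMA) to dma_alloc_coherent(), the driver keeps the returned dma_addr_t next to the CPU pointer, and the DMA engine is then programmed with the bus address, adding an offset when a frame starts somewhere inside the buffer. A minimal sketch of that pattern, with a NULL struct device as in the patch; struct my_priv and the my_*() helpers are placeholder names, not the drivers' actual code:

#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>

struct my_priv {
	void		*tx_head;	/* CPU address of the Tx buffer */
	dma_addr_t	 tx_dma;	/* matching bus/DMA address     */
	size_t		 tx_truesize;
};

static int my_alloc(struct my_priv *p)
{
	p->tx_head = dma_alloc_coherent(NULL, p->tx_truesize,
					&p->tx_dma, GFP_KERNEL);
	if (!p->tx_head)
		return -ENOMEM;
	return 0;
}

static void my_free(struct my_priv *p)
{
	dma_free_coherent(NULL, p->tx_truesize, p->tx_head, p->tx_dma);
}

/* DMA address of a frame that starts inside the buffer: same offset
 * from the base, applied to the bus address instead of the pointer. */
static dma_addr_t my_frame_dma(struct my_priv *p, u8 *frame_start)
{
	return p->tx_dma + (frame_start - (u8 *)p->tx_head);
}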
......@@ -52,6 +52,7 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/dma-mapping.h>
#include <asm/io.h>
#include <asm/dma.h>
......@@ -307,8 +308,9 @@ static int __init nsc_ircc_open(int i, chipio_t *info)
self->tx_buff.truesize = 14384;
/* Allocate memory if needed */
self->rx_buff.head = (__u8 *) kmalloc(self->rx_buff.truesize,
GFP_KERNEL|GFP_DMA);
self->rx_buff.head =
dma_alloc_coherent(NULL, self->rx_buff.truesize,
&self->rx_buff_dma, GFP_KERNEL);
if (self->rx_buff.head == NULL) {
err = -ENOMEM;
goto out2;
......@@ -316,8 +318,9 @@ static int __init nsc_ircc_open(int i, chipio_t *info)
}
memset(self->rx_buff.head, 0, self->rx_buff.truesize);
self->tx_buff.head = (__u8 *) kmalloc(self->tx_buff.truesize,
GFP_KERNEL|GFP_DMA);
self->tx_buff.head =
dma_alloc_coherent(NULL, self->tx_buff.truesize,
&self->tx_buff_dma, GFP_KERNEL);
if (self->tx_buff.head == NULL) {
err = -ENOMEM;
goto out3;
......@@ -368,9 +371,11 @@ static int __init nsc_ircc_open(int i, chipio_t *info)
return 0;
out4:
kfree(self->tx_buff.head);
dma_free_coherent(NULL, self->tx_buff.truesize,
self->tx_buff.head, self->tx_buff_dma);
out3:
kfree(self->rx_buff.head);
dma_free_coherent(NULL, self->rx_buff.truesize,
self->rx_buff.head, self->rx_buff_dma);
out2:
release_region(self->io.fir_base, self->io.fir_ext);
out1:
......@@ -404,10 +409,12 @@ static int __exit nsc_ircc_close(struct nsc_ircc_cb *self)
release_region(self->io.fir_base, self->io.fir_ext);
if (self->tx_buff.head)
kfree(self->tx_buff.head);
dma_free_coherent(NULL, self->tx_buff.truesize,
self->tx_buff.head, self->tx_buff_dma);
if (self->rx_buff.head)
kfree(self->rx_buff.head);
dma_free_coherent(NULL, self->rx_buff.truesize,
self->rx_buff.head, self->rx_buff_dma);
dev_self[self->index] = NULL;
free_netdev(self->netdev);
......@@ -1409,7 +1416,8 @@ static void nsc_ircc_dma_xmit(struct nsc_ircc_cb *self, int iobase)
outb(ECR1_DMASWP|ECR1_DMANF|ECR1_EXT_SL, iobase+ECR1);
irda_setup_dma(self->io.dma,
self->tx_fifo.queue[self->tx_fifo.ptr].start,
((u8 *)self->tx_fifo.queue[self->tx_fifo.ptr].start -
self->tx_buff.head) + self->tx_buff_dma,
self->tx_fifo.queue[self->tx_fifo.ptr].len,
DMA_TX_MODE);
......@@ -1566,8 +1574,8 @@ static int nsc_ircc_dma_receive(struct nsc_ircc_cb *self)
self->st_fifo.len = self->st_fifo.pending_bytes = 0;
self->st_fifo.tail = self->st_fifo.head = 0;
irda_setup_dma(self->io.dma, self->rx_buff.data,
self->rx_buff.truesize, DMA_RX_MODE);
irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize,
DMA_RX_MODE);
/* Enable DMA */
switch_bank(iobase, BANK0);
......
......@@ -32,6 +32,7 @@
#include <linux/spinlock.h>
#include <linux/pm.h>
#include <linux/types.h>
#include <asm/io.h>
/* DMA modes needed */
......@@ -255,6 +256,8 @@ struct nsc_ircc_cb {
chipio_t io; /* IrDA controller information */
iobuff_t tx_buff; /* Transmit buffer */
iobuff_t rx_buff; /* Receive buffer */
dma_addr_t tx_buff_dma;
dma_addr_t rx_buff_dma;
__u8 ier; /* Interrupt enable register */
......
......@@ -52,6 +52,7 @@
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/serial_reg.h>
#include <linux/dma-mapping.h>
#include <asm/io.h>
#include <asm/dma.h>
......@@ -112,6 +113,8 @@ struct smsc_ircc_cb {
chipio_t io; /* IrDA controller information */
iobuff_t tx_buff; /* Transmit buffer */
iobuff_t rx_buff; /* Receive buffer */
dma_addr_t tx_buff_dma;
dma_addr_t rx_buff_dma;
struct qos_info qos; /* QoS capabilities for this device */
......@@ -413,16 +416,18 @@ static int __init smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u
self->rx_buff.truesize = SMSC_IRCC2_RX_BUFF_TRUESIZE;
self->tx_buff.truesize = SMSC_IRCC2_TX_BUFF_TRUESIZE;
self->rx_buff.head = (u8 *) kmalloc(self->rx_buff.truesize,
GFP_KERNEL|GFP_DMA);
self->rx_buff.head =
dma_alloc_coherent(NULL, self->rx_buff.truesize,
&self->rx_buff_dma, GFP_KERNEL);
if (self->rx_buff.head == NULL) {
ERROR("%s, Can't allocate memory for receive buffer!\n",
driver_name);
goto err_out2;
}
self->tx_buff.head = (u8 *) kmalloc(self->tx_buff.truesize,
GFP_KERNEL|GFP_DMA);
self->tx_buff.head =
dma_alloc_coherent(NULL, self->tx_buff.truesize,
&self->tx_buff_dma, GFP_KERNEL);
if (self->tx_buff.head == NULL) {
ERROR("%s, Can't allocate memory for transmit buffer!\n",
driver_name);
......@@ -464,9 +469,11 @@ static int __init smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u
return 0;
err_out4:
kfree(self->tx_buff.head);
dma_free_coherent(NULL, self->tx_buff.truesize,
self->tx_buff.head, self->tx_buff_dma);
err_out3:
kfree(self->rx_buff.head);
dma_free_coherent(NULL, self->rx_buff.truesize,
self->rx_buff.head, self->rx_buff_dma);
err_out2:
free_netdev(self->netdev);
dev_self[--dev_count] = NULL;
......@@ -1159,7 +1166,7 @@ static void smsc_ircc_dma_xmit(struct smsc_ircc_cb *self, int iobase, int bofs)
IRCC_CFGB_DMA_BURST, iobase+IRCC_SCE_CFGB);
/* Setup DMA controller (must be done after enabling chip DMA) */
irda_setup_dma(self->io.dma, self->tx_buff.data, self->tx_buff.len,
irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
DMA_TX_MODE);
/* Enable interrupt */
......@@ -1249,8 +1256,8 @@ static int smsc_ircc_dma_receive(struct smsc_ircc_cb *self, int iobase)
outb(2050 & 0xff, iobase+IRCC_RX_SIZE_LO);
/* Setup DMA controller */
irda_setup_dma(self->io.dma, self->rx_buff.data,
self->rx_buff.truesize, DMA_RX_MODE);
irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize,
DMA_RX_MODE);
/* Enable burst mode chip Rx DMA */
register_bank(iobase, 1);
......@@ -1717,10 +1724,12 @@ static int __exit smsc_ircc_close(struct smsc_ircc_cb *self)
release_region(self->io.sir_base, self->io.sir_ext);
if (self->tx_buff.head)
kfree(self->tx_buff.head);
dma_free_coherent(NULL, self->tx_buff.truesize,
self->tx_buff.head, self->tx_buff_dma);
if (self->rx_buff.head)
kfree(self->rx_buff.head);
dma_free_coherent(NULL, self->rx_buff.truesize,
self->rx_buff.head, self->rx_buff_dma);
free_netdev(self->netdev);
......
......@@ -39,6 +39,7 @@ F02 Oct/28/02: Add SB device ID for 3147 and 3177.
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <asm/io.h>
#include <asm/dma.h>
......@@ -383,7 +384,8 @@ static __devinit int via_ircc_open(int i, chipio_t * info, unsigned int id)
/* Allocate memory if needed */
self->rx_buff.head =
(__u8 *) kmalloc(self->rx_buff.truesize, GFP_KERNEL | GFP_DMA);
dma_alloc_coherent(NULL, self->rx_buff.truesize,
&self->rx_buff_dma, GFP_KERNEL);
if (self->rx_buff.head == NULL) {
err = -ENOMEM;
goto err_out2;
......@@ -391,7 +393,8 @@ static __devinit int via_ircc_open(int i, chipio_t * info, unsigned int id)
memset(self->rx_buff.head, 0, self->rx_buff.truesize);
self->tx_buff.head =
(__u8 *) kmalloc(self->tx_buff.truesize, GFP_KERNEL | GFP_DMA);
dma_alloc_coherent(NULL, self->tx_buff.truesize,
&self->tx_buff_dma, GFP_KERNEL);
if (self->tx_buff.head == NULL) {
err = -ENOMEM;
goto err_out3;
......@@ -432,9 +435,11 @@ static __devinit int via_ircc_open(int i, chipio_t * info, unsigned int id)
return 0;
err_out4:
kfree(self->tx_buff.head);
dma_free_coherent(NULL, self->tx_buff.truesize,
self->tx_buff.head, self->tx_buff_dma);
err_out3:
kfree(self->rx_buff.head);
dma_free_coherent(NULL, self->rx_buff.truesize,
self->rx_buff.head, self->rx_buff_dma);
err_out2:
release_region(self->io.fir_base, self->io.fir_ext);
err_out1:
......@@ -468,9 +473,11 @@ static int __exit via_ircc_close(struct via_ircc_cb *self)
__FUNCTION__, self->io.fir_base);
release_region(self->io.fir_base, self->io.fir_ext);
if (self->tx_buff.head)
kfree(self->tx_buff.head);
dma_free_coherent(NULL, self->tx_buff.truesize,
self->tx_buff.head, self->tx_buff_dma);
if (self->rx_buff.head)
kfree(self->rx_buff.head);
dma_free_coherent(NULL, self->rx_buff.truesize,
self->rx_buff.head, self->rx_buff_dma);
dev_self[self->index] = NULL;
free_netdev(self->netdev);
......@@ -816,7 +823,7 @@ static int via_ircc_hard_xmit_sir(struct sk_buff *skb,
EnTXDMA(iobase, ON);
EnRXDMA(iobase, OFF);
irda_setup_dma(self->io.dma, self->tx_buff.data, self->tx_buff.len,
irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
DMA_TX_MODE);
SetSendByte(iobase, self->tx_buff.len);
......@@ -897,7 +904,8 @@ static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase)
EnTXDMA(iobase, ON);
EnRXDMA(iobase, OFF);
irda_setup_dma(self->io.dma,
self->tx_fifo.queue[self->tx_fifo.ptr].start,
((u8 *)self->tx_fifo.queue[self->tx_fifo.ptr].start -
self->tx_buff.head) + self->tx_buff_dma,
self->tx_fifo.queue[self->tx_fifo.ptr].len, DMA_TX_MODE);
#ifdef DBGMSG
DBG(printk
......@@ -1022,7 +1030,7 @@ static int via_ircc_dma_receive(struct via_ircc_cb *self)
EnAllInt(iobase, ON);
EnTXDMA(iobase, OFF);
EnRXDMA(iobase, ON);
irda_setup_dma(self->io.dma2, self->rx_buff.data,
irda_setup_dma(self->io.dma2, self->rx_buff_dma,
self->rx_buff.truesize, DMA_RX_MODE);
TXStart(iobase, OFF);
RXStart(iobase, ON);
......
......@@ -33,6 +33,7 @@ this program; if not, write to the Free Software Foundation, Inc.,
#include <linux/time.h>
#include <linux/spinlock.h>
#include <linux/pm.h>
#include <linux/types.h>
#include <asm/io.h>
#define MAX_TX_WINDOW 7
......@@ -102,6 +103,8 @@ struct via_ircc_cb {
chipio_t io; /* IrDA controller information */
iobuff_t tx_buff; /* Transmit buffer */
iobuff_t rx_buff; /* Receive buffer */
dma_addr_t tx_buff_dma;
dma_addr_t rx_buff_dma;
__u8 ier; /* Interrupt enable register */
......
......@@ -50,6 +50,7 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/dma-mapping.h>
#include <asm/io.h>
#include <asm/dma.h>
......@@ -207,8 +208,9 @@ int w83977af_open(int i, unsigned int iobase, unsigned int irq,
self->tx_buff.truesize = 4000;
/* Allocate memory if needed */
self->rx_buff.head = (__u8 *) kmalloc(self->rx_buff.truesize,
GFP_KERNEL|GFP_DMA);
self->rx_buff.head =
dma_alloc_coherent(NULL, self->rx_buff.truesize,
&self->rx_buff_dma, GFP_KERNEL);
if (self->rx_buff.head == NULL) {
err = -ENOMEM;
goto err_out1;
......@@ -216,8 +218,9 @@ int w83977af_open(int i, unsigned int iobase, unsigned int irq,
memset(self->rx_buff.head, 0, self->rx_buff.truesize);
self->tx_buff.head = (__u8 *) kmalloc(self->tx_buff.truesize,
GFP_KERNEL|GFP_DMA);
self->tx_buff.head =
dma_alloc_coherent(NULL, self->tx_buff.truesize,
&self->tx_buff_dma, GFP_KERNEL);
if (self->tx_buff.head == NULL) {
err = -ENOMEM;
goto err_out2;
......@@ -252,9 +255,11 @@ int w83977af_open(int i, unsigned int iobase, unsigned int irq,
return 0;
err_out3:
kfree(self->tx_buff.head);
dma_free_coherent(NULL, self->tx_buff.truesize,
self->tx_buff.head, self->tx_buff_dma);
err_out2:
kfree(self->rx_buff.head);
dma_free_coherent(NULL, self->rx_buff.truesize,
self->rx_buff.head, self->rx_buff_dma);
err_out1:
free_netdev(dev);
err_out:
......@@ -297,10 +302,12 @@ static int w83977af_close(struct w83977af_ir *self)
release_region(self->io.fir_base, self->io.fir_ext);
if (self->tx_buff.head)
kfree(self->tx_buff.head);
dma_free_coherent(NULL, self->tx_buff.truesize,
self->tx_buff.head, self->tx_buff_dma);
if (self->rx_buff.head)
kfree(self->rx_buff.head);
dma_free_coherent(NULL, self->rx_buff.truesize,
self->rx_buff.head, self->rx_buff_dma);
free_netdev(self->netdev);
......@@ -606,10 +613,10 @@ static void w83977af_dma_write(struct w83977af_ir *self, int iobase)
disable_dma(self->io.dma);
clear_dma_ff(self->io.dma);
set_dma_mode(self->io.dma, DMA_MODE_READ);
set_dma_addr(self->io.dma, isa_virt_to_bus(self->tx_buff.data));
set_dma_addr(self->io.dma, self->tx_buff_dma);
set_dma_count(self->io.dma, self->tx_buff.len);
#else
irda_setup_dma(self->io.dma, self->tx_buff.data, self->tx_buff.len,
irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
DMA_MODE_WRITE);
#endif
self->io.direction = IO_XMIT;
......@@ -763,10 +770,10 @@ int w83977af_dma_receive(struct w83977af_ir *self)
disable_dma(self->io.dma);
clear_dma_ff(self->io.dma);
set_dma_mode(self->io.dma, DMA_MODE_READ);
set_dma_addr(self->io.dma, isa_virt_to_bus(self->rx_buff.data));
set_dma_addr(self->io.dma, self->rx_buff_dma);
set_dma_count(self->io.dma, self->rx_buff.truesize);
#else
irda_setup_dma(self->io.dma, self->rx_buff.data, self->rx_buff.truesize,
irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize,
DMA_MODE_READ);
#endif
/*
......
......@@ -26,6 +26,7 @@
#define W83977AF_IR_H
#include <asm/io.h>
#include <linux/types.h>
/* Flags for configuration register CRF0 */
#define ENBNKSEL 0x01
......@@ -179,6 +180,8 @@ struct w83977af_ir {
chipio_t io; /* IrDA controller information */
iobuff_t tx_buff; /* Transmit buffer */
iobuff_t rx_buff; /* Receive buffer */
dma_addr_t tx_buff_dma;
dma_addr_t rx_buff_dma;
/* Note : currently locking is *very* incomplete, but this
* will get you started. Check in nsc-ircc.c for a proper
......
......@@ -437,5 +437,6 @@ struct tc_dly_qopt
{
__u32 latency;
__u32 limit;
__u32 loss;
};
#endif
......@@ -39,11 +39,13 @@
#ifndef IRDA_DEVICE_H
#define IRDA_DEVICE_H
#include <linux/config.h>
#include <linux/tty.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h> /* struct sk_buff */
#include <linux/irda.h>
#include <linux/types.h>
#include <net/pkt_sched.h>
#include <net/irda/irda.h>
......@@ -236,7 +238,7 @@ dongle_t *irda_device_dongle_init(struct net_device *dev, int type);
int irda_device_dongle_cleanup(dongle_t *dongle);
#ifdef CONFIG_ISA
void irda_setup_dma(int channel, char *buffer, int count, int mode);
void irda_setup_dma(int channel, dma_addr_t buffer, int count, int mode);
#endif
void irda_task_delete(struct irda_task *task);
......
......@@ -33,8 +33,6 @@ static int __init br_init(void)
{
br_fdb_init();
br_sysfs_init();
#ifdef CONFIG_BRIDGE_NETFILTER
if (br_netfilter_init())
return 1;
......@@ -69,7 +67,6 @@ static void __exit br_deinit(void)
#endif
br_handle_frame_hook = NULL;
br_sysfs_fini();
br_fdb_fini();
}
......
......@@ -217,9 +217,6 @@ extern void br_sysfs_removeif(struct net_bridge_port *p);
extern void br_sysfs_freeif(struct net_bridge_port *p);
/* br_sysfs_br.c */
extern struct subsystem bridge_subsys;
extern void br_sysfs_init(void);
extern void br_sysfs_fini(void);
extern int br_sysfs_addbr(struct net_device *dev);
extern void br_sysfs_delbr(struct net_device *dev);
......@@ -228,8 +225,6 @@ extern void br_sysfs_delbr(struct net_device *dev);
#define br_sysfs_addif(p) (0)
#define br_sysfs_removeif(p) do { } while(0)
#define br_sysfs_freeif(p) kfree(p)
#define br_sysfs_init() do { } while(0)
#define br_sysfs_fini() do { } while(0)
#define br_sysfs_addbr(dev) (0)
#define br_sysfs_delbr(dev) do { } while(0)
#endif /* CONFIG_SYSFS */
......
......@@ -300,23 +300,6 @@ static struct bin_attribute bridge_forward = {
.read = brforward_read,
};
/*
* This is a dummy kset so bridge objects don't cause
* hotplug events
*/
decl_subsys_name(bridge, net_bridge, NULL, NULL);
void br_sysfs_init(void)
{
subsystem_register(&bridge_subsys);
}
void br_sysfs_fini(void)
{
subsystem_unregister(&bridge_subsys);
}
/*
* Add entries in sysfs onto the existing network class device
* for the bridge.
......@@ -351,7 +334,7 @@ int br_sysfs_addbr(struct net_device *dev)
kobject_set_name(&br->ifobj, SYSFS_BRIDGE_PORT_SUBDIR);
br->ifobj.ktype = NULL;
br->ifobj.kset = &bridge_subsys.kset;
br->ifobj.kset = NULL;
br->ifobj.parent = brobj;
err = kobject_register(&br->ifobj);
......
......@@ -227,7 +227,7 @@ int br_sysfs_addif(struct net_bridge_port *p)
kobject_set_name(&p->kobj, SYSFS_BRIDGE_PORT_ATTR);
p->kobj.ktype = &brport_ktype;
p->kobj.parent = &(p->dev->class_dev.kobj);
p->kobj.kset = &bridge_subsys.kset;
p->kobj.kset = NULL;
err = kobject_add(&p->kobj);
if(err)
......
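With the dummy "bridge" subsystem removed, the per-bridge and per-port kobjects in the two hunks above are registered with kset = NULL and parented directly under the network class device, so they no longer go through a private kset (and hence generate no hotplug events of their own). A rough sketch of that registration shape for the 2.6 kobject API of the time; example_register() and the "bridge" name here are illustrative only:

#include <linux/string.h>
#include <linux/kobject.h>

/* Sketch: register a child object under an existing sysfs directory
 * without attaching it to any kset. */
static int example_register(struct kobject *parent, struct kobject *kobj)
{
	memset(kobj, 0, sizeof(*kobj));
	kobject_set_name(kobj, "bridge");
	kobj->parent = parent;	/* e.g. &dev->class_dev.kobj             */
	kobj->kset   = NULL;	/* no kset, so no hotplug events for it  */
	kobj->ktype  = NULL;	/* attributes handled by the parent side */

	return kobject_register(kobj);
}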
......@@ -365,7 +365,7 @@ int ip6_mc_source(int add, int omode, struct sock *sk,
err = -EADDRNOTAVAIL;
for (pmc=inet6->ipv6_mc_list; pmc; pmc=pmc->next) {
if (pmc->ifindex != pgsr->gsr_interface)
if (pgsr->gsr_interface && pmc->ifindex != pgsr->gsr_interface)
continue;
if (ipv6_addr_cmp(&pmc->addr, group) == 0)
break;
......
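The mcast.c change makes a gsr_interface of 0 act as a wildcard when ip6_mc_source() searches the socket's multicast list, so source-filter requests no longer have to name the interface explicitly. A hedged user-space illustration of the RFC 3678 struct that reaches this path; join_ssm() and the address arguments are placeholders, not part of the patch:

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>

/* Join a source-specific IPv6 group, letting the kernel pick the
 * interface: gsr_interface == 0 is now treated as a wildcard when the
 * socket's filter list is searched. */
static int join_ssm(int sock, const struct sockaddr_in6 *group,
		    const struct sockaddr_in6 *source)
{
	struct group_source_req gsr;

	memset(&gsr, 0, sizeof(gsr));
	gsr.gsr_interface = 0;				/* any interface */
	memcpy(&gsr.gsr_group, group, sizeof(*group));
	memcpy(&gsr.gsr_source, source, sizeof(*source));

	return setsockopt(sock, IPPROTO_IPV6, MCAST_JOIN_SOURCE_GROUP,
			  &gsr, sizeof(gsr));
}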
......@@ -536,7 +536,7 @@ int irda_device_set_mode(struct net_device* dev, int mode)
* Setup the DMA channel. Commonly used by ISA FIR drivers
*
*/
void irda_setup_dma(int channel, char *buffer, int count, int mode)
void irda_setup_dma(int channel, dma_addr_t buffer, int count, int mode)
{
unsigned long flags;
......@@ -545,7 +545,7 @@ void irda_setup_dma(int channel, char *buffer, int count, int mode)
disable_dma(channel);
clear_dma_ff(channel);
set_dma_mode(channel, mode);
set_dma_addr(channel, isa_virt_to_bus(buffer));
set_dma_addr(channel, buffer);
set_dma_count(channel, count);
enable_dma(channel);
......
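irda_setup_dma() now takes the bus address directly as a dma_addr_t instead of translating a virtual pointer with isa_virt_to_bus(), which is what lets the drivers above hand it the address returned by dma_alloc_coherent(). A minimal sketch of the resulting ISA DMA programming sequence, close to the body shown in the hunk; the locking here uses the generic claim_dma_lock() helper, and example_setup_dma() is an illustrative name:

#include <linux/types.h>
#include <asm/dma.h>

/* Sketch: program an ISA DMA channel with an already bus-mapped buffer. */
static void example_setup_dma(int channel, dma_addr_t buffer, int count, int mode)
{
	unsigned long flags;

	flags = claim_dma_lock();

	disable_dma(channel);
	clear_dma_ff(channel);
	set_dma_mode(channel, mode);
	set_dma_addr(channel, buffer);	/* no isa_virt_to_bus() needed */
	set_dma_count(channel, count);
	enable_dma(channel);

	release_dma_lock(flags);
}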
......@@ -1054,7 +1054,7 @@ cbq_dequeue(struct Qdisc *sch)
if (sch->q.qlen) {
sch->stats.overlimits++;
if (q->wd_expires && !netif_queue_stopped(sch->dev)) {
if (q->wd_expires) {
long delay = PSCHED_US2JIFFIE(q->wd_expires);
if (delay <= 0)
delay = 1;
......
......@@ -40,6 +40,7 @@
struct dly_sched_data {
u32 latency;
u32 limit;
u32 loss;
struct timer_list timer;
struct Qdisc *qdisc;
};
......@@ -58,6 +59,12 @@ static int dly_enqueue(struct sk_buff *skb, struct Qdisc *sch)
struct dly_skb_cb *cb = (struct dly_skb_cb *)skb->cb;
int ret;
/* Random packet drop 0 => none, ~0 => all */
if (q->loss >= net_random()) {
sch->stats.drops++;
return 0; /* lie about loss so TCP doesn't know */
}
PSCHED_GET_TIME(cb->queuetime);
/* Queue to underlying scheduler */
......@@ -69,7 +76,7 @@ static int dly_enqueue(struct sk_buff *skb, struct Qdisc *sch)
sch->stats.bytes += skb->len;
sch->stats.packets++;
}
return 0;
return ret;
}
/* Requeue packets but don't change time stamp */
......@@ -104,12 +111,14 @@ static unsigned int dly_drop(struct Qdisc *sch)
static struct sk_buff *dly_dequeue(struct Qdisc *sch)
{
struct dly_sched_data *q = (struct dly_sched_data *)sch->data;
struct sk_buff *skb = q->qdisc->dequeue(q->qdisc);
struct sk_buff *skb;
retry:
skb = q->qdisc->dequeue(q->qdisc);
if (skb) {
struct dly_skb_cb *cb = (struct dly_skb_cb *)skb->cb;
psched_time_t now;
long diff;
long diff, delay;
PSCHED_GET_TIME(now);
diff = q->latency - PSCHED_TDIFF(now, cb->queuetime);
......@@ -120,17 +129,17 @@ static struct sk_buff *dly_dequeue(struct Qdisc *sch)
return skb;
}
if (!netif_queue_stopped(sch->dev)) {
long delay = PSCHED_US2JIFFIE(diff);
if (delay <= 0)
delay = 1;
mod_timer(&q->timer, jiffies+delay);
}
if (q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS) {
sch->q.qlen--;
sch->stats.drops++;
goto retry;
}
delay = PSCHED_US2JIFFIE(diff);
if (delay <= 0)
delay = 1;
mod_timer(&q->timer, jiffies+delay);
sch->flags |= TCQ_F_THROTTLED;
}
return NULL;
......@@ -195,6 +204,7 @@ static int dly_change(struct Qdisc *sch, struct rtattr *opt)
} else {
q->latency = qopt->latency;
q->limit = qopt->limit;
q->loss = qopt->loss;
}
return err;
}
......@@ -231,6 +241,7 @@ static int dly_dump(struct Qdisc *sch, struct sk_buff *skb)
qopt.latency = q->latency;
qopt.limit = q->limit;
qopt.loss = q->loss;
RTA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);
......
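sch_dly (the delay qdisc) gains a loss parameter: a 32-bit threshold where 0 means never drop and ~0 means drop everything, compared against net_random() on every enqueue; the dequeue path also retries after a failed requeue and always arms the watchdog timer. A small sketch of how a drop probability would map onto that threshold, assuming the tc front end does the conversion; percent_to_loss() is a made-up name:

#include <stdint.h>

/* Sketch: convert a drop probability in percent to the __u32 threshold
 * carried in tc_dly_qopt.loss (0 => no loss, 0xffffffff => drop all).
 * The kernel drops a packet when net_random() is <= this value. */
static uint32_t percent_to_loss(double percent)
{
	if (percent <= 0.0)
		return 0;
	if (percent >= 100.0)
		return 0xffffffffu;
	return (uint32_t)(percent / 100.0 * 0xffffffffu);
}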
......@@ -1721,7 +1721,6 @@ hfsc_dequeue(struct Qdisc *sch)
cl = actlist_get_minvt(&q->root, cur_time);
if (cl == NULL) {
sch->stats.overlimits++;
if (!netif_queue_stopped(sch->dev))
hfsc_schedule_watchdog(sch, cur_time);
return NULL;
}
......
......@@ -1008,7 +1008,6 @@ htb_dequeue_tree(struct htb_sched *q,int prio,int level)
static void htb_delay_by(struct Qdisc *sch,long delay)
{
struct htb_sched *q = (struct htb_sched *)sch->data;
if (netif_queue_stopped(sch->dev)) return;
if (delay <= 0) delay = 1;
if (unlikely(delay > 5*HZ)) {
if (net_ratelimit())
......
......@@ -201,7 +201,7 @@ static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
if (skb) {
psched_time_t now;
long toks;
long toks, delay;
long ptoks = 0;
unsigned int len = skb->len;
......@@ -229,14 +229,12 @@ static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
return skb;
}
if (!netif_queue_stopped(sch->dev)) {
long delay = PSCHED_US2JIFFIE(max_t(long, -toks, -ptoks));
delay = PSCHED_US2JIFFIE(max_t(long, -toks, -ptoks));
if (delay == 0)
delay = 1;
mod_timer(&q->wd_timer, jiffies+delay);
}
/* Maybe we have a shorter packet in the queue,
which can be sent now. It sounds cool,
......
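The cbq, hfsc, htb and tbf hunks all drop the same guard: the wake-up timer is now armed even when netif_queue_stopped() reports the device as busy, so a throttled qdisc always gets a timer-driven restart rather than relying on the driver. A hedged sketch of the resulting pattern; example_arm_watchdog() is an illustrative helper, not code from the patch:

#include <linux/timer.h>
#include <linux/jiffies.h>

/* Sketch: arm a qdisc wake-up timer unconditionally.  Before this change
 * the callers skipped this when netif_queue_stopped() was true, which
 * could leave a throttled qdisc with nothing to restart it. */
static void example_arm_watchdog(struct timer_list *timer, long delay_jiffies)
{
	if (delay_jiffies <= 0)
		delay_jiffies = 1;	/* never arm with a zero delay */
	mod_timer(timer, jiffies + delay_jiffies);
}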