Commit 5a0e554b authored by Linus Torvalds

Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6

* 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6: (39 commits)
  Remove Andrew Morton from list of net driver maintainers.
  bonding: Acquire correct locks in alb for promisc change
  bonding: Convert more locks to _bh, acquire rtnl, for new locking
  bonding: Convert locks to _bh, rework alb locking for new locking
  bonding: Convert miimon to new locking
  bonding: Convert balance-rr transmit to new locking
  Convert bonding timers to workqueues
  Update MAINTAINERS to reflect my (jgarzik's) current efforts.
  pasemi_mac: fix typo
  defxx.c: dfx_bus_init() is __devexit not __devinit
  s390 MAINTAINERS
  remove header_ops bug in qeth driver
  sky2: crash on remove
  MIPSnet: Delete all the useless debugging printks.
  AR7 ethernet: small post-merge cleanups and fixes
  mv643xx_eth: Hook up mv643xx_get_sset_count
  mv643xx_eth: Remove obsolete checksum offload comment
  mv643xx_eth: Merge drivers/net/mv643xx_eth.h into mv643xx_eth.c
  mv643xx_eth: Remove unused register defines
  mv643xx_eth: Clean up mv643xx_eth.h
  ...
parents c09b360a 2c800093
......@@ -136,17 +136,6 @@ M: ajk@iehk.rwth-aachen.de
L: linux-hams@vger.kernel.org
S: Maintained
8139CP 10/100 FAST ETHERNET DRIVER
P: Jeff Garzik
M: jgarzik@pobox.com
S: Maintained
8139TOO 10/100 FAST ETHERNET DRIVER
P: Jeff Garzik
M: jgarzik@pobox.com
W: http://sourceforge.net/projects/gkernel/
S: Maintained
8169 10/100/1000 GIGABIT ETHERNET DRIVER
P: Francois Romieu
M: romieu@fr.zoreil.com
......@@ -1043,12 +1032,6 @@ M: kernel@wantstofly.org
L: netdev@vger.kernel.org
S: Maintained
CIRRUS LOGIC GENERIC FBDEV DRIVER
P: Jeff Garzik
M: jgarzik@pobox.com
L: linux-fbdev-devel@lists.sourceforge.net (subscribers-only)
S: Odd Fixes
CIRRUS LOGIC EP93XX OHCI USB HOST DRIVER
P: Lennert Buytenhek
M: kernel@wantstofly.org
......@@ -1969,12 +1952,6 @@ M: adaplas@gmail.com
L: linux-fbdev-devel@lists.sourceforge.net (subscribers-only)
S: Maintained
INTEL I8XX RANDOM NUMBER GENERATOR SUPPORT
P: Jeff Garzik
M: jgarzik@pobox.com
W: http://sourceforge.net/projects/gkernel/
S: Maintained
INTEL IA32 MICROCODE UPDATE SUPPORT
P: Tigran Aivazian
M: tigran@aivazian.fsnet.co.uk
......@@ -2701,8 +2678,6 @@ M: Paul.Clements@steeleye.com
S: Maintained
NETWORK DEVICE DRIVERS
P: Andrew Morton
M: akpm@linux-foundation.org
P: Jeff Garzik
M: jgarzik@pobox.com
L: netdev@vger.kernel.org
......@@ -3254,6 +3229,8 @@ S: Supported
S390 NETWORK DRIVERS
P: Ursula Braun
M: ubraun@linux.vnet.ibm.com
P: Frank Blaschka
M: blaschka@linux.vnet.ibm.com
M: linux390@de.ibm.com
L: linux-s390@vger.kernel.org
W: http://www.ibm.com/developerworks/linux/linux390/
......@@ -4109,10 +4086,6 @@ M: hirofumi@mail.parknet.co.jp
L: linux-kernel@vger.kernel.org
S: Maintained
VIA 82Cxxx AUDIO DRIVER (old OSS driver)
P: Jeff Garzik
S: Odd fixes
VIA RHINE NETWORK DRIVER
P: Roger Luethi
M: rl@hellgate.ch
......
......@@ -2371,13 +2371,16 @@ config UGETH_TX_ON_DEMAND
depends on UCC_GETH
config MV643XX_ETH
tristate "MV-643XX Ethernet support"
depends on MV64360 || MV64X60 || (PPC_MULTIPLATFORM && PPC32)
tristate "Marvell Discovery (643XX) and Orion ethernet support"
depends on MV64360 || MV64X60 || (PPC_MULTIPLATFORM && PPC32) || ARCH_ORION
select MII
help
This driver supports the gigabit Ethernet on the Marvell MV643XX
chipset which is used in the Momenco Ocelot C and Jaguar ATX and
Pegasos II, amongst other PPC and MIPS boards.
This driver supports the gigabit ethernet MACs in the
Marvell Discovery PPC/MIPS chipset family (MV643XX) and
in the Marvell Orion ARM SoC family.
Some boards that use the Discovery chipset are the Momenco
Ocelot C and Jaguar ATX and Pegasos II.
config QLA3XXX
tristate "QLogic QLA3XXX Network Driver Support"
......
......@@ -2076,8 +2076,10 @@ void bond_3ad_unbind_slave(struct slave *slave)
* times out, and it selects an aggregator for the ports that are yet not
* related to any aggregator, and selects the active aggregator for a bond.
*/
void bond_3ad_state_machine_handler(struct bonding *bond)
void bond_3ad_state_machine_handler(struct work_struct *work)
{
struct bonding *bond = container_of(work, struct bonding,
ad_work.work);
struct port *port;
struct aggregator *aggregator;
......@@ -2128,7 +2130,7 @@ void bond_3ad_state_machine_handler(struct bonding *bond)
}
re_arm:
mod_timer(&(BOND_AD_INFO(bond).ad_timer), jiffies + ad_delta_in_ticks);
queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks);
out:
read_unlock(&bond->lock);
}
......
......@@ -276,7 +276,7 @@ struct ad_slave_info {
void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution, int lacp_fast);
int bond_3ad_bind_slave(struct slave *slave);
void bond_3ad_unbind_slave(struct slave *slave);
void bond_3ad_state_machine_handler(struct bonding *bond);
void bond_3ad_state_machine_handler(struct work_struct *);
void bond_3ad_adapter_speed_changed(struct slave *slave);
void bond_3ad_adapter_duplex_changed(struct slave *slave);
void bond_3ad_handle_link_change(struct slave *slave, char link);
......
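The bond_3ad hunks above show the core of the timer-to-workqueue conversion: the handler now takes a struct work_struct *, recovers its bonding context with container_of(), and re-arms itself with queue_delayed_work() where it used to call mod_timer(). A minimal sketch of that pattern, using hypothetical foo names rather than the bonding code itself:

#include <linux/workqueue.h>

struct foo {
	struct workqueue_struct *wq;
	struct delayed_work monitor_work;
	unsigned long interval;		/* re-arm delay, in jiffies */
};

static void foo_monitor(struct work_struct *work)
{
	/* 'work' is the work_struct embedded in foo's delayed_work */
	struct foo *foo = container_of(work, struct foo, monitor_work.work);

	/* ... periodic state machine / monitoring would run here ... */

	/* replaces mod_timer(&foo->timer, jiffies + foo->interval) */
	queue_delayed_work(foo->wq, &foo->monitor_work, foo->interval);
}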
......@@ -128,12 +128,12 @@ static inline u8 _simple_hash(const u8 *hash_start, int hash_size)
static inline void _lock_tx_hashtbl(struct bonding *bond)
{
spin_lock(&(BOND_ALB_INFO(bond).tx_hashtbl_lock));
spin_lock_bh(&(BOND_ALB_INFO(bond).tx_hashtbl_lock));
}
static inline void _unlock_tx_hashtbl(struct bonding *bond)
{
spin_unlock(&(BOND_ALB_INFO(bond).tx_hashtbl_lock));
spin_unlock_bh(&(BOND_ALB_INFO(bond).tx_hashtbl_lock));
}
/* Caller must hold tx_hashtbl lock */
......@@ -305,12 +305,12 @@ static struct slave *tlb_choose_channel(struct bonding *bond, u32 hash_index, u3
/*********************** rlb specific functions ***************************/
static inline void _lock_rx_hashtbl(struct bonding *bond)
{
spin_lock(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
spin_lock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
}
static inline void _unlock_rx_hashtbl(struct bonding *bond)
{
spin_unlock(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
spin_unlock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
}
/* when an ARP REPLY is received from a client update its info
......@@ -472,13 +472,13 @@ static void rlb_clear_slave(struct bonding *bond, struct slave *slave)
_unlock_rx_hashtbl(bond);
write_lock(&bond->curr_slave_lock);
write_lock_bh(&bond->curr_slave_lock);
if (slave != bond->curr_active_slave) {
rlb_teach_disabled_mac_on_primary(bond, slave->dev->dev_addr);
}
write_unlock(&bond->curr_slave_lock);
write_unlock_bh(&bond->curr_slave_lock);
}
static void rlb_update_client(struct rlb_client_info *client_info)
......@@ -959,19 +959,34 @@ static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[], int hw)
return 0;
}
/* Caller must hold bond lock for write or curr_slave_lock for write*/
/*
* Swap MAC addresses between two slaves.
*
* Called with RTNL held, and no other locks.
*
*/
static void alb_swap_mac_addr(struct bonding *bond, struct slave *slave1, struct slave *slave2)
{
struct slave *disabled_slave = NULL;
u8 tmp_mac_addr[ETH_ALEN];
int slaves_state_differ;
slaves_state_differ = (SLAVE_IS_OK(slave1) != SLAVE_IS_OK(slave2));
memcpy(tmp_mac_addr, slave1->dev->dev_addr, ETH_ALEN);
alb_set_slave_mac_addr(slave1, slave2->dev->dev_addr, bond->alb_info.rlb_enabled);
alb_set_slave_mac_addr(slave2, tmp_mac_addr, bond->alb_info.rlb_enabled);
}
/*
* Send learning packets after MAC address swap.
*
* Called with RTNL and bond->lock held for read.
*/
static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1,
struct slave *slave2)
{
int slaves_state_differ = (SLAVE_IS_OK(slave1) != SLAVE_IS_OK(slave2));
struct slave *disabled_slave = NULL;
/* fasten the change in the switch */
if (SLAVE_IS_OK(slave1)) {
alb_send_learning_packets(slave1, slave1->dev->dev_addr);
......@@ -1044,7 +1059,9 @@ static void alb_change_hw_addr_on_detach(struct bonding *bond, struct slave *sla
}
if (found) {
/* locking: needs RTNL and nothing else */
alb_swap_mac_addr(bond, slave, tmp_slave);
alb_fasten_mac_swap(bond, slave, tmp_slave);
}
}
}
......@@ -1375,8 +1392,10 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
return 0;
}
void bond_alb_monitor(struct bonding *bond)
void bond_alb_monitor(struct work_struct *work)
{
struct bonding *bond = container_of(work, struct bonding,
alb_work.work);
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
struct slave *slave;
int i;
......@@ -1436,16 +1455,16 @@ void bond_alb_monitor(struct bonding *bond)
/* handle rlb stuff */
if (bond_info->rlb_enabled) {
/* the following code changes the promiscuity of the
* the curr_active_slave. It needs to be locked with a
* write lock to protect from other code that also
* sets the promiscuity.
*/
write_lock_bh(&bond->curr_slave_lock);
if (bond_info->primary_is_promisc &&
(++bond_info->rlb_promisc_timeout_counter >= RLB_PROMISC_TIMEOUT)) {
/*
* dev_set_promiscuity requires rtnl and
* nothing else.
*/
read_unlock(&bond->lock);
rtnl_lock();
bond_info->rlb_promisc_timeout_counter = 0;
/* If the primary was set to promiscuous mode
......@@ -1454,9 +1473,10 @@ void bond_alb_monitor(struct bonding *bond)
*/
dev_set_promiscuity(bond->curr_active_slave->dev, -1);
bond_info->primary_is_promisc = 0;
}
write_unlock_bh(&bond->curr_slave_lock);
rtnl_unlock();
read_lock(&bond->lock);
}
if (bond_info->rlb_rebalance) {
bond_info->rlb_rebalance = 0;
......@@ -1479,7 +1499,7 @@ void bond_alb_monitor(struct bonding *bond)
}
re_arm:
mod_timer(&(bond_info->alb_timer), jiffies + alb_delta_in_ticks);
queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks);
out:
read_unlock(&bond->lock);
}
......@@ -1500,11 +1520,11 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave)
/* caller must hold the bond lock for write since the mac addresses
* are compared and may be swapped.
*/
write_lock_bh(&bond->lock);
read_lock(&bond->lock);
res = alb_handle_addr_collision_on_attach(bond, slave);
write_unlock_bh(&bond->lock);
read_unlock(&bond->lock);
if (res) {
return res;
......@@ -1569,13 +1589,21 @@ void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char
* Set the bond->curr_active_slave to @new_slave and handle
* mac address swapping and promiscuity changes as needed.
*
* Caller must hold bond curr_slave_lock for write (or bond lock for write)
* If new_slave is NULL, caller must hold curr_slave_lock or
* bond->lock for write.
*
* If new_slave is not NULL, caller must hold RTNL, bond->lock for
* read and curr_slave_lock for write. Processing here may sleep, so
* no other locks may be held.
*/
void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave)
{
struct slave *swap_slave;
int i;
if (new_slave)
ASSERT_RTNL();
if (bond->curr_active_slave == new_slave) {
return;
}
......@@ -1608,6 +1636,19 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
}
}
/*
* Arrange for swap_slave and new_slave to temporarily be
* ignored so we can mess with their MAC addresses without
* fear of interference from transmit activity.
*/
if (swap_slave) {
tlb_clear_slave(bond, swap_slave, 1);
}
tlb_clear_slave(bond, new_slave, 1);
write_unlock_bh(&bond->curr_slave_lock);
read_unlock(&bond->lock);
/* curr_active_slave must be set before calling alb_swap_mac_addr */
if (swap_slave) {
/* swap mac address */
......@@ -1616,11 +1657,23 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
/* set the new_slave to the bond mac address */
alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr,
bond->alb_info.rlb_enabled);
}
read_lock(&bond->lock);
if (swap_slave) {
alb_fasten_mac_swap(bond, swap_slave, new_slave);
} else {
/* fasten bond mac on new current slave */
alb_send_learning_packets(new_slave, bond->dev->dev_addr);
}
write_lock_bh(&bond->curr_slave_lock);
}
/*
* Called with RTNL
*/
int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
{
struct bonding *bond = bond_dev->priv;
......@@ -1657,8 +1710,12 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
}
}
write_unlock_bh(&bond->curr_slave_lock);
read_unlock(&bond->lock);
if (swap_slave) {
alb_swap_mac_addr(bond, swap_slave, bond->curr_active_slave);
alb_fasten_mac_swap(bond, swap_slave, bond->curr_active_slave);
} else {
alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr,
bond->alb_info.rlb_enabled);
......@@ -1670,6 +1727,9 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
}
}
read_lock(&bond->lock);
write_lock_bh(&bond->curr_slave_lock);
return 0;
}
......
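A recurring pattern in the bond_alb.c hunks is dropping the bond's read lock before taking RTNL (per the comment added above, dev_set_promiscuity() requires RTNL and may sleep), then reacquiring the read lock afterwards. Sketched with assumed names, not the driver code itself:

#include <linux/rtnetlink.h>
#include <linux/workqueue.h>

struct foo {
	rwlock_t lock;
	int needs_rtnl;			/* hypothetical flag */
	struct delayed_work monitor_work;
};

static void foo_monitor(struct work_struct *work)
{
	struct foo *foo = container_of(work, struct foo, monitor_work.work);

	read_lock(&foo->lock);

	if (foo->needs_rtnl) {
		/* rtnl_lock() can sleep, so it must never be taken while
		 * holding a spinning lock such as this rwlock
		 */
		read_unlock(&foo->lock);
		rtnl_lock();

		/* ... RTNL-protected work, e.g. dev_set_promiscuity() ... */

		rtnl_unlock();
		read_lock(&foo->lock);
	}

	read_unlock(&foo->lock);
}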
......@@ -125,7 +125,7 @@ void bond_alb_deinit_slave(struct bonding *bond, struct slave *slave);
void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char link);
void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave);
int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev);
void bond_alb_monitor(struct bonding *bond);
void bond_alb_monitor(struct work_struct *);
int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr);
void bond_alb_clear_vlan(struct bonding *bond, unsigned short vlan_id);
#endif /* __BOND_ALB_H__ */
......
This diff is collapsed.
......@@ -229,7 +229,7 @@ static ssize_t bonding_show_slaves(struct device *d,
int i, res = 0;
struct bonding *bond = to_bond(d);
read_lock_bh(&bond->lock);
read_lock(&bond->lock);
bond_for_each_slave(bond, slave, i) {
if (res > (PAGE_SIZE - IFNAMSIZ)) {
/* not enough space for another interface name */
......@@ -240,7 +240,7 @@ static ssize_t bonding_show_slaves(struct device *d,
}
res += sprintf(buf + res, "%s ", slave->dev->name);
}
read_unlock_bh(&bond->lock);
read_unlock(&bond->lock);
res += sprintf(buf + res, "\n");
res++;
return res;
......@@ -282,18 +282,18 @@ static ssize_t bonding_store_slaves(struct device *d,
/* Got a slave name in ifname. Is it already in the list? */
found = 0;
read_lock_bh(&bond->lock);
read_lock(&bond->lock);
bond_for_each_slave(bond, slave, i)
if (strnicmp(slave->dev->name, ifname, IFNAMSIZ) == 0) {
printk(KERN_ERR DRV_NAME
": %s: Interface %s is already enslaved!\n",
bond->dev->name, ifname);
ret = -EPERM;
read_unlock_bh(&bond->lock);
read_unlock(&bond->lock);
goto out;
}
read_unlock_bh(&bond->lock);
read_unlock(&bond->lock);
printk(KERN_INFO DRV_NAME ": %s: Adding slave %s.\n",
bond->dev->name, ifname);
dev = dev_get_by_name(&init_net, ifname);
......@@ -662,12 +662,9 @@ static ssize_t bonding_store_arp_interval(struct device *d,
"%s Disabling MII monitoring.\n",
bond->dev->name, bond->dev->name);
bond->params.miimon = 0;
/* Kill MII timer, else it brings bond's link down */
if (bond->arp_timer.function) {
printk(KERN_INFO DRV_NAME
": %s: Kill MII timer, else it brings bond's link down...\n",
bond->dev->name);
del_timer_sync(&bond->mii_timer);
if (delayed_work_pending(&bond->mii_work)) {
cancel_delayed_work(&bond->mii_work);
flush_workqueue(bond->wq);
}
}
if (!bond->params.arp_targets[0]) {
......@@ -682,25 +679,15 @@ static ssize_t bonding_store_arp_interval(struct device *d,
* timer will get fired off when the open function
* is called.
*/
if (bond->arp_timer.function) {
/* The timer's already set up, so fire it off */
mod_timer(&bond->arp_timer, jiffies + 1);
} else {
/* Set up the timer. */
init_timer(&bond->arp_timer);
bond->arp_timer.expires = jiffies + 1;
bond->arp_timer.data =
(unsigned long) bond->dev;
if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) {
bond->arp_timer.function =
(void *)
&bond_activebackup_arp_mon;
} else {
bond->arp_timer.function =
(void *)
&bond_loadbalance_arp_mon;
}
add_timer(&bond->arp_timer);
if (!delayed_work_pending(&bond->arp_work)) {
if (bond->params.mode == BOND_MODE_ACTIVEBACKUP)
INIT_DELAYED_WORK(&bond->arp_work,
bond_activebackup_arp_mon);
else
INIT_DELAYED_WORK(&bond->arp_work,
bond_loadbalance_arp_mon);
queue_delayed_work(bond->wq, &bond->arp_work, 0);
}
}
......@@ -1056,12 +1043,9 @@ static ssize_t bonding_store_miimon(struct device *d,
bond->params.arp_validate =
BOND_ARP_VALIDATE_NONE;
}
/* Kill ARP timer, else it brings bond's link down */
if (bond->mii_timer.function) {
printk(KERN_INFO DRV_NAME
": %s: Kill ARP timer, else it brings bond's link down...\n",
bond->dev->name);
del_timer_sync(&bond->arp_timer);
if (delayed_work_pending(&bond->arp_work)) {
cancel_delayed_work(&bond->arp_work);
flush_workqueue(bond->wq);
}
}
......@@ -1071,18 +1055,11 @@ static ssize_t bonding_store_miimon(struct device *d,
* timer will get fired off when the open function
* is called.
*/
if (bond->mii_timer.function) {
/* The timer's already set up, so fire it off */
mod_timer(&bond->mii_timer, jiffies + 1);
} else {
/* Set up the timer. */
init_timer(&bond->mii_timer);
bond->mii_timer.expires = jiffies + 1;
bond->mii_timer.data =
(unsigned long) bond->dev;
bond->mii_timer.function =
(void *) &bond_mii_monitor;
add_timer(&bond->mii_timer);
if (!delayed_work_pending(&bond->mii_work)) {
INIT_DELAYED_WORK(&bond->mii_work,
bond_mii_monitor);
queue_delayed_work(bond->wq,
&bond->mii_work, 0);
}
}
}
......@@ -1156,6 +1133,9 @@ static ssize_t bonding_store_primary(struct device *d,
}
out:
write_unlock_bh(&bond->lock);
rtnl_unlock();
return count;
}
static DEVICE_ATTR(primary, S_IRUGO | S_IWUSR, bonding_show_primary, bonding_store_primary);
......@@ -1213,6 +1193,7 @@ static ssize_t bonding_show_active_slave(struct device *d,
struct bonding *bond = to_bond(d);
int count;
rtnl_lock();
read_lock(&bond->curr_slave_lock);
curr = bond->curr_active_slave;
......@@ -1292,6 +1273,8 @@ static ssize_t bonding_store_active_slave(struct device *d,
}
out:
write_unlock_bh(&bond->lock);
rtnl_unlock();
return count;
}
......
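The sysfs hunks swap the timer start/stop calls for their workqueue equivalents: a running monitor is stopped with cancel_delayed_work() followed by flush_workqueue() (so a handler that is already executing finishes), and is (re)started by queueing the delayed work with zero delay. A condensed sketch under the same assumed foo names as above:

static void foo_monitor(struct work_struct *work);	/* handler as sketched earlier */

static void foo_stop_monitor(struct foo *foo)
{
	/* replaces del_timer_sync(&foo->timer) */
	if (delayed_work_pending(&foo->monitor_work)) {
		cancel_delayed_work(&foo->monitor_work);
		flush_workqueue(foo->wq);
	}
}

static void foo_start_monitor(struct foo *foo)
{
	/* replaces mod_timer(&foo->timer, jiffies + 1) */
	if (!delayed_work_pending(&foo->monitor_work)) {
		INIT_DELAYED_WORK(&foo->monitor_work, foo_monitor);
		queue_delayed_work(foo->wq, &foo->monitor_work, 0);
	}
}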
......@@ -184,8 +184,6 @@ struct bonding {
s32 slave_cnt; /* never change this value outside the attach/detach wrappers */
rwlock_t lock;
rwlock_t curr_slave_lock;
struct timer_list mii_timer;
struct timer_list arp_timer;
s8 kill_timers;
s8 send_grat_arp;
s8 setup_by_slave;
......@@ -199,12 +197,18 @@ struct bonding {
int (*xmit_hash_policy)(struct sk_buff *, struct net_device *, int);
__be32 master_ip;
u16 flags;
u16 rr_tx_counter;
struct ad_bond_info ad_info;
struct alb_bond_info alb_info;
struct bond_params params;
struct list_head vlan_list;
struct vlan_group *vlgrp;
struct packet_type arp_mon_pt;
struct workqueue_struct *wq;
struct delayed_work mii_work;
struct delayed_work arp_work;
struct delayed_work alb_work;
struct delayed_work ad_work;
};
/**
......@@ -307,9 +311,9 @@ int bond_create_slave_symlinks(struct net_device *master, struct net_device *sla
void bond_destroy_slave_symlinks(struct net_device *master, struct net_device *slave);
int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev);
int bond_release(struct net_device *bond_dev, struct net_device *slave_dev);
void bond_mii_monitor(struct net_device *bond_dev);
void bond_loadbalance_arp_mon(struct net_device *bond_dev);
void bond_activebackup_arp_mon(struct net_device *bond_dev);
void bond_mii_monitor(struct work_struct *);
void bond_loadbalance_arp_mon(struct work_struct *);
void bond_activebackup_arp_mon(struct work_struct *);
void bond_set_mode_ops(struct bonding *bond, int mode);
int bond_parse_parm(char *mode_arg, struct bond_parm_tbl *tbl);
void bond_select_active_slave(struct bonding *bond);
......
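The struct bonding changes replace the two timer_list members with a single workqueue plus one delayed_work per monitor. A sketch of how such a structure is typically created and torn down (assumed foo names; this is not the actual bonding setup code):

#include <linux/workqueue.h>

struct foo {
	rwlock_t lock;
	struct workqueue_struct *wq;
	struct delayed_work mii_work;
	struct delayed_work arp_work;
};

static void foo_mii_monitor(struct work_struct *work) { /* ... */ }
static void foo_arp_monitor(struct work_struct *work) { /* ... */ }

static int foo_init(struct foo *foo, const char *name)
{
	/* one single-threaded workqueue serializes all of foo's monitors */
	foo->wq = create_singlethread_workqueue(name);
	if (!foo->wq)
		return -ENOMEM;

	INIT_DELAYED_WORK(&foo->mii_work, foo_mii_monitor);
	INIT_DELAYED_WORK(&foo->arp_work, foo_arp_monitor);
	return 0;
}

static void foo_destroy(struct foo *foo)
{
	cancel_delayed_work(&foo->mii_work);
	cancel_delayed_work(&foo->arp_work);
	destroy_workqueue(foo->wq);	/* also flushes anything still queued */
}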
......@@ -460,18 +460,11 @@ static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct cpmac_desc *desc;
struct cpmac_priv *priv = netdev_priv(dev);
if (unlikely(skb_padto(skb, ETH_ZLEN))) {
if (netif_msg_tx_err(priv) && net_ratelimit())
printk(KERN_WARNING
"%s: tx: padding failed, dropping\n", dev->name);
spin_lock(&priv->lock);
dev->stats.tx_dropped++;
spin_unlock(&priv->lock);
return -ENOMEM;
}
if (unlikely(skb_padto(skb, ETH_ZLEN)))
return NETDEV_TX_OK;
len = max(skb->len, ETH_ZLEN);
queue = skb_get_queue_mapping(skb);
queue = skb->queue_mapping;
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
netif_stop_subqueue(dev, queue);
#else
......@@ -481,13 +474,9 @@ static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
desc = &priv->desc_ring[queue];
if (unlikely(desc->dataflags & CPMAC_OWN)) {
if (netif_msg_tx_err(priv) && net_ratelimit())
printk(KERN_WARNING "%s: tx dma ring full, dropping\n",
printk(KERN_WARNING "%s: tx dma ring full\n",
dev->name);
spin_lock(&priv->lock);
dev->stats.tx_dropped++;
spin_unlock(&priv->lock);
dev_kfree_skb_any(skb);
return -ENOMEM;
return NETDEV_TX_BUSY;
}
spin_lock(&priv->lock);
......@@ -509,7 +498,7 @@ static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
cpmac_dump_skb(dev, skb);
cpmac_write(priv->regs, CPMAC_TX_PTR(queue), (u32)desc->mapping);
return 0;
return NETDEV_TX_OK;
}
static void cpmac_end_xmit(struct net_device *dev, int queue)
......@@ -646,12 +635,14 @@ static void cpmac_clear_tx(struct net_device *dev)
int i;
if (unlikely(!priv->desc_ring))
return;
for (i = 0; i < CPMAC_QUEUES; i++)
for (i = 0; i < CPMAC_QUEUES; i++) {
priv->desc_ring[i].dataflags = 0;
if (priv->desc_ring[i].skb) {
dev_kfree_skb_any(priv->desc_ring[i].skb);
if (netif_subqueue_stopped(dev, i))
netif_wake_subqueue(dev, i);
}
}
}
static void cpmac_hw_error(struct work_struct *work)
......@@ -727,11 +718,13 @@ static void cpmac_tx_timeout(struct net_device *dev)
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
for (i = 0; i < CPMAC_QUEUES; i++)
if (priv->desc_ring[i].skb) {
priv->desc_ring[i].dataflags = 0;
dev_kfree_skb_any(priv->desc_ring[i].skb);
netif_wake_subqueue(dev, i);
break;
}
#else
priv->desc_ring[0].dataflags = 0;
if (priv->desc_ring[0].skb)
dev_kfree_skb_any(priv->desc_ring[0].skb);
netif_wake_queue(dev);
......@@ -794,7 +787,7 @@ static int cpmac_set_ringparam(struct net_device *dev, struct ethtool_ringparam*
{
struct cpmac_priv *priv = netdev_priv(dev);
if (dev->flags && IFF_UP)
if (netif_running(dev))
return -EBUSY;
priv->ring_size = ring->rx_pending;
return 0;
......
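The cpmac changes bring the transmit path in line with the hard_start_xmit return convention: return NETDEV_TX_OK once the skb has been consumed (sent or deliberately dropped), and NETDEV_TX_BUSY without freeing the skb when the ring is full so the core can retry it. A condensed sketch of that convention (hypothetical driver, not cpmac itself):

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>

struct foo_priv {
	int ring_full;			/* hypothetical ring-state flag */
};

static int foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	if (unlikely(skb_padto(skb, ETH_ZLEN)))
		return NETDEV_TX_OK;	/* skb_padto() already freed the skb */

	if (unlikely(priv->ring_full)) {
		/* do not free the skb: the stack keeps it and retries later */
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}

	/* ... map the skb and hand it to the hardware here ... */
	return NETDEV_TX_OK;
}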
......@@ -805,7 +805,7 @@ static void __devinit dfx_bus_init(struct net_device *dev)
* Interrupts are disabled at the adapter bus-specific logic.
*/
static void __devinit dfx_bus_uninit(struct net_device *dev)
static void __devexit dfx_bus_uninit(struct net_device *dev)
{
DFX_board_t *bp = netdev_priv(dev);
struct device *bdev = bp->bus_dev;
......
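The defxx fix corrects a section annotation: dfx_bus_uninit() is only reachable from the device-removal path, so it needs __devexit to match its caller; tagging it __devinit puts it in a section modpost flags as a mismatch against the __devexit code that references it. A minimal illustration with assumed foo names:

#include <linux/init.h>
#include <linux/pci.h>
#include <linux/netdevice.h>

static void __devexit foo_bus_uninit(struct net_device *dev)
{
	/* bus-specific teardown; reachable only from the remove path below */
}

static void __devexit foo_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	unregister_netdev(dev);
	foo_bus_uninit(dev);		/* caller and callee share the exit section */
	free_netdev(dev);
}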
......@@ -30,6 +30,7 @@ static int ioiocpy_frommipsnet(struct net_device *dev, unsigned char *kdata,
int len)
{
uint32_t available_len = inl(mipsnet_reg_address(dev, rxDataCount));
if (available_len < len)
return -EFAULT;
......@@ -45,14 +46,8 @@ static inline ssize_t mipsnet_put_todevice(struct net_device *dev,
int count_to_go = skb->len;
char *buf_ptr = skb->data;
pr_debug("%s: %s(): telling MIPSNET txDataCount(%d)\n",
dev->name, __FUNCTION__, skb->len);
outl(skb->len, mipsnet_reg_address(dev, txDataCount));
pr_debug("%s: %s(): sending data to MIPSNET txDataBuffer(%d)\n",
dev->name, __FUNCTION__, skb->len);
for (; count_to_go; buf_ptr++, count_to_go--)
outb(*buf_ptr, mipsnet_reg_address(dev, txDataBuffer));
......@@ -64,10 +59,8 @@ static inline ssize_t mipsnet_put_todevice(struct net_device *dev,
static int mipsnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
pr_debug("%s:%s(): transmitting %d bytes\n",
dev->name, __FUNCTION__, skb->len);
/* Only one packet at a time. Once TXDONE interrupt is serviced, the
/*
* Only one packet at a time. Once TXDONE interrupt is serviced, the
* queue will be restarted.
*/
netif_stop_queue(dev);
......@@ -94,8 +87,6 @@ static inline ssize_t mipsnet_get_fromdev(struct net_device *dev, size_t count)
skb->protocol = eth_type_trans(skb, dev);
skb->ip_summed = CHECKSUM_UNNECESSARY;
pr_debug("%s:%s(): pushing RXed data to kernel\n",
dev->name, __FUNCTION__);
netif_rx(skb);
dev->stats.rx_packets++;
......@@ -112,44 +103,29 @@ static irqreturn_t mipsnet_interrupt(int irq, void *dev_id)
uint64_t interruptFlags;
if (irq == dev->irq) {
pr_debug("%s:%s(): irq %d for device\n",
dev->name, __FUNCTION__, irq);
retval = IRQ_HANDLED;
interruptFlags =
inl(mipsnet_reg_address(dev, interruptControl));
pr_debug("%s:%s(): intCtl=0x%016llx\n", dev->name,
__FUNCTION__, interruptFlags);
if (interruptFlags & MIPSNET_INTCTL_TXDONE) {
pr_debug("%s:%s(): got TXDone\n",
dev->name, __FUNCTION__);
outl(MIPSNET_INTCTL_TXDONE,
mipsnet_reg_address(dev, interruptControl));
/* only one packet at a time, we are done. */
netif_wake_queue(dev);
} else if (interruptFlags & MIPSNET_INTCTL_RXDONE) {
pr_debug("%s:%s(): got RX data\n",
dev->name, __FUNCTION__);
mipsnet_get_fromdev(dev,
inl(mipsnet_reg_address(dev, rxDataCount)));
pr_debug("%s:%s(): clearing RX int\n",
dev->name, __FUNCTION__);
outl(MIPSNET_INTCTL_RXDONE,
mipsnet_reg_address(dev, interruptControl));
} else if (interruptFlags & MIPSNET_INTCTL_TESTBIT) {
pr_debug("%s:%s(): got test interrupt\n",
dev->name, __FUNCTION__);
/*
* TESTBIT is cleared on read.
* And takes effect after a write with 0
*/
outl(0, mipsnet_reg_address(dev, interruptControl));
} else {
pr_debug("%s:%s(): no valid fags 0x%016llx\n",
dev->name, __FUNCTION__, interruptFlags);
/* Maybe shared IRQ, just ignore, no clearing. */
retval = IRQ_NONE;
}
......@@ -165,22 +141,15 @@ static irqreturn_t mipsnet_interrupt(int irq, void *dev_id)
static int mipsnet_open(struct net_device *dev)
{
int err;
pr_debug("%s: mipsnet_open\n", dev->name);
err = request_irq(dev->irq, &mipsnet_interrupt,
IRQF_SHARED, dev->name, (void *) dev);
if (err) {
pr_debug("%s: %s(): can't get irq %d\n",
dev->name, __FUNCTION__, dev->irq);
release_region(dev->base_addr, MIPSNET_IO_EXTENT);
return err;
}
pr_debug("%s: %s(): got IO region at 0x%04lx and irq %d for dev.\n",
dev->name, __FUNCTION__, dev->base_addr, dev->irq);
netif_start_queue(dev);
/* test interrupt handler */
......@@ -193,8 +162,8 @@ static int mipsnet_open(struct net_device *dev)
static int mipsnet_close(struct net_device *dev)
{
pr_debug("%s: %s()\n", dev->name, __FUNCTION__);
netif_stop_queue(dev);
return 0;
}
......@@ -229,9 +198,6 @@ static int __init mipsnet_probe(struct device *dev)
/* Get the io region now, get irq on open() */
if (!request_region(netdev->base_addr, MIPSNET_IO_EXTENT, "mipsnet")) {
pr_debug("%s: %s(): IO region {start: 0x%04lux, len: %d} "
"for dev is not availble.\n", netdev->name,
__FUNCTION__, netdev->base_addr, MIPSNET_IO_EXTENT);
err = -EBUSY;
goto out_free_netdev;
}
......@@ -295,8 +261,6 @@ static int __init mipsnet_init_module(void)
static void __exit mipsnet_exit_module(void)
{
pr_debug("MIPSNet Ethernet driver exiting\n");
driver_unregister(&mipsnet_driver);
}
......
This diff is collapsed.
This diff is collapsed.
......@@ -550,7 +550,7 @@ static int pasemi_mac_clean_rx(struct pasemi_mac *mac, int limit)
n = mac->rx->next_to_clean;
prefetch(RX_RING(mac, n));
prefetch(&RX_RING(mac, n));
for (count = 0; count < limit; count++) {
macrx = RX_RING(mac, n);
......
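The pasemi_mac typo fix adds a missing '&': prefetch() expects the address of the memory about to be read, so the ring entry has to be passed by address, not by value. In isolation, with an assumed descriptor layout:

#include <linux/prefetch.h>
#include <linux/types.h>

struct foo_desc {
	u64 flags;
	u64 buffer;
};

static void foo_prefetch_next(struct foo_desc *ring, unsigned int next)
{
	/* pass a pointer to the descriptor, i.e. &ring[next], not ring[next] */
	prefetch(&ring[next]);
}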
This diff is collapsed.
......@@ -4271,7 +4271,7 @@ static void __devexit sky2_remove(struct pci_dev *pdev)
del_timer_sync(&hw->watchdog_timer);
cancel_work_sync(&hw->restart_work);
for (i = hw->ports; i >= 0; --i)
for (i = hw->ports-1; i >= 0; --i)
unregister_netdev(hw->dev[i]);
sky2_write32(hw, B0_IMSK, 0);
......@@ -4289,7 +4289,7 @@ static void __devexit sky2_remove(struct pci_dev *pdev)
pci_release_regions(pdev);
pci_disable_device(pdev);
for (i = hw->ports; i >= 0; --i)
for (i = hw->ports-1; i >= 0; --i)
free_netdev(hw->dev[i]);
iounmap(hw->regs);
......
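The sky2 crash-on-remove fix is an off-by-one: hw->dev[] holds hw->ports registered devices, so starting the countdown at i = hw->ports visited one entry beyond the last valid port. The corrected bound, in a generic form:

#include <linux/netdevice.h>

/* dev[] holds nports entries, valid indices are 0 .. nports - 1 */
static void foo_remove_ports(struct net_device *dev[], int nports)
{
	int i;

	for (i = nports - 1; i >= 0; --i)	/* not i = nports */
		unregister_netdev(dev[i]);
}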
This diff is collapsed.
/*
* MV-643XX ethernet platform device data definition file.
*/
#ifndef __LINUX_MV643XX_ETH_H
#define __LINUX_MV643XX_ETH_H
#define MV643XX_ETH_SHARED_NAME "mv643xx_eth_shared"
#define MV643XX_ETH_NAME "mv643xx_eth"
#define MV643XX_ETH_SHARED_REGS 0x2000
#define MV643XX_ETH_SHARED_REGS_SIZE 0x2000
struct mv643xx_eth_platform_data {
int port_number;
u16 force_phy_addr; /* force override if phy_addr == 0 */
u16 phy_addr;
/* If speed is 0, then speed and duplex are autonegotiated. */
int speed; /* 0, SPEED_10, SPEED_100, SPEED_1000 */
int duplex; /* DUPLEX_HALF or DUPLEX_FULL */
/* non-zero values of the following fields override defaults */
u32 tx_queue_size;
u32 rx_queue_size;
u32 tx_sram_addr;
u32 tx_sram_size;
u32 rx_sram_addr;
u32 rx_sram_size;
u8 mac_addr[6]; /* mac address if non-zero*/
};
#endif /* __LINUX_MV643XX_ETH_H */
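struct mv643xx_eth_platform_data above is consumed by board code that registers an mv643xx_eth platform device. A hedged sketch of how a board file might wire it up (hypothetical PHY address and MAC; the resources a given board needs, including the shared-registers device, may differ):

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/mv643xx_eth.h>

static struct mv643xx_eth_platform_data foo_eth_pd = {
	.port_number	= 0,
	.force_phy_addr	= 1,
	.phy_addr	= 8,			/* hypothetical PHY address */
	.speed		= 0,			/* 0 => autonegotiate speed/duplex */
	.mac_addr	= { 0x00, 0x50, 0x43, 0x00, 0x00, 0x01 },
};

static struct platform_device foo_eth_device = {
	.name	= MV643XX_ETH_NAME,
	.id	= 0,
	.dev	= {
		.platform_data = &foo_eth_pd,
	},
};

static int __init foo_board_setup(void)
{
	/* the MV643XX_ETH_SHARED_NAME device with its register window
	 * would normally be registered as well (omitted in this sketch)
	 */
	return platform_device_register(&foo_eth_device);
}
device_initcall(foo_board_setup);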
......@@ -834,7 +834,7 @@ static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
const void *daddr, const void *saddr,
unsigned len)
{
if (!dev->header_ops)
if (!dev->header_ops || !dev->header_ops->create)
return 0;
return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
......
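The netdevice.h hunk guards against drivers (qeth in some modes, per the commit list) that install header_ops without a .create method; dev_hard_header() now returns 0 for them instead of dereferencing a NULL pointer. A sketch of the case the new check covers (hypothetical driver, not the actual qeth code):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* A device that wants header_ops for .parse but builds no
 * link-layer header on transmit, so .create stays NULL.
 */
static int foo_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
	return 0;	/* no link-layer address to report */
}

static const struct header_ops foo_header_ops = {
	.parse	= foo_header_parse,
	/* .create intentionally NULL: dev_hard_header() now returns 0
	 * for this device instead of crashing
	 */
};

static void foo_setup(struct net_device *dev)
{
	dev->header_ops = &foo_header_ops;
}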