Commit 7ef52737 authored by David S. Miller

Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

parents 47d29646 1183f383
......@@ -58,8 +58,8 @@
#include "bnx2_fw.h"
#define DRV_MODULE_NAME "bnx2"
#define DRV_MODULE_VERSION "2.0.8"
#define DRV_MODULE_RELDATE "Feb 15, 2010"
#define DRV_MODULE_VERSION "2.0.9"
#define DRV_MODULE_RELDATE "April 27, 2010"
#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-5.0.0.j6.fw"
#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-5.0.0.j9.fw"
......@@ -651,8 +651,9 @@ bnx2_napi_enable(struct bnx2 *bp)
}
static void
bnx2_netif_stop(struct bnx2 *bp)
bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
{
if (stop_cnic)
bnx2_cnic_stop(bp);
if (netif_running(bp->dev)) {
int i;
......@@ -671,13 +672,14 @@ bnx2_netif_stop(struct bnx2 *bp)
}
static void
bnx2_netif_start(struct bnx2 *bp)
bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
{
if (atomic_dec_and_test(&bp->intr_sem)) {
if (netif_running(bp->dev)) {
netif_tx_wake_all_queues(bp->dev);
bnx2_napi_enable(bp);
bnx2_enable_int(bp);
if (start_cnic)
bnx2_cnic_start(bp);
}
}
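The bnx2 hunks above and below add a stop_cnic/start_cnic flag so that paths which fully reset the chip also quiesce the CNIC (iSCSI/FCoE offload) child device, while paths that only touch VLAN registration leave it running. A rough sketch of the resulting caller convention, condensed from the hunks in this diff rather than copied from the driver:

/* Full chip reset: the offload device is stopped before the hardware
 * is reinitialised and restarted afterwards. */
bnx2_netif_stop(bp, true);
bnx2_init_nic(bp, 1);
bnx2_netif_start(bp, true);

/* VLAN group update: the chip keeps running, so the CNIC device
 * is left alone. */
bnx2_netif_stop(bp, false);
bp->vlgrp = vlgrp;
bnx2_netif_start(bp, false);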
......@@ -4758,8 +4760,12 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
rc = bnx2_alloc_bad_rbuf(bp);
}
if (bp->flags & BNX2_FLAG_USING_MSIX)
if (bp->flags & BNX2_FLAG_USING_MSIX) {
bnx2_setup_msix_tbl(bp);
/* Prevent MSIX table reads and writes from timing out */
REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
}
return rc;
}
......@@ -6272,12 +6278,12 @@ bnx2_reset_task(struct work_struct *work)
return;
}
bnx2_netif_stop(bp);
bnx2_netif_stop(bp, true);
bnx2_init_nic(bp, 1);
atomic_set(&bp->intr_sem, 1);
bnx2_netif_start(bp);
bnx2_netif_start(bp, true);
rtnl_unlock();
}
......@@ -6319,7 +6325,7 @@ bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
struct bnx2 *bp = netdev_priv(dev);
if (netif_running(dev))
bnx2_netif_stop(bp);
bnx2_netif_stop(bp, false);
bp->vlgrp = vlgrp;
......@@ -6330,7 +6336,7 @@ bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
bnx2_netif_start(bp);
bnx2_netif_start(bp, false);
}
#endif
......@@ -7050,9 +7056,9 @@ bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
if (netif_running(bp->dev)) {
bnx2_netif_stop(bp);
bnx2_netif_stop(bp, true);
bnx2_init_nic(bp, 0);
bnx2_netif_start(bp);
bnx2_netif_start(bp, true);
}
return 0;
......@@ -7082,7 +7088,7 @@ bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
/* Reset will erase chipset stats; save them */
bnx2_save_stats(bp);
bnx2_netif_stop(bp);
bnx2_netif_stop(bp, true);
bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
bnx2_free_skbs(bp);
bnx2_free_mem(bp);
......@@ -7110,7 +7116,7 @@ bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
bnx2_setup_cnic_irq_info(bp);
mutex_unlock(&bp->cnic_lock);
#endif
bnx2_netif_start(bp);
bnx2_netif_start(bp, true);
}
return 0;
}
......@@ -7363,7 +7369,7 @@ bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
if (etest->flags & ETH_TEST_FL_OFFLINE) {
int i;
bnx2_netif_stop(bp);
bnx2_netif_stop(bp, true);
bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
bnx2_free_skbs(bp);
......@@ -7382,7 +7388,7 @@ bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
bnx2_shutdown_chip(bp);
else {
bnx2_init_nic(bp, 1);
bnx2_netif_start(bp);
bnx2_netif_start(bp, true);
}
/* wait for link up */
......@@ -8376,7 +8382,7 @@ bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
return 0;
flush_scheduled_work();
bnx2_netif_stop(bp);
bnx2_netif_stop(bp, true);
netif_device_detach(dev);
del_timer_sync(&bp->timer);
bnx2_shutdown_chip(bp);
......@@ -8398,7 +8404,7 @@ bnx2_resume(struct pci_dev *pdev)
bnx2_set_power_state(bp, PCI_D0);
netif_device_attach(dev);
bnx2_init_nic(bp, 1);
bnx2_netif_start(bp);
bnx2_netif_start(bp, true);
return 0;
}
......@@ -8425,7 +8431,7 @@ static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
}
if (netif_running(dev)) {
bnx2_netif_stop(bp);
bnx2_netif_stop(bp, true);
del_timer_sync(&bp->timer);
bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
}
......@@ -8482,7 +8488,7 @@ static void bnx2_io_resume(struct pci_dev *pdev)
rtnl_lock();
if (netif_running(dev))
bnx2_netif_start(bp);
bnx2_netif_start(bp, true);
netif_device_attach(dev);
rtnl_unlock();
......
......@@ -5020,6 +5020,9 @@ static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
reg16 &= ~state;
pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
if (!pdev->bus->self)
return;
pos = pci_pcie_cap(pdev->bus->self);
pci_read_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, &reg16);
reg16 &= ~state;
......
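The added e1000e check guards against devices that sit directly on the root bus: for those, pdev->bus->self is NULL because there is no upstream bridge whose link-control register could be touched, so the function returns after clearing ASPM on the endpoint itself. A hedged sketch of the guard (clear_link_aspm() is a hypothetical helper standing in for the config-space writes shown above):

static void disable_aspm_sketch(struct pci_dev *pdev, u16 state)
{
	clear_link_aspm(pdev, state);            /* clear ASPM bits on the endpoint */

	if (!pdev->bus->self)                    /* device on the root bus: no parent bridge */
		return;

	clear_link_aspm(pdev->bus->self, state); /* otherwise also clear them upstream */
}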
......@@ -1567,9 +1567,9 @@ static void gfar_halt_nodisable(struct net_device *dev)
tempval |= (DMACTRL_GRS | DMACTRL_GTS);
gfar_write(&regs->dmactrl, tempval);
while (!(gfar_read(&regs->ievent) &
(IEVENT_GRSC | IEVENT_GTSC)))
cpu_relax();
spin_event_timeout(((gfar_read(&regs->ievent) &
(IEVENT_GRSC | IEVENT_GTSC)) ==
(IEVENT_GRSC | IEVENT_GTSC)), -1, 0);
}
}
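The gianfar change replaces an unbounded cpu_relax() poll with spin_event_timeout(), so a wedged controller can no longer hang the CPU forever waiting for the graceful-stop bits. The general shape of such a conversion, as a standalone sketch with placeholder register access and bit values (not gianfar's real ones):

#include <stdbool.h>
#include <stdint.h>

/* Placeholder for a device register read; a real driver would use
 * ioread32()/gfar_read() here. */
extern uint32_t read_event_reg(void);

#define EV_GRSC 0x00000100u   /* illustrative bit values only */
#define EV_GTSC 0x00000200u

/* Before: while (!(read_event_reg() & (EV_GRSC | EV_GTSC))) ;  -- may spin forever.
 * After: give up after a bounded number of polls and report the timeout. */
static bool wait_for_halt(unsigned int max_polls)
{
	while (max_polls--) {
		uint32_t ev = read_event_reg();
		if ((ev & (EV_GRSC | EV_GTSC)) == (EV_GRSC | EV_GTSC))
			return true;   /* both graceful-stop events seen */
	}
	return false;                  /* timed out; caller decides what to do */
}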
......
......@@ -1804,23 +1804,30 @@ static void media_check(u_long arg)
SMC_SELECT_BANK(1);
media |= (inw(ioaddr + CONFIG) & CFG_AUI_SELECT) ? 2 : 1;
SMC_SELECT_BANK(saved_bank);
spin_unlock_irqrestore(&smc->lock, flags);
/* Check for pending interrupt with watchdog flag set: with
this, we can limp along even if the interrupt is blocked */
if (smc->watchdog++ && ((i>>8) & i)) {
if (!smc->fast_poll)
printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name);
local_irq_save(flags);
smc_interrupt(dev->irq, dev);
local_irq_restore(flags);
smc->fast_poll = HZ;
}
if (smc->fast_poll) {
smc->fast_poll--;
smc->media.expires = jiffies + HZ/100;
add_timer(&smc->media);
SMC_SELECT_BANK(saved_bank);
spin_unlock_irqrestore(&smc->lock, flags);
return;
}
spin_lock_irqsave(&smc->lock, flags);
saved_bank = inw(ioaddr + BANK_SELECT);
if (smc->cfg & CFG_MII_SELECT) {
if (smc->mii_if.phy_id < 0)
goto reschedule;
......@@ -1978,15 +1985,16 @@ static int smc_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
unsigned int ioaddr = dev->base_addr;
u16 saved_bank = inw(ioaddr + BANK_SELECT);
int ret;
unsigned long flags;
spin_lock_irq(&smc->lock);
spin_lock_irqsave(&smc->lock, flags);
SMC_SELECT_BANK(3);
if (smc->cfg & CFG_MII_SELECT)
ret = mii_ethtool_gset(&smc->mii_if, ecmd);
else
ret = smc_netdev_get_ecmd(dev, ecmd);
SMC_SELECT_BANK(saved_bank);
spin_unlock_irq(&smc->lock);
spin_unlock_irqrestore(&smc->lock, flags);
return ret;
}
......@@ -1996,15 +2004,16 @@ static int smc_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
unsigned int ioaddr = dev->base_addr;
u16 saved_bank = inw(ioaddr + BANK_SELECT);
int ret;
unsigned long flags;
spin_lock_irq(&smc->lock);
spin_lock_irqsave(&smc->lock, flags);
SMC_SELECT_BANK(3);
if (smc->cfg & CFG_MII_SELECT)
ret = mii_ethtool_sset(&smc->mii_if, ecmd);
else
ret = smc_netdev_set_ecmd(dev, ecmd);
SMC_SELECT_BANK(saved_bank);
spin_unlock_irq(&smc->lock);
spin_unlock_irqrestore(&smc->lock, flags);
return ret;
}
......@@ -2014,12 +2023,13 @@ static u32 smc_get_link(struct net_device *dev)
unsigned int ioaddr = dev->base_addr;
u16 saved_bank = inw(ioaddr + BANK_SELECT);
u32 ret;
unsigned long flags;
spin_lock_irq(&smc->lock);
spin_lock_irqsave(&smc->lock, flags);
SMC_SELECT_BANK(3);
ret = smc_link_ok(dev);
SMC_SELECT_BANK(saved_bank);
spin_unlock_irq(&smc->lock);
spin_unlock_irqrestore(&smc->lock, flags);
return ret;
}
......@@ -2056,16 +2066,17 @@ static int smc_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
int rc = 0;
u16 saved_bank;
unsigned int ioaddr = dev->base_addr;
unsigned long flags;
if (!netif_running(dev))
return -EINVAL;
spin_lock_irq(&smc->lock);
spin_lock_irqsave(&smc->lock, flags);
saved_bank = inw(ioaddr + BANK_SELECT);
SMC_SELECT_BANK(3);
rc = generic_mii_ioctl(&smc->mii_if, mii, cmd, NULL);
SMC_SELECT_BANK(saved_bank);
spin_unlock_irq(&smc->lock);
spin_unlock_irqrestore(&smc->lock, flags);
return rc;
}
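The smc91c92_cs hunks switch the ethtool and ioctl paths from spin_lock_irq() to spin_lock_irqsave(). The unconditional variants re-enable interrupts on unlock, which is wrong if the caller already held them disabled (as the reworked media_check() timer path can); the irqsave variants restore whatever IRQ state the caller had. A minimal kernel-style sketch of the pattern, not the actual driver code:

static int do_locked_op(struct smc_private *smc)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&smc->lock, flags);      /* remembers the prior IRQ state */
	ret = perform_banked_access(smc);          /* hypothetical helper for the register work */
	spin_unlock_irqrestore(&smc->lock, flags); /* restores that state, enabled or not */

	return ret;
}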
......
......@@ -2256,17 +2256,36 @@ static int sbmac_init(struct platform_device *pldev, long long base)
sc->mii_bus = mdiobus_alloc();
if (sc->mii_bus == NULL) {
sbmac_uninitctx(sc);
return -ENOMEM;
err = -ENOMEM;
goto uninit_ctx;
}
sc->mii_bus->name = sbmac_mdio_string;
snprintf(sc->mii_bus->id, MII_BUS_ID_SIZE, "%x", idx);
sc->mii_bus->priv = sc;
sc->mii_bus->read = sbmac_mii_read;
sc->mii_bus->write = sbmac_mii_write;
sc->mii_bus->irq = sc->phy_irq;
for (i = 0; i < PHY_MAX_ADDR; ++i)
sc->mii_bus->irq[i] = SBMAC_PHY_INT;
sc->mii_bus->parent = &pldev->dev;
/*
* Probe PHY address
*/
err = mdiobus_register(sc->mii_bus);
if (err) {
printk(KERN_ERR "%s: unable to register MDIO bus\n",
dev->name);
goto free_mdio;
}
dev_set_drvdata(&pldev->dev, sc->mii_bus);
err = register_netdev(dev);
if (err) {
printk(KERN_ERR "%s.%d: unable to register netdev\n",
sbmac_string, idx);
mdiobus_free(sc->mii_bus);
sbmac_uninitctx(sc);
return err;
goto unreg_mdio;
}
pr_info("%s.%d: registered as %s\n", sbmac_string, idx, dev->name);
......@@ -2282,19 +2301,15 @@ static int sbmac_init(struct platform_device *pldev, long long base)
pr_info("%s: SiByte Ethernet at 0x%08Lx, address: %pM\n",
dev->name, base, eaddr);
sc->mii_bus->name = sbmac_mdio_string;
snprintf(sc->mii_bus->id, MII_BUS_ID_SIZE, "%x", idx);
sc->mii_bus->priv = sc;
sc->mii_bus->read = sbmac_mii_read;
sc->mii_bus->write = sbmac_mii_write;
sc->mii_bus->irq = sc->phy_irq;
for (i = 0; i < PHY_MAX_ADDR; ++i)
sc->mii_bus->irq[i] = SBMAC_PHY_INT;
sc->mii_bus->parent = &pldev->dev;
dev_set_drvdata(&pldev->dev, sc->mii_bus);
return 0;
unreg_mdio:
mdiobus_unregister(sc->mii_bus);
dev_set_drvdata(&pldev->dev, NULL);
free_mdio:
mdiobus_free(sc->mii_bus);
uninit_ctx:
sbmac_uninitctx(sc);
return err;
}
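The sbmac changes move MDIO bus setup and registration from the open() path into sbmac_init(), so the bus exists for the whole lifetime of the device, and the error path unwinds each completed step in reverse with goto labels. A condensed sketch of the resulting probe/remove symmetry, drawn from the hunks above with the details elided:

	/* init/probe: each step that succeeds gains a matching unwind label */
	err = mdiobus_register(sc->mii_bus);
	if (err)
		goto free_mdio;
	err = register_netdev(dev);
	if (err)
		goto unreg_mdio;
	return 0;

unreg_mdio:
	mdiobus_unregister(sc->mii_bus);
free_mdio:
	mdiobus_free(sc->mii_bus);
	/* ... */

	/* remove: undo in the opposite order of init */
	unregister_netdev(dev);
	mdiobus_unregister(sc->mii_bus);
	mdiobus_free(sc->mii_bus);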
......@@ -2320,16 +2335,6 @@ static int sbmac_open(struct net_device *dev)
goto out_err;
}
/*
* Probe PHY address
*/
err = mdiobus_register(sc->mii_bus);
if (err) {
printk(KERN_ERR "%s: unable to register MDIO bus\n",
dev->name);
goto out_unirq;
}
sc->sbm_speed = sbmac_speed_none;
sc->sbm_duplex = sbmac_duplex_none;
sc->sbm_fc = sbmac_fc_none;
......@@ -2360,11 +2365,7 @@ static int sbmac_open(struct net_device *dev)
return 0;
out_unregister:
mdiobus_unregister(sc->mii_bus);
out_unirq:
free_irq(dev->irq, dev);
out_err:
return err;
}
......@@ -2553,9 +2554,6 @@ static int sbmac_close(struct net_device *dev)
phy_disconnect(sc->phy_dev);
sc->phy_dev = NULL;
mdiobus_unregister(sc->mii_bus);
free_irq(dev->irq, dev);
sbdma_emptyring(&(sc->sbm_txdma));
......@@ -2662,6 +2660,7 @@ static int __exit sbmac_remove(struct platform_device *pldev)
unregister_netdev(dev);
sbmac_uninitctx(sc);
mdiobus_unregister(sc->mii_bus);
mdiobus_free(sc->mii_bus);
iounmap(sc->sbm_base);
free_netdev(dev);
......
......@@ -1870,6 +1870,7 @@ int efx_reset(struct efx_nic *efx, enum reset_type method)
}
if (disabled) {
dev_close(efx->net_dev);
EFX_ERR(efx, "has been disabled\n");
efx->state = STATE_DISABLED;
} else {
......@@ -1893,8 +1894,7 @@ static void efx_reset_work(struct work_struct *data)
}
rtnl_lock();
if (efx_reset(efx, efx->reset_pending))
dev_close(efx->net_dev);
(void)efx_reset(efx, efx->reset_pending);
rtnl_unlock();
}
......
......@@ -1326,7 +1326,9 @@ static int falcon_probe_nvconfig(struct efx_nic *efx)
EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad);
falcon_probe_board(efx, board_rev);
rc = falcon_probe_board(efx, board_rev);
if (rc)
goto fail2;
kfree(nvconfig);
return 0;
......
......@@ -728,15 +728,7 @@ static const struct falcon_board_type board_types[] = {
},
};
static const struct falcon_board_type falcon_dummy_board = {
.init = efx_port_dummy_op_int,
.init_phy = efx_port_dummy_op_void,
.fini = efx_port_dummy_op_void,
.set_id_led = efx_port_dummy_op_set_id_led,
.monitor = efx_port_dummy_op_int,
};
void falcon_probe_board(struct efx_nic *efx, u16 revision_info)
int falcon_probe_board(struct efx_nic *efx, u16 revision_info)
{
struct falcon_board *board = falcon_board(efx);
u8 type_id = FALCON_BOARD_TYPE(revision_info);
......@@ -754,8 +746,9 @@ void falcon_probe_board(struct efx_nic *efx, u16 revision_info)
(efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC)
? board->type->ref_model : board->type->gen_type,
'A' + board->major, board->minor);
return 0;
} else {
EFX_ERR(efx, "unknown board type %d\n", type_id);
board->type = &falcon_dummy_board;
return -ENODEV;
}
}
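falcon_probe_board() previously installed a dummy ops table for unknown board types and returned void, so callers could not tell a real board from a stub; it now returns 0 or -ENODEV and falcon_probe_nvconfig() propagates the failure. A hedged sketch of the convention this enables (lookup details elided):

int falcon_probe_board(struct efx_nic *efx, u16 revision_info)
{
	/* ... look up the board type from revision_info ... */
	if (!board->type) {
		EFX_ERR(efx, "unknown board type %d\n", type_id);
		return -ENODEV;          /* caller aborts instead of using a stub */
	}
	return 0;
}

	/* caller side, as in falcon_probe_nvconfig() above */
	rc = falcon_probe_board(efx, board_rev);
	if (rc)
		goto fail2;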
......@@ -158,7 +158,7 @@ extern struct efx_nic_type siena_a0_nic_type;
**************************************************************************
*/
extern void falcon_probe_board(struct efx_nic *efx, u16 revision_info);
extern int falcon_probe_board(struct efx_nic *efx, u16 revision_info);
/* TX data path */
extern int efx_nic_probe_tx(struct efx_tx_queue *tx_queue);
......
......@@ -475,8 +475,17 @@ static int siena_try_update_nic_stats(struct efx_nic *efx)
static void siena_update_nic_stats(struct efx_nic *efx)
{
while (siena_try_update_nic_stats(efx) == -EAGAIN)
cpu_relax();
int retry;
/* If we're unlucky enough to read statistics during the DMA, wait
* up to 10ms for it to finish (typically takes <500us) */
for (retry = 0; retry < 100; ++retry) {
if (siena_try_update_nic_stats(efx) == 0)
return;
udelay(100);
}
/* Use the old values instead */
}
static void siena_start_nic_stats(struct efx_nic *efx)
......
......@@ -397,4 +397,13 @@ config USB_IPHETH
For more information: http://giagio.com/wiki/moin.cgi/iPhoneEthernetDriver
config USB_SIERRA_NET
tristate "USB-to-WWAN Driver for Sierra Wireless modems"
depends on USB_USBNET
help
Choose this option if you have a Sierra Wireless USB-to-WWAN device.
To compile this driver as a module, choose M here: the
module will be called sierra_net.
endmenu
......@@ -24,4 +24,5 @@ obj-$(CONFIG_USB_USBNET) += usbnet.o
obj-$(CONFIG_USB_NET_INT51X1) += int51x1.o
obj-$(CONFIG_USB_CDC_PHONET) += cdc-phonet.o
obj-$(CONFIG_USB_IPHETH) += ipheth.o
obj-$(CONFIG_USB_SIERRA_NET) += sierra_net.o
......@@ -466,6 +466,7 @@ static const struct driver_info mbm_info = {
.bind = cdc_bind,
.unbind = usbnet_cdc_unbind,
.status = cdc_status,
.manage_power = cdc_manage_power,
};
/*-------------------------------------------------------------------------*/
......
......@@ -122,25 +122,25 @@ static int ipheth_alloc_urbs(struct ipheth_device *iphone)
tx_urb = usb_alloc_urb(0, GFP_KERNEL);
if (tx_urb == NULL)
goto error;
goto error_nomem;
rx_urb = usb_alloc_urb(0, GFP_KERNEL);
if (rx_urb == NULL)
goto error;
goto free_tx_urb;
tx_buf = usb_buffer_alloc(iphone->udev,
IPHETH_BUF_SIZE,
GFP_KERNEL,
&tx_urb->transfer_dma);
if (tx_buf == NULL)
goto error;
goto free_rx_urb;
rx_buf = usb_buffer_alloc(iphone->udev,
IPHETH_BUF_SIZE,
GFP_KERNEL,
&rx_urb->transfer_dma);
if (rx_buf == NULL)
goto error;
goto free_tx_buf;
iphone->tx_urb = tx_urb;
......@@ -149,13 +149,14 @@ static int ipheth_alloc_urbs(struct ipheth_device *iphone)
iphone->rx_buf = rx_buf;
return 0;
error:
usb_buffer_free(iphone->udev, IPHETH_BUF_SIZE, rx_buf,
rx_urb->transfer_dma);
free_tx_buf:
usb_buffer_free(iphone->udev, IPHETH_BUF_SIZE, tx_buf,
tx_urb->transfer_dma);
free_rx_urb:
usb_free_urb(rx_urb);
free_tx_urb:
usb_free_urb(tx_urb);
error_nomem:
return -ENOMEM;
}
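The ipheth hunk replaces a single catch-all error label, which could free buffers that were never allocated and pass uninitialised transfer_dma values, with one label per allocation stage so that each failure path frees exactly what was already set up. The general ladder, as a standalone sketch with hypothetical names rather than the driver's URB/buffer calls:

#include <stdlib.h>

struct resources { void *a, *b, *c; };

static int alloc_all(struct resources *r)
{
	r->a = malloc(64);
	if (!r->a)
		goto err_nomem;          /* nothing allocated yet */

	r->b = malloc(64);
	if (!r->b)
		goto free_a;             /* only 'a' needs freeing */

	r->c = malloc(64);
	if (!r->c)
		goto free_b;             /* 'a' and 'b' need freeing */

	return 0;

free_b:
	free(r->b);
free_a:
	free(r->a);
err_nomem:
	return -1;                       /* -ENOMEM in kernel code */
}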
......
......@@ -145,6 +145,7 @@ static struct usb_device_id usb_klsi_table[] = {
{ USB_DEVICE(0x0707, 0x0100) }, /* SMC 2202USB */
{ USB_DEVICE(0x07aa, 0x0001) }, /* Correga K.K. */
{ USB_DEVICE(0x07b8, 0x4000) }, /* D-Link DU-E10 */
{ USB_DEVICE(0x07c9, 0xb010) }, /* Allied Telesyn AT-USB10 USB Ethernet Adapter */
{ USB_DEVICE(0x0846, 0x1001) }, /* NetGear EA-101 */
{ USB_DEVICE(0x0846, 0x1002) }, /* NetGear EA-101 */
{ USB_DEVICE(0x085a, 0x0008) }, /* PortGear Ethernet Adapter */
......
......@@ -246,7 +246,7 @@ static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index,
u32 idx, i;
i = (*index) % ring_limit;
(*index) = idx = le32_to_cpu(ring_control->device_idx[1]);
(*index) = idx = le32_to_cpu(ring_control->device_idx[ring_index]);
idx %= ring_limit;
while (i != idx) {
......
......@@ -107,6 +107,7 @@ typedef enum {
SCTP_CMD_T1_RETRAN, /* Mark for retransmission after T1 timeout */
SCTP_CMD_UPDATE_INITTAG, /* Update peer inittag */
SCTP_CMD_SEND_MSG, /* Send the whole user message */
SCTP_CMD_SEND_NEXT_ASCONF, /* Send the next ASCONF after ACK */
SCTP_CMD_LAST
} sctp_verb_t;
......
......@@ -128,6 +128,7 @@ extern int sctp_register_pf(struct sctp_pf *, sa_family_t);
int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
int sctp_inet_listen(struct socket *sock, int backlog);
void sctp_write_space(struct sock *sk);
void sctp_data_ready(struct sock *sk, int len);
unsigned int sctp_poll(struct file *file, struct socket *sock,
poll_table *wait);
void sctp_sock_rfree(struct sk_buff *skb);
......
......@@ -778,6 +778,7 @@ int sctp_user_addto_chunk(struct sctp_chunk *chunk, int off, int len,
struct iovec *data);
void sctp_chunk_free(struct sctp_chunk *);
void *sctp_addto_chunk(struct sctp_chunk *, int len, const void *data);
void *sctp_addto_chunk_fixed(struct sctp_chunk *, int len, const void *data);
struct sctp_chunk *sctp_chunkify(struct sk_buff *,
const struct sctp_association *,
struct sock *);
......
......@@ -74,7 +74,7 @@
printk(KERN_DEBUG msg); } while (0)
#else
/* Validate arguments and do nothing */
static void inline int __attribute__ ((format (printf, 2, 3)))
static inline void __attribute__ ((format (printf, 2, 3)))
SOCK_DEBUG(struct sock *sk, const char *msg, ...)
{
}
......
......@@ -1626,6 +1626,9 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
/* Connectionless channel */
if (sk->sk_type == SOCK_DGRAM) {
skb = l2cap_create_connless_pdu(sk, msg, len);
if (IS_ERR(skb))
err = PTR_ERR(skb);
else
err = l2cap_do_send(sk, skb);
goto done;
}
......
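l2cap_create_connless_pdu() reports failure through an error pointer rather than NULL, so the old code could hand an ERR_PTR value straight to l2cap_do_send(). The fix applies the kernel's error-pointer convention; a minimal kernel-style sketch of that convention with hypothetical helper names:

	skb = create_pdu(sk, msg, len);          /* may return ERR_PTR(-EFAULT), etc. */
	if (IS_ERR(skb))
		err = PTR_ERR(skb);              /* recover the negative errno */
	else
		err = send_pdu(sk, skb);         /* only dereference a real pointer */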
......@@ -70,17 +70,13 @@ int inet_csk_bind_conflict(const struct sock *sk,
(!sk->sk_bound_dev_if ||
!sk2->sk_bound_dev_if ||
sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
const __be32 sk2_rcv_saddr = inet_rcv_saddr(sk2);
if (!reuse || !sk2->sk_reuse ||
sk2->sk_state == TCP_LISTEN) {
const __be32 sk2_rcv_saddr = inet_rcv_saddr(sk2);
if (!sk2_rcv_saddr || !sk_rcv_saddr ||
sk2_rcv_saddr == sk_rcv_saddr)
break;
} else if (reuse && sk2->sk_reuse &&
sk2_rcv_saddr &&
sk2_rcv_saddr == sk_rcv_saddr)
break;
}
}
}
return node != NULL;
......@@ -124,13 +120,11 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum)
smallest_size = tb->num_owners;
smallest_rover = rover;
if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) {
if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) {
spin_unlock(&head->lock);
snum = smallest_rover;
goto have_snum;
}
}
}
goto next;
}
break;
......
......@@ -42,16 +42,11 @@ int inet6_csk_bind_conflict(const struct sock *sk,
if (sk != sk2 &&
(!sk->sk_bound_dev_if ||
!sk2->sk_bound_dev_if ||
sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
if ((!sk->sk_reuse || !sk2->sk_reuse ||
sk->sk_bound_dev_if == sk2->sk_bound_dev_if) &&
(!sk->sk_reuse || !sk2->sk_reuse ||
sk2->sk_state == TCP_LISTEN) &&
ipv6_rcv_saddr_equal(sk, sk2))
break;
else if (sk->sk_reuse && sk2->sk_reuse &&
!ipv6_addr_any(inet6_rcv_saddr(sk)) &&
ipv6_rcv_saddr_equal(sk, sk2))
break;
}
}
return node != NULL;
......
......@@ -1194,8 +1194,10 @@ void sctp_assoc_update(struct sctp_association *asoc,
/* Remove any peer addresses not present in the new association. */
list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
trans = list_entry(pos, struct sctp_transport, transports);
if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr))
sctp_assoc_del_peer(asoc, &trans->ipaddr);
if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) {
sctp_assoc_rm_peer(asoc, trans);
continue;
}
if (asoc->state >= SCTP_STATE_ESTABLISHED)
sctp_transport_reset(trans);
......
......@@ -144,6 +144,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
/* Use SCTP specific send buffer space queues. */
ep->sndbuf_policy = sctp_sndbuf_policy;
sk->sk_data_ready = sctp_data_ready;
sk->sk_write_space = sctp_write_space;
sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
......
......@@ -108,7 +108,7 @@ static const struct sctp_paramhdr prsctp_param = {
cpu_to_be16(sizeof(struct sctp_paramhdr)),
};
/* A helper to initialize to initialize an op error inside a
/* A helper to initialize an op error inside a
* provided chunk, as most cause codes will be embedded inside an
* abort chunk.
*/
......@@ -125,6 +125,29 @@ void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code,
chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(sctp_errhdr_t), &err);
}
/* A helper to initialize an op error inside a
* provided chunk, as most cause codes will be embedded inside an
* abort chunk. Differs from sctp_init_cause in that it won't oops
* if there isn't enough space in the op error chunk
*/
int sctp_init_cause_fixed(struct sctp_chunk *chunk, __be16 cause_code,
size_t paylen)
{
sctp_errhdr_t err;
__u16 len;
/* Cause code constants are now defined in network order. */
err.cause = cause_code;
len = sizeof(sctp_errhdr_t) + paylen;
err.length = htons(len);
if (skb_tailroom(chunk->skb) < len)
return -ENOSPC;
chunk->subh.err_hdr = sctp_addto_chunk_fixed(chunk,
sizeof(sctp_errhdr_t),
&err);
return 0;
}
/* 3.3.2 Initiation (INIT) (1)
*
* This chunk is used to initiate a SCTP association between two
......@@ -208,7 +231,8 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
sp = sctp_sk(asoc->base.sk);
num_types = sp->pf->supported_addrs(sp, types);
chunksize = sizeof(init) + addrs_len + SCTP_SAT_LEN(num_types);
chunksize = sizeof(init) + addrs_len;
chunksize += WORD_ROUND(SCTP_SAT_LEN(num_types));
chunksize += sizeof(ecap_param);
if (sctp_prsctp_enable)
......@@ -238,14 +262,14 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
/* Add HMACS parameter length if any were defined */
auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs;
if (auth_hmacs->length)
chunksize += ntohs(auth_hmacs->length);
chunksize += WORD_ROUND(ntohs(auth_hmacs->length));
else
auth_hmacs = NULL;
/* Add CHUNKS parameter length */
auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks;
if (auth_chunks->length)
chunksize += ntohs(auth_chunks->length);
chunksize += WORD_ROUND(ntohs(auth_chunks->length));
else
auth_chunks = NULL;
......@@ -255,7 +279,8 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
/* If we have any extensions to report, account for that */
if (num_ext)
chunksize += sizeof(sctp_supported_ext_param_t) + num_ext;
chunksize += WORD_ROUND(sizeof(sctp_supported_ext_param_t) +
num_ext);
/* RFC 2960 3.3.2 Initiation (INIT) (1)
*
......@@ -397,13 +422,13 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs;
if (auth_hmacs->length)
chunksize += ntohs(auth_hmacs->length);
chunksize += WORD_ROUND(ntohs(auth_hmacs->length));
else
auth_hmacs = NULL;
auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks;
if (auth_chunks->length)
chunksize += ntohs(auth_chunks->length);
chunksize += WORD_ROUND(ntohs(auth_chunks->length));
else
auth_chunks = NULL;
......@@ -412,7 +437,8 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
}
if (num_ext)
chunksize += sizeof(sctp_supported_ext_param_t) + num_ext;
chunksize += WORD_ROUND(sizeof(sctp_supported_ext_param_t) +
num_ext);
/* Now allocate and fill out the chunk. */
retval = sctp_make_chunk(asoc, SCTP_CID_INIT_ACK, 0, chunksize);
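The chunksize arithmetic in these hunks rounds each variable-length parameter up to a 4-byte boundary with WORD_ROUND() before adding it in, because SCTP parameters are padded to 32-bit alignment on the wire; summing the unpadded lengths can undersize the allocated chunk. A plain-C stand-in for the rounding the macro is used for here:

#include <stdio.h>

/* Round a length up to the next multiple of 4, matching SCTP's
 * on-the-wire parameter padding. */
static size_t word_round(size_t len)
{
	return (len + 3) & ~(size_t)3;
}

int main(void)
{
	/* e.g. a 6-byte parameter occupies 8 bytes once padded */
	printf("%zu -> %zu\n", (size_t)6, word_round(6));
	return 0;
}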
......@@ -1129,6 +1155,24 @@ static struct sctp_chunk *sctp_make_op_error_space(
return retval;
}
/* Create an Operation Error chunk of a fixed size,
* specifically, max(asoc->pathmtu, SCTP_DEFAULT_MAXSEGMENT)
* This is a helper function to allocate an error chunk for
* those invalid parameter codes in which we may not want
* to report all the errors, if the incoming chunk is large
*/
static inline struct sctp_chunk *sctp_make_op_error_fixed(
const struct sctp_association *asoc,
const struct sctp_chunk *chunk)
{
size_t size = asoc ? asoc->pathmtu : 0;
if (!size)
size = SCTP_DEFAULT_MAXSEGMENT;
return sctp_make_op_error_space(asoc, chunk, size);
}
/* Create an Operation Error chunk. */
struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc,
const struct sctp_chunk *chunk,
......@@ -1371,6 +1415,18 @@ void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data)
return target;
}
/* Append bytes to the end of a chunk. Returns NULL if there isn't sufficient
* space in the chunk
*/
void *sctp_addto_chunk_fixed(struct sctp_chunk *chunk,
int len, const void *data)
{
if (skb_tailroom(chunk->skb) > len)
return sctp_addto_chunk(chunk, len, data);
else
return NULL;
}
/* Append bytes from user space to the end of a chunk. Will panic if
* chunk is not big enough.
* Returns a kernel err value.
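sctp_init_cause_fixed() and sctp_addto_chunk_fixed() exist so that error reporting triggered by an oversized or malformed incoming chunk cannot overrun the fixed-size error chunk built by sctp_make_op_error_fixed() (sized at max(pathmtu, SCTP_DEFAULT_MAXSEGMENT)): when the tailroom runs out they refuse the append instead of oopsing. A hedged standalone sketch of the bounded-append idea, not the SCTP skb code:

#include <string.h>
#include <stddef.h>

struct buf { unsigned char data[1024]; size_t used; };

/* Append only if it fits; return NULL rather than overflow, mirroring
 * the "won't oops if there isn't enough space" behaviour described above. */
static void *append_fixed(struct buf *b, const void *src, size_t len)
{
	if (sizeof(b->data) - b->used < len)
		return NULL;                     /* caller skips this error entry */
	memcpy(b->data + b->used, src, len);
	b->used += len;
	return b->data + b->used - len;
}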
......@@ -1974,13 +2030,12 @@ static sctp_ierror_t sctp_process_unk_param(const struct sctp_association *asoc,
* returning multiple unknown parameters.
*/
if (NULL == *errp)
*errp = sctp_make_op_error_space(asoc, chunk,
ntohs(chunk->chunk_hdr->length));
*errp = sctp_make_op_error_fixed(asoc, chunk);
if (*errp) {
sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM,
sctp_init_cause_fixed(*errp, SCTP_ERROR_UNKNOWN_PARAM,
WORD_ROUND(ntohs(param.p->length)));
sctp_addto_chunk(*errp,
sctp_addto_chunk_fixed(*errp,
WORD_ROUND(ntohs(param.p->length)),
param.v);
} else {
......@@ -3315,21 +3370,6 @@ int sctp_process_asconf_ack(struct sctp_association *asoc,
sctp_chunk_free(asconf);
asoc->addip_last_asconf = NULL;
/* Send the next asconf chunk from the addip chunk queue. */
if (!list_empty(&asoc->addip_chunk_list)) {
struct list_head *entry = asoc->addip_chunk_list.next;
asconf = list_entry(entry, struct sctp_chunk, list);
list_del_init(entry);
/* Hold the chunk until an ASCONF_ACK is received. */
sctp_chunk_hold(asconf);
if (sctp_primitive_ASCONF(asoc, asconf))
sctp_chunk_free(asconf);
else
asoc->addip_last_asconf = asconf;
}
return retval;
}
......
......@@ -962,6 +962,29 @@ static int sctp_cmd_send_msg(struct sctp_association *asoc,
}
/* Send the next ASCONF packet currently stored in the association.
* This happens after the ASCONF_ACK was successfully processed.
*/
static void sctp_cmd_send_asconf(struct sctp_association *asoc)
{
/* Send the next asconf chunk from the addip chunk
* queue.
*/
if (!list_empty(&asoc->addip_chunk_list)) {
struct list_head *entry = asoc->addip_chunk_list.next;
struct sctp_chunk *asconf = list_entry(entry,
struct sctp_chunk, list);
list_del_init(entry);
/* Hold the chunk until an ASCONF_ACK is received. */
sctp_chunk_hold(asconf);
if (sctp_primitive_ASCONF(asoc, asconf))
sctp_chunk_free(asconf);
else
asoc->addip_last_asconf = asconf;
}
}
/* These three macros allow us to pull the debugging code out of the
* main flow of sctp_do_sm() to keep attention focused on the real
......@@ -1617,6 +1640,9 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
}
error = sctp_cmd_send_msg(asoc, cmd->obj.msg);
break;
case SCTP_CMD_SEND_NEXT_ASCONF:
sctp_cmd_send_asconf(asoc);
break;
default:
printk(KERN_WARNING "Impossible command: %u, %p\n",
cmd->verb, cmd->obj.ptr);
......
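Sending the next queued ASCONF moves out of sctp_process_asconf_ack() and into a new side-effect command, so it runs from the state machine's command interpreter only after the ACK has been fully processed and the disposition decided. From the state function's point of view the pattern is, condensed from the hunks in this diff:

	if (!sctp_process_asconf_ack((struct sctp_association *)asoc, asconf_ack)) {
		/* queue the side effect; the interpreter calls
		 * sctp_cmd_send_asconf() when it drains the command list */
		sctp_add_cmd_sf(commands, SCTP_CMD_SEND_NEXT_ASCONF, SCTP_NULL());
		return SCTP_DISPOSITION_CONSUME;
	}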
......@@ -3676,8 +3676,14 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
if (!sctp_process_asconf_ack((struct sctp_association *)asoc,
asconf_ack))
asconf_ack)) {
/* Successfully processed ASCONF_ACK. We can
* release the next asconf if we have one.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_NEXT_ASCONF,
SCTP_NULL());
return SCTP_DISPOSITION_CONSUME;
}
abort = sctp_make_abort(asoc, asconf_ack,
sizeof(sctp_errhdr_t));
......
......@@ -3719,9 +3719,9 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
sp->hmac = NULL;
SCTP_DBG_OBJCNT_INC(sock);
percpu_counter_inc(&sctp_sockets_allocated);
local_bh_disable();
percpu_counter_inc(&sctp_sockets_allocated);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
local_bh_enable();
......@@ -3738,8 +3738,8 @@ SCTP_STATIC void sctp_destroy_sock(struct sock *sk)
/* Release our hold on the endpoint. */
ep = sctp_sk(sk)->ep;
sctp_endpoint_free(ep);
percpu_counter_dec(&sctp_sockets_allocated);
local_bh_disable();
percpu_counter_dec(&sctp_sockets_allocated);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
local_bh_enable();
}
......@@ -6185,6 +6185,19 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
goto out;
}
void sctp_data_ready(struct sock *sk, int len)
{
struct socket_wq *wq;
rcu_read_lock();
wq = rcu_dereference(sk->sk_wq);
if (wq_has_sleeper(wq))
wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
POLLRDNORM | POLLRDBAND);
sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
rcu_read_unlock();
}
/* If socket sndbuf has changed, wake up all per association waiters. */
void sctp_write_space(struct sock *sk)
{
......
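sctp_data_ready() becomes SCTP's own sk_data_ready callback: it dereferences the RCU-protected socket wait queue, uses wq_has_sleeper() so the POLLIN wakeup is only issued when a reader is actually waiting, and then signals async waiters. The wiring happens at endpoint init time, as in the sctp_endpoint_init hunk earlier in this diff; condensed:

	/* set up when the endpoint is initialised */
	sk->sk_data_ready  = sctp_data_ready;   /* wake readers when data arrives */
	sk->sk_write_space = sctp_write_space;  /* wake writers when sndbuf frees up */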