Commit bdec4196 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:
 "Just a pile of random fixes, including:

   1) Do not apply TSO limits to non-TSO packets, fix from Herbert Xu.

   2) MDI{,X} eeprom check in e100 driver is reversed, from John W.
      Linville.

   3) Missing error return assignments in several ethernet drivers, from
      Julia Lawall.

   4) Altera TSE device doesn't come back up after ifconfig down/up
      sequence, fix from Kostya Belezko.

   5) Add more cases to the check for whether the qmi_wwan device has a
      bogus MAC address and needs to be assigned a random one.  From
      Kristian Evensen.

   6) Fix interrupt hangs in CPSW, from Felipe Balbi.

   7) Implement ndo_features_check in r8152 so that the stack doesn't
      feed GSO packets which are outside of the chip's capabilities.
      From Hayes Wang"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (26 commits)
  qla3xxx: don't allow never end busy loop
  xen-netback: fixing the propagation of the transmit shaper timeout
  r8152: support ndo_features_check
  batman-adv: fix potential TT client + orig-node memory leak
  batman-adv: fix multicast counter when purging originators
  batman-adv: fix counter for multicast supporting nodes
  batman-adv: fix lock class for decoding hash in network-coding.c
  batman-adv: fix delayed foreign originator recognition
  batman-adv: fix and simplify condition when bonding should be used
  Revert "mac80211: Fix accounting of the tailroom-needed counter"
  net: ethernet: cpsw: fix hangs with interrupts
  enic: free all rq buffs when allocation fails
  qmi_wwan: Set random MAC on devices with buggy fw
  openvswitch: Consistently include VLAN header in flow and port stats.
  tcp: Do not apply TSO segment limit to non-TSO packets
  Altera TSE: Add missing phydev
  net/mlx4_core: Fix error flow in mlx4_init_hca()
  net/mlx4_core: Correcly update the mtt's offset in the MR re-reg flow
  qlcnic: Fix return value in qlcnic_probe()
  net: axienet: fix error return code
  ...
parents 0adc1803 2abad79a

@@ -850,8 +850,10 @@ static int emac_probe(struct platform_device *pdev)
     }
     db->clk = devm_clk_get(&pdev->dev, NULL);
-    if (IS_ERR(db->clk))
+    if (IS_ERR(db->clk)) {
+        ret = PTR_ERR(db->clk);
         goto out;
+    }
     clk_prepare_enable(db->clk);

@@ -1170,10 +1170,6 @@ static int tse_open(struct net_device *dev)
 init_error:
     free_skbufs(dev);
 alloc_skbuf_error:
-    if (priv->phydev) {
-        phy_disconnect(priv->phydev);
-        priv->phydev = NULL;
-    }
 phy_error:
     return ret;
 }

@@ -1186,12 +1182,9 @@ static int tse_shutdown(struct net_device *dev)
     int ret;
     unsigned long int flags;
-    /* Stop and disconnect the PHY */
-    if (priv->phydev) {
+    /* Stop the PHY */
+    if (priv->phydev)
         phy_stop(priv->phydev);
-        phy_disconnect(priv->phydev);
-        priv->phydev = NULL;
-    }
     netif_stop_queue(dev);
     napi_disable(&priv->napi);

@@ -1525,6 +1518,10 @@ static int altera_tse_probe(struct platform_device *pdev)
 static int altera_tse_remove(struct platform_device *pdev)
 {
     struct net_device *ndev = platform_get_drvdata(pdev);
+    struct altera_tse_private *priv = netdev_priv(ndev);
+
+    if (priv->phydev)
+        phy_disconnect(priv->phydev);
     platform_set_drvdata(pdev, NULL);
     altera_tse_mdio_destroy(ndev);

@@ -1616,7 +1616,7 @@ static int enic_open(struct net_device *netdev)
         if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
             netdev_err(netdev, "Unable to alloc receive buffers\n");
             err = -ENOMEM;
-            goto err_out_notify_unset;
+            goto err_out_free_rq;
         }
     }

@@ -1649,7 +1649,9 @@ static int enic_open(struct net_device *netdev)
     return 0;
-err_out_notify_unset:
+err_out_free_rq:
+    for (i = 0; i < enic->rq_count; i++)
+        vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
     enic_dev_notify_unset(enic);
 err_out_free_intr:
     enic_free_intr(enic);

@@ -1543,7 +1543,7 @@ static int e100_phy_init(struct nic *nic)
         mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
     } else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
        (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
-       !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
+       (nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
         /* enable/disable MDI/MDI-X auto-switching. */
         mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
                    nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);

@@ -829,7 +829,7 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
     if (desc_n >= ring->count || desc_n < 0) {
         dev_info(&pf->pdev->dev,
                  "descriptor %d not found\n", desc_n);
-        return;
+        goto out;
     }
     if (!is_rx_ring) {
         txd = I40E_TX_DESC(ring, desc_n);

@@ -855,6 +855,8 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
     } else {
         dev_info(&pf->pdev->dev, "dump desc rx/tx <vsi_seid> <ring_id> [<desc_n>]\n");
     }
+out:
     kfree(ring);
 }

@@ -1125,7 +1125,7 @@ static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
     u32 swmask = mask;
     u32 fwmask = mask << 16;
     s32 ret_val = 0;
-    s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
+    s32 i = 0, timeout = 200;
     while (i < timeout) {
         if (igb_get_hw_semaphore(hw)) {

@@ -1829,7 +1829,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
         err = mlx4_dev_cap(dev, &dev_cap);
         if (err) {
             mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
-            goto err_stop_fw;
+            return err;
         }
         choose_steering_mode(dev, &dev_cap);

@@ -1860,7 +1860,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
                                 &init_hca);
         if ((long long) icm_size < 0) {
             err = icm_size;
-            goto err_stop_fw;
+            return err;
         }
         dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;

@@ -1874,7 +1874,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
         err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
         if (err)
-            goto err_stop_fw;
+            return err;
         err = mlx4_INIT_HCA(dev, &init_hca);
         if (err) {

@@ -1886,7 +1886,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
             err = mlx4_query_func(dev, &dev_cap);
             if (err < 0) {
                 mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n");
-                goto err_stop_fw;
+                goto err_close;
             } else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) {
                 dev->caps.num_eqs = dev_cap.max_eqs;
                 dev->caps.reserved_eqs = dev_cap.reserved_eqs;

@@ -2006,11 +2006,6 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
     if (!mlx4_is_slave(dev))
         mlx4_free_icms(dev);
-err_stop_fw:
-    if (!mlx4_is_slave(dev)) {
-        mlx4_UNMAP_FA(dev);
-        mlx4_free_icm(dev, priv->fw.fw_icm, 0);
-    }
     return err;
 }

@@ -584,6 +584,7 @@ EXPORT_SYMBOL_GPL(mlx4_mr_free);
 void mlx4_mr_rereg_mem_cleanup(struct mlx4_dev *dev, struct mlx4_mr *mr)
 {
     mlx4_mtt_cleanup(dev, &mr->mtt);
+    mr->mtt.order = -1;
 }
 EXPORT_SYMBOL_GPL(mlx4_mr_rereg_mem_cleanup);

@@ -593,14 +594,14 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
 {
     int err;
-    mpt_entry->start = cpu_to_be64(iova);
-    mpt_entry->length = cpu_to_be64(size);
-    mpt_entry->entity_size = cpu_to_be32(page_shift);
     err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
     if (err)
         return err;
+    mpt_entry->start = cpu_to_be64(mr->iova);
+    mpt_entry->length = cpu_to_be64(mr->size);
+    mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);
     mpt_entry->pd_flags &= cpu_to_be32(MLX4_MPT_PD_MASK |
                                        MLX4_MPT_PD_FLAG_EN_INV);
     mpt_entry->flags &= cpu_to_be32(MLX4_MPT_FLAG_FREE |

@@ -4033,8 +4033,10 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
     (void)pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
     mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd),
                                   &mgp->cmd_bus, GFP_KERNEL);
-    if (mgp->cmd == NULL)
+    if (!mgp->cmd) {
+        status = -ENOMEM;
         goto abort_with_enabled;
+    }
     mgp->board_span = pci_resource_len(pdev, 0);
     mgp->iomem_base = pci_resource_start(pdev, 0);

@@ -146,10 +146,7 @@ static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
 {
     int i = 0;
-    while (i < 10) {
-        if (i)
-            ssleep(1);
+    do {
         if (ql_sem_lock(qdev,
                         QL_DRVR_SEM_MASK,
                         (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)

@@ -158,7 +155,8 @@ static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
                        "driver lock acquired\n");
             return 1;
         }
-    }
+        ssleep(1);
+    } while (++i < 10);
     netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n");
     return 0;

@@ -2605,6 +2605,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
     } else {
         dev_err(&pdev->dev,
             "%s: failed. Please Reboot\n", __func__);
+        err = -ENODEV;
         goto err_out_free_hw;
     }

@@ -757,6 +757,14 @@ static void cpsw_rx_handler(void *token, int len, int status)
 static irqreturn_t cpsw_interrupt(int irq, void *dev_id)
 {
     struct cpsw_priv *priv = dev_id;
+    int value = irq - priv->irqs_table[0];
+
+    /* NOTICE: Ending IRQ here. The trick with the 'value' variable above
+     * is to make sure we will always write the correct value to the EOI
+     * register. Namely 0 for RX_THRESH Interrupt, 1 for RX Interrupt, 2
+     * for TX Interrupt and 3 for MISC Interrupt.
+     */
+    cpdma_ctlr_eoi(priv->dma, value);
     cpsw_intr_disable(priv);
     if (priv->irq_enabled == true) {

@@ -786,8 +794,6 @@ static int cpsw_poll(struct napi_struct *napi, int budget)
     int num_tx, num_rx;
     num_tx = cpdma_chan_process(priv->txch, 128);
-    if (num_tx)
-        cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
     num_rx = cpdma_chan_process(priv->rxch, budget);
     if (num_rx < budget) {

@@ -795,7 +801,6 @@ static int cpsw_poll(struct napi_struct *napi, int budget)
         napi_complete(napi);
         cpsw_intr_enable(priv);
-        cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
         prim_cpsw = cpsw_get_slave_priv(priv, 0);
         if (prim_cpsw->irq_enabled == false) {
             prim_cpsw->irq_enabled = true;

@@ -1310,8 +1315,6 @@ static int cpsw_ndo_open(struct net_device *ndev)
     napi_enable(&priv->napi);
     cpdma_ctlr_start(priv->dma);
     cpsw_intr_enable(priv);
-    cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
-    cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
     prim_cpsw = cpsw_get_slave_priv(priv, 0);
     if (prim_cpsw->irq_enabled == false) {

@@ -1578,9 +1581,6 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev)
     cpdma_chan_start(priv->txch);
     cpdma_ctlr_int_ctrl(priv->dma, true);
     cpsw_intr_enable(priv);
-    cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
-    cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
 }

 static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)

@@ -1620,9 +1620,6 @@ static void cpsw_ndo_poll_controller(struct net_device *ndev)
     cpsw_interrupt(ndev->irq, priv);
     cpdma_ctlr_int_ctrl(priv->dma, true);
     cpsw_intr_enable(priv);
-    cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
-    cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
 }
 #endif

@@ -1043,6 +1043,7 @@ static int temac_of_probe(struct platform_device *op)
     lp->regs = of_iomap(op->dev.of_node, 0);
     if (!lp->regs) {
         dev_err(&op->dev, "could not map temac regs.\n");
+        rc = -ENOMEM;
         goto nodev;
     }

@@ -1062,6 +1063,7 @@ static int temac_of_probe(struct platform_device *op)
     np = of_parse_phandle(op->dev.of_node, "llink-connected", 0);
     if (!np) {
         dev_err(&op->dev, "could not find DMA node\n");
+        rc = -ENODEV;
         goto err_iounmap;
     }

@@ -1501,6 +1501,7 @@ static int axienet_of_probe(struct platform_device *op)
     lp->regs = of_iomap(op->dev.of_node, 0);
     if (!lp->regs) {
         dev_err(&op->dev, "could not map Axi Ethernet regs.\n");
+        ret = -ENOMEM;
         goto nodev;
     }
     /* Setup checksum offload, but default to off if not specified */

@@ -1563,6 +1564,7 @@ static int axienet_of_probe(struct platform_device *op)
     np = of_parse_phandle(op->dev.of_node, "axistream-connected", 0);
     if (!np) {
         dev_err(&op->dev, "could not find DMA node\n");
+        ret = -ENODEV;
         goto err_iounmap;
     }
     lp->dma_regs = of_iomap(np, 0);

@@ -1109,6 +1109,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
     res = platform_get_resource(ofdev, IORESOURCE_IRQ, 0);
     if (!res) {
         dev_err(dev, "no IRQ found\n");
+        rc = -ENXIO;
         goto error;
     }

@@ -56,6 +56,8 @@ struct qmi_wwan_state {
 /* default ethernet address used by the modem */
 static const u8 default_modem_addr[ETH_ALEN] = {0x02, 0x50, 0xf3};
+static const u8 buggy_fw_addr[ETH_ALEN] = {0x00, 0xa0, 0xc6, 0x00, 0x00, 0x00};
 /* Make up an ethernet header if the packet doesn't have one.
  *
  * A firmware bug common among several devices cause them to send raw

@@ -332,10 +334,12 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
         usb_driver_release_interface(driver, info->data);
     }
-    /* Never use the same address on both ends of the link, even
-     * if the buggy firmware told us to.
+    /* Never use the same address on both ends of the link, even if the
+     * buggy firmware told us to. Or, if device is assigned the well-known
+     * buggy firmware MAC address, replace it with a random address,
      */
-    if (ether_addr_equal(dev->net->dev_addr, default_modem_addr))
+    if (ether_addr_equal(dev->net->dev_addr, default_modem_addr) ||
+        ether_addr_equal(dev->net->dev_addr, buggy_fw_addr))
         eth_hw_addr_random(dev->net);
     /* make MAC addr easily distinguishable from an IP header */

@@ -1897,6 +1897,22 @@ static void _rtl8152_set_rx_mode(struct net_device *netdev)
     netif_wake_queue(netdev);
 }
+static netdev_features_t
+rtl8152_features_check(struct sk_buff *skb, struct net_device *dev,
+                       netdev_features_t features)
+{
+    u32 mss = skb_shinfo(skb)->gso_size;
+    int max_offset = mss ? GTTCPHO_MAX : TCPHO_MAX;
+    int offset = skb_transport_offset(skb);
+
+    if ((mss || skb->ip_summed == CHECKSUM_PARTIAL) && offset > max_offset)
+        features &= ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
+    else if ((skb->len + sizeof(struct tx_desc)) > agg_buf_sz)
+        features &= ~NETIF_F_GSO_MASK;
+
+    return features;
+}
 static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
                                       struct net_device *netdev)
 {

@@ -3706,6 +3722,7 @@ static const struct net_device_ops rtl8152_netdev_ops = {
     .ndo_set_mac_address = rtl8152_set_mac_address,
     .ndo_change_mtu = rtl8152_change_mtu,
     .ndo_validate_addr = eth_validate_addr,
+    .ndo_features_check = rtl8152_features_check,
 };

 static void r8152b_get_version(struct r8152 *tp)

@@ -737,6 +737,7 @@ static void connect(struct backend_info *be)
         }
         queue->remaining_credit = credit_bytes;
+        queue->credit_usec = credit_usec;
         err = connect_rings(be, queue);
         if (err) {

@@ -1270,8 +1270,7 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev);
  *
  * @IEEE80211_KEY_FLAG_GENERATE_IV: This flag should be set by the
  *    driver to indicate that it requires IV generation for this
- *    particular key. Setting this flag does not necessarily mean that SKBs
- *    will have sufficient tailroom for ICV or MIC.
+ *    particular key.
  * @IEEE80211_KEY_FLAG_GENERATE_MMIC: This flag should be set by
  *    the driver for a TKIP key if it requires Michael MIC
  *    generation in software.

@@ -1283,9 +1282,7 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev);
  * @IEEE80211_KEY_FLAG_PUT_IV_SPACE: This flag should be set by the driver
  *    if space should be prepared for the IV, but the IV
  *    itself should not be generated. Do not set together with
- *    @IEEE80211_KEY_FLAG_GENERATE_IV on the same key. Setting this flag does
- *    not necessarily mean that SKBs will have sufficient tailroom for ICV or
- *    MIC.
+ *    @IEEE80211_KEY_FLAG_GENERATE_IV on the same key.
  * @IEEE80211_KEY_FLAG_RX_MGMT: This key will be used to decrypt received
  *    management frames. The flag can help drivers that have a hardware
  *    crypto implementation that doesn't deal with management frames

@@ -685,11 +685,13 @@ static void batadv_mcast_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
         if (orig_initialized)
             atomic_dec(&bat_priv->mcast.num_disabled);
         orig->capabilities |= BATADV_ORIG_CAPA_HAS_MCAST;
-    /* If mcast support is being switched off increase the disabled
-     * mcast node counter.
+    /* If mcast support is being switched off or if this is an initial
+     * OGM without mcast support then increase the disabled mcast
+     * node counter.
      */
     } else if (!orig_mcast_enabled &&
-               orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST) {
+               (orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST ||
+                !orig_initialized)) {
         atomic_inc(&bat_priv->mcast.num_disabled);
         orig->capabilities &= ~BATADV_ORIG_CAPA_HAS_MCAST;
     }

@@ -738,7 +740,8 @@ void batadv_mcast_purge_orig(struct batadv_orig_node *orig)
 {
     struct batadv_priv *bat_priv = orig->bat_priv;
-    if (!(orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST))
+    if (!(orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST) &&
+        orig->capa_initialized & BATADV_ORIG_CAPA_HAS_MCAST)
         atomic_dec(&bat_priv->mcast.num_disabled);
     batadv_mcast_want_unsnoop_update(bat_priv, orig, BATADV_NO_FLAGS);

@@ -133,7 +133,7 @@ int batadv_nc_mesh_init(struct batadv_priv *bat_priv)
     if (!bat_priv->nc.decoding_hash)
         goto err;
-    batadv_hash_set_lock_class(bat_priv->nc.coding_hash,
+    batadv_hash_set_lock_class(bat_priv->nc.decoding_hash,
                                &batadv_nc_decoding_hash_lock_class_key);
     INIT_DELAYED_WORK(&bat_priv->nc.work, batadv_nc_worker);

@@ -570,9 +570,6 @@ static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
     batadv_frag_purge_orig(orig_node, NULL);
-    batadv_tt_global_del_orig(orig_node->bat_priv, orig_node, -1,
-                              "originator timed out");
     if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
         orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node);

@@ -678,6 +675,7 @@ struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
     atomic_set(&orig_node->last_ttvn, 0);
     orig_node->tt_buff = NULL;
     orig_node->tt_buff_len = 0;
+    orig_node->last_seen = jiffies;
     reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
     orig_node->bcast_seqno_reset = reset_time;
 #ifdef CONFIG_BATMAN_ADV_MCAST

@@ -977,6 +975,9 @@ static void _batadv_purge_orig(struct batadv_priv *bat_priv)
             if (batadv_purge_orig_node(bat_priv, orig_node)) {
                 batadv_gw_node_delete(bat_priv, orig_node);
                 hlist_del_rcu(&orig_node->hash_entry);
+                batadv_tt_global_del_orig(orig_node->bat_priv,
+                                          orig_node, -1,
+                                          "originator timed out");
                 batadv_orig_node_free_ref(orig_node);
                 continue;
             }

@@ -443,11 +443,13 @@ batadv_find_router(struct batadv_priv *bat_priv,
     router = batadv_orig_router_get(orig_node, recv_if);
+    if (!router)
+        return router;
     /* only consider bonding for recv_if == BATADV_IF_DEFAULT (first hop)
      * and if activated.
      */
-    if (recv_if == BATADV_IF_DEFAULT || !atomic_read(&bat_priv->bonding) ||
-        !router)
+    if (!(recv_if == BATADV_IF_DEFAULT && atomic_read(&bat_priv->bonding)))
         return router;
     /* bonding: loop through the list of possible routers found

@@ -2019,7 +2019,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
         if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
             break;
-        if (tso_segs == 1) {
+        if (tso_segs == 1 || !max_segs) {
             if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
                                          (tcp_skb_is_last(sk, skb) ?
                                           nonagle : TCP_NAGLE_PUSH))))

@@ -2032,7 +2032,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
         }
         limit = mss_now;
-        if (tso_segs > 1 && !tcp_urg_mode(tp))
+        if (tso_segs > 1 && max_segs && !tcp_urg_mode(tp))
             limit = tcp_mss_split_point(sk, skb, mss_now,
                                         min_t(unsigned int,
                                               cwnd_quota,

@@ -140,7 +140,9 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
     if (!ret) {
         key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE;
-        if (!(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC))
+        if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
+              (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) ||
+              (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)))
             sdata->crypto_tx_tailroom_needed_cnt--;
         WARN_ON((key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) &&

@@ -188,7 +190,9 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
     sta = key->sta;
     sdata = key->sdata;
-    if (!(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC))
+    if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
+          (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) ||
+          (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)))
         increment_tailroom_need_count(sdata);
     ret = drv_set_key(key->local, DISABLE_KEY, sdata,

@@ -884,7 +888,9 @@ void ieee80211_remove_key(struct ieee80211_key_conf *keyconf)
     if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) {
         key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE;
-        if (!(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC))
+        if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
+              (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) ||
+              (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)))
             increment_tailroom_need_count(key->sdata);
     }

@@ -70,6 +70,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
 {
     struct flow_stats *stats;
     int node = numa_node_id();
+    int len = skb->len + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
     stats = rcu_dereference(flow->stats[node]);

@@ -105,7 +106,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
         if (likely(new_stats)) {
             new_stats->used = jiffies;
             new_stats->packet_count = 1;
-            new_stats->byte_count = skb->len;
+            new_stats->byte_count = len;
             new_stats->tcp_flags = tcp_flags;
             spin_lock_init(&new_stats->lock);

@@ -120,7 +121,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
     stats->used = jiffies;
     stats->packet_count++;
-    stats->byte_count += skb->len;
+    stats->byte_count += len;
     stats->tcp_flags |= tcp_flags;
 unlock:
     spin_unlock(&stats->lock);

@@ -480,7 +480,7 @@ void ovs_vport_receive(struct vport *vport, struct sk_buff *skb,
     stats = this_cpu_ptr(vport->percpu_stats);
     u64_stats_update_begin(&stats->syncp);
     stats->rx_packets++;
-    stats->rx_bytes += skb->len;
+    stats->rx_bytes += skb->len + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
     u64_stats_update_end(&stats->syncp);
     OVS_CB(skb)->input_vport = vport;