Commit 9044f940 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) Fill in ethtool link parameters for all link types in cxgb4, from
    Hariprasad Shenai.

 2) Fix probe regressions in stmmac driver, from Huacai Chen.

 3) Network namespace reference leak on error in rtnetlink, from Nicolas
    Dichtel.

 4) Remove erroneous BUG check which can actually trigger legitimately,
    in xen-netfront.  From Seth Forshee.

 5) Validate length of IFLA_BOND_ARP_IP_TARGET netlink attributes, from
    Thomas Graf.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
  cxgb4: Fill in supported link mode for SFP modules
  xen-netfront: Remove BUGs on paged skb data which crosses a page boundary
  sh_eth: Fix sleeping function called from invalid context
  stmmac: platform: Move plat_dat checking earlier
  sh_eth: Fix skb alloc size and alignment adjust rule.
  rtnetlink: release net refcnt on error in do_setlink()
  bond: Check length of IFLA_BOND_ARP_IP_TARGET attributes
parents 23c836ce 4c2d5186
drivers/net/bonding/bond_netlink.c
@@ -225,7 +225,12 @@ static int bond_changelink(struct net_device *bond_dev,
 		bond_option_arp_ip_targets_clear(bond);
 
 		nla_for_each_nested(attr, data[IFLA_BOND_ARP_IP_TARGET], rem) {
-			__be32 target = nla_get_be32(attr);
+			__be32 target;
+
+			if (nla_len(attr) < sizeof(target))
+				return -EINVAL;
+
+			target = nla_get_be32(attr);
 
 			bond_opt_initval(&newval, (__force u64)target);
 			err = __bond_opt_set(bond, BOND_OPT_ARP_TARGETS,
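Why the manual check is needed: entries inside a nested attribute set are not covered by the top-level nla_policy, so each entry's length must be verified by hand before nla_get_be32() reads four bytes from its payload. A minimal sketch of the same pattern as a standalone helper (the helper name and bounds handling are illustrative, not part of the patch):

	#include <net/netlink.h>

	/* Copy out validated IPv4 targets from a nested attribute.
	 * Without the nla_len() check, a zero-length entry supplied by
	 * userspace would make nla_get_be32() read past the attribute.
	 */
	static int parse_ip_targets(struct nlattr *nested, __be32 *targets, int max)
	{
		struct nlattr *attr;
		int rem, n = 0;

		nla_for_each_nested(attr, nested, rem) {
			if (nla_len(attr) < sizeof(__be32))
				return -EINVAL;
			if (n >= max)
				return -ENOSPC;
			targets[n++] = nla_get_be32(attr);
		}
		return n;
	}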
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -2442,9 +2442,13 @@ static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
 		    SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
 		    SUPPORTED_10000baseKX4_Full;
 	else if (type == FW_PORT_TYPE_FIBER_XFI ||
-		 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
+		 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP) {
 		v |= SUPPORTED_FIBRE;
-	else if (type == FW_PORT_TYPE_BP40_BA)
+		if (caps & FW_PORT_CAP_SPEED_1G)
+			v |= SUPPORTED_1000baseT_Full;
+		if (caps & FW_PORT_CAP_SPEED_10G)
+			v |= SUPPORTED_10000baseT_Full;
+	} else if (type == FW_PORT_TYPE_BP40_BA)
 		v |= SUPPORTED_40000baseSR4_Full;
 	if (caps & FW_PORT_CAP_ANEG)
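The net effect: SFP, XFI and XAUI ports now report concrete speed modes derived from the firmware capability word instead of a bare SUPPORTED_FIBRE. The same cap-to-mode mapping can be restated table-driven; a sketch under the assumption that only the 1G/10G bits matter for these port types (helper and table are illustrative, not the driver's code; ARRAY_SIZE comes from <linux/kernel.h>):

	/* Illustrative cap -> ethtool-mode table for the SFP/fibre branch. */
	static const struct {
		unsigned int fw_cap;	/* FW_PORT_CAP_SPEED_* bit */
		unsigned int eth_mode;	/* SUPPORTED_* bit reported to ethtool */
	} fiber_speed_map[] = {
		{ FW_PORT_CAP_SPEED_1G,  SUPPORTED_1000baseT_Full  },
		{ FW_PORT_CAP_SPEED_10G, SUPPORTED_10000baseT_Full },
	};

	static unsigned int fiber_modes(unsigned int caps)
	{
		unsigned int v = SUPPORTED_FIBRE;
		size_t i;

		for (i = 0; i < ARRAY_SIZE(fiber_speed_map); i++)
			if (caps & fiber_speed_map[i].fw_cap)
				v |= fiber_speed_map[i].eth_mode;
		return v;
	}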
drivers/net/ethernet/renesas/sh_eth.c
@@ -917,21 +917,13 @@ static int sh_eth_reset(struct net_device *ndev)
 	return ret;
 }
 
-#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
 static void sh_eth_set_receive_align(struct sk_buff *skb)
 {
-	int reserve;
+	uintptr_t reserve = (uintptr_t)skb->data & (SH_ETH_RX_ALIGN - 1);
 
-	reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1));
 	if (reserve)
-		skb_reserve(skb, reserve);
+		skb_reserve(skb, SH_ETH_RX_ALIGN - reserve);
 }
-#else
-static void sh_eth_set_receive_align(struct sk_buff *skb)
-{
-	skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN);
-}
-#endif
 
 /* CPU <-> EDMAC endian convert */
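The rewritten helper reserves only the remainder needed to reach the next SH_ETH_RX_ALIGN boundary. A worked example, assuming SH_ETH_RX_ALIGN is 32 and skb->data ends in 0x14: reserve = 0x14 & 31 = 20, so skb_reserve(skb, 32 - 20) = 12 bytes moves the data pointer to the next 32-byte boundary; the extra SH_ETH_RX_ALIGN - 1 bytes added at allocation time (next hunk) guarantee that rx_buf_sz bytes still fit after that headroom. The arithmetic as a self-contained, runnable sketch:

	#include <assert.h>
	#include <stdint.h>

	#define RX_ALIGN 32	/* SH_ETH_RX_ALIGN on SH4/SHMOBILE */

	/* Mirror of the new alignment rule on a bare address. */
	static uintptr_t aligned_start(uintptr_t data)
	{
		uintptr_t reserve = data & (RX_ALIGN - 1);

		return reserve ? data + (RX_ALIGN - reserve) : data;
	}

	int main(void)
	{
		assert(aligned_start(0x1014) == 0x1020);	/* reserves 12 */
		assert(aligned_start(0x1020) == 0x1020);	/* already aligned */
		return 0;
	}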
@@ -1119,6 +1111,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
 	struct sh_eth_txdesc *txdesc = NULL;
 	int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
 	int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
+	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
 
 	mdp->cur_rx = 0;
 	mdp->cur_tx = 0;
@@ -1131,21 +1124,21 @@ static void sh_eth_ring_format(struct net_device *ndev)
 	for (i = 0; i < mdp->num_rx_ring; i++) {
 		/* skb */
 		mdp->rx_skbuff[i] = NULL;
-		skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
+		skb = netdev_alloc_skb(ndev, skbuff_size);
 		mdp->rx_skbuff[i] = skb;
 		if (skb == NULL)
 			break;
-		dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
-			       DMA_FROM_DEVICE);
 		sh_eth_set_receive_align(skb);
 
 		/* RX descriptor */
 		rxdesc = &mdp->rx_ring[i];
+		/* The size of the buffer is a multiple of 16 bytes. */
+		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
+		dma_map_single(&ndev->dev, skb->data, rxdesc->buffer_length,
+			       DMA_FROM_DEVICE);
 		rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
 		rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
-		/* The size of the buffer is 16 byte boundary. */
-		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
 
 		/* Rx descriptor address set */
 		if (i == 0) {
 			sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
@@ -1397,6 +1390,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 	struct sk_buff *skb;
 	u16 pkt_len = 0;
 	u32 desc_status;
+	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
 
 	rxdesc = &mdp->rx_ring[entry];
 	while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
@@ -1448,7 +1442,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 			if (mdp->cd->rpadir)
 				skb_reserve(skb, NET_IP_ALIGN);
 			dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr,
-						mdp->rx_buf_sz,
+						ALIGN(mdp->rx_buf_sz, 16),
 						DMA_FROM_DEVICE);
 			skb_put(skb, pkt_len);
 			skb->protocol = eth_type_trans(skb, ndev);
@@ -1468,13 +1462,13 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
 
 		if (mdp->rx_skbuff[entry] == NULL) {
-			skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
+			skb = netdev_alloc_skb(ndev, skbuff_size);
 			mdp->rx_skbuff[entry] = skb;
 			if (skb == NULL)
 				break;	/* Better luck next round. */
-			dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
-				       DMA_FROM_DEVICE);
 			sh_eth_set_receive_align(skb);
+			dma_map_single(&ndev->dev, skb->data,
+				       rxdesc->buffer_length, DMA_FROM_DEVICE);
 
 			skb_checksum_none_assert(skb);
 			rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
@@ -2042,6 +2036,8 @@ static int sh_eth_open(struct net_device *ndev)
 	if (ret)
 		goto out_free_irq;
 
+	mdp->is_opened = 1;
+
 	return ret;
 
 out_free_irq:
@@ -2131,6 +2127,36 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	return NETDEV_TX_OK;
 }
 
+static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+
+	if (sh_eth_is_rz_fast_ether(mdp))
+		return &ndev->stats;
+
+	if (!mdp->is_opened)
+		return &ndev->stats;
+
+	ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
+	sh_eth_write(ndev, 0, TROCR);	/* (write clear) */
+	ndev->stats.collisions += sh_eth_read(ndev, CDCR);
+	sh_eth_write(ndev, 0, CDCR);	/* (write clear) */
+	ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
+	sh_eth_write(ndev, 0, LCCR);	/* (write clear) */
+
+	if (sh_eth_is_gether(mdp)) {
+		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
+		sh_eth_write(ndev, 0, CERCR);	/* (write clear) */
+		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
+		sh_eth_write(ndev, 0, CEECR);	/* (write clear) */
+	} else {
+		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
+		sh_eth_write(ndev, 0, CNDCR);	/* (write clear) */
+	}
+
+	return &ndev->stats;
+}
+
 /* device close function */
 static int sh_eth_close(struct net_device *ndev)
 {
@@ -2145,6 +2171,7 @@ static int sh_eth_close(struct net_device *ndev)
 	sh_eth_write(ndev, 0, EDTRR);
 	sh_eth_write(ndev, 0, EDRRR);
 
+	sh_eth_get_stats(ndev);
 	/* PHY Disconnect */
 	if (mdp->phydev) {
 		phy_stop(mdp->phydev);
@@ -2163,36 +2190,9 @@ static int sh_eth_close(struct net_device *ndev)
 
-	pm_runtime_put_sync(&mdp->pdev->dev);
-
-	return 0;
-}
-
-static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
-{
-	struct sh_eth_private *mdp = netdev_priv(ndev);
-
-	if (sh_eth_is_rz_fast_ether(mdp))
-		return &ndev->stats;
+	mdp->is_opened = 0;
 
-	pm_runtime_get_sync(&mdp->pdev->dev);
+	pm_runtime_put_sync(&mdp->pdev->dev);
 
-	ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
-	sh_eth_write(ndev, 0, TROCR);	/* (write clear) */
-	ndev->stats.collisions += sh_eth_read(ndev, CDCR);
-	sh_eth_write(ndev, 0, CDCR);	/* (write clear) */
-	ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
-	sh_eth_write(ndev, 0, LCCR);	/* (write clear) */
-	if (sh_eth_is_gether(mdp)) {
-		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
-		sh_eth_write(ndev, 0, CERCR);	/* (write clear) */
-		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
-		sh_eth_write(ndev, 0, CEECR);	/* (write clear) */
-	} else {
-		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
-		sh_eth_write(ndev, 0, CNDCR);	/* (write clear) */
-	}
-	pm_runtime_put_sync(&mdp->pdev->dev);
-
-	return &ndev->stats;
+	return 0;
 }
 
 /* ioctl to device function */
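The reason for the move: pm_runtime_get_sync() may sleep, but .ndo_get_stats can be invoked from atomic context, which is exactly the "sleeping function called from invalid context" splat in the subject line. The reworked sh_eth_get_stats() drops the runtime-PM calls entirely and reads the write-clear counters only while is_opened says the clock is up, with sh_eth_close() flushing them one last time before clearing the flag. A self-contained model of that guard pattern (illustrative, not the driver's code):

	#include <stdio.h>

	/* Write-clear counters may only be read while the block is
	 * clocked ('opened'); totals accumulate in software so stats
	 * remain available after close without touching hardware.
	 */
	struct dev {
		int opened;
		unsigned long hw_collisions;	/* stands in for CDCR */
		unsigned long sw_collisions;	/* cached software total */
	};

	static unsigned long get_collisions(struct dev *d)
	{
		if (d->opened) {		/* safe: clock is running */
			d->sw_collisions += d->hw_collisions;
			d->hw_collisions = 0;	/* write-clear semantics */
		}
		return d->sw_collisions;	/* cached value when closed */
	}

	int main(void)
	{
		struct dev d = { .opened = 1, .hw_collisions = 3 };

		printf("%lu\n", get_collisions(&d));	/* 3 */
		d.opened = 0;				/* close clears flag */
		printf("%lu\n", get_collisions(&d));	/* still 3, no HW read */
		return 0;
	}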
drivers/net/ethernet/renesas/sh_eth.h
@@ -162,9 +162,9 @@ enum {
 
 /* Driver's parameters */
 #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
-#define SH4_SKB_RX_ALIGN	32
+#define SH_ETH_RX_ALIGN		32
 #else
-#define SH2_SH3_SKB_RX_ALIGN	2
+#define SH_ETH_RX_ALIGN		2
 #endif
 
 /* Register's bits
@@ -522,6 +522,7 @@ struct sh_eth_private {
 	unsigned no_ether_link:1;
 	unsigned ether_link_active_low:1;
+	unsigned is_opened:1;
 };
 
 static inline void sh_eth_soft_swap(char *src, int len)
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -265,13 +265,6 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
 
 	plat_dat = dev_get_platdata(&pdev->dev);
 
-	/* Set default value for multicast hash bins */
-	plat_dat->multicast_filter_bins = HASH_TABLE_SIZE;
-
-	/* Set default value for unicast filter entries */
-	plat_dat->unicast_filter_entries = 1;
-
 	if (pdev->dev.of_node) {
 		if (!plat_dat)
 			plat_dat = devm_kzalloc(&pdev->dev,
 					sizeof(struct plat_stmmacenet_data),
@@ -281,6 +274,13 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
 			return -ENOMEM;
 		}
 
+		/* Set default value for multicast hash bins */
+		plat_dat->multicast_filter_bins = HASH_TABLE_SIZE;
+
+		/* Set default value for unicast filter entries */
+		plat_dat->unicast_filter_entries = 1;
+
 		ret = stmmac_probe_config_dt(pdev, plat_dat, &mac);
 		if (ret) {
 			pr_err("%s: main dt probe failed", __func__);
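The probe regression was a plain use-before-check: the defaults were written through plat_dat before the DT path had verified, and if necessary allocated, the structure, so DT platforms without board platform data dereferenced NULL. A reduced sketch of the corrected ordering, condensing the two hunks above (generic flow, not the driver's exact code):

	/* Fetch the possibly-NULL platdata, allocate it when absent,
	 * and only then write defaults through the pointer.
	 */
	plat_dat = dev_get_platdata(&pdev->dev);
	if (!plat_dat) {
		plat_dat = devm_kzalloc(&pdev->dev, sizeof(*plat_dat),
					GFP_KERNEL);
		if (!plat_dat)
			return -ENOMEM;
	}
	/* Safe: plat_dat is known to be non-NULL from here on. */
	plat_dat->multicast_filter_bins = HASH_TABLE_SIZE;
	plat_dat->unicast_filter_entries = 1;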
drivers/net/xen-netfront.c
@@ -496,9 +496,6 @@ static void xennet_make_frags(struct sk_buff *skb, struct netfront_queue *queue,
 		len = skb_frag_size(frag);
 		offset = frag->page_offset;
 
-		/* Data must not cross a page boundary. */
-		BUG_ON(len + offset > PAGE_SIZE<<compound_order(page));
-
 		/* Skip unused frames from start of page */
 		page += offset >> PAGE_SHIFT;
 		offset &= ~PAGE_MASK;
@@ -506,8 +503,6 @@ static void xennet_make_frags(struct sk_buff *skb, struct netfront_queue *queue,
 		while (len > 0) {
 			unsigned long bytes;
 
-			BUG_ON(offset >= PAGE_SIZE);
-
 			bytes = PAGE_SIZE - offset;
 			if (bytes > len)
 				bytes = len;
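The removed assertions treated a fragment crossing a page boundary as a driver invariant, but skbs handed to the driver can legitimately carry such fragments. The loop below the asserts already handles the case by slicing the region page by page, so the BUG_ONs only turned a handled input into a crash. The slicing pattern in isolation, as a plain C model with a byte offset in place of grant operations (illustrative, runnable):

	#include <stdio.h>

	#define PAGE_SIZE 4096UL

	/* Walk an arbitrary (offset, len) region one page at a time.
	 * Crossing a page boundary is just another iteration, not an
	 * invariant violation -- which is why the BUG_ONs were removable.
	 */
	static void for_each_page_chunk(unsigned long offset, unsigned long len)
	{
		while (len > 0) {
			unsigned long bytes = PAGE_SIZE - (offset % PAGE_SIZE);

			if (bytes > len)
				bytes = len;
			printf("chunk at %lu, %lu bytes\n", offset, bytes);
			offset += bytes;
			len -= bytes;
		}
	}

	int main(void)
	{
		for_each_page_chunk(4000, 600);	/* spans two pages: 96 + 504 */
		return 0;
	}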
net/core/rtnetlink.c
@@ -1498,6 +1498,7 @@ static int do_setlink(const struct sk_buff *skb,
 			goto errout;
 		}
 		if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
+			put_net(net);
 			err = -EPERM;
 			goto errout;
 		}
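Context for the one-liner: a few lines above this hunk, do_setlink() obtains the target namespace from rtnl_link_get_net(), which returns it with a reference held; the capability-check failure path jumped to errout without dropping that reference, leaking the struct net. The pairing discipline in a reduced sketch, based on the surrounding do_setlink() code with the error labels simplified:

	struct net *net = rtnl_link_get_net(dev_net(dev), tb);	/* takes a ref */

	if (IS_ERR(net))
		return PTR_ERR(net);

	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
		put_net(net);	/* the release this fix adds */
		return -EPERM;
	}

	err = dev_change_net_namespace(dev, net, ifname);
	put_net(net);		/* release on the non-error path too */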