Commit de903512 authored by Roland Dreier's avatar Roland Dreier Committed by David S. Miller

[IPoIB]: Convert to netdevice internal stats

Use the stats member of struct netdevice in IPoIB, so we can save
memory by deleting the stats member of struct ipoib_dev_priv, and save
code by deleting ipoib_get_stats().
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 58711741
...@@ -280,8 +280,6 @@ struct ipoib_dev_priv { ...@@ -280,8 +280,6 @@ struct ipoib_dev_priv {
struct ib_event_handler event_handler; struct ib_event_handler event_handler;
struct net_device_stats stats;
struct net_device *parent; struct net_device *parent;
struct list_head child_intfs; struct list_head child_intfs;
struct list_head list; struct list_head list;
......
...@@ -430,7 +430,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) ...@@ -430,7 +430,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
ipoib_dbg(priv, "cm recv error " ipoib_dbg(priv, "cm recv error "
"(status=%d, wrid=%d vend_err %x)\n", "(status=%d, wrid=%d vend_err %x)\n",
wc->status, wr_id, wc->vendor_err); wc->status, wr_id, wc->vendor_err);
++priv->stats.rx_dropped; ++dev->stats.rx_dropped;
goto repost; goto repost;
} }
...@@ -457,7 +457,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) ...@@ -457,7 +457,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
* this packet and reuse the old buffer. * this packet and reuse the old buffer.
*/ */
ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id); ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
++priv->stats.rx_dropped; ++dev->stats.rx_dropped;
goto repost; goto repost;
} }
...@@ -474,8 +474,8 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) ...@@ -474,8 +474,8 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
skb_pull(skb, IPOIB_ENCAP_LEN); skb_pull(skb, IPOIB_ENCAP_LEN);
dev->last_rx = jiffies; dev->last_rx = jiffies;
++priv->stats.rx_packets; ++dev->stats.rx_packets;
priv->stats.rx_bytes += skb->len; dev->stats.rx_bytes += skb->len;
skb->dev = dev; skb->dev = dev;
/* XXX get correct PACKET_ type here */ /* XXX get correct PACKET_ type here */
...@@ -512,8 +512,8 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_ ...@@ -512,8 +512,8 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
if (unlikely(skb->len > tx->mtu)) { if (unlikely(skb->len > tx->mtu)) {
ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n", ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
skb->len, tx->mtu); skb->len, tx->mtu);
++priv->stats.tx_dropped; ++dev->stats.tx_dropped;
++priv->stats.tx_errors; ++dev->stats.tx_errors;
ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN); ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
return; return;
} }
...@@ -532,7 +532,7 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_ ...@@ -532,7 +532,7 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
tx_req->skb = skb; tx_req->skb = skb;
addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE); addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE);
if (unlikely(ib_dma_mapping_error(priv->ca, addr))) { if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
++priv->stats.tx_errors; ++dev->stats.tx_errors;
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
return; return;
} }
...@@ -542,7 +542,7 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_ ...@@ -542,7 +542,7 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
if (unlikely(post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), if (unlikely(post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
addr, skb->len))) { addr, skb->len))) {
ipoib_warn(priv, "post_send failed\n"); ipoib_warn(priv, "post_send failed\n");
++priv->stats.tx_errors; ++dev->stats.tx_errors;
ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE); ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
} else { } else {
...@@ -580,8 +580,8 @@ static void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ipoib_cm_tx *tx ...@@ -580,8 +580,8 @@ static void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ipoib_cm_tx *tx
ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE); ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE);
/* FIXME: is this right? Shouldn't we only increment on success? */ /* FIXME: is this right? Shouldn't we only increment on success? */
++priv->stats.tx_packets; ++dev->stats.tx_packets;
priv->stats.tx_bytes += tx_req->skb->len; dev->stats.tx_bytes += tx_req->skb->len;
dev_kfree_skb_any(tx_req->skb); dev_kfree_skb_any(tx_req->skb);
......
...@@ -208,7 +208,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) ...@@ -208,7 +208,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
* this packet and reuse the old buffer. * this packet and reuse the old buffer.
*/ */
if (unlikely(ipoib_alloc_rx_skb(dev, wr_id))) { if (unlikely(ipoib_alloc_rx_skb(dev, wr_id))) {
++priv->stats.rx_dropped; ++dev->stats.rx_dropped;
goto repost; goto repost;
} }
...@@ -225,8 +225,8 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) ...@@ -225,8 +225,8 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
skb_pull(skb, IPOIB_ENCAP_LEN); skb_pull(skb, IPOIB_ENCAP_LEN);
dev->last_rx = jiffies; dev->last_rx = jiffies;
++priv->stats.rx_packets; ++dev->stats.rx_packets;
priv->stats.rx_bytes += skb->len; dev->stats.rx_bytes += skb->len;
skb->dev = dev; skb->dev = dev;
/* XXX get correct PACKET_ type here */ /* XXX get correct PACKET_ type here */
...@@ -260,8 +260,8 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) ...@@ -260,8 +260,8 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
ib_dma_unmap_single(priv->ca, tx_req->mapping, ib_dma_unmap_single(priv->ca, tx_req->mapping,
tx_req->skb->len, DMA_TO_DEVICE); tx_req->skb->len, DMA_TO_DEVICE);
++priv->stats.tx_packets; ++dev->stats.tx_packets;
priv->stats.tx_bytes += tx_req->skb->len; dev->stats.tx_bytes += tx_req->skb->len;
dev_kfree_skb_any(tx_req->skb); dev_kfree_skb_any(tx_req->skb);
...@@ -362,8 +362,8 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb, ...@@ -362,8 +362,8 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) { if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) {
ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n", ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
skb->len, priv->mcast_mtu + IPOIB_ENCAP_LEN); skb->len, priv->mcast_mtu + IPOIB_ENCAP_LEN);
++priv->stats.tx_dropped; ++dev->stats.tx_dropped;
++priv->stats.tx_errors; ++dev->stats.tx_errors;
ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu); ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu);
return; return;
} }
...@@ -383,7 +383,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb, ...@@ -383,7 +383,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
addr = ib_dma_map_single(priv->ca, skb->data, skb->len, addr = ib_dma_map_single(priv->ca, skb->data, skb->len,
DMA_TO_DEVICE); DMA_TO_DEVICE);
if (unlikely(ib_dma_mapping_error(priv->ca, addr))) { if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
++priv->stats.tx_errors; ++dev->stats.tx_errors;
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
return; return;
} }
...@@ -392,7 +392,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb, ...@@ -392,7 +392,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
if (unlikely(post_send(priv, priv->tx_head & (ipoib_sendq_size - 1), if (unlikely(post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
address->ah, qpn, addr, skb->len))) { address->ah, qpn, addr, skb->len))) {
ipoib_warn(priv, "post_send failed\n"); ipoib_warn(priv, "post_send failed\n");
++priv->stats.tx_errors; ++dev->stats.tx_errors;
ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE); ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
} else { } else {
......
...@@ -517,7 +517,7 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev) ...@@ -517,7 +517,7 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
neigh = ipoib_neigh_alloc(skb->dst->neighbour); neigh = ipoib_neigh_alloc(skb->dst->neighbour);
if (!neigh) { if (!neigh) {
++priv->stats.tx_dropped; ++dev->stats.tx_dropped;
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
return; return;
} }
...@@ -582,7 +582,7 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev) ...@@ -582,7 +582,7 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
err_path: err_path:
ipoib_neigh_free(dev, neigh); ipoib_neigh_free(dev, neigh);
err_drop: err_drop:
++priv->stats.tx_dropped; ++dev->stats.tx_dropped;
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
spin_unlock(&priv->lock); spin_unlock(&priv->lock);
...@@ -631,7 +631,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev, ...@@ -631,7 +631,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
} else } else
__path_add(dev, path); __path_add(dev, path);
} else { } else {
++priv->stats.tx_dropped; ++dev->stats.tx_dropped;
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
} }
...@@ -650,7 +650,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev, ...@@ -650,7 +650,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
skb_push(skb, sizeof *phdr); skb_push(skb, sizeof *phdr);
__skb_queue_tail(&path->queue, skb); __skb_queue_tail(&path->queue, skb);
} else { } else {
++priv->stats.tx_dropped; ++dev->stats.tx_dropped;
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
} }
...@@ -718,7 +718,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -718,7 +718,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
__skb_queue_tail(&neigh->queue, skb); __skb_queue_tail(&neigh->queue, skb);
spin_unlock(&priv->lock); spin_unlock(&priv->lock);
} else { } else {
++priv->stats.tx_dropped; ++dev->stats.tx_dropped;
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
} }
} else { } else {
...@@ -744,7 +744,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -744,7 +744,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
IPOIB_QPN(phdr->hwaddr), IPOIB_QPN(phdr->hwaddr),
IPOIB_GID_RAW_ARG(phdr->hwaddr + 4)); IPOIB_GID_RAW_ARG(phdr->hwaddr + 4));
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
++priv->stats.tx_dropped; ++dev->stats.tx_dropped;
goto out; goto out;
} }
...@@ -758,13 +758,6 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -758,13 +758,6 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK; return NETDEV_TX_OK;
} }
/* Return the per-interface statistics kept in the IPoIB private area. */
static struct net_device_stats *ipoib_get_stats(struct net_device *dev)
{
	return &((struct ipoib_dev_priv *) netdev_priv(dev))->stats;
}
static void ipoib_timeout(struct net_device *dev) static void ipoib_timeout(struct net_device *dev)
{ {
struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_dev_priv *priv = netdev_priv(dev);
...@@ -865,7 +858,7 @@ void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh) ...@@ -865,7 +858,7 @@ void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh)
struct sk_buff *skb; struct sk_buff *skb;
*to_ipoib_neigh(neigh->neighbour) = NULL; *to_ipoib_neigh(neigh->neighbour) = NULL;
while ((skb = __skb_dequeue(&neigh->queue))) { while ((skb = __skb_dequeue(&neigh->queue))) {
++priv->stats.tx_dropped; ++dev->stats.tx_dropped;
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
} }
if (ipoib_cm_get(neigh)) if (ipoib_cm_get(neigh))
...@@ -952,7 +945,6 @@ static void ipoib_setup(struct net_device *dev) ...@@ -952,7 +945,6 @@ static void ipoib_setup(struct net_device *dev)
dev->stop = ipoib_stop; dev->stop = ipoib_stop;
dev->change_mtu = ipoib_change_mtu; dev->change_mtu = ipoib_change_mtu;
dev->hard_start_xmit = ipoib_start_xmit; dev->hard_start_xmit = ipoib_start_xmit;
dev->get_stats = ipoib_get_stats;
dev->tx_timeout = ipoib_timeout; dev->tx_timeout = ipoib_timeout;
dev->header_ops = &ipoib_header_ops; dev->header_ops = &ipoib_header_ops;
dev->set_multicast_list = ipoib_set_mcast_list; dev->set_multicast_list = ipoib_set_mcast_list;
......
...@@ -125,7 +125,7 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast) ...@@ -125,7 +125,7 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast)
} }
spin_lock_irqsave(&priv->tx_lock, flags); spin_lock_irqsave(&priv->tx_lock, flags);
priv->stats.tx_dropped += tx_dropped; dev->stats.tx_dropped += tx_dropped;
spin_unlock_irqrestore(&priv->tx_lock, flags); spin_unlock_irqrestore(&priv->tx_lock, flags);
kfree(mcast); kfree(mcast);
...@@ -320,7 +320,7 @@ ipoib_mcast_sendonly_join_complete(int status, ...@@ -320,7 +320,7 @@ ipoib_mcast_sendonly_join_complete(int status,
/* Flush out any queued packets */ /* Flush out any queued packets */
spin_lock_irq(&priv->tx_lock); spin_lock_irq(&priv->tx_lock);
while (!skb_queue_empty(&mcast->pkt_queue)) { while (!skb_queue_empty(&mcast->pkt_queue)) {
++priv->stats.tx_dropped; ++dev->stats.tx_dropped;
dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue)); dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
} }
spin_unlock_irq(&priv->tx_lock); spin_unlock_irq(&priv->tx_lock);
...@@ -675,7 +675,7 @@ void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb) ...@@ -675,7 +675,7 @@ void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
if (!test_bit(IPOIB_MCAST_STARTED, &priv->flags) || if (!test_bit(IPOIB_MCAST_STARTED, &priv->flags) ||
!priv->broadcast || !priv->broadcast ||
!test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) { !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
++priv->stats.tx_dropped; ++dev->stats.tx_dropped;
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
goto unlock; goto unlock;
} }
...@@ -690,7 +690,7 @@ void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb) ...@@ -690,7 +690,7 @@ void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
if (!mcast) { if (!mcast) {
ipoib_warn(priv, "unable to allocate memory for " ipoib_warn(priv, "unable to allocate memory for "
"multicast structure\n"); "multicast structure\n");
++priv->stats.tx_dropped; ++dev->stats.tx_dropped;
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
goto out; goto out;
} }
...@@ -705,7 +705,7 @@ void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb) ...@@ -705,7 +705,7 @@ void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE) if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
skb_queue_tail(&mcast->pkt_queue, skb); skb_queue_tail(&mcast->pkt_queue, skb);
else { else {
++priv->stats.tx_dropped; ++dev->stats.tx_dropped;
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment