Commit d51bffd1 authored by Sowmini Varadhan, committed by David S. Miller

sunvnet: Use one Tx queue per vnet_port

Use multiple Tx netdev queues for sunvnet by supporting a one-to-one
mapping between vnet_port and Tx queue. Provide an ndo_select_queue
indirection (vnet_select_queue()) which selects the queue based
on the peer that would be selected in vnet_start_xmit().
Signed-off-by: Sowmini Varadhan <sowmini.varadhan@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 7bd68bfd
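
For readers less familiar with the multiqueue netdev API, the pattern this patch adopts is sketched below. This is an illustrative sketch only, not code from the patch: my_priv, my_port, my_port_find() and my_ring_full() are hypothetical stand-ins for the driver's struct vnet, struct vnet_port, __tx_port_find() and ring-space check that appear in the diff.

	/* Each peer port is assigned a fixed Tx queue index. ndo_select_queue
	 * returns that index, so the core stack steers all traffic for the
	 * peer onto one queue, and only that queue is stopped when the peer's
	 * descriptor ring fills up.
	 */
	static u16 my_select_queue(struct net_device *dev, struct sk_buff *skb,
				   void *accel_priv, select_queue_fallback_t fallback)
	{
		struct my_priv *priv = netdev_priv(dev);
		struct my_port *port = my_port_find(priv, skb);	/* look up peer for this skb */

		return port->q_index;
	}

	static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		struct netdev_queue *txq;

		/* The queue mapping chosen by ndo_select_queue travels with the
		 * skb, so flow control can act on just that peer's queue.
		 */
		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
		if (my_ring_full(dev, skb)) {		/* hypothetical ring-space check */
			netif_tx_stop_queue(txq);	/* other peers keep transmitting */
			return NETDEV_TX_BUSY;
		}

		/* ... enqueue skb on this peer's descriptor ring ... */
		return NETDEV_TX_OK;
	}

For this to work the netdev must be allocated with the desired number of Tx queues (alloc_etherdev_mqs()), which is what the change to vnet_new() below does.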
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -40,6 +40,8 @@ MODULE_DESCRIPTION("Sun LDOM virtual network driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_MODULE_VERSION);
 
+#define VNET_MAX_TXQS		16
+
 /* Heuristic for the number of times to exponentially backoff and
  * retry sending an LDC trigger when EAGAIN is encountered
  */
@@ -551,6 +553,8 @@ static int vnet_ack(struct vnet_port *port, void *msgbuf)
 	struct vnet *vp;
 	u32 end;
 	struct vio_net_desc *desc;
+	struct netdev_queue *txq;
+
 	if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
 		return 0;
 
@@ -580,7 +584,8 @@ static int vnet_ack(struct vnet_port *port, void *msgbuf)
 	}
 	netif_tx_unlock(dev);
 
-	if (unlikely(netif_queue_stopped(dev) &&
+	txq = netdev_get_tx_queue(dev, port->q_index);
+	if (unlikely(netif_tx_queue_stopped(txq) &&
 		     vnet_tx_dring_avail(dr) >= VNET_TX_WAKEUP_THRESH(dr)))
 		return 1;
 
@@ -608,31 +613,23 @@ static int handle_mcast(struct vnet_port *port, void *msgbuf)
 	return 0;
 }
 
-static void maybe_tx_wakeup(struct vnet *vp)
+/* Got back a STOPPED LDC message on port. If the queue is stopped,
+ * wake it up so that we'll send out another START message at the
+ * next TX.
+ */
+static void maybe_tx_wakeup(struct vnet_port *port)
 {
-	struct net_device *dev = vp->dev;
+	struct netdev_queue *txq;
 
-	netif_tx_lock(dev);
-	if (likely(netif_queue_stopped(dev))) {
-		struct vnet_port *port;
-		int wake = 1;
-
-		rcu_read_lock();
-		list_for_each_entry_rcu(port, &vp->port_list, list) {
-			struct vio_dring_state *dr;
-
-			dr = &port->vio.drings[VIO_DRIVER_TX_RING];
-			if (vnet_tx_dring_avail(dr) <
-			    VNET_TX_WAKEUP_THRESH(dr)) {
-				wake = 0;
-				break;
-			}
-		}
-		rcu_read_unlock();
-		if (wake)
-			netif_wake_queue(dev);
+	txq = netdev_get_tx_queue(port->vp->dev, port->q_index);
+	__netif_tx_lock(txq, smp_processor_id());
+	if (likely(netif_tx_queue_stopped(txq))) {
+		struct vio_dring_state *dr;
+
+		dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+		netif_tx_wake_queue(txq);
 	}
-	netif_tx_unlock(dev);
+	__netif_tx_unlock(txq);
 }
 
 static inline bool port_is_up(struct vnet_port *vnet)
@@ -748,7 +745,7 @@ static int vnet_event_napi(struct vnet_port *port, int budget)
 		break;
 	}
 	if (unlikely(tx_wakeup && err != -ECONNRESET))
-		maybe_tx_wakeup(port->vp);
+		maybe_tx_wakeup(port);
 	return npkts;
 }
 
@@ -953,6 +950,16 @@ static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, void **pstart,
 	return skb;
 }
 
+static u16
+vnet_select_queue(struct net_device *dev, struct sk_buff *skb,
+		  void *accel_priv, select_queue_fallback_t fallback)
+{
+	struct vnet *vp = netdev_priv(dev);
+	struct vnet_port *port = __tx_port_find(vp, skb);
+
+	return port->q_index;
+}
+
 static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct vnet *vp = netdev_priv(dev);
@@ -965,6 +972,7 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	void *start = NULL;
 	int nlen = 0;
 	unsigned pending = 0;
+	struct netdev_queue *txq;
 
 	skb = vnet_skb_shape(skb, &start, &nlen);
 	if (unlikely(!skb))
@@ -1008,9 +1016,11 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+	i = skb_get_queue_mapping(skb);
+	txq = netdev_get_tx_queue(dev, i);
 	if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
-		if (!netif_queue_stopped(dev)) {
-			netif_stop_queue(dev);
+		if (!netif_tx_queue_stopped(txq)) {
+			netif_tx_stop_queue(txq);
 
 			/* This is a hard error, log it. */
 			netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
@@ -1104,9 +1114,9 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1);
 	if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
-		netif_stop_queue(dev);
+		netif_tx_stop_queue(txq);
 		if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr))
-			netif_wake_queue(dev);
+			netif_tx_wake_queue(txq);
 	}
 
 	(void)mod_timer(&port->clean_timer, jiffies + VNET_CLEAN_TIMEOUT);
@@ -1139,14 +1149,14 @@ static void vnet_tx_timeout(struct net_device *dev)
 static int vnet_open(struct net_device *dev)
 {
 	netif_carrier_on(dev);
-	netif_start_queue(dev);
+	netif_tx_start_all_queues(dev);
 
 	return 0;
 }
 
 static int vnet_close(struct net_device *dev)
 {
-	netif_stop_queue(dev);
+	netif_tx_stop_all_queues(dev);
 	netif_carrier_off(dev);
 
 	return 0;
@@ -1420,6 +1430,7 @@ static const struct net_device_ops vnet_ops = {
 	.ndo_tx_timeout		= vnet_tx_timeout,
 	.ndo_change_mtu		= vnet_change_mtu,
 	.ndo_start_xmit		= vnet_start_xmit,
+	.ndo_select_queue	= vnet_select_queue,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= vnet_poll_controller,
 #endif
@@ -1431,7 +1442,7 @@ static struct vnet *vnet_new(const u64 *local_mac)
 	struct vnet *vp;
 	int err, i;
 
-	dev = alloc_etherdev(sizeof(*vp));
+	dev = alloc_etherdev_mqs(sizeof(*vp), VNET_MAX_TXQS, 1);
 	if (!dev)
 		return ERR_PTR(-ENOMEM);
 	dev->needed_headroom = VNET_PACKET_SKIP + 8;
@@ -1556,6 +1567,25 @@ static void print_version(void)
 
 const char *remote_macaddr_prop = "remote-mac-address";
 
+static void
+vnet_port_add_txq(struct vnet_port *port)
+{
+	struct vnet *vp = port->vp;
+	int n;
+
+	n = vp->nports++;
+	n = n & (VNET_MAX_TXQS - 1);
+	port->q_index = n;
+	netif_tx_wake_queue(netdev_get_tx_queue(vp->dev, port->q_index));
+}
+
+static void
+vnet_port_rm_txq(struct vnet_port *port)
+{
+	port->vp->nports--;
+	netif_tx_stop_queue(netdev_get_tx_queue(port->vp->dev, port->q_index));
+}
+
 static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 {
 	struct mdesc_handle *hp;
@@ -1624,6 +1654,7 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 	list_add_tail_rcu(&port->list, &vp->port_list);
 	hlist_add_head_rcu(&port->hash,
 			   &vp->port_hash[vnet_hashfn(port->raddr)]);
+	vnet_port_add_txq(port);
 	spin_unlock_irqrestore(&vp->lock, flags);
 
 	dev_set_drvdata(&vdev->dev, port);
@@ -1668,6 +1699,7 @@ static int vnet_port_remove(struct vio_dev *vdev)
 		synchronize_rcu();
 		del_timer_sync(&port->clean_timer);
 
+		vnet_port_rm_txq(port);
 		netif_napi_del(&port->napi);
 		vnet_port_free_tx_bufs(port);
 		vio_ldc_free(&port->vio);
--- a/drivers/net/ethernet/sun/sunvnet.h
+++ b/drivers/net/ethernet/sun/sunvnet.h
@@ -61,6 +61,7 @@ struct vnet_port {
 	u32  napi_stop_idx;
 	bool napi_resume;
 	int  rx_event;
+	u16  q_index;
 };
 
 static inline struct vnet_port *to_vnet_port(struct vio_driver_state *vio)
@@ -102,6 +103,7 @@ struct vnet {
 	struct list_head	list;
 	u64			local_mac;
 
+	int			nports;
 };
 
 #endif /* _SUNVNET_H */