Commit e9ce7cb6 authored by Wei Liu, committed by David S. Miller

xen-netback: Factor queue-specific data into queue struct

In preparation for multi-queue support in xen-netback, move the
queue-specific data from struct xenvif into struct xenvif_queue, and
update the rest of the code to use this.

Also adds loops over queues where appropriate, even though only one is
configured at this point, and uses alloc_netdev_mq() and the
corresponding multi-queue netif wake/start/stop functions in preparation
for multiple active queues.

Finally, implements a trivial queue selection function suitable for
ndo_select_queue, which simply returns 0 for a single queue and uses
skb_get_hash() to compute the queue index otherwise.
Signed-off-by: Andrew J. Bennieston <andrew.bennieston@citrix.com>
Signed-off-by: Wei Liu <wei.liu2@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent a55d9766
...@@ -99,22 +99,43 @@ struct xenvif_rx_meta { ...@@ -99,22 +99,43 @@ struct xenvif_rx_meta {
*/ */
#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN #define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN
struct xenvif { /* Queue name is interface name with "-qNNN" appended */
/* Unique identifier for this interface. */ #define QUEUE_NAME_SIZE (IFNAMSIZ + 5)
domid_t domid;
unsigned int handle;
/* Is this interface disabled? True when backend discovers /* IRQ name is queue name with "-tx" or "-rx" appended */
* frontend is rogue. #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
struct xenvif;
struct xenvif_stats {
/* Stats fields to be updated per-queue.
* A subset of struct net_device_stats that contains only the
* fields that are updated in netback.c for each queue.
*/ */
bool disabled; unsigned int rx_bytes;
unsigned int rx_packets;
unsigned int tx_bytes;
unsigned int tx_packets;
/* Additional stats used by xenvif */
unsigned long rx_gso_checksum_fixup;
unsigned long tx_zerocopy_sent;
unsigned long tx_zerocopy_success;
unsigned long tx_zerocopy_fail;
unsigned long tx_frag_overflow;
};
struct xenvif_queue { /* Per-queue data for xenvif */
unsigned int id; /* Queue ID, 0-based */
char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
struct xenvif *vif; /* Parent VIF */
/* Use NAPI for guest TX */ /* Use NAPI for guest TX */
struct napi_struct napi; struct napi_struct napi;
/* When feature-split-event-channels = 0, tx_irq = rx_irq. */ /* When feature-split-event-channels = 0, tx_irq = rx_irq. */
unsigned int tx_irq; unsigned int tx_irq;
/* Only used when feature-split-event-channels = 1 */ /* Only used when feature-split-event-channels = 1 */
char tx_irq_name[IFNAMSIZ+4]; /* DEVNAME-tx */ char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
struct xen_netif_tx_back_ring tx; struct xen_netif_tx_back_ring tx;
struct sk_buff_head tx_queue; struct sk_buff_head tx_queue;
struct page *mmap_pages[MAX_PENDING_REQS]; struct page *mmap_pages[MAX_PENDING_REQS];
...@@ -150,7 +171,7 @@ struct xenvif { ...@@ -150,7 +171,7 @@ struct xenvif {
/* When feature-split-event-channels = 0, tx_irq = rx_irq. */ /* When feature-split-event-channels = 0, tx_irq = rx_irq. */
unsigned int rx_irq; unsigned int rx_irq;
/* Only used when feature-split-event-channels = 1 */ /* Only used when feature-split-event-channels = 1 */
char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */ char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */
struct xen_netif_rx_back_ring rx; struct xen_netif_rx_back_ring rx;
struct sk_buff_head rx_queue; struct sk_buff_head rx_queue;
RING_IDX rx_last_skb_slots; RING_IDX rx_last_skb_slots;
...@@ -165,6 +186,22 @@ struct xenvif { ...@@ -165,6 +186,22 @@ struct xenvif {
*/ */
struct xenvif_rx_meta meta[XEN_NETIF_RX_RING_SIZE]; struct xenvif_rx_meta meta[XEN_NETIF_RX_RING_SIZE];
/* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
unsigned long credit_bytes;
unsigned long credit_usec;
unsigned long remaining_credit;
struct timer_list credit_timeout;
u64 credit_window_start;
/* Statistics */
struct xenvif_stats stats;
};
struct xenvif {
/* Unique identifier for this interface. */
domid_t domid;
unsigned int handle;
u8 fe_dev_addr[6]; u8 fe_dev_addr[6];
/* Frontend feature information. */ /* Frontend feature information. */
...@@ -178,19 +215,13 @@ struct xenvif { ...@@ -178,19 +215,13 @@ struct xenvif {
/* Internal feature information. */ /* Internal feature information. */
u8 can_queue:1; /* can queue packets for receiver? */ u8 can_queue:1; /* can queue packets for receiver? */
/* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */ /* Is this interface disabled? True when backend discovers
unsigned long credit_bytes; * frontend is rogue.
unsigned long credit_usec; */
unsigned long remaining_credit; bool disabled;
struct timer_list credit_timeout;
u64 credit_window_start;
/* Statistics */ /* Queues */
unsigned long rx_gso_checksum_fixup; struct xenvif_queue *queues;
unsigned long tx_zerocopy_sent;
unsigned long tx_zerocopy_success;
unsigned long tx_zerocopy_fail;
unsigned long tx_frag_overflow;
/* Miscellaneous private stuff. */ /* Miscellaneous private stuff. */
struct net_device *dev; struct net_device *dev;
...@@ -205,7 +236,9 @@ struct xenvif *xenvif_alloc(struct device *parent, ...@@ -205,7 +236,9 @@ struct xenvif *xenvif_alloc(struct device *parent,
domid_t domid, domid_t domid,
unsigned int handle); unsigned int handle);
int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref, int xenvif_init_queue(struct xenvif_queue *queue);
int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
unsigned long rx_ring_ref, unsigned int tx_evtchn, unsigned long rx_ring_ref, unsigned int tx_evtchn,
unsigned int rx_evtchn); unsigned int rx_evtchn);
void xenvif_disconnect(struct xenvif *vif); void xenvif_disconnect(struct xenvif *vif);
...@@ -216,44 +249,47 @@ void xenvif_xenbus_fini(void); ...@@ -216,44 +249,47 @@ void xenvif_xenbus_fini(void);
int xenvif_schedulable(struct xenvif *vif); int xenvif_schedulable(struct xenvif *vif);
int xenvif_must_stop_queue(struct xenvif *vif); int xenvif_must_stop_queue(struct xenvif_queue *queue);
int xenvif_queue_stopped(struct xenvif_queue *queue);
void xenvif_wake_queue(struct xenvif_queue *queue);
/* (Un)Map communication rings. */ /* (Un)Map communication rings. */
void xenvif_unmap_frontend_rings(struct xenvif *vif); void xenvif_unmap_frontend_rings(struct xenvif_queue *queue);
int xenvif_map_frontend_rings(struct xenvif *vif, int xenvif_map_frontend_rings(struct xenvif_queue *queue,
grant_ref_t tx_ring_ref, grant_ref_t tx_ring_ref,
grant_ref_t rx_ring_ref); grant_ref_t rx_ring_ref);
/* Check for SKBs from frontend and schedule backend processing */ /* Check for SKBs from frontend and schedule backend processing */
void xenvif_napi_schedule_or_enable_events(struct xenvif *vif); void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue);
/* Prevent the device from generating any further traffic. */ /* Prevent the device from generating any further traffic. */
void xenvif_carrier_off(struct xenvif *vif); void xenvif_carrier_off(struct xenvif *vif);
int xenvif_tx_action(struct xenvif *vif, int budget); int xenvif_tx_action(struct xenvif_queue *queue, int budget);
int xenvif_kthread_guest_rx(void *data); int xenvif_kthread_guest_rx(void *data);
void xenvif_kick_thread(struct xenvif *vif); void xenvif_kick_thread(struct xenvif_queue *queue);
int xenvif_dealloc_kthread(void *data); int xenvif_dealloc_kthread(void *data);
/* Determine whether the needed number of slots (req) are available, /* Determine whether the needed number of slots (req) are available,
* and set req_event if not. * and set req_event if not.
*/ */
bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed); bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed);
void xenvif_stop_queue(struct xenvif *vif); void xenvif_carrier_on(struct xenvif *vif);
/* Callback from stack when TX packet can be released */ /* Callback from stack when TX packet can be released */
void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success); void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success);
/* Unmap a pending page and release it back to the guest */ /* Unmap a pending page and release it back to the guest */
void xenvif_idx_unmap(struct xenvif *vif, u16 pending_idx); void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
static inline pending_ring_idx_t nr_pending_reqs(struct xenvif *vif) static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue)
{ {
return MAX_PENDING_REQS - return MAX_PENDING_REQS -
vif->pending_prod + vif->pending_cons; queue->pending_prod + queue->pending_cons;
} }
/* Callback from stack when TX packet can be released */ /* Callback from stack when TX packet can be released */
......
This diff is collapsed.
This diff is collapsed.
...@@ -19,6 +19,8 @@ ...@@ -19,6 +19,8 @@
*/ */
#include "common.h" #include "common.h"
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
struct backend_info { struct backend_info {
struct xenbus_device *dev; struct xenbus_device *dev;
...@@ -34,8 +36,9 @@ struct backend_info { ...@@ -34,8 +36,9 @@ struct backend_info {
u8 have_hotplug_status_watch:1; u8 have_hotplug_status_watch:1;
}; };
static int connect_rings(struct backend_info *); static int connect_rings(struct backend_info *be, struct xenvif_queue *queue);
static void connect(struct backend_info *); static void connect(struct backend_info *be);
static int read_xenbus_vif_flags(struct backend_info *be);
static void backend_create_xenvif(struct backend_info *be); static void backend_create_xenvif(struct backend_info *be);
static void unregister_hotplug_status_watch(struct backend_info *be); static void unregister_hotplug_status_watch(struct backend_info *be);
static void set_backend_state(struct backend_info *be, static void set_backend_state(struct backend_info *be,
...@@ -485,10 +488,10 @@ static void connect(struct backend_info *be) ...@@ -485,10 +488,10 @@ static void connect(struct backend_info *be)
{ {
int err; int err;
struct xenbus_device *dev = be->dev; struct xenbus_device *dev = be->dev;
unsigned long credit_bytes, credit_usec;
err = connect_rings(be); unsigned int queue_index;
if (err) unsigned int requested_num_queues = 1;
return; struct xenvif_queue *queue;
err = xen_net_read_mac(dev, be->vif->fe_dev_addr); err = xen_net_read_mac(dev, be->vif->fe_dev_addr);
if (err) { if (err) {
...@@ -496,9 +499,34 @@ static void connect(struct backend_info *be) ...@@ -496,9 +499,34 @@ static void connect(struct backend_info *be)
return; return;
} }
xen_net_read_rate(dev, &be->vif->credit_bytes, xen_net_read_rate(dev, &credit_bytes, &credit_usec);
&be->vif->credit_usec); read_xenbus_vif_flags(be);
be->vif->remaining_credit = be->vif->credit_bytes;
be->vif->queues = vzalloc(requested_num_queues *
sizeof(struct xenvif_queue));
rtnl_lock();
netif_set_real_num_tx_queues(be->vif->dev, requested_num_queues);
rtnl_unlock();
for (queue_index = 0; queue_index < requested_num_queues; ++queue_index) {
queue = &be->vif->queues[queue_index];
queue->vif = be->vif;
queue->id = queue_index;
snprintf(queue->name, sizeof(queue->name), "%s-q%u",
be->vif->dev->name, queue->id);
err = xenvif_init_queue(queue);
if (err)
goto err;
queue->remaining_credit = credit_bytes;
err = connect_rings(be, queue);
if (err)
goto err;
}
xenvif_carrier_on(be->vif);
unregister_hotplug_status_watch(be); unregister_hotplug_status_watch(be);
err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch,
...@@ -507,18 +535,26 @@ static void connect(struct backend_info *be) ...@@ -507,18 +535,26 @@ static void connect(struct backend_info *be)
if (!err) if (!err)
be->have_hotplug_status_watch = 1; be->have_hotplug_status_watch = 1;
netif_wake_queue(be->vif->dev); netif_tx_wake_all_queues(be->vif->dev);
return;
err:
vfree(be->vif->queues);
be->vif->queues = NULL;
rtnl_lock();
netif_set_real_num_tx_queues(be->vif->dev, 0);
rtnl_unlock();
return;
} }
static int connect_rings(struct backend_info *be) static int connect_rings(struct backend_info *be, struct xenvif_queue *queue)
{ {
struct xenvif *vif = be->vif;
struct xenbus_device *dev = be->dev; struct xenbus_device *dev = be->dev;
unsigned long tx_ring_ref, rx_ring_ref; unsigned long tx_ring_ref, rx_ring_ref;
unsigned int tx_evtchn, rx_evtchn, rx_copy; unsigned int tx_evtchn, rx_evtchn;
int err; int err;
int val;
err = xenbus_gather(XBT_NIL, dev->otherend, err = xenbus_gather(XBT_NIL, dev->otherend,
"tx-ring-ref", "%lu", &tx_ring_ref, "tx-ring-ref", "%lu", &tx_ring_ref,
...@@ -546,6 +582,27 @@ static int connect_rings(struct backend_info *be) ...@@ -546,6 +582,27 @@ static int connect_rings(struct backend_info *be)
rx_evtchn = tx_evtchn; rx_evtchn = tx_evtchn;
} }
/* Map the shared frame, irq etc. */
err = xenvif_connect(queue, tx_ring_ref, rx_ring_ref,
tx_evtchn, rx_evtchn);
if (err) {
xenbus_dev_fatal(dev, err,
"mapping shared-frames %lu/%lu port tx %u rx %u",
tx_ring_ref, rx_ring_ref,
tx_evtchn, rx_evtchn);
return err;
}
return 0;
}
static int read_xenbus_vif_flags(struct backend_info *be)
{
struct xenvif *vif = be->vif;
struct xenbus_device *dev = be->dev;
unsigned int rx_copy;
int err, val;
err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u", err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u",
&rx_copy); &rx_copy);
if (err == -ENOENT) { if (err == -ENOENT) {
...@@ -621,16 +678,6 @@ static int connect_rings(struct backend_info *be) ...@@ -621,16 +678,6 @@ static int connect_rings(struct backend_info *be)
val = 0; val = 0;
vif->ipv6_csum = !!val; vif->ipv6_csum = !!val;
/* Map the shared frame, irq etc. */
err = xenvif_connect(vif, tx_ring_ref, rx_ring_ref,
tx_evtchn, rx_evtchn);
if (err) {
xenbus_dev_fatal(dev, err,
"mapping shared-frames %lu/%lu port tx %u rx %u",
tx_ring_ref, rx_ring_ref,
tx_evtchn, rx_evtchn);
return err;
}
return 0; return 0;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment