Commit 044ab86d authored by Antoine Tenart, committed by David S. Miller

net: move the xps maps to an array

Move the xps maps (xps_cpus_map and xps_rxqs_map) to an array in
net_device. This simplifies the code considerably, as it removes the need
for many if/else conditionals: the correct map is now retrieved using its
offset in the array.

This should not modify the xps maps behaviour in any way.
Suggested-by: Alexander Duyck <alexander.duyck@gmail.com>
Signed-off-by: Antoine Tenart <atenart@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 6f36158e
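
The change is mechanical: the two dedicated map pointers become slots of a
single array indexed by the new enum, so helpers take a map type instead of
branching on a bool. As a standalone illustration of the pattern, here is a
minimal userspace C sketch; it is not kernel code, and the names map_type,
fake_device, reset_map and MAP_* are made up for this example:

/*
 * Sketch of the pattern this commit applies: two dedicated pointers
 * replaced by an array indexed by an enum. All names here are
 * illustrative, not kernel identifiers.
 */
#include <stdio.h>
#include <stdlib.h>

enum map_type {
	MAP_CPUS = 0,	/* plays the role of XPS_CPUS */
	MAP_RXQS,	/* plays the role of XPS_RXQS */
	MAP_MAX,	/* plays the role of XPS_MAPS_MAX */
};

struct fake_device {
	/* One slot per map type, like net_device->xps_maps[]. */
	int *maps[MAP_MAX];
};

/*
 * Before the change, a helper like this would take a bool and need an
 * if/else to pick between two fields. With the array, the enum value is
 * the offset and the branch disappears.
 */
static void reset_map(struct fake_device *dev, enum map_type type)
{
	free(dev->maps[type]);
	dev->maps[type] = NULL;
}

int main(void)
{
	struct fake_device dev = {
		.maps = { calloc(4, sizeof(int)), calloc(4, sizeof(int)) },
	};

	reset_map(&dev, MAP_CPUS);	/* no is_rxqs_map bool needed */
	reset_map(&dev, MAP_RXQS);

	printf("cpus=%p rxqs=%p\n",
	       (void *)dev.maps[MAP_CPUS], (void *)dev.maps[MAP_RXQS]);
	return 0;
}

Adding a further map type then only requires a new enum entry before the
MAX sentinel; the helpers need no new branches.
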
@@ -2015,7 +2015,7 @@ static void virtnet_set_affinity(struct virtnet_info *vi)
 		}
 		virtqueue_set_affinity(vi->rq[i].vq, mask);
 		virtqueue_set_affinity(vi->sq[i].vq, mask);
-		__netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, false);
+		__netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, XPS_CPUS);
 		cpumask_clear(mask);
 	}
@@ -754,6 +754,13 @@ struct rx_queue_attribute {
 			 const char *buf, size_t len);
 };
 
+/* XPS map type and offset of the xps map within net_device->xps_maps[]. */
+enum xps_map_type {
+	XPS_CPUS = 0,
+	XPS_RXQS,
+	XPS_MAPS_MAX,
+};
+
 #ifdef CONFIG_XPS
 /*
  * This structure holds an XPS map which can be of variable length.  The
@@ -1773,8 +1780,7 @@ enum netdev_ml_priv_type {
 *	@tx_queue_len:		Max frames per queue allowed
 *	@tx_global_lock:	XXX: need comments on this one
 *	@xdp_bulkq:		XDP device bulk queue
-*	@xps_cpus_map:		all CPUs map for XPS device
-*	@xps_rxqs_map:		all RXQs map for XPS device
+*	@xps_maps:		all CPUs/RXQs maps for XPS device
 *
 *	@xps_maps:	XXX: need comments on this one
 *	@miniq_egress:		clsact qdisc specific data for
@@ -2070,8 +2076,7 @@ struct net_device {
 	struct xdp_dev_bulk_queue __percpu *xdp_bulkq;
 
 #ifdef CONFIG_XPS
-	struct xps_dev_maps __rcu *xps_cpus_map;
-	struct xps_dev_maps __rcu *xps_rxqs_map;
+	struct xps_dev_maps __rcu *xps_maps[XPS_MAPS_MAX];
 #endif
 #ifdef CONFIG_NET_CLS_ACT
 	struct mini_Qdisc __rcu	*miniq_egress;
@@ -3701,7 +3706,7 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
 			u16 index);
 int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
-			  u16 index, bool is_rxqs_map);
+			  u16 index, enum xps_map_type type);
 
 /**
  *	netif_attr_test_mask - Test a CPU or Rx queue set in a mask
@@ -3796,7 +3801,7 @@ static inline int netif_set_xps_queue(struct net_device *dev,
 
 static inline int __netif_set_xps_queue(struct net_device *dev,
 					const unsigned long *mask,
-					u16 index, bool is_rxqs_map)
+					u16 index, enum xps_map_type type)
 {
 	return 0;
 }
@@ -2511,31 +2511,34 @@ static bool remove_xps_queue_cpu(struct net_device *dev,
 
 static void reset_xps_maps(struct net_device *dev,
 			   struct xps_dev_maps *dev_maps,
-			   bool is_rxqs_map)
+			   enum xps_map_type type)
 {
-	if (is_rxqs_map) {
-		static_key_slow_dec_cpuslocked(&xps_rxqs_needed);
-		RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
-	} else {
-		RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
-	}
 	static_key_slow_dec_cpuslocked(&xps_needed);
+	if (type == XPS_RXQS)
+		static_key_slow_dec_cpuslocked(&xps_rxqs_needed);
+
+	RCU_INIT_POINTER(dev->xps_maps[type], NULL);
+
 	kfree_rcu(dev_maps, rcu);
 }
 
-static void clean_xps_maps(struct net_device *dev,
-			   struct xps_dev_maps *dev_maps, u16 offset, u16 count,
-			   bool is_rxqs_map)
+static void clean_xps_maps(struct net_device *dev, enum xps_map_type type,
+			   u16 offset, u16 count)
 {
+	struct xps_dev_maps *dev_maps;
 	bool active = false;
 	int i, j;
 
+	dev_maps = xmap_dereference(dev->xps_maps[type]);
+	if (!dev_maps)
+		return;
+
 	for (j = 0; j < dev_maps->nr_ids; j++)
 		active |= remove_xps_queue_cpu(dev, dev_maps, j, offset, count);
 	if (!active)
-		reset_xps_maps(dev, dev_maps, is_rxqs_map);
+		reset_xps_maps(dev, dev_maps, type);
 
-	if (!is_rxqs_map) {
+	if (type == XPS_CPUS) {
 		for (i = offset + (count - 1); count--; i--)
 			netdev_queue_numa_node_write(
 				netdev_get_tx_queue(dev, i), NUMA_NO_NODE);
@@ -2545,27 +2548,17 @@ static void clean_xps_maps(struct net_device *dev,
 static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
 				   u16 count)
 {
-	struct xps_dev_maps *dev_maps;
-
 	if (!static_key_false(&xps_needed))
 		return;
 
 	cpus_read_lock();
 	mutex_lock(&xps_map_mutex);
 
-	if (static_key_false(&xps_rxqs_needed)) {
-		dev_maps = xmap_dereference(dev->xps_rxqs_map);
-		if (dev_maps)
-			clean_xps_maps(dev, dev_maps, offset, count, true);
-	}
+	if (static_key_false(&xps_rxqs_needed))
+		clean_xps_maps(dev, XPS_RXQS, offset, count);
 
-	dev_maps = xmap_dereference(dev->xps_cpus_map);
-	if (!dev_maps)
-		goto out_no_maps;
-
-	clean_xps_maps(dev, dev_maps, offset, count, false);
+	clean_xps_maps(dev, XPS_CPUS, offset, count);
 
-out_no_maps:
 	mutex_unlock(&xps_map_mutex);
 	cpus_read_unlock();
 }
@@ -2617,7 +2610,7 @@ static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index,
 
 /* Must be called under cpus_read_lock */
 int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
-			  u16 index, bool is_rxqs_map)
+			  u16 index, enum xps_map_type type)
 {
 	struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
 	const unsigned long *online_mask = NULL;
@@ -2642,15 +2635,15 @@ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
 	}
 
 	mutex_lock(&xps_map_mutex);
-	if (is_rxqs_map) {
+
+	dev_maps = xmap_dereference(dev->xps_maps[type]);
+	if (type == XPS_RXQS) {
 		maps_sz = XPS_RXQ_DEV_MAPS_SIZE(num_tc, dev->num_rx_queues);
-		dev_maps = xmap_dereference(dev->xps_rxqs_map);
 		nr_ids = dev->num_rx_queues;
 	} else {
 		maps_sz = XPS_CPU_DEV_MAPS_SIZE(num_tc);
 		if (num_possible_cpus() > 1)
 			online_mask = cpumask_bits(cpu_online_mask);
-		dev_maps = xmap_dereference(dev->xps_cpus_map);
 		nr_ids = nr_cpu_ids;
 	}
@@ -2683,7 +2676,7 @@ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
 		tci = j * num_tc + tc;
 		map = copy ? xmap_dereference(dev_maps->attr_map[tci]) : NULL;
 
-		map = expand_xps_map(map, j, index, is_rxqs_map);
+		map = expand_xps_map(map, j, index, type == XPS_RXQS);
 		if (!map)
 			goto error;
@@ -2696,7 +2689,7 @@ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
 	if (!dev_maps) {
 		/* Increment static keys at most once per type */
 		static_key_slow_inc_cpuslocked(&xps_needed);
-		if (is_rxqs_map)
+		if (type == XPS_RXQS)
 			static_key_slow_inc_cpuslocked(&xps_rxqs_needed);
 	}
@@ -2725,7 +2718,7 @@ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
 		if (pos == map->len)
 			map->queues[map->len++] = index;
 #ifdef CONFIG_NUMA
-		if (!is_rxqs_map) {
+		if (type == XPS_CPUS) {
 			if (numa_node_id == -2)
 				numa_node_id = cpu_to_node(j);
 			else if (numa_node_id != cpu_to_node(j))
@@ -2746,10 +2739,7 @@ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
 		}
 	}
 
-	if (is_rxqs_map)
-		rcu_assign_pointer(dev->xps_rxqs_map, new_dev_maps);
-	else
-		rcu_assign_pointer(dev->xps_cpus_map, new_dev_maps);
+	rcu_assign_pointer(dev->xps_maps[type], new_dev_maps);
 
 	/* Cleanup old maps */
 	if (!dev_maps)
@@ -2778,12 +2768,11 @@ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
 	active = true;
 
 out_no_new_maps:
-	if (!is_rxqs_map) {
+	if (type == XPS_CPUS)
 		/* update Tx queue numa node */
 		netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
 					     (numa_node_id >= 0) ?
 					     numa_node_id : NUMA_NO_NODE);
-	}
 
 	if (!dev_maps)
 		goto out_no_maps;
@@ -2801,7 +2790,7 @@ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
 
 	/* free map if not active */
 	if (!active)
-		reset_xps_maps(dev, dev_maps, is_rxqs_map);
+		reset_xps_maps(dev, dev_maps, type);
 
 out_no_maps:
 	mutex_unlock(&xps_map_mutex);
@@ -2833,7 +2822,7 @@ int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
 	int ret;
 
 	cpus_read_lock();
-	ret = __netif_set_xps_queue(dev, cpumask_bits(mask), index, false);
+	ret = __netif_set_xps_queue(dev, cpumask_bits(mask), index, XPS_CPUS);
 	cpus_read_unlock();
 
 	return ret;
@@ -3983,7 +3972,7 @@ static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev,
 	if (!static_key_false(&xps_rxqs_needed))
 		goto get_cpus_map;
 
-	dev_maps = rcu_dereference(sb_dev->xps_rxqs_map);
+	dev_maps = rcu_dereference(sb_dev->xps_maps[XPS_RXQS]);
 	if (dev_maps) {
 		int tci = sk_rx_queue_get(sk);
@@ -3994,7 +3983,7 @@ static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev,
 
 get_cpus_map:
 	if (queue_index < 0) {
-		dev_maps = rcu_dereference(sb_dev->xps_cpus_map);
+		dev_maps = rcu_dereference(sb_dev->xps_maps[XPS_CPUS]);
 		if (dev_maps) {
 			unsigned int tci = skb->sender_cpu - 1;
@@ -1388,7 +1388,7 @@ static ssize_t xps_cpus_show(struct netdev_queue *queue,
 	}
 
 	rcu_read_lock();
-	dev_maps = rcu_dereference(dev->xps_cpus_map);
+	dev_maps = rcu_dereference(dev->xps_maps[XPS_CPUS]);
 	nr_ids = dev_maps ? dev_maps->nr_ids : nr_cpu_ids;
 
 	mask = bitmap_zalloc(nr_ids, GFP_KERNEL);
@@ -1492,7 +1492,7 @@ static ssize_t xps_rxqs_show(struct netdev_queue *queue, char *buf)
 	}
 
 	rcu_read_lock();
-	dev_maps = rcu_dereference(dev->xps_rxqs_map);
+	dev_maps = rcu_dereference(dev->xps_maps[XPS_RXQS]);
 	nr_ids = dev_maps ? dev_maps->nr_ids : dev->num_rx_queues;
 
 	mask = bitmap_zalloc(nr_ids, GFP_KERNEL);
@@ -1566,7 +1566,7 @@ static ssize_t xps_rxqs_store(struct netdev_queue *queue, const char *buf,
 	}
 
 	cpus_read_lock();
-	err = __netif_set_xps_queue(dev, mask, index, true);
+	err = __netif_set_xps_queue(dev, mask, index, XPS_RXQS);
 	cpus_read_unlock();
 
 	rtnl_unlock();