Commit a4177869 authored by Eric Dumazet, committed by David S. Miller

xps: add __rcu annotations

Avoid sparse warnings: add __rcu annotations and use
rcu_dereference_protected() where necessary.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Tom Herbert <therbert@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent b02038a1
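Background for the hunks below (a minimal, illustrative sketch; the foo_* names are hypothetical and not part of this patch): marking a pointer __rcu lets sparse verify that it is only touched through the RCU accessors, and update-side code that holds the protecting mutex states that fact explicitly with rcu_dereference_protected() plus a lockdep expression, typically wrapped in a small helper macro just like the xmap_dereference() added here.

/*
 * Illustrative sketch only (hypothetical foo_* names); assumes the usual
 * kernel RCU, mutex and slab headers.
 */
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct foo_maps {
	struct rcu_head rcu;
	int data;
};

static DEFINE_MUTEX(foo_mutex);

/* __rcu: sparse now warns on any access that bypasses the RCU accessors. */
static struct foo_maps __rcu *foo_maps;

/* Update-side accessor: the mutex, not rcu_read_lock(), provides protection. */
#define foo_dereference(P) \
	rcu_dereference_protected((P), lockdep_is_held(&foo_mutex))

static void foo_maps_release(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct foo_maps, rcu));
}

static void foo_replace(struct foo_maps *new_maps)
{
	struct foo_maps *old;

	mutex_lock(&foo_mutex);
	old = foo_dereference(foo_maps);	/* no sparse warning: protection is stated */
	rcu_assign_pointer(foo_maps, new_maps);	/* publish with the required barrier */
	mutex_unlock(&foo_mutex);

	if (old)
		call_rcu(&old->rcu, foo_maps_release);	/* free after a grace period */
}

With the lock expressed in the accessor, sparse and lockdep together catch both unannotated accesses and accesses made without the protecting mutex held.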
@@ -622,7 +622,7 @@ struct xps_map {
  */
 struct xps_dev_maps {
 	struct rcu_head rcu;
-	struct xps_map *cpu_map[0];
+	struct xps_map __rcu *cpu_map[0];
 };
 #define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) +		\
     (nr_cpu_ids * sizeof(struct xps_map *)))
@@ -1049,7 +1049,7 @@ struct net_device {
 	spinlock_t		tx_global_lock;
 #ifdef CONFIG_XPS
-	struct xps_dev_maps *xps_maps;
+	struct xps_dev_maps __rcu *xps_maps;
 #endif
 	/* These may be needed for future network-power-down code. */
@@ -899,6 +899,8 @@ static void xps_dev_maps_release(struct rcu_head *rcu)
 }
 static DEFINE_MUTEX(xps_map_mutex);
+#define xmap_dereference(P)		\
+	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
 static ssize_t store_xps_map(struct netdev_queue *queue,
 			     struct netdev_queue_attribute *attribute,
@@ -935,11 +937,12 @@ static ssize_t store_xps_map(struct netdev_queue *queue,
 	mutex_lock(&xps_map_mutex);
-	dev_maps = dev->xps_maps;
+	dev_maps = xmap_dereference(dev->xps_maps);
 	for_each_possible_cpu(cpu) {
-		new_map = map = dev_maps ? dev_maps->cpu_map[cpu] : NULL;
+		map = dev_maps ?
+			xmap_dereference(dev_maps->cpu_map[cpu]) : NULL;
+		new_map = map;
 		if (map) {
 			for (pos = 0; pos < map->len; pos++)
 				if (map->queues[pos] == index)
@@ -975,13 +978,14 @@ static ssize_t store_xps_map(struct netdev_queue *queue,
 			else
 				new_map = NULL;
 		}
-		new_dev_maps->cpu_map[cpu] = new_map;
+		RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], new_map);
 	}
 	/* Cleanup old maps */
 	for_each_possible_cpu(cpu) {
-		map = dev_maps ? dev_maps->cpu_map[cpu] : NULL;
-		if (map && new_dev_maps->cpu_map[cpu] != map)
+		map = dev_maps ?
+			xmap_dereference(dev_maps->cpu_map[cpu]) : NULL;
+		if (map && xmap_dereference(new_dev_maps->cpu_map[cpu]) != map)
 			call_rcu(&map->rcu, xps_map_release);
 		if (new_dev_maps->cpu_map[cpu])
 			nonempty = 1;
@@ -1007,7 +1011,9 @@ static ssize_t store_xps_map(struct netdev_queue *queue,
 	if (new_dev_maps)
 		for_each_possible_cpu(i)
-			kfree(new_dev_maps->cpu_map[i]);
+			kfree(rcu_dereference_protected(
+				new_dev_maps->cpu_map[i],
+				1));
 	kfree(new_dev_maps);
 	free_cpumask_var(mask);
 	return -ENOMEM;
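One subtlety in the error path above: new_dev_maps has not yet been published with rcu_assign_pointer(), so no RCU reader can reach it, and the constant 1 passed as the condition to rcu_dereference_protected() records exactly that while still satisfying sparse about the __rcu-annotated cpu_map[] slots. A hedged sketch of the same idiom, using a hypothetical foo_table built on the foo_maps type from the earlier example (not code from this patch):

#include <linux/cpumask.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical table holding one __rcu slot per possible CPU. */
struct foo_table {
	struct foo_maps __rcu *slot[NR_CPUS];
};

/* Free a table that was allocated but never made visible to readers. */
static void foo_table_free_unpublished(struct foo_table *t)
{
	int cpu;

	for_each_possible_cpu(cpu)
		/* Condition "1": no reader can see t, so no grace period is needed. */
		kfree(rcu_dereference_protected(t->slot[cpu], 1));
	kfree(t);
}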
@@ -1033,11 +1039,11 @@ static void netdev_queue_release(struct kobject *kobj)
 	index = get_netdev_queue_index(queue);
 	mutex_lock(&xps_map_mutex);
-	dev_maps = dev->xps_maps;
+	dev_maps = xmap_dereference(dev->xps_maps);
 	if (dev_maps) {
 		for_each_possible_cpu(i) {
-			map = dev_maps->cpu_map[i];
+			map = xmap_dereference(dev_maps->cpu_map[i]);
 			if (!map)
 				continue;
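For completeness, the read side is untouched by this patch: readers load the same __rcu pointers with rcu_dereference() inside an RCU read-side critical section. A minimal sketch, reusing the hypothetical foo_maps pointer from the first example (the real reader of xps_maps lives in the transmit path and is not shown here):

/* Illustrative reader: no mutex, just an RCU read-side critical section. */
static int foo_read(void)
{
	struct foo_maps *maps;
	int val = -1;

	rcu_read_lock();
	maps = rcu_dereference(foo_maps);	/* the __rcu pointer from the sketch above */
	if (maps)
		val = maps->data;
	rcu_read_unlock();

	return val;
}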