Commit 9b84b808 authored by Javier Cardona, committed by John W. Linville

mac80211: Fix locking bug on mesh path table access

The mesh and mpp path tables are accessed from softirq and workqueue
context so non-irq locking cannot be used.  Or at least that's what
PROVE_RCU seems to tell us here:

[  431.240946] =================================
[  431.241061] [ INFO: inconsistent lock state ]
[  431.241061] 2.6.39-rc3-wl+ #354
[  431.241061] ---------------------------------
[  431.241061] inconsistent {IN-SOFTIRQ-W} -> {SOFTIRQ-ON-W} usage.
[  431.241061] kworker/u:1/1423 [HC0[0]:SC0[0]:HE1:SE1] takes:
[  431.241061]  (&(&newtbl->hashwlock[i])->rlock){+.?...}, at:
[<c14671bf>] mesh_path_add+0x167/0x257
Signed-off-by: Javier Cardona <javier@cozybit.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
parent 9ca99eec
...@@ -55,12 +55,12 @@ void mesh_table_free(struct mesh_table *tbl, bool free_leafs) ...@@ -55,12 +55,12 @@ void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
mesh_hash = tbl->hash_buckets; mesh_hash = tbl->hash_buckets;
for (i = 0; i <= tbl->hash_mask; i++) { for (i = 0; i <= tbl->hash_mask; i++) {
spin_lock(&tbl->hashwlock[i]); spin_lock_bh(&tbl->hashwlock[i]);
hlist_for_each_safe(p, q, &mesh_hash[i]) { hlist_for_each_safe(p, q, &mesh_hash[i]) {
tbl->free_node(p, free_leafs); tbl->free_node(p, free_leafs);
atomic_dec(&tbl->entries); atomic_dec(&tbl->entries);
} }
spin_unlock(&tbl->hashwlock[i]); spin_unlock_bh(&tbl->hashwlock[i]);
} }
__mesh_table_free(tbl); __mesh_table_free(tbl);
} }
...@@ -274,7 +274,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata) ...@@ -274,7 +274,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
if (!new_node) if (!new_node)
goto err_node_alloc; goto err_node_alloc;
read_lock(&pathtbl_resize_lock); read_lock_bh(&pathtbl_resize_lock);
memcpy(new_mpath->dst, dst, ETH_ALEN); memcpy(new_mpath->dst, dst, ETH_ALEN);
new_mpath->sdata = sdata; new_mpath->sdata = sdata;
new_mpath->flags = 0; new_mpath->flags = 0;
...@@ -289,7 +289,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata) ...@@ -289,7 +289,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
hash_idx = mesh_table_hash(dst, sdata, mesh_paths); hash_idx = mesh_table_hash(dst, sdata, mesh_paths);
bucket = &mesh_paths->hash_buckets[hash_idx]; bucket = &mesh_paths->hash_buckets[hash_idx];
spin_lock(&mesh_paths->hashwlock[hash_idx]); spin_lock_bh(&mesh_paths->hashwlock[hash_idx]);
err = -EEXIST; err = -EEXIST;
hlist_for_each_entry(node, n, bucket, list) { hlist_for_each_entry(node, n, bucket, list) {
...@@ -305,8 +305,8 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata) ...@@ -305,8 +305,8 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
mesh_paths_generation++; mesh_paths_generation++;
spin_unlock(&mesh_paths->hashwlock[hash_idx]); spin_unlock_bh(&mesh_paths->hashwlock[hash_idx]);
read_unlock(&pathtbl_resize_lock); read_unlock_bh(&pathtbl_resize_lock);
if (grow) { if (grow) {
set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags); set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
ieee80211_queue_work(&local->hw, &sdata->work); ieee80211_queue_work(&local->hw, &sdata->work);
...@@ -314,8 +314,8 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata) ...@@ -314,8 +314,8 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
return 0; return 0;
err_exists: err_exists:
spin_unlock(&mesh_paths->hashwlock[hash_idx]); spin_unlock_bh(&mesh_paths->hashwlock[hash_idx]);
read_unlock(&pathtbl_resize_lock); read_unlock_bh(&pathtbl_resize_lock);
kfree(new_node); kfree(new_node);
err_node_alloc: err_node_alloc:
kfree(new_mpath); kfree(new_mpath);
...@@ -332,16 +332,17 @@ void mesh_mpath_table_grow(void) ...@@ -332,16 +332,17 @@ void mesh_mpath_table_grow(void)
newtbl = mesh_table_alloc(rcu_dereference(mesh_paths)->size_order + 1); newtbl = mesh_table_alloc(rcu_dereference(mesh_paths)->size_order + 1);
if (!newtbl) if (!newtbl)
return; return;
write_lock(&pathtbl_resize_lock); write_lock_bh(&pathtbl_resize_lock);
oldtbl = mesh_paths; oldtbl = mesh_paths;
if (mesh_table_grow(mesh_paths, newtbl) < 0) { if (mesh_table_grow(mesh_paths, newtbl) < 0) {
rcu_read_unlock();
__mesh_table_free(newtbl); __mesh_table_free(newtbl);
write_unlock(&pathtbl_resize_lock); write_unlock_bh(&pathtbl_resize_lock);
return; return;
} }
rcu_read_unlock(); rcu_read_unlock();
rcu_assign_pointer(mesh_paths, newtbl); rcu_assign_pointer(mesh_paths, newtbl);
write_unlock(&pathtbl_resize_lock); write_unlock_bh(&pathtbl_resize_lock);
synchronize_rcu(); synchronize_rcu();
mesh_table_free(oldtbl, false); mesh_table_free(oldtbl, false);
...@@ -355,16 +356,17 @@ void mesh_mpp_table_grow(void) ...@@ -355,16 +356,17 @@ void mesh_mpp_table_grow(void)
newtbl = mesh_table_alloc(rcu_dereference(mpp_paths)->size_order + 1); newtbl = mesh_table_alloc(rcu_dereference(mpp_paths)->size_order + 1);
if (!newtbl) if (!newtbl)
return; return;
write_lock(&pathtbl_resize_lock); write_lock_bh(&pathtbl_resize_lock);
oldtbl = mpp_paths; oldtbl = mpp_paths;
if (mesh_table_grow(mpp_paths, newtbl) < 0) { if (mesh_table_grow(mpp_paths, newtbl) < 0) {
rcu_read_unlock();
__mesh_table_free(newtbl); __mesh_table_free(newtbl);
write_unlock(&pathtbl_resize_lock); write_unlock_bh(&pathtbl_resize_lock);
return; return;
} }
rcu_read_unlock(); rcu_read_unlock();
rcu_assign_pointer(mpp_paths, newtbl); rcu_assign_pointer(mpp_paths, newtbl);
write_unlock(&pathtbl_resize_lock); write_unlock_bh(&pathtbl_resize_lock);
synchronize_rcu(); synchronize_rcu();
mesh_table_free(oldtbl, false); mesh_table_free(oldtbl, false);
...@@ -398,7 +400,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata) ...@@ -398,7 +400,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
if (!new_node) if (!new_node)
goto err_node_alloc; goto err_node_alloc;
read_lock(&pathtbl_resize_lock); read_lock_bh(&pathtbl_resize_lock);
memcpy(new_mpath->dst, dst, ETH_ALEN); memcpy(new_mpath->dst, dst, ETH_ALEN);
memcpy(new_mpath->mpp, mpp, ETH_ALEN); memcpy(new_mpath->mpp, mpp, ETH_ALEN);
new_mpath->sdata = sdata; new_mpath->sdata = sdata;
...@@ -411,7 +413,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata) ...@@ -411,7 +413,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
hash_idx = mesh_table_hash(dst, sdata, mpp_paths); hash_idx = mesh_table_hash(dst, sdata, mpp_paths);
bucket = &mpp_paths->hash_buckets[hash_idx]; bucket = &mpp_paths->hash_buckets[hash_idx];
spin_lock(&mpp_paths->hashwlock[hash_idx]); spin_lock_bh(&mpp_paths->hashwlock[hash_idx]);
err = -EEXIST; err = -EEXIST;
hlist_for_each_entry(node, n, bucket, list) { hlist_for_each_entry(node, n, bucket, list) {
...@@ -425,8 +427,8 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata) ...@@ -425,8 +427,8 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
mpp_paths->mean_chain_len * (mpp_paths->hash_mask + 1)) mpp_paths->mean_chain_len * (mpp_paths->hash_mask + 1))
grow = 1; grow = 1;
spin_unlock(&mpp_paths->hashwlock[hash_idx]); spin_unlock_bh(&mpp_paths->hashwlock[hash_idx]);
read_unlock(&pathtbl_resize_lock); read_unlock_bh(&pathtbl_resize_lock);
if (grow) { if (grow) {
set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags); set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
ieee80211_queue_work(&local->hw, &sdata->work); ieee80211_queue_work(&local->hw, &sdata->work);
...@@ -434,8 +436,8 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata) ...@@ -434,8 +436,8 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
return 0; return 0;
err_exists: err_exists:
spin_unlock(&mpp_paths->hashwlock[hash_idx]); spin_unlock_bh(&mpp_paths->hashwlock[hash_idx]);
read_unlock(&pathtbl_resize_lock); read_unlock_bh(&pathtbl_resize_lock);
kfree(new_node); kfree(new_node);
err_node_alloc: err_node_alloc:
kfree(new_mpath); kfree(new_mpath);
...@@ -548,11 +550,11 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata) ...@@ -548,11 +550,11 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
int hash_idx; int hash_idx;
int err = 0; int err = 0;
read_lock(&pathtbl_resize_lock); read_lock_bh(&pathtbl_resize_lock);
hash_idx = mesh_table_hash(addr, sdata, mesh_paths); hash_idx = mesh_table_hash(addr, sdata, mesh_paths);
bucket = &mesh_paths->hash_buckets[hash_idx]; bucket = &mesh_paths->hash_buckets[hash_idx];
spin_lock(&mesh_paths->hashwlock[hash_idx]); spin_lock_bh(&mesh_paths->hashwlock[hash_idx]);
hlist_for_each_entry(node, n, bucket, list) { hlist_for_each_entry(node, n, bucket, list) {
mpath = node->mpath; mpath = node->mpath;
if (mpath->sdata == sdata && if (mpath->sdata == sdata &&
...@@ -570,8 +572,8 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata) ...@@ -570,8 +572,8 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
err = -ENXIO; err = -ENXIO;
enddel: enddel:
mesh_paths_generation++; mesh_paths_generation++;
spin_unlock(&mesh_paths->hashwlock[hash_idx]); spin_unlock_bh(&mesh_paths->hashwlock[hash_idx]);
read_unlock(&pathtbl_resize_lock); read_unlock_bh(&pathtbl_resize_lock);
return err; return err;
} }
...@@ -723,7 +725,7 @@ void mesh_path_expire(struct ieee80211_sub_if_data *sdata) ...@@ -723,7 +725,7 @@ void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
struct hlist_node *p; struct hlist_node *p;
int i; int i;
read_lock(&pathtbl_resize_lock); read_lock_bh(&pathtbl_resize_lock);
for_each_mesh_entry(mesh_paths, p, node, i) { for_each_mesh_entry(mesh_paths, p, node, i) {
if (node->mpath->sdata != sdata) if (node->mpath->sdata != sdata)
continue; continue;
...@@ -738,7 +740,7 @@ void mesh_path_expire(struct ieee80211_sub_if_data *sdata) ...@@ -738,7 +740,7 @@ void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
} else } else
spin_unlock_bh(&mpath->state_lock); spin_unlock_bh(&mpath->state_lock);
} }
read_unlock(&pathtbl_resize_lock); read_unlock_bh(&pathtbl_resize_lock);
} }
void mesh_pathtbl_unregister(void) void mesh_pathtbl_unregister(void)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment