Commit 3ef0c7a7 authored by Manfred Spraul, committed by Paul E. McKenney

net/netfilter/nf_conntrack_core: Fix nf_conntrack_lock()

As we want to remove spin_unlock_wait() and replace it with explicit
spin_lock()/spin_unlock() calls, we can use the replacement to simplify
the locking.

In addition:
- Reading nf_conntrack_locks_all needs ACQUIRE memory ordering.
- The new code avoids the backwards loop.

Only lightly tested; I did not manage to trigger calls to
nf_conntrack_all_lock().

V2: improved the comments to clearly show how the barriers pair.
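
The intended pairing can be modeled in plain userspace code. Below is a
minimal sketch, assuming C11 atomics and pthread mutexes in place of the
kernel's spinlocks; bucket_lock, all_lock, locks_all, NLOCKS and the
function names are illustrative inventions, not kernel identifiers.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    #define NLOCKS 16

    static pthread_mutex_t bucket_lock[NLOCKS];
    static pthread_mutex_t all_lock = PTHREAD_MUTEX_INITIALIZER;
    static atomic_bool locks_all;	/* models nf_conntrack_locks_all */

    static void init_bucket_locks(void)
    {
    	for (int i = 0; i < NLOCKS; i++)
    		pthread_mutex_init(&bucket_lock[i], NULL);
    }

    /* models nf_conntrack_lock(): fast path first, slow path on conflict */
    static void bucket_lock_acquire(int i)
    {
    	/* 1) take the per-bucket lock */
    	pthread_mutex_lock(&bucket_lock[i]);

    	/* 2) ACQUIRE load; pairs with the RELEASE store in all_lock_release() */
    	if (!atomic_load_explicit(&locks_all, memory_order_acquire))
    		return;

    	/* fast path failed: wait for the global locker, then retake our lock */
    	pthread_mutex_unlock(&bucket_lock[i]);
    	pthread_mutex_lock(&all_lock);
    	pthread_mutex_lock(&bucket_lock[i]);
    	pthread_mutex_unlock(&all_lock);
    }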

Fixes: b16c2919 ("netfilter: nf_conntrack: use safer way to lock all buckets")
Signed-off-by: Manfred Spraul <manfred@colorfullife.com>
Cc: <stable@vger.kernel.org>
Cc: Alan Stern <stern@rowland.harvard.edu>
Cc: Sasha Levin <sasha.levin@oracle.com>
Cc: Pablo Neira Ayuso <pablo@netfilter.org>
Cc: netfilter-devel@vger.kernel.org
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
parent 931ab4a5
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -96,19 +96,26 @@ static struct conntrack_gc_work conntrack_gc_work;
 
 void nf_conntrack_lock(spinlock_t *lock) __acquires(lock)
 {
+	/* 1) Acquire the lock */
 	spin_lock(lock);
-	while (unlikely(nf_conntrack_locks_all)) {
-		spin_unlock(lock);
 
-		/*
-		 * Order the 'nf_conntrack_locks_all' load vs. the
-		 * spin_unlock_wait() loads below, to ensure
-		 * that 'nf_conntrack_locks_all_lock' is indeed held:
-		 */
-		smp_rmb(); /* spin_lock(&nf_conntrack_locks_all_lock) */
-		spin_unlock_wait(&nf_conntrack_locks_all_lock);
-		spin_lock(lock);
-	}
+	/* 2) read nf_conntrack_locks_all, with ACQUIRE semantics
+	 * It pairs with the smp_store_release() in nf_conntrack_all_unlock()
+	 */
+	if (likely(smp_load_acquire(&nf_conntrack_locks_all) == false))
+		return;
+
+	/* fast path failed, unlock */
+	spin_unlock(lock);
+
+	/* Slow path 1) get global lock */
+	spin_lock(&nf_conntrack_locks_all_lock);
+
+	/* Slow path 2) get the lock we want */
+	spin_lock(lock);
+
+	/* Slow path 3) release the global lock */
+	spin_unlock(&nf_conntrack_locks_all_lock);
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_lock);
 
@@ -149,28 +156,27 @@ static void nf_conntrack_all_lock(void)
 	int i;
 
 	spin_lock(&nf_conntrack_locks_all_lock);
-	nf_conntrack_locks_all = true;
 
-	/*
-	 * Order the above store of 'nf_conntrack_locks_all' against
-	 * the spin_unlock_wait() loads below, such that if
-	 * nf_conntrack_lock() observes 'nf_conntrack_locks_all'
-	 * we must observe nf_conntrack_locks[] held:
-	 */
-	smp_mb(); /* spin_lock(&nf_conntrack_locks_all_lock) */
+	nf_conntrack_locks_all = true;
 
 	for (i = 0; i < CONNTRACK_LOCKS; i++) {
-		spin_unlock_wait(&nf_conntrack_locks[i]);
+		spin_lock(&nf_conntrack_locks[i]);
+
+		/* This spin_unlock provides the "release" to ensure that
+		 * nf_conntrack_locks_all==true is visible to everyone that
+		 * acquired spin_lock(&nf_conntrack_locks[]).
+		 */
+		spin_unlock(&nf_conntrack_locks[i]);
 	}
 }
 
 static void nf_conntrack_all_unlock(void)
 {
-	/*
-	 * All prior stores must be complete before we clear
+	/* All prior stores must be complete before we clear
 	 * 'nf_conntrack_locks_all'. Otherwise nf_conntrack_lock()
 	 * might observe the false value but not the entire
-	 * critical section:
+	 * critical section.
+	 * It pairs with the smp_load_acquire() in nf_conntrack_lock()
 	 */
 	smp_store_release(&nf_conntrack_locks_all, false);
 	spin_unlock(&nf_conntrack_locks_all_lock);
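
The global side of the protocol can be modeled the same way, continuing
the hypothetical userspace sketch above (bucket_lock, all_lock and
locks_all remain illustrative names): nf_conntrack_all_lock() is mimicked
by taking and dropping every bucket lock, each unlock acting as the
RELEASE that publishes locks_all == true to any later acquirer of that
bucket lock, while nf_conntrack_all_unlock() is the RELEASE store that
pairs with the fast path's ACQUIRE load.

    /* models nf_conntrack_all_lock(): publish locks_all through each bucket lock */
    static void all_lock_acquire(void)
    {
    	pthread_mutex_lock(&all_lock);

    	/* plain store; made visible by the bucket-lock unlocks below */
    	atomic_store_explicit(&locks_all, true, memory_order_relaxed);

    	for (int i = 0; i < NLOCKS; i++) {
    		pthread_mutex_lock(&bucket_lock[i]);
    		/*
    		 * The unlock acts as a RELEASE: whoever takes
    		 * bucket_lock[i] after this must observe
    		 * locks_all == true and divert to the slow path.
    		 */
    		pthread_mutex_unlock(&bucket_lock[i]);
    	}
    }

    /* models nf_conntrack_all_unlock() */
    static void all_lock_release(void)
    {
    	/* RELEASE store; pairs with the ACQUIRE load in bucket_lock_acquire() */
    	atomic_store_explicit(&locks_all, false, memory_order_release);
    	pthread_mutex_unlock(&all_lock);
    }

Note that pthread mutexes already give stronger ordering than strictly
required; the explicit memory_order_acquire/memory_order_release calls
only mark the points where the kernel code relies on ordering.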