Commit 91341fa0 authored by Eric Dumazet, committed by David S. Miller

inet: frags: annotate races around fqdir->dead and fqdir->high_thresh

Both fields can be read and written without synchronization;
add proper accessors and documentation.

Fixes: d5dd8879 ("inet: fix various use-after-free in defrags units")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 3ba8c625
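
Before the per-file hunks, a minimal userspace sketch of the annotation pattern this patch applies. It is illustrative only: demo_dir, demo_pre_exit() and demo_reader() are made-up names, and the simplified READ_ONCE()/WRITE_ONCE() macros below merely model the single, untorn, non-refetched access that the kernel versions in <linux/compiler.h> provide.

/*
 * Illustrative stand-ins for the kernel's READ_ONCE()/WRITE_ONCE():
 * a volatile cast forces exactly one access and prevents the compiler
 * from tearing, fusing or re-loading it.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define WRITE_ONCE(x, val)	(*(volatile __typeof__(x) *)&(x) = (val))
#define READ_ONCE(x)		(*(volatile const __typeof__(x) *)&(x))

/* Made-up stand-in for struct fqdir: one threshold, one shutdown flag. */
struct demo_dir {
	long	high_thresh;	/* read locklessly by lookup paths */
	bool	dead;		/* read locklessly by timer paths */
};

static struct demo_dir dir = { .high_thresh = 4 * 1024 * 1024 };

/* Writer side, mirroring fqdir_pre_exit(): annotated stores. */
static void demo_pre_exit(struct demo_dir *d)
{
	WRITE_ONCE(d->high_thresh, 0);	/* prevent creation of new entries */
	WRITE_ONCE(d->dead, true);	/* pairs with READ_ONCE() in readers */
}

/* Reader side, mirroring the inet_frag_find()/ip_expire() style checks. */
static void *demo_reader(void *arg)
{
	/* Load once into a local so every test sees the same snapshot. */
	long high_thresh = READ_ONCE(dir.high_thresh);

	(void)arg;
	if (!high_thresh || READ_ONCE(dir.dead))
		printf("reader: shutting down, bail out\n");
	else
		printf("reader: alive, threshold %ld\n", high_thresh);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, demo_reader, NULL);
	demo_pre_exit(&dir);	/* races with the reader, by design */
	pthread_join(t, NULL);
	return 0;
}

The sketch only models the load/store annotation; in the kernel, RCU and the per-queue spinlock still provide the actual teardown ordering, as the inet_frag_kill() comment in the hunks below spells out.
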
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -117,8 +117,15 @@ int fqdir_init(struct fqdir **fqdirp, struct inet_frags *f, struct net *net);
 static inline void fqdir_pre_exit(struct fqdir *fqdir)
 {
-	fqdir->high_thresh = 0; /* prevent creation of new frags */
-	fqdir->dead = true;
+	/* Prevent creation of new frags.
+	 * Pairs with READ_ONCE() in inet_frag_find().
+	 */
+	WRITE_ONCE(fqdir->high_thresh, 0);
+
+	/* Pairs with READ_ONCE() in inet_frag_kill(), ip_expire()
+	 * and ip6frag_expire_frag_queue().
+	 */
+	WRITE_ONCE(fqdir->dead, true);
 }
 void fqdir_exit(struct fqdir *fqdir);
--- a/include/net/ipv6_frag.h
+++ b/include/net/ipv6_frag.h
@@ -67,7 +67,8 @@ ip6frag_expire_frag_queue(struct net *net, struct frag_queue *fq)
 	struct sk_buff *head;
 
 	rcu_read_lock();
-	if (fq->q.fqdir->dead)
+	/* Paired with the WRITE_ONCE() in fqdir_pre_exit(). */
+	if (READ_ONCE(fq->q.fqdir->dead))
 		goto out_rcu_unlock;
 	spin_lock(&fq->q.lock);
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -235,9 +235,9 @@ void inet_frag_kill(struct inet_frag_queue *fq)
 		/* The RCU read lock provides a memory barrier
 		 * guaranteeing that if fqdir->dead is false then
 		 * the hash table destruction will not start until
-		 * after we unlock. Paired with inet_frags_exit_net().
+		 * after we unlock. Paired with fqdir_pre_exit().
 		 */
-		if (!fqdir->dead) {
+		if (!READ_ONCE(fqdir->dead)) {
 			rhashtable_remove_fast(&fqdir->rhashtable, &fq->node,
 					       fqdir->f->rhash_params);
 			refcount_dec(&fq->refcnt);
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -352,9 +352,11 @@ static struct inet_frag_queue *inet_frag_create(struct fqdir *fqdir,
 /* TODO : call from rcu_read_lock() and no longer use refcount_inc_not_zero() */
 struct inet_frag_queue *inet_frag_find(struct fqdir *fqdir, void *key)
 {
+	/* This pairs with WRITE_ONCE() in fqdir_pre_exit(). */
+	long high_thresh = READ_ONCE(fqdir->high_thresh);
 	struct inet_frag_queue *fq = NULL, *prev;
 
-	if (!fqdir->high_thresh || frag_mem_limit(fqdir) > fqdir->high_thresh)
+	if (!high_thresh || frag_mem_limit(fqdir) > high_thresh)
 		return NULL;
 
 	rcu_read_lock();
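A note on the inet_frag_find() hunk above: high_thresh is loaded once into a local, so the zero test and the frag_mem_limit() comparison are guaranteed to use the same snapshot even if fqdir_pre_exit() clears the field in between; with two plain reads the compiler was free to reload the value for each use.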
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -144,7 +144,8 @@ static void ip_expire(struct timer_list *t)
 	rcu_read_lock();
 
-	if (qp->q.fqdir->dead)
+	/* Paired with WRITE_ONCE() in fqdir_pre_exit(). */
+	if (READ_ONCE(qp->q.fqdir->dead))
 		goto out_rcu_unlock;
 
 	spin_lock(&qp->q.lock);