Commit d1fe1944 authored by Florian Westphal's avatar Florian Westphal Committed by David S. Miller

inet: frag: don't re-use chainlist for evictor

commit 65ba1f1e ("inet: frags: fix a race between inet_evict_bucket
and inet_frag_kill") describes the bug, but the fix doesn't work reliably.

Problem is that ->flags member can be set on other cpu without chainlock
being held by that task, i.e. the RMW-Cycle can clear INET_FRAG_EVICTED
bit after we put the element on the evictor private list.

We can crash when walking the 'private' evictor list since an element can
be deleted from list underneath the evictor.

Join work with Nikolay Alexandrov.

Fixes: b13d3cbf ("inet: frag: move eviction of queues to work queue")
Reported-by: Johan Schuijt <johan@transip.nl>
Tested-by: Frank Schreuder <fschreuder@transip.nl>
Signed-off-by: Nikolay Alexandrov <nikolay@cumulusnetworks.com>
Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 81296fc6
@@ -45,6 +45,7 @@ enum {
  * @flags: fragment queue flags
  * @max_size: maximum received fragment size
  * @net: namespace that this frag belongs to
+ * @list_evictor: list of queues to forcefully evict (e.g. due to low memory)
  */
 struct inet_frag_queue {
 	spinlock_t		lock;
@@ -59,6 +60,7 @@ struct inet_frag_queue {
 	__u8			flags;
 	u16			max_size;
 	struct netns_frags	*net;
+	struct hlist_node	list_evictor;
 };

 #define INETFRAGS_HASHSZ	1024
@@ -151,14 +151,13 @@ inet_evict_bucket(struct inet_frags *f, struct inet_frag_bucket *hb)
 		}

 		fq->flags |= INET_FRAG_EVICTED;
-		hlist_del(&fq->list);
-		hlist_add_head(&fq->list, &expired);
+		hlist_add_head(&fq->list_evictor, &expired);
 		++evicted;
 	}

 	spin_unlock(&hb->chain_lock);

-	hlist_for_each_entry_safe(fq, n, &expired, list)
+	hlist_for_each_entry_safe(fq, n, &expired, list_evictor)
 		f->frag_expire((unsigned long) fq);

 	return evicted;
@@ -284,8 +283,7 @@ static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
 	struct inet_frag_bucket *hb;

 	hb = get_frag_bucket_locked(fq, f);
-	if (!(fq->flags & INET_FRAG_EVICTED))
-		hlist_del(&fq->list);
+	hlist_del(&fq->list);
 	spin_unlock(&hb->chain_lock);
 }
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment