Commit b145425f authored by Eric Dumazet, committed by David S. Miller

inetpeer: remove AVL implementation in favor of RB tree

As discussed in Faro during Netfilter Workshop 2017, RB trees can be
used with RCU, using a seqlock.

Note that net/rxrpc/conn_service.c is already using this.

This patch converts inetpeer from an AVL tree to an RB tree, which lets us
remove the private AVL implementation in favor of the shared RB tree code.

$ size net/ipv4/inetpeer.before net/ipv4/inetpeer.after
   text    data     bss     dec     hex filename
   3195      40     128    3363     d23 net/ipv4/inetpeer.before
   1562      24       0    1586     632 net/ipv4/inetpeer.after

The same technique can be used to speed up
net/netfilter/nft_set_rbtree.c (removing rwlock contention in the fast path).
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 27eac47b
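
The heart of the conversion is the seqlock read protocol: a lockless reader samples the sequence counter, walks the tree under rcu_read_lock(), and retries if the counter moved, while RCU guarantees that every node the reader may have chased through a concurrent rebalance is still valid memory. Below is a minimal user-space sketch of that protocol; all names are illustrative stand-ins (not the kernel's seqlock_t API), and it uses conservative seq_cst C11 atomics where the kernel is far more careful about barriers.

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_uint seqcount;   /* even = stable, odd = writer active */
static atomic_int  shared_val; /* stands in for the RCU-protected tree */

static unsigned int read_seqbegin_sketch(void)
{
	unsigned int s;

	do {
		s = atomic_load(&seqcount);
	} while (s & 1);			/* writer active: wait for even */
	return s;
}

static bool read_seqretry_sketch(unsigned int s)
{
	return atomic_load(&seqcount) != s;	/* counter moved: retry */
}

static void write_begin_sketch(void) { atomic_fetch_add(&seqcount, 1); }
static void write_end_sketch(void)   { atomic_fetch_add(&seqcount, 1); }

int main(void)
{
	unsigned int seq;
	int v;

	/* Reader side: the lockless-attempt pattern inet_getpeer() uses. */
	do {
		seq = read_seqbegin_sketch();
		v = atomic_load(&shared_val);	/* walk the tree here */
	} while (read_seqretry_sketch(seq));
	printf("read %d at seq %u\n", v, seq);

	/* Writer side: bracket tree modifications with the sequence bump. */
	write_begin_sketch();
	atomic_store(&shared_val, 42);
	write_end_sketch();
	return 0;
}
```

A writer keeps the counter odd for the duration of its update, so readers either wait it out or detect the change and retry; that is what allows rb_erase() and rb_insert_color() to run while lockless readers walk the tree.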
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -33,18 +33,12 @@ struct inetpeer_addr {
 };
 
 struct inet_peer {
-	/* group together avl_left,avl_right,v4daddr to speedup lookups */
-	struct inet_peer __rcu	*avl_left, *avl_right;
+	struct rb_node		rb_node;
 	struct inetpeer_addr	daddr;
-	__u32			avl_height;
 
 	u32			metrics[RTAX_MAX];
 	u32			rate_tokens;	/* rate limiting for ICMP */
 	unsigned long		rate_last;
-	union {
-		struct list_head	gc_list;
-		struct rcu_head		gc_rcu;
-	};
 	/*
 	 * Once inet_peer is queued for deletion (refcnt == 0), following field
 	 * is not available: rid
@@ -55,7 +49,6 @@ struct inet_peer {
 			atomic_t	rid;		/* Frag reception counter */
 		};
 		struct rcu_head		rcu;
-		struct inet_peer	*gc_next;
 	};
 
 	/* following fields might be frequently dirtied */
@@ -64,7 +57,7 @@ struct inet_peer {
 };
 
 struct inet_peer_base {
-	struct inet_peer __rcu	*root;
+	struct rb_root		rb_root;
 	seqlock_t		lock;
 	int			total;
 };
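In the header, the two AVL child pointers and the height field collapse into a single embedded struct rb_node, and struct inet_peer_base holds an rb_root instead of a root pointer. The tree code only ever sees the embedded node; rb_entry() (container_of() underneath) recovers the enclosing peer. A self-contained sketch of that recovery, using hypothetical stand-in types rather than the kernel's:

```c
#include <stddef.h>
#include <stdio.h>

struct rb_node_sketch { void *left, *right; };	/* stand-in, not the kernel type */

struct peer_sketch {
	int key;
	struct rb_node_sketch rb;	/* embedded node, as in struct inet_peer */
};

/* rb_entry()/container_of(): subtract the member offset to step back
 * from the embedded node to its containing structure. */
#define rb_entry_sketch(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct peer_sketch p = { .key = 42 };
	struct rb_node_sketch *n = &p.rb;	/* what a tree walk returns */

	printf("key = %d\n", rb_entry_sketch(n, struct peer_sketch, rb)->key);
	return 0;
}
```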
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -33,7 +33,7 @@
  *  also be removed if the pool is overloaded i.e. if the total amount of
  *  entries is greater-or-equal than the threshold.
  *
- *  Node pool is organised as an AVL tree.
+ *  Node pool is organised as an RB tree.
  *  Such an implementation has been chosen not just for fun.  It's a way to
  *  prevent easy and efficient DoS attacks by creating hash collisions.  A huge
  *  amount of long living nodes in a single hash slot would significantly delay
@@ -45,7 +45,7 @@
  *		AND reference count being 0.
  *  3.  Global variable peer_total is modified under the pool lock.
  *  4.  struct inet_peer fields modification:
- *		avl_left, avl_right, avl_parent, avl_height: pool lock
+ *		rb_node: pool lock
  *		refcnt: atomically against modifications on other CPU;
  *		   usually under some other lock to prevent node disappearing
  *		daddr: unchangeable
@@ -53,30 +53,15 @@
 
 static struct kmem_cache *peer_cachep __read_mostly;
 
-static LIST_HEAD(gc_list);
-static const int gc_delay = 60 * HZ;
-static struct delayed_work gc_work;
-static DEFINE_SPINLOCK(gc_lock);
-
-#define node_height(x) x->avl_height
-
-#define peer_avl_empty ((struct inet_peer *)&peer_fake_node)
-#define peer_avl_empty_rcu ((struct inet_peer __rcu __force *)&peer_fake_node)
-static const struct inet_peer peer_fake_node = {
-	.avl_left	= peer_avl_empty_rcu,
-	.avl_right	= peer_avl_empty_rcu,
-	.avl_height	= 0
-};
-
 void inet_peer_base_init(struct inet_peer_base *bp)
 {
-	bp->root = peer_avl_empty_rcu;
+	bp->rb_root = RB_ROOT;
 	seqlock_init(&bp->lock);
 	bp->total = 0;
 }
 EXPORT_SYMBOL_GPL(inet_peer_base_init);
 
-#define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */
+#define PEER_MAX_GC 32
 
 /* Exported for sysctl_net_ipv4.  */
 int inet_peer_threshold __read_mostly = 65536 + 128;	/* start to throw entries more
@@ -84,53 +69,6 @@ int inet_peer_threshold __read_mostly = 65536 + 128;	/* start to throw entries m
 int inet_peer_minttl __read_mostly = 120 * HZ;	/* TTL under high load: 120 sec */
 int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;	/* usual time to live: 10 min */
 
-static void inetpeer_gc_worker(struct work_struct *work)
-{
-	struct inet_peer *p, *n, *c;
-	struct list_head list;
-
-	spin_lock_bh(&gc_lock);
-	list_replace_init(&gc_list, &list);
-	spin_unlock_bh(&gc_lock);
-
-	if (list_empty(&list))
-		return;
-
-	list_for_each_entry_safe(p, n, &list, gc_list) {
-
-		if (need_resched())
-			cond_resched();
-
-		c = rcu_dereference_protected(p->avl_left, 1);
-		if (c != peer_avl_empty) {
-			list_add_tail(&c->gc_list, &list);
-			p->avl_left = peer_avl_empty_rcu;
-		}
-
-		c = rcu_dereference_protected(p->avl_right, 1);
-		if (c != peer_avl_empty) {
-			list_add_tail(&c->gc_list, &list);
-			p->avl_right = peer_avl_empty_rcu;
-		}
-
-		n = list_entry(p->gc_list.next, struct inet_peer, gc_list);
-
-		if (refcount_read(&p->refcnt) == 1) {
-			list_del(&p->gc_list);
-			kmem_cache_free(peer_cachep, p);
-		}
-	}
-
-	if (list_empty(&list))
-		return;
-
-	spin_lock_bh(&gc_lock);
-	list_splice(&list, &gc_list);
-	spin_unlock_bh(&gc_lock);
-
-	schedule_delayed_work(&gc_work, gc_delay);
-}
-
 /* Called from ip_output.c:ip_init */
 void __init inet_initpeers(void)
 {
@@ -153,225 +91,62 @@ void __init inet_initpeers(void)
 			sizeof(struct inet_peer),
 			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
 			NULL);
-
-	INIT_DEFERRABLE_WORK(&gc_work, inetpeer_gc_worker);
 }
 
-#define rcu_deref_locked(X, BASE)				\
-	rcu_dereference_protected(X, lockdep_is_held(&(BASE)->lock.lock))
-
-/*
- * Called with local BH disabled and the pool lock held.
- */
-#define lookup(_daddr, _stack, _base)				\
-({								\
-	struct inet_peer *u;					\
-	struct inet_peer __rcu **v;				\
-								\
-	stackptr = _stack;					\
-	*stackptr++ = &_base->root;				\
-	for (u = rcu_deref_locked(_base->root, _base);		\
-	     u != peer_avl_empty;) {				\
-		int cmp = inetpeer_addr_cmp(_daddr, &u->daddr);	\
-		if (cmp == 0)					\
-			break;					\
-		if (cmp == -1)					\
-			v = &u->avl_left;			\
-		else						\
-			v = &u->avl_right;			\
-		*stackptr++ = v;				\
-		u = rcu_deref_locked(*v, _base);		\
-	}							\
-	u;							\
-})
-
-/*
- * Called with rcu_read_lock()
- * Because we hold no lock against a writer, its quite possible we fall
- * in an endless loop.
- * But every pointer we follow is guaranteed to be valid thanks to RCU.
- * We exit from this function if number of links exceeds PEER_MAXDEPTH
- */
-static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr,
-				    struct inet_peer_base *base)
+/* Called with rcu_read_lock() or base->lock held */
+static struct inet_peer *lookup(const struct inetpeer_addr *daddr,
+				struct inet_peer_base *base,
+				unsigned int seq,
+				struct inet_peer *gc_stack[],
+				unsigned int *gc_cnt,
+				struct rb_node **parent_p,
+				struct rb_node ***pp_p)
 {
-	struct inet_peer *u = rcu_dereference(base->root);
-	int count = 0;
+	struct rb_node **pp, *parent;
+	struct inet_peer *p;
+
+	pp = &base->rb_root.rb_node;
+	parent = NULL;
+	while (*pp) {
+		int cmp;
 
-	while (u != peer_avl_empty) {
-		int cmp = inetpeer_addr_cmp(daddr, &u->daddr);
+		parent = rcu_dereference_raw(*pp);
+		p = rb_entry(parent, struct inet_peer, rb_node);
+		cmp = inetpeer_addr_cmp(daddr, &p->daddr);
 		if (cmp == 0) {
-			/* Before taking a reference, check if this entry was
-			 * deleted (refcnt=0)
-			 */
-			if (!refcount_inc_not_zero(&u->refcnt)) {
-				u = NULL;
-			}
-			return u;
+			if (!refcount_inc_not_zero(&p->refcnt))
+				break;
+			return p;
+		}
+		if (gc_stack) {
+			if (*gc_cnt < PEER_MAX_GC)
+				gc_stack[(*gc_cnt)++] = p;
+		} else if (unlikely(read_seqretry(&base->lock, seq))) {
+			break;
 		}
 		if (cmp == -1)
-			u = rcu_dereference(u->avl_left);
+			pp = &(*pp)->rb_left;
 		else
-			u = rcu_dereference(u->avl_right);
-		if (unlikely(++count == PEER_MAXDEPTH))
-			break;
+			pp = &(*pp)->rb_right;
 	}
+	*parent_p = parent;
+	*pp_p = pp;
 	return NULL;
 }
 
-/* Called with local BH disabled and the pool lock held. */
-#define lookup_rightempty(start, base)				\
-({								\
-	struct inet_peer *u;					\
-	struct inet_peer __rcu **v;				\
-	*stackptr++ = &start->avl_left;				\
-	v = &start->avl_left;					\
-	for (u = rcu_deref_locked(*v, base);			\
-	     u->avl_right != peer_avl_empty_rcu;) {		\
-		v = &u->avl_right;				\
-		*stackptr++ = v;				\
-		u = rcu_deref_locked(*v, base);			\
-	}							\
-	u;							\
-})
-
-/* Called with local BH disabled and the pool lock held.
- * Variable names are the proof of operation correctness.
- * Look into mm/map_avl.c for more detail description of the ideas.
- */
-static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
-			       struct inet_peer __rcu ***stackend,
-			       struct inet_peer_base *base)
-{
-	struct inet_peer __rcu **nodep;
-	struct inet_peer *node, *l, *r;
-	int lh, rh;
-
-	while (stackend > stack) {
-		nodep = *--stackend;
-		node = rcu_deref_locked(*nodep, base);
-		l = rcu_deref_locked(node->avl_left, base);
-		r = rcu_deref_locked(node->avl_right, base);
-		lh = node_height(l);
-		rh = node_height(r);
-		if (lh > rh + 1) { /* l: RH+2 */
-			struct inet_peer *ll, *lr, *lrl, *lrr;
-			int lrh;
-			ll = rcu_deref_locked(l->avl_left, base);
-			lr = rcu_deref_locked(l->avl_right, base);
-			lrh = node_height(lr);
-			if (lrh <= node_height(ll)) {	/* ll: RH+1 */
-				RCU_INIT_POINTER(node->avl_left, lr);	/* lr: RH or RH+1 */
-				RCU_INIT_POINTER(node->avl_right, r);	/* r: RH */
-				node->avl_height = lrh + 1; /* RH+1 or RH+2 */
-				RCU_INIT_POINTER(l->avl_left, ll);	/* ll: RH+1 */
-				RCU_INIT_POINTER(l->avl_right, node);	/* node: RH+1 or RH+2 */
-				l->avl_height = node->avl_height + 1;
-				RCU_INIT_POINTER(*nodep, l);
-			} else { /* ll: RH, lr: RH+1 */
-				lrl = rcu_deref_locked(lr->avl_left, base);	/* lrl: RH or RH-1 */
-				lrr = rcu_deref_locked(lr->avl_right, base);	/* lrr: RH or RH-1 */
-				RCU_INIT_POINTER(node->avl_left, lrr);	/* lrr: RH or RH-1 */
-				RCU_INIT_POINTER(node->avl_right, r);	/* r: RH */
-				node->avl_height = rh + 1; /* node: RH+1 */
-				RCU_INIT_POINTER(l->avl_left, ll);	/* ll: RH */
-				RCU_INIT_POINTER(l->avl_right, lrl);	/* lrl: RH or RH-1 */
-				l->avl_height = rh + 1;	/* l: RH+1 */
-				RCU_INIT_POINTER(lr->avl_left, l);	/* l: RH+1 */
-				RCU_INIT_POINTER(lr->avl_right, node);	/* node: RH+1 */
-				lr->avl_height = rh + 2;
-				RCU_INIT_POINTER(*nodep, lr);
-			}
-		} else if (rh > lh + 1) { /* r: LH+2 */
-			struct inet_peer *rr, *rl, *rlr, *rll;
-			int rlh;
-			rr = rcu_deref_locked(r->avl_right, base);
-			rl = rcu_deref_locked(r->avl_left, base);
-			rlh = node_height(rl);
-			if (rlh <= node_height(rr)) {	/* rr: LH+1 */
-				RCU_INIT_POINTER(node->avl_right, rl);	/* rl: LH or LH+1 */
-				RCU_INIT_POINTER(node->avl_left, l);	/* l: LH */
-				node->avl_height = rlh + 1; /* LH+1 or LH+2 */
-				RCU_INIT_POINTER(r->avl_right, rr);	/* rr: LH+1 */
-				RCU_INIT_POINTER(r->avl_left, node);	/* node: LH+1 or LH+2 */
-				r->avl_height = node->avl_height + 1;
-				RCU_INIT_POINTER(*nodep, r);
-			} else { /* rr: RH, rl: RH+1 */
-				rlr = rcu_deref_locked(rl->avl_right, base);	/* rlr: LH or LH-1 */
-				rll = rcu_deref_locked(rl->avl_left, base);	/* rll: LH or LH-1 */
-				RCU_INIT_POINTER(node->avl_right, rll);	/* rll: LH or LH-1 */
-				RCU_INIT_POINTER(node->avl_left, l);	/* l: LH */
-				node->avl_height = lh + 1; /* node: LH+1 */
-				RCU_INIT_POINTER(r->avl_right, rr);	/* rr: LH */
-				RCU_INIT_POINTER(r->avl_left, rlr);	/* rlr: LH or LH-1 */
-				r->avl_height = lh + 1;	/* r: LH+1 */
-				RCU_INIT_POINTER(rl->avl_right, r);	/* r: LH+1 */
-				RCU_INIT_POINTER(rl->avl_left, node);	/* node: LH+1 */
-				rl->avl_height = lh + 2;
-				RCU_INIT_POINTER(*nodep, rl);
-			}
-		} else {
-			node->avl_height = (lh > rh ? lh : rh) + 1;
-		}
-	}
-}
-
-/* Called with local BH disabled and the pool lock held. */
-#define link_to_pool(n, base)					\
-do {								\
-	n->avl_height = 1;					\
-	n->avl_left = peer_avl_empty_rcu;			\
-	n->avl_right = peer_avl_empty_rcu;			\
-	/* lockless readers can catch us now */			\
-	rcu_assign_pointer(**--stackptr, n);			\
-	peer_avl_rebalance(stack, stackptr, base);		\
-} while (0)
-
 static void inetpeer_free_rcu(struct rcu_head *head)
 {
 	kmem_cache_free(peer_cachep, container_of(head, struct inet_peer, rcu));
 }
 
-static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base,
-			     struct inet_peer __rcu **stack[PEER_MAXDEPTH])
-{
-	struct inet_peer __rcu ***stackptr, ***delp;
-
-	if (lookup(&p->daddr, stack, base) != p)
-		BUG();
-	delp = stackptr - 1; /* *delp[0] == p */
-	if (p->avl_left == peer_avl_empty_rcu) {
-		*delp[0] = p->avl_right;
-		--stackptr;
-	} else {
-		/* look for a node to insert instead of p */
-		struct inet_peer *t;
-		t = lookup_rightempty(p, base);
-		BUG_ON(rcu_deref_locked(*stackptr[-1], base) != t);
-		**--stackptr = t->avl_left;
-		/* t is removed, t->daddr > x->daddr for any
-		 * x in p->avl_left subtree.
-		 * Put t in the old place of p. */
-		RCU_INIT_POINTER(*delp[0], t);
-		t->avl_left = p->avl_left;
-		t->avl_right = p->avl_right;
-		t->avl_height = p->avl_height;
-		BUG_ON(delp[1] != &p->avl_left);
-		delp[1] = &t->avl_left; /* was &p->avl_left */
-	}
-	peer_avl_rebalance(stack, stackptr, base);
-	base->total--;
-	call_rcu(&p->rcu, inetpeer_free_rcu);
-}
-
 /* perform garbage collect on all items stacked during a lookup */
-static int inet_peer_gc(struct inet_peer_base *base,
-			struct inet_peer __rcu **stack[PEER_MAXDEPTH],
-			struct inet_peer __rcu ***stackptr)
+static void inet_peer_gc(struct inet_peer_base *base,
+			 struct inet_peer *gc_stack[],
+			 unsigned int gc_cnt)
 {
-	struct inet_peer *p, *gchead = NULL;
+	struct inet_peer *p;
 	__u32 delta, ttl;
-	int cnt = 0;
+	int i;
 
 	if (base->total >= inet_peer_threshold)
 		ttl = 0;			/* be aggressive */
@@ -379,43 +154,38 @@ static int inet_peer_gc(struct inet_peer_base *base,
 	else
 		ttl = inet_peer_maxttl
 				- (inet_peer_maxttl - inet_peer_minttl) / HZ *
 					base->total / inet_peer_threshold * HZ;
-	stackptr--; /* last stack slot is peer_avl_empty */
-	while (stackptr > stack) {
-		stackptr--;
-		p = rcu_deref_locked(**stackptr, base);
-		if (refcount_read(&p->refcnt) == 1) {
-			smp_rmb();
-			delta = (__u32)jiffies - p->dtime;
-			if (delta >= ttl && refcount_dec_if_one(&p->refcnt)) {
-				p->gc_next = gchead;
-				gchead = p;
-			}
-		}
+	for (i = 0; i < gc_cnt; i++) {
+		p = gc_stack[i];
+		delta = (__u32)jiffies - p->dtime;
+		if (delta < ttl || !refcount_dec_if_one(&p->refcnt))
+			gc_stack[i] = NULL;
 	}
-	while ((p = gchead) != NULL) {
-		gchead = p->gc_next;
-		cnt++;
-		unlink_from_pool(p, base, stack);
+	for (i = 0; i < gc_cnt; i++) {
+		p = gc_stack[i];
+		if (p) {
+			rb_erase(&p->rb_node, &base->rb_root);
+			base->total--;
+			call_rcu(&p->rcu, inetpeer_free_rcu);
+		}
 	}
-	return cnt;
 }
 
 struct inet_peer *inet_getpeer(struct inet_peer_base *base,
 			       const struct inetpeer_addr *daddr,
 			       int create)
 {
-	struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr;
-	struct inet_peer *p;
-	unsigned int sequence;
-	int invalidated, gccnt = 0;
+	struct inet_peer *p, *gc_stack[PEER_MAX_GC];
+	struct rb_node **pp, *parent;
+	unsigned int gc_cnt, seq;
+	int invalidated;
 
 	/* Attempt a lockless lookup first.
 	 * Because of a concurrent writer, we might not find an existing entry.
 	 */
 	rcu_read_lock();
-	sequence = read_seqbegin(&base->lock);
-	p = lookup_rcu(daddr, base);
-	invalidated = read_seqretry(&base->lock, sequence);
+	seq = read_seqbegin(&base->lock);
+	p = lookup(daddr, base, seq, NULL, &gc_cnt, &parent, &pp);
+	invalidated = read_seqretry(&base->lock, seq);
 	rcu_read_unlock();
 
 	if (p)
@@ -428,20 +198,13 @@ struct inet_peer *inet_getpeer(struct inet_peer_base *base,
 	/* retry an exact lookup, taking the lock before.
 	 * At least, nodes should be hot in our cache.
 	 */
+	parent = NULL;
 	write_seqlock_bh(&base->lock);
-relookup:
-	p = lookup(daddr, stack, base);
-	if (p != peer_avl_empty) {
-		refcount_inc(&p->refcnt);
-		write_sequnlock_bh(&base->lock);
-		return p;
-	}
-	if (!gccnt) {
-		gccnt = inet_peer_gc(base, stack, stackptr);
-		if (gccnt && create)
-			goto relookup;
-	}
-	p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL;
+
+	gc_cnt = 0;
+	p = lookup(daddr, base, seq, gc_stack, &gc_cnt, &parent, &pp);
+	if (!p && create) {
+		p = kmem_cache_alloc(peer_cachep, GFP_ATOMIC);
 	if (p) {
 		p->daddr = *daddr;
 		refcount_set(&p->refcnt, 2);
@@ -452,12 +215,14 @@ struct inet_peer *inet_getpeer(struct inet_peer_base *base,
 		 * calculation of tokens is at its maximum.
 		 */
 		p->rate_last = jiffies - 60*HZ;
-		INIT_LIST_HEAD(&p->gc_list);
 
-		/* Link the node. */
-		link_to_pool(p, base);
+		rb_link_node(&p->rb_node, parent, pp);
+		rb_insert_color(&p->rb_node, &base->rb_root);
 		base->total++;
 	}
+	}
+	if (gc_cnt)
+		inet_peer_gc(base, gc_stack, gc_cnt);
 	write_sequnlock_bh(&base->lock);
 
 	return p;
@@ -467,8 +232,9 @@ EXPORT_SYMBOL_GPL(inet_getpeer);
 
 void inet_putpeer(struct inet_peer *p)
 {
 	p->dtime = (__u32)jiffies;
-	smp_mb__before_atomic();
-	refcount_dec(&p->refcnt);
+
+	if (refcount_dec_and_test(&p->refcnt))
+		call_rcu(&p->rcu, inetpeer_free_rcu);
 }
 EXPORT_SYMBOL_GPL(inet_putpeer);
@@ -513,30 +279,16 @@ bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
 }
 EXPORT_SYMBOL(inet_peer_xrlim_allow);
 
-static void inetpeer_inval_rcu(struct rcu_head *head)
-{
-	struct inet_peer *p = container_of(head, struct inet_peer, gc_rcu);
-
-	spin_lock_bh(&gc_lock);
-	list_add_tail(&p->gc_list, &gc_list);
-	spin_unlock_bh(&gc_lock);
-
-	schedule_delayed_work(&gc_work, gc_delay);
-}
-
 void inetpeer_invalidate_tree(struct inet_peer_base *base)
 {
-	struct inet_peer *root;
-
-	write_seqlock_bh(&base->lock);
+	struct inet_peer *p, *n;
 
-	root = rcu_deref_locked(base->root, base);
-	if (root != peer_avl_empty) {
-		base->root = peer_avl_empty_rcu;
-		base->total = 0;
-		call_rcu(&root->gc_rcu, inetpeer_inval_rcu);
+	rbtree_postorder_for_each_entry_safe(p, n, &base->rb_root, rb_node) {
+		inet_putpeer(p);
+		cond_resched();
 	}
 
-	write_sequnlock_bh(&base->lock);
+	base->rb_root = RB_ROOT;
+	base->total = 0;
 }
 EXPORT_SYMBOL(inetpeer_invalidate_tree);
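
A detail that makes the new synchronous GC safe: inet_peer_gc() frees a peer only when refcount_dec_if_one() moves its count from exactly 1 to 0, meaning the tree itself holds the last reference; a peer any other CPU still references is skipped rather than torn down. A user-space sketch of those semantics (a stand-in for the kernel's refcount_t, which additionally saturates and warns):

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Drop the count 1 -> 0 only if no one else holds a reference;
 * a count above 1 is left exactly as it was. */
static bool refcount_dec_if_one_sketch(atomic_int *r)
{
	int expected = 1;

	return atomic_compare_exchange_strong(r, &expected, 0);
}

int main(void)
{
	atomic_int busy = 2;	/* tree reference + one active user */
	atomic_int idle = 1;	/* tree reference only */

	printf("busy peer freed? %d\n", refcount_dec_if_one_sketch(&busy));	/* 0 */
	printf("idle peer freed? %d\n", refcount_dec_if_one_sketch(&idle));	/* 1 */
	return 0;
}
```

Combined with call_rcu() deferring the actual kmem_cache_free(), a lockless reader that raced with the erase can never chase a pointer into freed memory.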