Commit 4663afe2 authored by Eric Dumazet, committed by David S. Miller

[NET]: reduce sizeof(struct inet_peer), cleanup, change in peer_check_expire()

1) shrink struct inet_peer on 64-bit platforms.
parent ea614d7f
include/net/inetpeer.h
@@ -19,7 +19,7 @@ struct inet_peer
 {
 	struct inet_peer	*avl_left, *avl_right;
 	struct inet_peer	*unused_next, **unused_prevp;
-	unsigned long		dtime;		/* the time of last use of not
+	__u32			dtime;		/* the time of last use of not
 						 * referenced entries */
 	atomic_t		refcnt;
 	__be32			v4daddr;	/* peer's address */
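
Why this shrinks the struct: on LP64 platforms unsigned long is 8 bytes, so the old dtime occupied a full 8-byte slot, while a __u32 dtime can share one with the 4-byte atomic_t refcnt. A standalone userspace sketch of the effect; the field list only approximates struct inet_peer of that era and is illustrative, not the kernel's exact definition:

/* Userspace sketch: layout cost of unsigned long vs __u32 dtime on
 * LP64.  Field list approximates struct inet_peer of that era;
 * atomic_t is modelled as int. */
#include <stdio.h>
#include <stdint.h>

struct peer_old {
	void *avl_left, *avl_right;
	void *unused_next, **unused_prevp;
	unsigned long dtime;		/* 8 bytes on LP64 */
	int refcnt;
	uint32_t v4daddr;
	uint16_t avl_height, ip_id_count;
	int rid;
	uint32_t tcp_ts;
	unsigned long tcp_ts_stamp;
};

struct peer_new {			/* identical except dtime is 32 bits */
	void *avl_left, *avl_right;
	void *unused_next, **unused_prevp;
	uint32_t dtime;			/* pairs with refcnt: no padding hole */
	int refcnt;
	uint32_t v4daddr;
	uint16_t avl_height, ip_id_count;
	int rid;
	uint32_t tcp_ts;
	unsigned long tcp_ts_stamp;
};

int main(void)
{
	/* Typically prints 72 and 64 on x86-64. */
	printf("old %zu, new %zu\n",
	       sizeof(struct peer_old), sizeof(struct peer_new));
	return 0;
}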
@@ -35,21 +35,8 @@ void inet_initpeers(void) __init;
 /* can be called with or without local BH being disabled */
 struct inet_peer	*inet_getpeer(__be32 daddr, int create);
 
-extern spinlock_t inet_peer_unused_lock;
-extern struct inet_peer **inet_peer_unused_tailp;
 /* can be called from BH context or outside */
-static inline void	inet_putpeer(struct inet_peer *p)
-{
-	spin_lock_bh(&inet_peer_unused_lock);
-	if (atomic_dec_and_test(&p->refcnt)) {
-		p->unused_prevp = inet_peer_unused_tailp;
-		p->unused_next = NULL;
-		*inet_peer_unused_tailp = p;
-		inet_peer_unused_tailp = &p->unused_next;
-		p->dtime = jiffies;
-	}
-	spin_unlock_bh(&inet_peer_unused_lock);
-}
+extern void inet_putpeer(struct inet_peer *p);
 
 extern spinlock_t inet_peer_idlock;
 /* can be called with or without local BH being disabled */
net/ipv4/inetpeer.c
@@ -94,10 +94,8 @@ int inet_peer_minttl = 120 * HZ;	/* TTL under high load: 120 sec */
 int inet_peer_maxttl = 10 * 60 * HZ;	/* usual time to live: 10 min */
 
 static struct inet_peer *inet_peer_unused_head;
-/* Exported for inet_putpeer inline function.  */
-struct inet_peer **inet_peer_unused_tailp = &inet_peer_unused_head;
-DEFINE_SPINLOCK(inet_peer_unused_lock);
-
-#define PEER_MAX_CLEANUP_WORK 30
+static struct inet_peer **inet_peer_unused_tailp = &inet_peer_unused_head;
+static DEFINE_SPINLOCK(inet_peer_unused_lock);
 
 static void peer_check_expire(unsigned long dummy);
 static DEFINE_TIMER(peer_periodic_timer, peer_check_expire, 0, 0);
@@ -340,7 +338,8 @@ static int cleanup_once(unsigned long ttl)
 	spin_lock_bh(&inet_peer_unused_lock);
 	p = inet_peer_unused_head;
 	if (p != NULL) {
-		if (time_after(p->dtime + ttl, jiffies)) {
+		__u32 delta = (__u32)jiffies - p->dtime;
+		if (delta < ttl) {
 			/* Do not prune fresh entries. */
 			spin_unlock_bh(&inet_peer_unused_lock);
 			return -1;
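
The open-coded delta replaces time_after() because dtime is now a truncated 32-bit snapshot of jiffies: on 64-bit, p->dtime + ttl would mix a 32-bit stamp with the full-width counter and misfire once jiffies exceeds 2^32. Unsigned 32-bit subtraction stays correct across wraparound. A standalone sketch with illustrative values:

/* Sketch: unsigned 32-bit subtraction yields the correct age even if
 * the 32-bit view of jiffies wrapped between the two samples. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t dtime = 0xfffffff0u;	/* stamped just before the wrap */
	uint32_t now   = 0x00000010u;	/* 32 ticks later, counter wrapped */
	uint32_t delta = now - dtime;	/* well-defined modulo 2^32 */

	printf("age = %u ticks\n", delta);		/* prints 32 */
	printf("fresh (ttl=100)? %s\n", delta < 100 ? "yes" : "no");
	return 0;
}

In the patch the delta is compared against a ttl of at most inet_peer_maxttl (10 minutes in jiffies), far below the 2^32 range, so the comparison is unambiguous.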
@@ -432,7 +431,7 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
 /* Called with local BH disabled. */
 static void peer_check_expire(unsigned long dummy)
 {
-	int i;
+	unsigned long now = jiffies;
 	int ttl;
 
 	if (peer_total >= inet_peer_threshold)
@@ -441,7 +440,10 @@ static void peer_check_expire(unsigned long dummy)
 		ttl = inet_peer_maxttl
 				- (inet_peer_maxttl - inet_peer_minttl) / HZ *
 					peer_total / inet_peer_threshold * HZ;
-	for (i = 0; i < PEER_MAX_CLEANUP_WORK && !cleanup_once(ttl); i++);
+	while (!cleanup_once(ttl)) {
+		if (jiffies != now)
+			break;
+	}
 
 	/* Trigger the timer after inet_peer_gc_mintime .. inet_peer_gc_maxtime
 	 * interval depending on the total number of entries (more entries,
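
The fixed PEER_MAX_CLEANUP_WORK budget of 30 entries per run becomes a time budget: keep calling cleanup_once() until it reports nothing left to prune or the jiffies counter ticks over, so each timer run does at most one tick's worth of work no matter how cheap or expensive an individual cleanup is. The same pattern in standalone form, with clock() standing in for jiffies and a hypothetical do_cleanup_once():

/* Sketch of a time-bounded work loop: stop when the coarse clock
 * advances rather than after a fixed iteration count.  clock() is a
 * stand-in for jiffies; do_cleanup_once() is hypothetical. */
#include <stdio.h>
#include <time.h>

static int do_cleanup_once(void)
{
	static long remaining = 10000000L;
	return --remaining <= 0;	/* nonzero when nothing is left */
}

int main(void)
{
	clock_t now = clock();
	long done = 0;

	while (!do_cleanup_once()) {
		done++;
		if (clock() != now)	/* clock ticked: budget spent */
			break;
	}
	printf("processed %ld items in one tick\n", done);
	return 0;
}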
@@ -455,3 +457,16 @@ static void peer_check_expire(unsigned long dummy)
 			peer_total / inet_peer_threshold * HZ;
 	add_timer(&peer_periodic_timer);
 }
+
+void inet_putpeer(struct inet_peer *p)
+{
+	spin_lock_bh(&inet_peer_unused_lock);
+	if (atomic_dec_and_test(&p->refcnt)) {
+		p->unused_prevp = inet_peer_unused_tailp;
+		p->unused_next = NULL;
+		*inet_peer_unused_tailp = p;
+		inet_peer_unused_tailp = &p->unused_next;
+		p->dtime = (__u32)jiffies;
+	}
+	spin_unlock_bh(&inet_peer_unused_lock);
+}
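
The unused list that inet_putpeer() appends to is a singly-linked queue addressed through a pointer-to-pointer tail, so the empty and non-empty cases share one unconditional code path. The same idiom in isolation, with a hypothetical node type standing in for inet_peer:

/* Sketch of the tail pointer-to-pointer idiom behind
 * inet_peer_unused_head/inet_peer_unused_tailp.  struct node is a
 * hypothetical stand-in for struct inet_peer. */
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int id;
};

static struct node *head;		/* like inet_peer_unused_head */
static struct node **tailp = &head;	/* like inet_peer_unused_tailp */

static void append(struct node *n)
{
	n->next = NULL;
	*tailp = n;	/* writes head on an empty list, last->next otherwise */
	tailp = &n->next;
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));
		if (n == NULL)
			return 1;
		n->id = i;
		append(n);
	}
	for (struct node *n = head; n != NULL; n = n->next)
		printf("node %d\n", n->id);
	return 0;
}

The kernel version additionally records the previous tail in p->unused_prevp, which is what lets an entry be unlinked from the middle of the list in O(1) later.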