Commit 60279944 authored by Linus Torvalds

Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

parents d0fa7e9f 4a59a810
@@ -71,7 +71,7 @@ struct ipfrag_skb_cb
 /* Describe an entry in the "incomplete datagrams" queue. */
 struct ipq {
-	struct ipq	*next;		/* linked list pointers */
+	struct hlist_node list;
 	struct list_head lru_list;	/* lru list member */
 	u32		user;
 	u32		saddr;
@@ -89,7 +89,6 @@ struct ipq {
 	spinlock_t	lock;
 	atomic_t	refcnt;
 	struct timer_list timer;	/* when will this queue expire? */
-	struct ipq	**pprev;
 	int		iif;
 	struct timeval	stamp;
 };
@@ -99,7 +98,7 @@ struct ipq {
 #define IPQ_HASHSZ	64

 /* Per-bucket lock is easy to add now. */
-static struct ipq *ipq_hash[IPQ_HASHSZ];
+static struct hlist_head ipq_hash[IPQ_HASHSZ];
 static DEFINE_RWLOCK(ipfrag_lock);
 static u32 ipfrag_hash_rnd;
 static LIST_HEAD(ipq_lru_list);
@@ -107,9 +106,7 @@ int ip_frag_nqueues = 0;

 static __inline__ void __ipq_unlink(struct ipq *qp)
 {
-	if(qp->next)
-		qp->next->pprev = qp->pprev;
-	*qp->pprev = qp->next;
+	hlist_del(&qp->list);
 	list_del(&qp->lru_list);
 	ip_frag_nqueues--;
 }
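The three open-coded lines collapse into a single hlist_del() because an hlist_node stores pprev, the address of whatever pointer currently points at the node, so unlinking needs neither the bucket head nor a first-element special case. A minimal user-space sketch of the same idea (illustrative only; hypothetical names, not the kernel's actual <linux/list.h>):

/* Minimal hlist-style list: pprev points at the pointer pointing at us,
 * so deletion works the same for the head and for middle elements. */
#include <stddef.h>
#include <stdio.h>

struct hnode {
	struct hnode *next;
	struct hnode **pprev;	/* address of the pointer pointing at us */
};

struct hhead {
	struct hnode *first;
};

static void hadd_head(struct hnode *n, struct hhead *h)
{
	n->next = h->first;
	if (h->first)
		h->first->pprev = &n->next;
	h->first = n;
	n->pprev = &h->first;
}

static void hdel(struct hnode *n)
{
	*n->pprev = n->next;	/* no head pointer, no special case */
	if (n->next)
		n->next->pprev = n->pprev;
}

int main(void)
{
	struct hhead bucket = { NULL };
	struct hnode a, b;

	hadd_head(&a, &bucket);
	hadd_head(&b, &bucket);	/* chain is now: b -> a */
	hdel(&b);		/* unlinks the head without touching `bucket` directly */
	printf("first == &a? %d\n", bucket.first == &a);
	return 0;
}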
@@ -139,27 +136,18 @@ static void ipfrag_secret_rebuild(unsigned long dummy)
 	get_random_bytes(&ipfrag_hash_rnd, sizeof(u32));
 	for (i = 0; i < IPQ_HASHSZ; i++) {
 		struct ipq *q;
+		struct hlist_node *p, *n;

-		q = ipq_hash[i];
-		while (q) {
-			struct ipq *next = q->next;
+		hlist_for_each_entry_safe(q, p, n, &ipq_hash[i], list) {
 			unsigned int hval = ipqhashfn(q->id, q->saddr,
 						      q->daddr, q->protocol);

 			if (hval != i) {
-				/* Unlink. */
-				if (q->next)
-					q->next->pprev = q->pprev;
-				*q->pprev = q->next;
+				hlist_del(&q->list);

 				/* Relink to new hash chain. */
-				if ((q->next = ipq_hash[hval]) != NULL)
-					q->next->pprev = &q->next;
-				ipq_hash[hval] = q;
-				q->pprev = &ipq_hash[hval];
+				hlist_add_head(&q->list, &ipq_hash[hval]);
 			}
-
-			q = next;
 		}
 	}
 	write_unlock(&ipfrag_lock);
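The `_safe` iterator matters here because the loop body can unlink the entry it is standing on: hlist_for_each_entry_safe() caches the next node in the extra cursor n before the body runs, which is exactly what the removed `struct ipq *next = q->next; ... q = next;` did by hand. A stand-alone sketch of that caching pattern with plain singly-linked pointers (hypothetical types and names, illustration only):

/* Move odd-keyed nodes from one chain to another; `next` must be
 * cached before the body relinks q, or the walk would follow the
 * pointer into the destination chain. */
#include <stdio.h>

struct node {
	struct node *next;
	int key;
};

static void move_odd_keys(struct node **from, struct node **to)
{
	struct node *q = *from, *prev = NULL;

	while (q) {
		struct node *next = q->next;	/* cache before relinking q */

		if (q->key & 1) {
			if (prev)
				prev->next = next;	/* unlink from old chain */
			else
				*from = next;
			q->next = *to;			/* push onto new chain */
			*to = q;
		} else {
			prev = q;
		}
		q = next;	/* safe even though q->next changed */
	}
}

int main(void)
{
	struct node c = { NULL, 3 }, b = { &c, 2 }, a = { &b, 1 };
	struct node *from = &a, *to = NULL;

	move_odd_keys(&from, &to);
	printf("from head key=%d, to head key=%d\n", from->key, to->key);
	return 0;
}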
@@ -310,14 +298,16 @@ static void ip_expire(unsigned long arg)
 static struct ipq *ip_frag_intern(unsigned int hash, struct ipq *qp_in)
 {
 	struct ipq *qp;
+#ifdef CONFIG_SMP
+	struct hlist_node *n;
+#endif

 	write_lock(&ipfrag_lock);
 #ifdef CONFIG_SMP
 	/* With SMP race we have to recheck hash table, because
 	 * such entry could be created on other cpu, while we
 	 * promoted read lock to write lock.
 	 */
-	for(qp = ipq_hash[hash]; qp; qp = qp->next) {
+	hlist_for_each_entry(qp, n, &ipq_hash[hash], list) {
 		if(qp->id == qp_in->id		&&
 		   qp->saddr == qp_in->saddr	&&
 		   qp->daddr == qp_in->daddr	&&
@@ -337,10 +327,7 @@ static struct ipq *ip_frag_intern(unsigned int hash, struct ipq *qp_in)
 	atomic_inc(&qp->refcnt);

 	atomic_inc(&qp->refcnt);
-	if((qp->next = ipq_hash[hash]) != NULL)
-		qp->next->pprev = &qp->next;
-	ipq_hash[hash] = qp;
-	qp->pprev = &ipq_hash[hash];
+	hlist_add_head(&qp->list, &ipq_hash[hash]);
 	INIT_LIST_HEAD(&qp->lru_list);
 	list_add_tail(&qp->lru_list, &ipq_lru_list);
 	ip_frag_nqueues++;
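The CONFIG_SMP block above is the classic lookup-then-insert race the in-code comment describes: the caller searched under the read lock, dropped it to allocate, and only then took the write lock, so another CPU may have interned an equal queue in that window. A hedged pthread sketch of the same recheck pattern (hypothetical names, user-space illustration, not kernel code):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
	struct entry *next;
	unsigned int key;
};

static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct entry *table[64];

/* Caller must hold table_lock (read or write). */
static struct entry *find_locked(unsigned int hash, unsigned int key)
{
	struct entry *e;

	for (e = table[hash]; e; e = e->next)
		if (e->key == key)
			return e;
	return NULL;
}

/* Insert new_e unless an equal entry appeared while the caller was
 * unlocked between its read-side lookup and this call; returns the
 * entry that ends up in the table. */
static struct entry *intern(unsigned int hash, struct entry *new_e)
{
	struct entry *e;

	pthread_rwlock_wrlock(&table_lock);
	e = find_locked(hash, new_e->key);	/* the recheck, under the write lock */
	if (e) {
		pthread_rwlock_unlock(&table_lock);
		free(new_e);			/* lost the race; reuse the winner */
		return e;
	}
	new_e->next = table[hash];
	table[hash] = new_e;
	pthread_rwlock_unlock(&table_lock);
	return new_e;
}

int main(void)
{
	struct entry *a = malloc(sizeof(*a));
	struct entry *b = malloc(sizeof(*b));

	a->key = b->key = 42;
	printf("first intern kept? %d\n", intern(42 % 64, a) == a);	/* 1 */
	printf("second intern kept? %d\n", intern(42 % 64, b) == b);	/* 0: b is freed */
	return 0;
}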
@@ -392,9 +379,10 @@ static inline struct ipq *ip_find(struct iphdr *iph, u32 user)
 	__u8 protocol = iph->protocol;
 	unsigned int hash = ipqhashfn(id, saddr, daddr, protocol);
 	struct ipq *qp;
+	struct hlist_node *n;

 	read_lock(&ipfrag_lock);
-	for(qp = ipq_hash[hash]; qp; qp = qp->next) {
+	hlist_for_each_entry(qp, n, &ipq_hash[hash], list) {
 		if(qp->id == id		&&
 		   qp->saddr == saddr	&&
 		   qp->daddr == daddr	&&
...
@@ -74,7 +74,7 @@ struct ip6frag_skb_cb

 struct frag_queue
 {
-	struct frag_queue	*next;
+	struct hlist_node	list;
 	struct list_head lru_list;	/* lru list member */

 	__u32			id;	/* fragment id */
@@ -95,14 +95,13 @@ struct frag_queue
 #define FIRST_IN		2
 #define LAST_IN			1
 	__u16			nhoffset;
-	struct frag_queue	**pprev;
 };

 /* Hash table. */

 #define IP6Q_HASHSZ	64

-static struct frag_queue *ip6_frag_hash[IP6Q_HASHSZ];
+static struct hlist_head ip6_frag_hash[IP6Q_HASHSZ];
 static DEFINE_RWLOCK(ip6_frag_lock);
 static u32 ip6_frag_hash_rnd;
 static LIST_HEAD(ip6_frag_lru_list);
@@ -110,9 +109,7 @@ int ip6_frag_nqueues = 0;

 static __inline__ void __fq_unlink(struct frag_queue *fq)
 {
-	if(fq->next)
-		fq->next->pprev = fq->pprev;
-	*fq->pprev = fq->next;
+	hlist_del(&fq->list);
 	list_del(&fq->lru_list);
 	ip6_frag_nqueues--;
 }
@@ -163,28 +160,21 @@ static void ip6_frag_secret_rebuild(unsigned long dummy)
 	get_random_bytes(&ip6_frag_hash_rnd, sizeof(u32));
 	for (i = 0; i < IP6Q_HASHSZ; i++) {
 		struct frag_queue *q;
+		struct hlist_node *p, *n;

-		q = ip6_frag_hash[i];
-		while (q) {
-			struct frag_queue *next = q->next;
+		hlist_for_each_entry_safe(q, p, n, &ip6_frag_hash[i], list) {
 			unsigned int hval = ip6qhashfn(q->id,
 						       &q->saddr,
 						       &q->daddr);

 			if (hval != i) {
-				/* Unlink. */
-				if (q->next)
-					q->next->pprev = q->pprev;
-				*q->pprev = q->next;
+				hlist_del(&q->list);

 				/* Relink to new hash chain. */
-				if ((q->next = ip6_frag_hash[hval]) != NULL)
-					q->next->pprev = &q->next;
-				ip6_frag_hash[hval] = q;
-				q->pprev = &ip6_frag_hash[hval];
+				hlist_add_head(&q->list,
+					       &ip6_frag_hash[hval]);
 			}
-
-			q = next;
 		}
 	}
 	write_unlock(&ip6_frag_lock);
@@ -337,10 +327,13 @@ static struct frag_queue *ip6_frag_intern(unsigned int hash,
 					  struct frag_queue *fq_in)
 {
 	struct frag_queue *fq;
+#ifdef CONFIG_SMP
+	struct hlist_node *n;
+#endif

 	write_lock(&ip6_frag_lock);
 #ifdef CONFIG_SMP
-	for (fq = ip6_frag_hash[hash]; fq; fq = fq->next) {
+	hlist_for_each_entry(fq, n, &ip6_frag_hash[hash], list) {
 		if (fq->id == fq_in->id &&
 		    ipv6_addr_equal(&fq_in->saddr, &fq->saddr) &&
 		    ipv6_addr_equal(&fq_in->daddr, &fq->daddr)) {
@@ -358,10 +351,7 @@ static struct frag_queue *ip6_frag_intern(unsigned int hash,
 	atomic_inc(&fq->refcnt);

 	atomic_inc(&fq->refcnt);
-	if((fq->next = ip6_frag_hash[hash]) != NULL)
-		fq->next->pprev = &fq->next;
-	ip6_frag_hash[hash] = fq;
-	fq->pprev = &ip6_frag_hash[hash];
+	hlist_add_head(&fq->list, &ip6_frag_hash[hash]);
 	INIT_LIST_HEAD(&fq->lru_list);
 	list_add_tail(&fq->lru_list, &ip6_frag_lru_list);
 	ip6_frag_nqueues++;
@@ -401,10 +391,11 @@ static __inline__ struct frag_queue *
 fq_find(u32 id, struct in6_addr *src, struct in6_addr *dst)
 {
 	struct frag_queue *fq;
+	struct hlist_node *n;
 	unsigned int hash = ip6qhashfn(id, src, dst);

 	read_lock(&ip6_frag_lock);
-	for(fq = ip6_frag_hash[hash]; fq; fq = fq->next) {
+	hlist_for_each_entry(fq, n, &ip6_frag_hash[hash], list) {
 		if (fq->id == id &&
 		    ipv6_addr_equal(src, &fq->saddr) &&
 		    ipv6_addr_equal(dst, &fq->daddr)) {
...
@@ -387,7 +387,7 @@ nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
 static void nf_ct_unlink_expect(struct nf_conntrack_expect *exp)
 {
 	ASSERT_WRITE_LOCK(&nf_conntrack_lock);
-	NF_CT_ASSERT(!timer_pending(&exp_timeout));
+	NF_CT_ASSERT(!timer_pending(&exp->timeout));
 	list_del(&exp->list);
 	NF_CT_STAT_INC(expect_delete);
 	exp->master->expecting--;
...