Commit e3ede754 authored by David S. Miller

Merge nuts.davemloft.net:/disk1/BK/network-2.6

into nuts.davemloft.net:/disk1/BK/net-2.6
parents 0eaf89f2 b938377b
@@ -205,6 +205,13 @@ typedef struct tcp_pcount {
__u32 val;
} tcp_pcount_t;
enum tcp_congestion_algo {
TCP_RENO=0,
TCP_VEGAS,
TCP_WESTWOOD,
TCP_BIC,
};
struct tcp_opt {
int tcp_header_len; /* Bytes of tcp header to send */
@@ -265,7 +272,7 @@ struct tcp_opt {
__u8 frto_counter; /* Number of new acks after RTO */
__u32 frto_highmark; /* snd_nxt when RTO occurred */
__u8 unused_pad;
__u8 adv_cong; /* Using Vegas, Westwood, or BIC */
__u8 defer_accept; /* User waits for some data after accept() */
/* one byte hole, try to pack */
@@ -412,7 +419,6 @@ struct tcp_opt {
__u32 beg_snd_nxt; /* right edge during last RTT */
__u32 beg_snd_una; /* left edge during last RTT */
__u32 beg_snd_cwnd; /* saves the size of the cwnd */
__u8 do_vegas; /* do vegas for this connection */
__u8 doing_vegas_now;/* if true, do vegas for this RTT */
__u16 cntRTT; /* # of RTTs measured within last RTT */
__u32 minRTT; /* min of RTTs measured within last RTT (in usec) */
......
@@ -74,7 +74,7 @@ extern int ipv6_rcv_saddr_equal(const struct sock *sk,
const struct sock *sk2);
extern void addrconf_join_solict(struct net_device *dev,
struct in6_addr *addr);
extern void addrconf_leave_solict(struct net_device *dev,
extern void addrconf_leave_solict(struct inet6_dev *idev,
struct in6_addr *addr);
/*
@@ -89,6 +89,7 @@ extern int inet6_mc_check(struct sock *sk, struct in6_addr *mc_addr,
struct in6_addr *src_addr);
extern int ipv6_dev_mc_inc(struct net_device *dev, struct in6_addr *addr);
extern int __ipv6_dev_mc_dec(struct inet6_dev *idev, struct in6_addr *addr);
extern int ipv6_dev_mc_dec(struct net_device *dev, struct in6_addr *addr);
extern void ipv6_mc_up(struct inet6_dev *idev);
extern void ipv6_mc_down(struct inet6_dev *idev);
@@ -111,6 +112,7 @@ extern void ipv6_sock_ac_close(struct sock *sk);
extern int inet6_ac_check(struct sock *sk, struct in6_addr *addr, int ifindex);
extern int ipv6_dev_ac_inc(struct net_device *dev, struct in6_addr *addr);
extern int __ipv6_dev_ac_dec(struct inet6_dev *idev, struct in6_addr *addr);
extern int ipv6_dev_ac_dec(struct net_device *dev, struct in6_addr *addr);
extern int ipv6_chk_acast_addr(struct net_device *dev, struct in6_addr *addr);
......
@@ -18,7 +18,6 @@ struct dn_neigh {
extern void dn_neigh_init(void);
extern void dn_neigh_cleanup(void);
extern struct neighbour *dn_neigh_lookup(struct neigh_table *tbl, const void *ptr);
extern int dn_neigh_router_hello(struct sk_buff *skb);
extern int dn_neigh_endnode_hello(struct sk_buff *skb);
extern void dn_neigh_pointopoint_hello(struct sk_buff *skb);
......
@@ -47,6 +47,7 @@
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/sysctl.h>
@@ -139,9 +140,6 @@ struct pneigh_entry
u8 key[0];
};
#define NEIGH_HASHMASK 0x1F
#define PNEIGH_HASHMASK 0xF
/*
* neighbour table manipulation
*/
@@ -175,8 +173,11 @@ struct neigh_table
struct neigh_parms *parms_list;
kmem_cache_t *kmem_cachep;
struct neigh_statistics stats;
struct neighbour *hash_buckets[NEIGH_HASHMASK+1];
struct pneigh_entry *phash_buckets[PNEIGH_HASHMASK+1];
struct neighbour **hash_buckets;
unsigned int hash_mask;
__u32 hash_rnd;
unsigned int hash_chain_gc;
struct pneigh_entry **phash_buckets;
};
/* flags for neigh_update() */
@@ -191,6 +192,8 @@ extern int neigh_table_clear(struct neigh_table *tbl);
extern struct neighbour * neigh_lookup(struct neigh_table *tbl,
const void *pkey,
struct net_device *dev);
extern struct neighbour * neigh_lookup_nodev(struct neigh_table *tbl,
const void *pkey);
extern struct neighbour * neigh_create(struct neigh_table *tbl,
const void *pkey,
struct net_device *dev);
@@ -224,6 +227,24 @@ extern int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
extern int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
extern void neigh_app_ns(struct neighbour *n);
extern void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie);
extern void __neigh_for_each_release(struct neigh_table *tbl, int (*cb)(struct neighbour *));
extern void pneigh_for_each(struct neigh_table *tbl, void (*cb)(struct pneigh_entry *));
struct neigh_seq_state {
struct neigh_table *tbl;
void *(*neigh_sub_iter)(struct neigh_seq_state *state,
struct neighbour *n, loff_t *pos);
unsigned int bucket;
unsigned int flags;
#define NEIGH_SEQ_NEIGH_ONLY 0x00000001
#define NEIGH_SEQ_IS_PNEIGH 0x00000002
#define NEIGH_SEQ_SKIP_NOARP 0x00000004
};
extern void *neigh_seq_start(struct seq_file *, loff_t *, struct neigh_table *, unsigned int);
extern void *neigh_seq_next(struct seq_file *, void *, loff_t *);
extern void neigh_seq_stop(struct seq_file *, void *);
extern int neigh_sysctl_register(struct net_device *dev,
struct neigh_parms *p,
int p_id, int pdev_id,
......
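The neigh_seq_state declarations above give every neighbour-table protocol a shared /proc iterator. As a rough illustration only (not part of this commit), a protocol with its own neigh_table could wire the generic iterators into its seq_file roughly as below; my_tbl, my_seq_show and my_seq_ops are placeholder names, and the pattern simply mirrors what net/ipv4/arp.c does later in this diff:

/* Illustrative sketch only -- my_tbl and my_seq_show are assumed to exist. */
extern struct neigh_table my_tbl;
static int my_seq_show(struct seq_file *seq, void *v);

static void *my_seq_start(struct seq_file *seq, loff_t *pos)
{
	/* Takes tbl->lock for reading; the flag skips NUD_NOARP entries. */
	return neigh_seq_start(seq, pos, &my_tbl, NEIGH_SEQ_SKIP_NOARP);
}

static struct seq_operations my_seq_ops = {
	.start	= my_seq_start,
	.next	= neigh_seq_next,	/* shared bucket/pneigh walker */
	.stop	= neigh_seq_stop,	/* drops tbl->lock */
	.show	= my_seq_show,
};

static int my_seq_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	int rc = -ENOMEM;
	/* seq->private must begin with a zeroed struct neigh_seq_state. */
	struct neigh_seq_state *s = kmalloc(sizeof(*s), GFP_KERNEL);

	if (!s)
		goto out;
	memset(s, 0, sizeof(*s));
	rc = seq_open(file, &my_seq_ops);
	if (rc)
		goto out_kfree;
	seq = file->private_data;
	seq->private = s;
out:
	return rc;
out_kfree:
	kfree(s);
	goto out;
}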
@@ -1271,6 +1271,13 @@ static __inline__ unsigned int tcp_packets_in_flight(struct tcp_opt *tp)
tcp_get_pcount(&tp->retrans_out));
}
/*
* Which congestion algorithim is in use on the connection.
*/
#define tcp_is_vegas(__tp) ((__tp)->adv_cong == TCP_VEGAS)
#define tcp_is_westwood(__tp) ((__tp)->adv_cong == TCP_WESTWOOD)
#define tcp_is_bic(__tp) ((__tp)->adv_cong == TCP_BIC)
/* Recalculate snd_ssthresh, we want to set it to:
*
* Reno:
@@ -1283,7 +1290,7 @@ static __inline__ unsigned int tcp_packets_in_flight(struct tcp_opt *tp)
*/
static inline __u32 tcp_recalc_ssthresh(struct tcp_opt *tp)
{
if (sysctl_tcp_bic) {
if (tcp_is_bic(tp)) {
if (sysctl_tcp_bic_fast_convergence &&
tp->snd_cwnd < tp->bictcp.last_max_cwnd)
tp->bictcp.last_max_cwnd
@@ -1303,11 +1310,6 @@ static inline __u32 tcp_recalc_ssthresh(struct tcp_opt *tp)
/* Stop taking Vegas samples for now. */
#define tcp_vegas_disable(__tp) ((__tp)->vegas.doing_vegas_now = 0)
/* Is this TCP connection using Vegas (regardless of whether it is taking
* Vegas measurements at the current time)?
*/
#define tcp_is_vegas(__tp) ((__tp)->vegas.do_vegas)
static inline void tcp_vegas_enable(struct tcp_opt *tp)
{
/* There are several situations when we must "re-start" Vegas:
@@ -1340,7 +1342,7 @@ static inline void tcp_vegas_enable(struct tcp_opt *tp)
/* Should we be taking Vegas samples right now? */
#define tcp_vegas_enabled(__tp) ((__tp)->vegas.doing_vegas_now)
extern void tcp_vegas_init(struct tcp_opt *tp);
extern void tcp_ca_init(struct tcp_opt *tp);
static inline void tcp_set_ca_state(struct tcp_opt *tp, u8 ca_state)
{
@@ -2024,7 +2026,7 @@ extern void tcp_proc_unregister(struct tcp_seq_afinfo *afinfo);
static inline void tcp_westwood_update_rtt(struct tcp_opt *tp, __u32 rtt_seq)
{
if (sysctl_tcp_westwood)
if (tcp_is_westwood(tp))
tp->westwood.rtt = rtt_seq;
}
@@ -2033,13 +2035,13 @@ void __tcp_westwood_slow_bw(struct sock *, struct sk_buff *);
static inline void tcp_westwood_fast_bw(struct sock *sk, struct sk_buff *skb)
{
if (sysctl_tcp_westwood)
if (tcp_is_westwood(tcp_sk(sk)))
__tcp_westwood_fast_bw(sk, skb);
}
static inline void tcp_westwood_slow_bw(struct sock *sk, struct sk_buff *skb)
{
if (sysctl_tcp_westwood)
if (tcp_is_westwood(tcp_sk(sk)))
__tcp_westwood_slow_bw(sk, skb);
}
@@ -2052,14 +2054,14 @@ static inline __u32 __tcp_westwood_bw_rttmin(const struct tcp_opt *tp)
static inline __u32 tcp_westwood_bw_rttmin(const struct tcp_opt *tp)
{
return sysctl_tcp_westwood ? __tcp_westwood_bw_rttmin(tp) : 0;
return tcp_is_westwood(tp) ? __tcp_westwood_bw_rttmin(tp) : 0;
}
static inline int tcp_westwood_ssthresh(struct tcp_opt *tp)
{
__u32 ssthresh = 0;
if (sysctl_tcp_westwood) {
if (tcp_is_westwood(tp)) {
ssthresh = __tcp_westwood_bw_rttmin(tp);
if (ssthresh)
tp->snd_ssthresh = ssthresh;
@@ -2072,7 +2074,7 @@ static inline int tcp_westwood_cwnd(struct tcp_opt *tp)
{
__u32 cwnd = 0;
if (sysctl_tcp_westwood) {
if (tcp_is_westwood(tp)) {
cwnd = __tcp_westwood_bw_rttmin(tp);
if (cwnd)
tp->snd_cwnd = cwnd;
......
@@ -27,6 +27,7 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/rcupdate.h>
#include <linux/jhash.h>
#include <net/route.h> /* for struct rtable and routing */
#include <net/icmp.h> /* icmp_send */
#include <asm/param.h> /* for HZ */
@@ -123,64 +124,49 @@ static void unlink_clip_vcc(struct clip_vcc *clip_vcc)
spin_unlock_bh(&entry->neigh->dev->xmit_lock);
}
static void idle_timer_check(unsigned long dummy)
{
int i;
/*DPRINTK("idle_timer_check\n");*/
write_lock(&clip_tbl.lock);
for (i = 0; i <= NEIGH_HASHMASK; i++) {
struct neighbour **np;
for (np = &clip_tbl.hash_buckets[i]; *np;) {
struct neighbour *n = *np;
struct atmarp_entry *entry = NEIGH2ENTRY(n);
struct clip_vcc *clip_vcc;
write_lock(&n->lock);
for (clip_vcc = entry->vccs; clip_vcc;
clip_vcc = clip_vcc->next)
if (clip_vcc->idle_timeout &&
time_after(jiffies, clip_vcc->last_use+
clip_vcc->idle_timeout)) {
DPRINTK("releasing vcc %p->%p of "
"entry %p\n",clip_vcc,clip_vcc->vcc,
entry);
vcc_release_async(clip_vcc->vcc,
-ETIMEDOUT);
}
if (entry->vccs ||
time_before(jiffies, entry->expires)) {
np = &n->next;
write_unlock(&n->lock);
continue;
}
if (atomic_read(&n->refcnt) > 1) {
struct sk_buff *skb;
DPRINTK("destruction postponed with ref %d\n",
atomic_read(&n->refcnt));
while ((skb = skb_dequeue(&n->arp_queue)) !=
NULL)
dev_kfree_skb(skb);
np = &n->next;
write_unlock(&n->lock);
continue;
}
*np = n->next;
DPRINTK("expired neigh %p\n",n);
n->dead = 1;
write_unlock(&n->lock);
neigh_release(n);
}
}
mod_timer(&idle_timer, jiffies+CLIP_CHECK_INTERVAL*HZ);
write_unlock(&clip_tbl.lock);
}
/* The neighbour entry n->lock is held. */
static int neigh_check_cb(struct neighbour *n)
{
struct atmarp_entry *entry = NEIGH2ENTRY(n);
struct clip_vcc *cv;
for (cv = entry->vccs; cv; cv = cv->next) {
unsigned long exp = cv->last_use + cv->idle_timeout;
if (cv->idle_timeout && time_after(jiffies, exp)) {
DPRINTK("releasing vcc %p->%p of entry %p\n",
cv, cv->vcc, entry);
vcc_release_async(cv->vcc, -ETIMEDOUT);
}
}
if (entry->vccs || time_before(jiffies, entry->expires))
return 0;
if (atomic_read(&n->refcnt) > 1) {
struct sk_buff *skb;
DPRINTK("destruction postponed with ref %d\n",
atomic_read(&n->refcnt));
while ((skb = skb_dequeue(&n->arp_queue)) != NULL)
dev_kfree_skb(skb);
return 0;
}
DPRINTK("expired neigh %p\n",n);
n->dead = 1;
return 1;
}
static void idle_timer_check(unsigned long dummy)
{
write_lock(&clip_tbl.lock);
__neigh_for_each_release(&clip_tbl, neigh_check_cb);
mod_timer(&idle_timer, jiffies+CLIP_CHECK_INTERVAL*HZ);
write_unlock(&clip_tbl.lock);
}
static int clip_arp_rcv(struct sk_buff *skb)
{
struct atm_vcc *vcc;
@@ -343,15 +329,7 @@ static int clip_constructor(struct neighbour *neigh)
static u32 clip_hash(const void *pkey, const struct net_device *dev)
{
u32 hash_val;
hash_val = *(u32*)pkey;
hash_val ^= (hash_val>>16);
hash_val ^= hash_val>>8;
hash_val ^= hash_val>>3;
hash_val = (hash_val^dev->ifindex)&NEIGH_HASHMASK;
return hash_val;
return jhash_2words(*(u32 *)pkey, dev->ifindex, clip_tbl.hash_rnd);
}
static struct neigh_table clip_tbl = {
@@ -833,120 +811,126 @@ static void svc_addr(struct seq_file *seq, struct sockaddr_atmsvc *addr)
}
}
/* This means the neighbour entry has no attached VCC objects. */
#define SEQ_NO_VCC_TOKEN ((void *) 2)
static void atmarp_info(struct seq_file *seq, struct net_device *dev,
struct atmarp_entry *entry, struct clip_vcc *clip_vcc)
{
unsigned long exp;
char buf[17];
int svc, off;
int svc, llc, off;
svc = !clip_vcc || clip_vcc->vcc->sk->sk_family == AF_ATMSVC;
seq_printf(seq, "%-6s%-4s%-4s%5ld ", dev->name, svc ? "SVC" : "PVC",
!clip_vcc || clip_vcc->encap ? "LLC" : "NULL",
(jiffies-(clip_vcc ? clip_vcc->last_use : entry->neigh->used))/HZ);
off = scnprintf(buf, sizeof(buf) - 1, "%d.%d.%d.%d", NIPQUAD(entry->ip));
svc = ((clip_vcc == SEQ_NO_VCC_TOKEN) ||
(clip_vcc->vcc->sk->sk_family == AF_ATMSVC));
llc = ((clip_vcc == SEQ_NO_VCC_TOKEN) ||
clip_vcc->encap);
if (clip_vcc == SEQ_NO_VCC_TOKEN)
exp = entry->neigh->used;
else
exp = clip_vcc->last_use;
exp = (jiffies - exp) / HZ;
seq_printf(seq, "%-6s%-4s%-4s%5ld ",
dev->name,
svc ? "SVC" : "PVC",
llc ? "LLC" : "NULL",
exp);
off = scnprintf(buf, sizeof(buf) - 1, "%d.%d.%d.%d",
NIPQUAD(entry->ip));
while (off < 16)
buf[off++] = ' ';
buf[off] = '\0';
seq_printf(seq, "%s", buf);
if (!clip_vcc) {
if (clip_vcc == SEQ_NO_VCC_TOKEN) {
if (time_before(jiffies, entry->expires))
seq_printf(seq, "(resolving)\n");
else
seq_printf(seq, "(expired, ref %d)\n",
atomic_read(&entry->neigh->refcnt));
} else if (!svc) {
seq_printf(seq, "%d.%d.%d\n", clip_vcc->vcc->dev->number,
clip_vcc->vcc->vpi, clip_vcc->vcc->vci);
seq_printf(seq, "%d.%d.%d\n",
clip_vcc->vcc->dev->number,
clip_vcc->vcc->vpi,
clip_vcc->vcc->vci);
} else {
svc_addr(seq, &clip_vcc->vcc->remote);
seq_putc(seq, '\n');
}
}
struct arp_state {
int bucket;
struct neighbour *n;
struct clip_vcc *vcc;
};
struct clip_seq_state {
/* This member must be first. */
struct neigh_seq_state ns;
/* Local to clip specific iteration. */
struct clip_vcc *vcc;
};
static void *arp_vcc_walk(struct arp_state *state,
struct atmarp_entry *e, loff_t *l)
{
struct clip_vcc *vcc = state->vcc;
if (!vcc)
vcc = e->vccs;
if (vcc == (void *)1) {
vcc = e->vccs;
--*l;
}
for (; vcc; vcc = vcc->next) {
if (--*l < 0)
break;
}
state->vcc = vcc;
return (*l < 0) ? state : NULL;
}
static struct clip_vcc *clip_seq_next_vcc(struct atmarp_entry *e,
struct clip_vcc *curr)
{
if (!curr) {
curr = e->vccs;
if (!curr)
return SEQ_NO_VCC_TOKEN;
return curr;
}
if (curr == SEQ_NO_VCC_TOKEN)
return NULL;
curr = curr->next;
return curr;
}
static void *arp_get_idx(struct arp_state *state, loff_t l)
{
void *v = NULL;
for (; state->bucket <= NEIGH_HASHMASK; state->bucket++) {
for (; state->n; state->n = state->n->next) {
v = arp_vcc_walk(state, NEIGH2ENTRY(state->n), &l);
if (v)
goto done;
}
state->n = clip_tbl.hash_buckets[state->bucket + 1];
}
done:
return v;
}
static void *arp_seq_start(struct seq_file *seq, loff_t *pos)
{
struct arp_state *state = seq->private;
void *ret = (void *)1;
read_lock_bh(&clip_tbl.lock);
state->bucket = 0;
state->n = clip_tbl.hash_buckets[0];
state->vcc = (void *)1;
if (*pos)
ret = arp_get_idx(state, *pos);
return ret;
}
static void arp_seq_stop(struct seq_file *seq, void *v)
{
struct arp_state *state = seq->private;
if (state->bucket != -1)
read_unlock_bh(&clip_tbl.lock);
}
static void *arp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct arp_state *state = seq->private;
v = arp_get_idx(state, 1);
*pos += !!PTR_ERR(v);
return v;
}
static void *clip_seq_vcc_walk(struct clip_seq_state *state,
struct atmarp_entry *e, loff_t *pos)
{
struct clip_vcc *vcc = state->vcc;
vcc = clip_seq_next_vcc(e, vcc);
if (vcc && pos != NULL) {
while (*pos) {
vcc = clip_seq_next_vcc(e, vcc);
if (!vcc)
break;
--(*pos);
}
}
state->vcc = vcc;
return vcc;
}
static void *clip_seq_sub_iter(struct neigh_seq_state *_state,
struct neighbour *n, loff_t *pos)
{
struct clip_seq_state *state = (struct clip_seq_state *) _state;
return clip_seq_vcc_walk(state, NEIGH2ENTRY(n), pos);
}
static void *clip_seq_start(struct seq_file *seq, loff_t *pos)
{
return neigh_seq_start(seq, pos, &clip_tbl, NEIGH_SEQ_NEIGH_ONLY);
}
static int arp_seq_show(struct seq_file *seq, void *v)
static int clip_seq_show(struct seq_file *seq, void *v)
{
static char atm_arp_banner[] =
"IPitf TypeEncp Idle IP address ATM address\n";
if (v == (void *)1)
if (v == SEQ_START_TOKEN) {
seq_puts(seq, atm_arp_banner);
else {
} else {
struct arp_state *state = seq->private;
struct clip_seq_state *state = seq->private;
struct neighbour *n = state->n;
struct neighbour *n = v;
struct clip_vcc *vcc = state->vcc;
atmarp_info(seq, n->dev, NEIGH2ENTRY(n), vcc);
@@ -955,15 +939,15 @@ static int arp_seq_show(struct seq_file *seq, void *v)
}
static struct seq_operations arp_seq_ops = {
.start = arp_seq_start,
.start = clip_seq_start,
.next = arp_seq_next,
.next = neigh_seq_next,
.stop = arp_seq_stop,
.stop = neigh_seq_stop,
.show = arp_seq_show,
.show = clip_seq_show,
};
static int arp_seq_open(struct inode *inode, struct file *file)
{
struct arp_state *state;
struct clip_seq_state *state;
struct seq_file *seq;
int rc = -EAGAIN;
@@ -972,6 +956,8 @@ static int arp_seq_open(struct inode *inode, struct file *file)
rc = -ENOMEM;
goto out_kfree;
}
memset(state, 0, sizeof(*state));
state->ns.neigh_sub_iter = clip_seq_sub_iter;
rc = seq_open(file, &arp_seq_ops);
if (rc)
@@ -987,16 +973,11 @@ static int arp_seq_open(struct inode *inode, struct file *file)
goto out;
}
static int arp_seq_release(struct inode *inode, struct file *file)
{
return seq_release_private(inode, file);
}
static struct file_operations arp_seq_fops = {
.open = arp_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = arp_seq_release,
.release = seq_release_private,
.owner = THIS_MODULE
};
#endif
......
@@ -29,6 +29,7 @@
#include <net/dst.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/random.h>
#define NEIGH_DEBUG 1
@@ -47,6 +48,8 @@
#define NEIGH_PRINTK2 NEIGH_PRINTK
#endif
#define PNEIGH_HASHMASK 0xF
static void neigh_timer_handler(unsigned long arg);
#ifdef CONFIG_ARPD
static void neigh_app_notify(struct neighbour *n);
@@ -113,27 +116,19 @@ static int neigh_forced_gc(struct neigh_table *tbl)
int shrunk = 0;
int i;
for (i = 0; i <= NEIGH_HASHMASK; i++) {
write_lock_bh(&tbl->lock);
for (i = 0; i <= tbl->hash_mask; i++) {
struct neighbour *n, **np;
np = &tbl->hash_buckets[i];
write_lock_bh(&tbl->lock);
while ((n = *np) != NULL) {
/* Neighbour record may be discarded if:
- nobody refers to it.
- it is not permanent
- (NEW and probably wrong)
INCOMPLETE entries are kept at least for
n->parms->retrans_time, otherwise we could
flood network with resolution requests.
It is not clear, what is better table overflow
or flooding.
* - nobody refers to it.
* - it is not permanent
*/
write_lock(&n->lock);
if (atomic_read(&n->refcnt) == 1 &&
!(n->nud_state & NUD_PERMANENT) &&
(n->nud_state != NUD_INCOMPLETE ||
time_after(jiffies, n->used + n->parms->retrans_time))) {
!(n->nud_state & NUD_PERMANENT)) {
*np = n->next;
n->dead = 1;
shrunk = 1;
@@ -144,10 +139,12 @@ static int neigh_forced_gc(struct neigh_table *tbl)
write_unlock(&n->lock);
np = &n->next;
}
write_unlock_bh(&tbl->lock);
}
tbl->last_flush = jiffies;
write_unlock_bh(&tbl->lock);
return shrunk;
}
@@ -177,7 +174,7 @@ void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
write_lock_bh(&tbl->lock);
for (i=0; i <= NEIGH_HASHMASK; i++) {
for (i=0; i <= tbl->hash_mask; i++) {
struct neighbour *n, **np;
np = &tbl->hash_buckets[i];
@@ -204,7 +201,7 @@ int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
write_lock_bh(&tbl->lock);
for (i = 0; i <= NEIGH_HASHMASK; i++) {
for (i = 0; i <= tbl->hash_mask; i++) {
struct neighbour *n, **np = &tbl->hash_buckets[i];
while ((n = *np) != NULL) {
@@ -286,12 +283,73 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl)
return n;
}
static struct neighbour **neigh_hash_alloc(unsigned int entries)
{
unsigned long size = entries * sizeof(struct neighbour *);
struct neighbour **ret;
if (size <= PAGE_SIZE) {
ret = kmalloc(size, GFP_ATOMIC);
} else {
ret = (struct neighbour **)
__get_free_pages(GFP_ATOMIC, get_order(size));
}
if (ret)
memset(ret, 0, size);
return ret;
}
static void neigh_hash_free(struct neighbour **hash, unsigned int entries)
{
unsigned long size = entries * sizeof(struct neighbour *);
if (size <= PAGE_SIZE)
kfree(hash);
else
free_pages((unsigned long)hash, get_order(size));
}
static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries)
{
struct neighbour **new_hash, **old_hash;
unsigned int i, new_hash_mask, old_entries;
BUG_ON(new_entries & (new_entries - 1));
new_hash = neigh_hash_alloc(new_entries);
if (!new_hash)
return;
old_entries = tbl->hash_mask + 1;
new_hash_mask = new_entries - 1;
old_hash = tbl->hash_buckets;
get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
for (i = 0; i < old_entries; i++) {
struct neighbour *n, *next;
for (n = old_hash[i]; n; n = next) {
unsigned int hash_val = tbl->hash(n->primary_key, n->dev);
hash_val &= new_hash_mask;
next = n->next;
n->next = new_hash[hash_val];
new_hash[hash_val] = n;
}
}
tbl->hash_buckets = new_hash;
tbl->hash_mask = new_hash_mask;
neigh_hash_free(old_hash, old_entries);
}
struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
struct net_device *dev)
{
struct neighbour *n;
int key_len = tbl->key_len;
u32 hash_val = tbl->hash(pkey, dev);
u32 hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;
read_lock_bh(&tbl->lock);
for (n = tbl->hash_buckets[hash_val]; n; n = n->next) {
@@ -304,6 +362,23 @@ struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
return n;
}
struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, const void *pkey)
{
struct neighbour *n;
int key_len = tbl->key_len;
u32 hash_val = tbl->hash(pkey, NULL) & tbl->hash_mask;
read_lock_bh(&tbl->lock);
for (n = tbl->hash_buckets[hash_val]; n; n = n->next) {
if (!memcmp(n->primary_key, pkey, key_len)) {
neigh_hold(n);
break;
}
}
read_unlock_bh(&tbl->lock);
return n;
}
struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
struct net_device *dev)
{
@@ -317,6 +392,12 @@ struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
goto out;
}
if (tbl->entries > (tbl->hash_mask + 1)) {
write_lock_bh(&tbl->lock);
neigh_hash_grow(tbl, (tbl->hash_mask + 1) << 1);
write_unlock_bh(&tbl->lock);
}
memcpy(n->primary_key, pkey, key_len);
n->dev = dev;
dev_hold(dev);
@@ -336,9 +417,10 @@ struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
n->confirmed = jiffies - (n->parms->base_reachable_time << 1);
hash_val = tbl->hash(pkey, dev);
write_lock_bh(&tbl->lock);
hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;
if (n->parms->dead) {
rc = ERR_PTR(-EINVAL);
goto out_tbl_unlock;
@@ -428,10 +510,10 @@ int pneigh_delete(struct neigh_table *tbl, const void *pkey,
hash_val ^= hash_val >> 4;
hash_val &= PNEIGH_HASHMASK;
write_lock_bh(&tbl->lock);
for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
np = &n->next) {
if (!memcmp(n->key, pkey, key_len) && n->dev == dev) {
write_lock_bh(&tbl->lock);
*np = n->next;
write_unlock_bh(&tbl->lock);
if (tbl->pdestructor)
@@ -440,6 +522,7 @@ int pneigh_delete(struct neigh_table *tbl, const void *pkey,
return 0;
}
}
write_unlock_bh(&tbl->lock);
return -ENOENT;
}
@@ -545,9 +628,8 @@ static void neigh_connect(struct neighbour *neigh)
static void neigh_periodic_timer(unsigned long arg)
{
struct neigh_table *tbl = (struct neigh_table *)arg;
unsigned long now = jiffies;
int i;
struct neighbour *n, **np;
unsigned long expire, now = jiffies;
write_lock(&tbl->lock);
@@ -563,12 +645,11 @@ static void neigh_periodic_timer(unsigned long arg)
neigh_rand_reach_time(p->base_reachable_time);
}
for (i = 0; i <= NEIGH_HASHMASK; i++) {
struct neighbour *n, **np;
np = &tbl->hash_buckets[i];
np = &tbl->hash_buckets[tbl->hash_chain_gc];
tbl->hash_chain_gc = ((tbl->hash_chain_gc + 1) & tbl->hash_mask);
while ((n = *np) != NULL) {
unsigned state;
unsigned int state;
write_lock(&n->lock);
@@ -595,9 +676,18 @@ static void neigh_periodic_timer(unsigned long arg)
next_elt:
np = &n->next;
}
}
mod_timer(&tbl->gc_timer, now + tbl->gc_interval);
/* Cycle through all hash buckets every base_reachable_time/2 ticks.
* ARP entry timeouts range from 1/2 base_reachable_time to 3/2
* base_reachable_time.
*/
expire = tbl->parms.base_reachable_time >> 1;
expire /= (tbl->hash_mask + 1);
if (!expire)
expire = 1;
mod_timer(&tbl->gc_timer, now + expire);
write_unlock(&tbl->lock);
}
@@ -1205,6 +1295,7 @@ void neigh_parms_destroy(struct neigh_parms *parms)
void neigh_table_init(struct neigh_table *tbl)
{
unsigned long now = jiffies;
unsigned long phsize;
atomic_set(&tbl->parms.refcnt, 1);
INIT_RCU_HEAD(&tbl->parms.rcu_head);
@@ -1220,12 +1311,24 @@ void neigh_table_init(struct neigh_table *tbl)
if (!tbl->kmem_cachep)
panic("cannot create neighbour cache");
tbl->hash_mask = 1;
tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);
phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
tbl->phash_buckets = kmalloc(phsize, GFP_KERNEL);
if (!tbl->hash_buckets || !tbl->phash_buckets)
panic("cannot allocate neighbour cache hashes");
memset(tbl->phash_buckets, 0, phsize);
get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
tbl->lock = RW_LOCK_UNLOCKED;
init_timer(&tbl->gc_timer);
tbl->gc_timer.data = (unsigned long)tbl;
tbl->gc_timer.function = neigh_periodic_timer;
tbl->gc_timer.expires = now + tbl->gc_interval +
tbl->parms.reachable_time;
tbl->gc_timer.expires = now + 1;
add_timer(&tbl->gc_timer);
init_timer(&tbl->proxy_timer);
@@ -1260,6 +1363,13 @@ int neigh_table_clear(struct neigh_table *tbl)
}
}
write_unlock(&neigh_tbl_lock);
neigh_hash_free(tbl->hash_buckets, tbl->hash_mask + 1);
tbl->hash_buckets = NULL;
kfree(tbl->phash_buckets);
tbl->phash_buckets = NULL;
return 0;
}
@@ -1439,7 +1549,7 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
int rc, h, s_h = cb->args[1];
int idx, s_idx = idx = cb->args[2];
for (h = 0; h <= NEIGH_HASHMASK; h++) {
for (h = 0; h <= tbl->hash_mask; h++) {
if (h < s_h)
continue;
if (h > s_h)
@@ -1489,6 +1599,266 @@ int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
return skb->len;
}
void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
{
int chain;
read_lock_bh(&tbl->lock);
for (chain = 0; chain <= tbl->hash_mask; chain++) {
struct neighbour *n;
for (n = tbl->hash_buckets[chain]; n; n = n->next)
cb(n, cookie);
}
read_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_for_each);
/* The tbl->lock must be held as a writer and BH disabled. */
void __neigh_for_each_release(struct neigh_table *tbl,
int (*cb)(struct neighbour *))
{
int chain;
for (chain = 0; chain <= tbl->hash_mask; chain++) {
struct neighbour *n, **np;
np = &tbl->hash_buckets[chain];
while ((n = *np) != NULL) {
int release;
write_lock(&n->lock);
release = cb(n);
if (release) {
*np = n->next;
n->dead = 1;
} else
np = &n->next;
write_unlock(&n->lock);
if (release)
neigh_release(n);
}
}
}
EXPORT_SYMBOL(__neigh_for_each_release);
#ifdef CONFIG_PROC_FS
static struct neighbour *neigh_get_first(struct seq_file *seq)
{
struct neigh_seq_state *state = seq->private;
struct neigh_table *tbl = state->tbl;
struct neighbour *n = NULL;
int bucket = state->bucket;
state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
for (bucket = 0; bucket <= tbl->hash_mask; bucket++) {
n = tbl->hash_buckets[bucket];
while (n) {
if (state->neigh_sub_iter) {
loff_t fakep = 0;
void *v;
v = state->neigh_sub_iter(state, n, &fakep);
if (!v)
goto next;
}
if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
break;
if (n->nud_state & ~NUD_NOARP)
break;
next:
n = n->next;
}
if (n)
break;
}
state->bucket = bucket;
return n;
}
static struct neighbour *neigh_get_next(struct seq_file *seq,
struct neighbour *n,
loff_t *pos)
{
struct neigh_seq_state *state = seq->private;
struct neigh_table *tbl = state->tbl;
if (state->neigh_sub_iter) {
void *v = state->neigh_sub_iter(state, n, pos);
if (v)
return n;
}
n = n->next;
while (1) {
while (n) {
if (state->neigh_sub_iter) {
void *v = state->neigh_sub_iter(state, n, pos);
if (v)
return n;
goto next;
}
if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
break;
if (n->nud_state & ~NUD_NOARP)
break;
next:
n = n->next;
}
if (n)
break;
if (++state->bucket > tbl->hash_mask)
break;
n = tbl->hash_buckets[state->bucket];
}
if (n && pos)
--(*pos);
return n;
}
static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
{
struct neighbour *n = neigh_get_first(seq);
if (n) {
while (*pos) {
n = neigh_get_next(seq, n, pos);
if (!n)
break;
}
}
return *pos ? NULL : n;
}
static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
{
struct neigh_seq_state *state = seq->private;
struct neigh_table *tbl = state->tbl;
struct pneigh_entry *pn = NULL;
int bucket = state->bucket;
state->flags |= NEIGH_SEQ_IS_PNEIGH;
for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
pn = tbl->phash_buckets[bucket];
if (pn)
break;
}
state->bucket = bucket;
return pn;
}
static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
struct pneigh_entry *pn,
loff_t *pos)
{
struct neigh_seq_state *state = seq->private;
struct neigh_table *tbl = state->tbl;
pn = pn->next;
while (!pn) {
if (++state->bucket > PNEIGH_HASHMASK)
break;
pn = tbl->phash_buckets[state->bucket];
if (pn)
break;
}
if (pn && pos)
--(*pos);
return pn;
}
static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
{
struct pneigh_entry *pn = pneigh_get_first(seq);
if (pn) {
while (*pos) {
pn = pneigh_get_next(seq, pn, pos);
if (!pn)
break;
}
}
return *pos ? NULL : pn;
}
static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
{
struct neigh_seq_state *state = seq->private;
void *rc;
rc = neigh_get_idx(seq, pos);
if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
rc = pneigh_get_idx(seq, pos);
return rc;
}
void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
{
struct neigh_seq_state *state = seq->private;
loff_t pos_minus_one;
state->tbl = tbl;
state->bucket = 0;
state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
read_lock_bh(&tbl->lock);
pos_minus_one = *pos - 1;
return *pos ? neigh_get_idx_any(seq, &pos_minus_one) : SEQ_START_TOKEN;
}
EXPORT_SYMBOL(neigh_seq_start);
void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct neigh_seq_state *state;
void *rc;
if (v == SEQ_START_TOKEN) {
rc = neigh_get_idx(seq, pos);
goto out;
}
state = seq->private;
if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
rc = neigh_get_next(seq, v, NULL);
if (rc)
goto out;
if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
rc = pneigh_get_first(seq);
} else {
BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
rc = pneigh_get_next(seq, v, NULL);
}
out:
++(*pos);
return rc;
}
EXPORT_SYMBOL(neigh_seq_next);
void neigh_seq_stop(struct seq_file *seq, void *v)
{
struct neigh_seq_state *state = seq->private;
struct neigh_table *tbl = state->tbl;
read_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_seq_stop);
#endif /* CONFIG_PROC_FS */
#ifdef CONFIG_ARPD
void neigh_app_ns(struct neighbour *n)
{
@@ -1785,6 +2155,7 @@ EXPORT_SYMBOL(neigh_dump_info);
EXPORT_SYMBOL(neigh_event_ns);
EXPORT_SYMBOL(neigh_ifdown);
EXPORT_SYMBOL(neigh_lookup);
EXPORT_SYMBOL(neigh_lookup_nodev);
EXPORT_SYMBOL(neigh_parms_alloc);
EXPORT_SYMBOL(neigh_parms_release);
EXPORT_SYMBOL(neigh_rand_reach_time);
......
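For reference, the callback contract of the two new iteration helpers above: neigh_for_each() invokes its callback under the table read lock, while __neigh_for_each_release() must run with tbl->lock write-held and BHs disabled, and it unlinks, marks dead and releases every entry whose callback returns nonzero (clip.c's neigh_check_cb earlier in this diff relies on exactly that). A minimal sketch, with made-up table name and eviction policy:

/* Sketch only -- my_tbl and the policy below are illustrative, not from this commit. */
extern struct neigh_table my_tbl;

static int release_if_idle(struct neighbour *n)
{
	/* Called with n->lock write-held; a nonzero return means
	 * "unlink this entry, mark it dead and neigh_release() it". */
	return atomic_read(&n->refcnt) == 1 && !(n->nud_state & NUD_VALID);
}

static void my_flush_idle(void)
{
	write_lock_bh(&my_tbl.lock);	/* writer + BH disabled, as documented */
	__neigh_for_each_release(&my_tbl, release_if_idle);
	write_unlock_bh(&my_tbl.lock);
}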
@@ -36,6 +36,7 @@
#include <linux/spinlock.h>
#include <linux/seq_file.h>
#include <linux/rcupdate.h>
#include <linux/jhash.h>
#include <asm/atomic.h>
#include <net/neighbour.h>
#include <net/dst.h>
@@ -122,13 +123,7 @@ struct neigh_table dn_neigh_table = {
static u32 dn_neigh_hash(const void *pkey, const struct net_device *dev)
{
u32 hash_val;
hash_val = *(dn_address *)pkey;
hash_val ^= (hash_val >> 10);
hash_val ^= (hash_val >> 3);
return hash_val & NEIGH_HASHMASK;
return jhash_2words(*(dn_address *)pkey, 0, dn_neigh_table.hash_rnd);
}
static int dn_neigh_construct(struct neighbour *neigh)
@@ -359,27 +354,6 @@ static int dn_phase3_output(struct sk_buff *skb)
* basically does a neigh_lookup(), but without comparing the device
* field. This is required for the On-Ethernet cache
*/
struct neighbour *dn_neigh_lookup(struct neigh_table *tbl, const void *ptr)
{
struct neighbour *neigh;
u32 hash_val;
hash_val = tbl->hash(ptr, NULL);
read_lock_bh(&tbl->lock);
for(neigh = tbl->hash_buckets[hash_val]; neigh != NULL; neigh = neigh->next) {
if (memcmp(neigh->primary_key, ptr, tbl->key_len) == 0) {
atomic_inc(&neigh->refcnt);
read_unlock_bh(&tbl->lock);
return neigh;
}
}
read_unlock_bh(&tbl->lock);
return NULL;
}
/*
* Any traffic on a pointopoint link causes the timer to be reset
* for the entry in the neighbour table.
@@ -514,141 +488,66 @@ static char *dn_find_slot(char *base, int max, int priority)
return (*min < priority) ? (min - 6) : NULL;
}
int dn_neigh_elist(struct net_device *dev, unsigned char *ptr, int n)
{
int t = 0;
int i;
struct neighbour *neigh;
struct dn_neigh *dn;
struct neigh_table *tbl = &dn_neigh_table;
unsigned char *rs = ptr;
struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr;
read_lock_bh(&tbl->lock);
for(i = 0; i < NEIGH_HASHMASK; i++) {
for(neigh = tbl->hash_buckets[i]; neigh != NULL; neigh = neigh->next) {
if (neigh->dev != dev)
continue;
dn = (struct dn_neigh *)neigh;
if (!(dn->flags & (DN_NDFLAG_R1|DN_NDFLAG_R2)))
continue;
if (dn_db->parms.forwarding == 1 && (dn->flags & DN_NDFLAG_R2))
continue;
if (t == n)
rs = dn_find_slot(ptr, n, dn->priority);
else
t++;
if (rs == NULL)
continue;
dn_dn2eth(rs, dn->addr);
rs += 6;
*rs = neigh->nud_state & NUD_CONNECTED ? 0x80 : 0x0;
*rs |= dn->priority;
rs++;
}
}
read_unlock_bh(&tbl->lock);
return t;
}
#ifdef CONFIG_PROC_FS
struct dn_neigh_iter_state {
int bucket;
};
static struct neighbour *neigh_get_first(struct seq_file *seq)
{
struct dn_neigh_iter_state *state = seq->private;
struct neighbour *n = NULL;
for(state->bucket = 0;
state->bucket <= NEIGH_HASHMASK;
++state->bucket) {
n = dn_neigh_table.hash_buckets[state->bucket];
if (n)
break;
}
return n;
}
static struct neighbour *neigh_get_next(struct seq_file *seq,
struct neighbour *n)
{
struct dn_neigh_iter_state *state = seq->private;
n = n->next;
try_again:
if (n)
goto out;
if (++state->bucket > NEIGH_HASHMASK)
goto out;
n = dn_neigh_table.hash_buckets[state->bucket];
goto try_again;
out:
return n;
}
static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
{
struct neighbour *n = neigh_get_first(seq);
if (n)
while(*pos && (n = neigh_get_next(seq, n)))
--*pos;
return *pos ? NULL : n;
}
static void *dn_neigh_get_idx(struct seq_file *seq, loff_t pos)
{
void *rc;
read_lock_bh(&dn_neigh_table.lock);
rc = neigh_get_idx(seq, &pos);
if (!rc) {
read_unlock_bh(&dn_neigh_table.lock);
}
return rc;
}
static void *dn_neigh_seq_start(struct seq_file *seq, loff_t *pos)
{
return *pos ? dn_neigh_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
static void *dn_neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
void *rc;
if (v == SEQ_START_TOKEN) {
rc = dn_neigh_get_idx(seq, 0);
goto out;
}
rc = neigh_get_next(seq, v);
if (rc)
goto out;
read_unlock_bh(&dn_neigh_table.lock);
out:
++*pos;
return rc;
}
static void dn_neigh_seq_stop(struct seq_file *seq, void *v)
{
if (v && v != SEQ_START_TOKEN)
read_unlock_bh(&dn_neigh_table.lock);
}
struct elist_cb_state {
struct net_device *dev;
unsigned char *ptr;
unsigned char *rs;
int t, n;
};
static void neigh_elist_cb(struct neighbour *neigh, void *_info)
{
struct elist_cb_state *s = _info;
struct dn_dev *dn_db;
struct dn_neigh *dn;
if (neigh->dev != s->dev)
return;
dn = (struct dn_neigh *) neigh;
if (!(dn->flags & (DN_NDFLAG_R1|DN_NDFLAG_R2)))
return;
dn_db = (struct dn_dev *) s->dev->dn_ptr;
if (dn_db->parms.forwarding == 1 && (dn->flags & DN_NDFLAG_R2))
return;
if (s->t == s->n)
s->rs = dn_find_slot(s->ptr, s->n, dn->priority);
else
s->t++;
if (s->rs == NULL)
return;
dn_dn2eth(s->rs, dn->addr);
s->rs += 6;
*(s->rs) = neigh->nud_state & NUD_CONNECTED ? 0x80 : 0x0;
*(s->rs) |= dn->priority;
s->rs++;
}
int dn_neigh_elist(struct net_device *dev, unsigned char *ptr, int n)
{
struct elist_cb_state state;
state.dev = dev;
state.t = 0;
state.n = n;
state.ptr = ptr;
state.rs = ptr;
neigh_for_each(&dn_neigh_table, neigh_elist_cb, &state);
return state.t;
}
#ifdef CONFIG_PROC_FS
static inline void dn_neigh_format_entry(struct seq_file *seq,
struct neighbour *n)
{
struct dn_neigh *dn = (struct dn_neigh *)n;
struct dn_neigh *dn = (struct dn_neigh *) n;
char buf[DN_ASCBUF_LEN];
read_lock(&n->lock);
@@ -675,10 +574,16 @@ static int dn_neigh_seq_show(struct seq_file *seq, void *v)
return 0;
}
static void *dn_neigh_seq_start(struct seq_file *seq, loff_t *pos)
{
return neigh_seq_start(seq, pos, &dn_neigh_table,
NEIGH_SEQ_NEIGH_ONLY);
}
static struct seq_operations dn_neigh_seq_ops = {
.start = dn_neigh_seq_start,
.next = dn_neigh_seq_next,
.next = neigh_seq_next,
.stop = dn_neigh_seq_stop,
.stop = neigh_seq_stop,
.show = dn_neigh_seq_show,
};
@@ -686,11 +591,12 @@ static int dn_neigh_seq_open(struct inode *inode, struct file *file)
{
struct seq_file *seq;
int rc = -ENOMEM;
struct dn_neigh_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL);
struct neigh_seq_state *s = kmalloc(sizeof(*s), GFP_KERNEL);
if (!s)
goto out;
memset(s, 0, sizeof(*s));
rc = seq_open(file, &dn_neigh_seq_ops);
if (rc)
goto out_kfree;
......
@@ -996,7 +996,7 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *old
* here
*/
if (!try_hard) {
neigh = dn_neigh_lookup(&dn_neigh_table, &fl.fld_dst);
neigh = neigh_lookup_nodev(&dn_neigh_table, &fl.fld_dst);
if (neigh) {
if ((oldflp->oif &&
(neigh->dev->ifindex != oldflp->oif)) ||
......
@@ -71,6 +71,7 @@
* arp_xmit so intermediate drivers like
* bonding can change the skb before
* sending (e.g. insert 8021q tag).
* Harald Welte : convert to make use of jenkins hash
*/
#include <linux/module.h>
@@ -97,6 +98,7 @@
#include <linux/init.h>
#include <linux/net.h>
#include <linux/rcupdate.h>
#include <linux/jhash.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
@@ -223,15 +225,7 @@ int arp_mc_map(u32 addr, u8 *haddr, struct net_device *dev, int dir)
static u32 arp_hash(const void *pkey, const struct net_device *dev)
{
u32 hash_val;
hash_val = *(u32*)pkey;
hash_val ^= (hash_val>>16);
hash_val ^= hash_val>>8;
hash_val ^= hash_val>>3;
hash_val = (hash_val^dev->ifindex)&NEIGH_HASHMASK;
return hash_val;
return jhash_2words(*(u32 *)pkey, dev->ifindex, arp_tbl.hash_rnd);
}
static int arp_constructor(struct neighbour *neigh)
@@ -1269,161 +1263,9 @@ static char *ax2asc2(ax25_address *a, char *buf)
}
#endif /* CONFIG_AX25 */
struct arp_iter_state {
int is_pneigh, bucket;
};
static struct neighbour *neigh_get_first(struct seq_file *seq)
{
struct arp_iter_state* state = seq->private;
struct neighbour *n = NULL;
state->is_pneigh = 0;
for (state->bucket = 0;
state->bucket <= NEIGH_HASHMASK;
++state->bucket) {
n = arp_tbl.hash_buckets[state->bucket];
while (n && !(n->nud_state & ~NUD_NOARP))
n = n->next;
if (n)
break;
}
return n;
}
static struct neighbour *neigh_get_next(struct seq_file *seq,
struct neighbour *n)
{
struct arp_iter_state* state = seq->private;
do {
n = n->next;
/* Don't confuse "arp -a" w/ magic entries */
try_again:
;
} while (n && !(n->nud_state & ~NUD_NOARP));
if (n)
goto out;
if (++state->bucket > NEIGH_HASHMASK)
goto out;
n = arp_tbl.hash_buckets[state->bucket];
goto try_again;
out:
return n;
}
static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
{
struct neighbour *n = neigh_get_first(seq);
if (n)
while (*pos && (n = neigh_get_next(seq, n)))
--*pos;
return *pos ? NULL : n;
}
static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
{
struct arp_iter_state* state = seq->private;
struct pneigh_entry *pn;
state->is_pneigh = 1;
for (state->bucket = 0;
state->bucket <= PNEIGH_HASHMASK;
++state->bucket) {
pn = arp_tbl.phash_buckets[state->bucket];
if (pn)
break;
}
return pn;
}
static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
struct pneigh_entry *pn)
{
struct arp_iter_state* state = seq->private;
pn = pn->next;
while (!pn) {
if (++state->bucket > PNEIGH_HASHMASK)
break;
pn = arp_tbl.phash_buckets[state->bucket];
}
return pn;
}
static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t pos)
{
struct pneigh_entry *pn = pneigh_get_first(seq);
if (pn)
while (pos && (pn = pneigh_get_next(seq, pn)))
--pos;
return pos ? NULL : pn;
}
static void *arp_get_idx(struct seq_file *seq, loff_t pos)
{
void *rc;
read_lock_bh(&arp_tbl.lock);
rc = neigh_get_idx(seq, &pos);
if (!rc) {
read_unlock_bh(&arp_tbl.lock);
rc = pneigh_get_idx(seq, pos);
}
return rc;
}
static void *arp_seq_start(struct seq_file *seq, loff_t *pos)
{
struct arp_iter_state* state = seq->private;
state->is_pneigh = 0;
state->bucket = 0;
return *pos ? arp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
static void *arp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
void *rc;
struct arp_iter_state* state;
if (v == SEQ_START_TOKEN) {
rc = arp_get_idx(seq, 0);
goto out;
}
state = seq->private;
if (!state->is_pneigh) {
rc = neigh_get_next(seq, v);
if (rc)
goto out;
read_unlock_bh(&arp_tbl.lock);
rc = pneigh_get_first(seq);
} else
rc = pneigh_get_next(seq, v);
out:
++*pos;
return rc;
}
static void arp_seq_stop(struct seq_file *seq, void *v)
{
struct arp_iter_state* state = seq->private;
if (!state->is_pneigh && v != SEQ_START_TOKEN)
read_unlock_bh(&arp_tbl.lock);
}
#define HBUFFERLEN 30
static __inline__ void arp_format_neigh_entry(struct seq_file *seq,
static void arp_format_neigh_entry(struct seq_file *seq,
struct neighbour *n)
{
char hbuffer[HBUFFERLEN];
@@ -1455,7 +1297,7 @@ static __inline__ void arp_format_neigh_entry(struct seq_file *seq,
read_unlock(&n->lock);
}
static __inline__ void arp_format_pneigh_entry(struct seq_file *seq,
static void arp_format_pneigh_entry(struct seq_file *seq,
struct pneigh_entry *n)
{
struct net_device *dev = n->dev;
@@ -1470,13 +1312,13 @@ static __inline__ void arp_format_pneigh_entry(struct seq_file *seq,
static int arp_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN)
if (v == SEQ_START_TOKEN) {
seq_puts(seq, "IP address HW type Flags "
"HW address Mask Device\n");
else {
} else {
struct arp_iter_state* state = seq->private;
struct neigh_seq_state *state = seq->private;
if (state->is_pneigh)
if (state->flags & NEIGH_SEQ_IS_PNEIGH)
arp_format_pneigh_entry(seq, v);
else
arp_format_neigh_entry(seq, v);
@@ -1485,12 +1327,20 @@ static int arp_seq_show(struct seq_file *seq, void *v)
return 0;
}
static void *arp_seq_start(struct seq_file *seq, loff_t *pos)
{
/* Don't want to confuse "arp -a" w/ magic entries,
* so we tell the generic iterator to skip NUD_NOARP.
*/
return neigh_seq_start(seq, pos, &arp_tbl, NEIGH_SEQ_SKIP_NOARP);
}
/* ------------------------------------------------------------------------ */
static struct seq_operations arp_seq_ops = {
.start = arp_seq_start,
.next = arp_seq_next,
.next = neigh_seq_next,
.stop = arp_seq_stop,
.stop = neigh_seq_stop,
.show = arp_seq_show,
};
@@ -1498,11 +1348,12 @@ static int arp_seq_open(struct inode *inode, struct file *file)
{
struct seq_file *seq;
int rc = -ENOMEM;
struct arp_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL);
struct neigh_seq_state *s = kmalloc(sizeof(*s), GFP_KERNEL);
if (!s)
goto out;
memset(s, 0, sizeof(*s));
rc = seq_open(file, &arp_seq_ops);
if (rc)
goto out_kfree;
......
@@ -438,17 +438,15 @@ static struct fib_alias *fib_find_alias(struct fib_node *fn, u8 tos, u32 prio)
 {
         if (fn) {
                 struct list_head *head = &fn->fn_alias;
-                struct fib_alias *fa, *prev_fa;
+                struct fib_alias *fa;

-                prev_fa = NULL;
                 list_for_each_entry(fa, head, fa_list) {
-                        if (fa->fa_tos != tos)
+                        if (fa->fa_tos > tos)
                                 continue;
-                        prev_fa = fa;
-                        if (prio <= fa->fa_info->fib_priority)
-                                break;
+                        if (fa->fa_info->fib_priority >= prio ||
+                            fa->fa_tos < tos)
+                                return fa;
                 }
-                return prev_fa;
         }
         return NULL;
 }

@@ -505,7 +503,7 @@ fn_hash_insert(struct fib_table *tb, struct rtmsg *r, struct kern_rta *rta,
          * and we need to allocate a new one of those as well.
          */
-        if (fa &&
+        if (fa && fa->fa_tos == tos &&
             fa->fa_info->fib_priority == fi->fib_priority) {
                 struct fib_alias *fa_orig;

@@ -537,7 +535,8 @@ fn_hash_insert(struct fib_table *tb, struct rtmsg *r, struct kern_rta *rta,
                  * information.
                  */
                 fa_orig = fa;
-                list_for_each_entry(fa, fa_orig->fa_list.prev, fa_list) {
+                fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
+                list_for_each_entry_continue(fa, &f->fn_alias, fa_list) {
                         if (fa->fa_tos != tos)
                                 break;
                         if (fa->fa_info->fib_priority != fi->fib_priority)

@@ -585,7 +584,7 @@ fn_hash_insert(struct fib_table *tb, struct rtmsg *r, struct kern_rta *rta,
         write_lock_bh(&fib_hash_lock);
         if (new_f)
                 fib_insert_node(fz, new_f);
-        list_add(&new_fa->fa_list,
+        list_add_tail(&new_fa->fa_list,
                  (fa ? &fa->fa_list : &f->fn_alias));
         write_unlock_bh(&fib_hash_lock);

@@ -611,7 +610,6 @@ fn_hash_delete(struct fib_table *tb, struct rtmsg *r, struct kern_rta *rta,
         struct fn_hash *table = (struct fn_hash*)tb->tb_data;
         struct fib_node *f;
         struct fib_alias *fa, *fa_to_delete;
-        struct list_head *fa_head;
         int z = r->rtm_dst_len;
         struct fn_zone *fz;
         u32 key;

@@ -637,8 +635,8 @@ fn_hash_delete(struct fib_table *tb, struct rtmsg *r, struct kern_rta *rta,
                 return -ESRCH;

         fa_to_delete = NULL;
-        fa_head = fa->fa_list.prev;
-        list_for_each_entry(fa, fa_head, fa_list) {
+        fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
+        list_for_each_entry_continue(fa, &f->fn_alias, fa_list) {
                 struct fib_info *fi = fa->fa_info;

                 if (fa->fa_tos != tos)
...
@@ -41,6 +41,12 @@ static struct sock *tcpnl;
         rta->rta_len = rtalen; \
         RTA_DATA(rta); })

+static inline unsigned int jiffies_to_usecs(const unsigned long j)
+{
+        return 1000*jiffies_to_msecs(j);
+}
+
 /* Return information about state of tcp endpoint in API format. */
 void tcp_get_info(struct sock *sk, struct tcp_info *info)
 {

@@ -68,8 +74,8 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
         if (tp->ecn_flags&TCP_ECN_OK)
                 info->tcpi_options |= TCPI_OPT_ECN;

-        info->tcpi_rto = (1000000*tp->rto)/HZ;
-        info->tcpi_ato = (1000000*tp->ack.ato)/HZ;
+        info->tcpi_rto = jiffies_to_usecs(tp->rto);
+        info->tcpi_ato = jiffies_to_usecs(tp->ack.ato);
         info->tcpi_snd_mss = tp->mss_cache_std;
         info->tcpi_rcv_mss = tp->ack.rcv_mss;

@@ -79,20 +85,20 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
         info->tcpi_retrans = tcp_get_pcount(&tp->retrans_out);
         info->tcpi_fackets = tcp_get_pcount(&tp->fackets_out);

-        info->tcpi_last_data_sent = ((now - tp->lsndtime)*1000)/HZ;
-        info->tcpi_last_data_recv = ((now - tp->ack.lrcvtime)*1000)/HZ;
-        info->tcpi_last_ack_recv = ((now - tp->rcv_tstamp)*1000)/HZ;
+        info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
+        info->tcpi_last_data_recv = jiffies_to_msecs(now - tp->ack.lrcvtime);
+        info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);

         info->tcpi_pmtu = tp->pmtu_cookie;
         info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
-        info->tcpi_rtt = ((1000000*tp->srtt)/HZ)>>3;
-        info->tcpi_rttvar = ((1000000*tp->mdev)/HZ)>>2;
+        info->tcpi_rtt = jiffies_to_usecs(tp->srtt)>>3;
+        info->tcpi_rttvar = jiffies_to_usecs(tp->mdev)>>2;
         info->tcpi_snd_ssthresh = tp->snd_ssthresh;
         info->tcpi_snd_cwnd = tp->snd_cwnd;
         info->tcpi_advmss = tp->advmss;
         info->tcpi_reordering = tp->reordering;

-        info->tcpi_rcv_rtt = ((1000000*tp->rcv_rtt_est.rtt)/HZ)>>3;
+        info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3;
         info->tcpi_rcv_space = tp->rcvq_space.space;
 }

@@ -116,7 +122,8 @@ static int tcpdiag_fill(struct sk_buff *skb, struct sock *sk,
                 if (ext & (1<<(TCPDIAG_INFO-1)))
                         info = TCPDIAG_PUT(skb, TCPDIAG_INFO, sizeof(*info));

-                if (tcp_is_vegas(tp) && (ext & (1<<(TCPDIAG_VEGASINFO-1))))
+                if ((tcp_is_westwood(tp) || tcp_is_vegas(tp))
+                    && (ext & (1<<(TCPDIAG_VEGASINFO-1))))
                         vinfo = TCPDIAG_PUT(skb, TCPDIAG_VEGASINFO, sizeof(*vinfo));
         }
         r->tcpdiag_family = sk->sk_family;

@@ -209,10 +216,17 @@ static int tcpdiag_fill(struct sk_buff *skb, struct sock *sk,
                 tcp_get_info(sk, info);

         if (vinfo) {
+                if (tcp_is_vegas(tp)) {
                         vinfo->tcpv_enabled = tp->vegas.doing_vegas_now;
                         vinfo->tcpv_rttcnt = tp->vegas.cntRTT;
-                        vinfo->tcpv_rtt = tp->vegas.baseRTT;
-                        vinfo->tcpv_minrtt = tp->vegas.minRTT;
+                        vinfo->tcpv_rtt = jiffies_to_usecs(tp->vegas.baseRTT);
+                        vinfo->tcpv_minrtt = jiffies_to_usecs(tp->vegas.minRTT);
+                } else {
+                        vinfo->tcpv_enabled = 0;
+                        vinfo->tcpv_rttcnt = 0;
+                        vinfo->tcpv_rtt = jiffies_to_usecs(tp->westwood.rtt);
+                        vinfo->tcpv_minrtt = jiffies_to_usecs(tp->westwood.rtt_min);
+                }
         }

         nlh->nlmsg_len = skb->tail - b;
...
@@ -555,17 +555,20 @@ static void tcp_event_data_recv(struct sock *sk, struct tcp_opt *tp, struct sk_b
                 tcp_grow_window(sk, tp, skb);
 }

-/* Set up a new TCP connection, depending on whether it should be
- * using Vegas or not.
+/* When starting a new connection, pin down the current choice of
+ * congestion algorithm.
  */
-void tcp_vegas_init(struct tcp_opt *tp)
+void tcp_ca_init(struct tcp_opt *tp)
 {
-        if (sysctl_tcp_vegas_cong_avoid) {
-                tp->vegas.do_vegas = 1;
+        if (sysctl_tcp_westwood)
+                tp->adv_cong = TCP_WESTWOOD;
+        else if (sysctl_tcp_bic)
+                tp->adv_cong = TCP_BIC;
+        else if (sysctl_tcp_vegas_cong_avoid) {
+                tp->adv_cong = TCP_VEGAS;
                 tp->vegas.baseRTT = 0x7fffffff;
                 tcp_vegas_enable(tp);
-        } else
-                tcp_vegas_disable(tp);
+        }
 }
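tcp_is_vegas(), tcp_is_westwood() and tcp_is_bic(), used in the hunks above and below, are not defined anywhere in the hunks shown; presumably they simply test the adv_cong value pinned down by tcp_ca_init(). A rough sketch under that assumption (the inline helpers below are illustrative, not the commit's actual definitions):

/* Sketch only: assumed shape of the congestion-algorithm predicates,
 * keyed on tp->adv_cong as set by tcp_ca_init() above.
 */
static inline int tcp_is_vegas(const struct tcp_opt *tp)
{
        return tp->adv_cong == TCP_VEGAS;
}

static inline int tcp_is_westwood(const struct tcp_opt *tp)
{
        return tp->adv_cong == TCP_WESTWOOD;
}

static inline int tcp_is_bic(const struct tcp_opt *tp)
{
        return tp->adv_cong == TCP_BIC;
}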
 /* Do RTT sampling needed for Vegas.

@@ -2039,7 +2042,7 @@ tcp_ack_update_rtt(struct tcp_opt *tp, int flag, s32 seq_rtt)
 static inline __u32 bictcp_cwnd(struct tcp_opt *tp)
 {
         /* orignal Reno behaviour */
-        if (!sysctl_tcp_bic)
+        if (!tcp_is_bic(tp))
                 return tp->snd_cwnd;

         if (tp->bictcp.last_cwnd == tp->snd_cwnd &&

@@ -2617,18 +2620,16 @@ static void westwood_filter(struct sock *sk, __u32 delta)
  * WESTWOOD_RTT_MIN minimum bound since we could be on a LAN!
  */
-static inline __u32 westwood_update_rttmin(struct sock *sk)
+static inline __u32 westwood_update_rttmin(const struct sock *sk)
 {
-        struct tcp_opt *tp = tcp_sk(sk);
+        const struct tcp_opt *tp = tcp_sk(sk);
         __u32 rttmin = tp->westwood.rtt_min;

-        if (tp->westwood.rtt == 0)
-                return(rttmin);
-
-        if (tp->westwood.rtt < tp->westwood.rtt_min || !rttmin)
+        if (tp->westwood.rtt != 0 &&
+            (tp->westwood.rtt < tp->westwood.rtt_min || !rttmin))
                 rttmin = tp->westwood.rtt;

-        return(rttmin);
+        return rttmin;
 }

 /*
@@ -2636,11 +2637,11 @@ static inline __u32 westwood_update_rttmin(struct sock *sk)
  * Evaluate increases for dk.
  */
-static inline __u32 westwood_acked(struct sock *sk)
+static inline __u32 westwood_acked(const struct sock *sk)
 {
-        struct tcp_opt *tp = tcp_sk(sk);
+        const struct tcp_opt *tp = tcp_sk(sk);

-        return ((tp->snd_una) - (tp->westwood.snd_una));
+        return tp->snd_una - tp->westwood.snd_una;
 }

 /*
@@ -2652,9 +2653,9 @@ static inline __u32 westwood_acked(struct sock *sk)
  * window, 1 if the sample has to be considered in the next window.
  */
-static int westwood_new_window(struct sock *sk)
+static int westwood_new_window(const struct sock *sk)
 {
-        struct tcp_opt *tp = tcp_sk(sk);
+        const struct tcp_opt *tp = tcp_sk(sk);
         __u32 left_bound;
         __u32 rtt;
         int ret = 0;

@@ -2688,14 +2689,13 @@ static void __westwood_update_window(struct sock *sk, __u32 now)
         struct tcp_opt *tp = tcp_sk(sk);
         __u32 delta = now - tp->westwood.rtt_win_sx;

-        if (!delta)
-                return;
+        if (delta) {
                 if (tp->westwood.rtt)
                         westwood_filter(sk, delta);

                 tp->westwood.bk = 0;
                 tp->westwood.rtt_win_sx = tcp_time_stamp;
+        }
 }

@@ -2739,7 +2739,7 @@ static void westwood_dupack_update(struct sock *sk)
 static inline int westwood_may_change_cumul(struct tcp_opt *tp)
 {
-        return ((tp->westwood.cumul_ack) > tp->mss_cache_std);
+        return (tp->westwood.cumul_ack > tp->mss_cache_std);
 }

 static inline void westwood_partial_update(struct tcp_opt *tp)

@@ -2760,7 +2760,7 @@ static inline void westwood_complete_update(struct tcp_opt *tp)
  * delayed or partial acks.
  */
-static __u32 westwood_acked_count(struct sock *sk)
+static inline __u32 westwood_acked_count(struct sock *sk)
 {
         struct tcp_opt *tp = tcp_sk(sk);

@@ -2774,7 +2774,7 @@ static __u32 westwood_acked_count(struct sock *sk)
         if (westwood_may_change_cumul(tp)) {
                 /* Partial or delayed ack */
-                if ((tp->westwood.accounted) >= (tp->westwood.cumul_ack))
+                if (tp->westwood.accounted >= tp->westwood.cumul_ack)
                         westwood_partial_update(tp);
                 else
                         westwood_complete_update(tp);
...
@@ -841,7 +841,8 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
                 if (newtp->ecn_flags&TCP_ECN_OK)
                         newsk->sk_no_largesend = 1;

-                tcp_vegas_init(newtp);
+                tcp_ca_init(newtp);
+
                 TCP_INC_STATS_BH(TCP_MIB_PASSIVEOPENS);
         }
         return newsk;
...
@@ -1359,7 +1359,7 @@ static inline void tcp_connect_init(struct sock *sk)
                 tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
         tp->advmss = dst_metric(dst, RTAX_ADVMSS);
         tcp_initialize_rcv_mss(sk);
-        tcp_vegas_init(tp);
+        tcp_ca_init(tp);

         tcp_select_initial_window(tcp_full_space(sk),
                 tp->advmss - (tp->ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),

@@ -1411,7 +1411,7 @@ int tcp_connect(struct sock *sk)
         TCP_SKB_CB(buff)->end_seq = tp->write_seq;
         tp->snd_nxt = tp->write_seq;
         tp->pushed_seq = tp->write_seq;
-        tcp_vegas_init(tp);
+        tcp_ca_init(tp);

         /* Send it off. */
         TCP_SKB_CB(buff)->when = tcp_time_stamp;
...
@@ -128,6 +128,9 @@ static struct timer_list addr_chk_timer =
                         TIMER_INITIALIZER(addrconf_verify, 0, 0);
 static spinlock_t addrconf_verify_lock = SPIN_LOCK_UNLOCKED;

+static void addrconf_join_anycast(struct inet6_ifaddr *ifp);
+static void addrconf_leave_anycast(struct inet6_ifaddr *ifp);
+
 static int addrconf_ifdown(struct net_device *dev, int how);

 static void addrconf_dad_start(struct inet6_ifaddr *ifp, int flags);

@@ -419,32 +422,27 @@ static void dev_forward_change(struct inet6_dev *idev)
                         ipv6_dev_mc_dec(dev, &addr);
         }
         for (ifa=idev->addr_list; ifa; ifa=ifa->if_next) {
-                ipv6_addr_prefix(&addr, &ifa->addr, ifa->prefix_len);
-                if (ipv6_addr_any(&addr))
-                        continue;
                 if (idev->cnf.forwarding)
-                        ipv6_dev_ac_inc(idev->dev, &addr);
+                        addrconf_join_anycast(ifa);
                 else
-                        ipv6_dev_ac_dec(idev->dev, &addr);
+                        addrconf_leave_anycast(ifa);
         }
 }

-static void addrconf_forward_change(struct inet6_dev *idev)
+static void addrconf_forward_change(void)
 {
         struct net_device *dev;
-        if (idev) {
-                dev_forward_change(idev);
-                return;
-        }
+        struct inet6_dev *idev;

         read_lock(&dev_base_lock);
         for (dev=dev_base; dev; dev=dev->next) {
                 read_lock(&addrconf_lock);
                 idev = __in6_dev_get(dev);
                 if (idev) {
+                        int changed = (!idev->cnf.forwarding) ^ (!ipv6_devconf.forwarding);
                         idev->cnf.forwarding = ipv6_devconf.forwarding;
+                        if (changed)
                                 dev_forward_change(idev);
                 }
                 read_unlock(&addrconf_lock);

@@ -1062,17 +1060,34 @@ void addrconf_join_solict(struct net_device *dev, struct in6_addr *addr)
         ipv6_dev_mc_inc(dev, &maddr);
 }

-void addrconf_leave_solict(struct net_device *dev, struct in6_addr *addr)
+void addrconf_leave_solict(struct inet6_dev *idev, struct in6_addr *addr)
 {
         struct in6_addr maddr;

-        if (dev->flags&(IFF_LOOPBACK|IFF_NOARP))
+        if (idev->dev->flags&(IFF_LOOPBACK|IFF_NOARP))
                 return;

         addrconf_addr_solict_mult(addr, &maddr);
-        ipv6_dev_mc_dec(dev, &maddr);
+        __ipv6_dev_mc_dec(idev, &maddr);
+}
+
+void addrconf_join_anycast(struct inet6_ifaddr *ifp)
+{
+        struct in6_addr addr;
+        ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
+        if (ipv6_addr_any(&addr))
+                return;
+        ipv6_dev_ac_inc(ifp->idev->dev, &addr);
 }

+void addrconf_leave_anycast(struct inet6_ifaddr *ifp)
+{
+        struct in6_addr addr;
+        ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
+        if (ipv6_addr_any(&addr))
+                return;
+        __ipv6_dev_ac_dec(ifp->idev, &addr);
+}
+
 static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
 {

@@ -2225,14 +2240,6 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
                 addrconf_mod_timer(ifp, AC_RS, ifp->idev->cnf.rtr_solicit_interval);
                 spin_unlock_bh(&ifp->lock);
         }
-
-        if (ifp->idev->cnf.forwarding) {
-                struct in6_addr addr;
-                ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
-                if (!ipv6_addr_any(&addr))
-                        ipv6_dev_ac_inc(ifp->idev->dev, &addr);
-        }
 }

 #ifdef CONFIG_PROC_FS

@@ -2994,16 +3001,13 @@ static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
                 dst_hold(&ifp->rt->u.dst);
                 if (ip6_ins_rt(ifp->rt, NULL, NULL))
                         dst_release(&ifp->rt->u.dst);
+                if (ifp->idev->cnf.forwarding)
+                        addrconf_join_anycast(ifp);
                 break;
         case RTM_DELADDR:
-                addrconf_leave_solict(ifp->idev->dev, &ifp->addr);
-                if (ifp->idev->cnf.forwarding) {
-                        struct in6_addr addr;
-                        ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
-                        if (!ipv6_addr_any(&addr))
-                                ipv6_dev_ac_dec(ifp->idev->dev, &addr);
-                }
+                if (ifp->idev->cnf.forwarding)
+                        addrconf_leave_anycast(ifp);
+                addrconf_leave_solict(ifp->idev, &ifp->addr);
                 dst_hold(&ifp->rt->u.dst);
                 if (ip6_del_rt(ifp->rt, NULL, NULL))
                         dst_free(&ifp->rt->u.dst);

@@ -3025,18 +3029,18 @@ int addrconf_sysctl_forward(ctl_table *ctl, int write, struct file * filp,
         ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);

-        if (write && *valp != val && valp != &ipv6_devconf_dflt.forwarding) {
-                struct inet6_dev *idev = NULL;
+        if (write && valp != &ipv6_devconf_dflt.forwarding) {
                 if (valp != &ipv6_devconf.forwarding) {
-                        idev = (struct inet6_dev *)ctl->extra1;
+                        if ((!*valp) ^ (!val)) {
+                                struct inet6_dev *idev = (struct inet6_dev *)ctl->extra1;
                                 if (idev == NULL)
                                         return ret;
-                } else
+                                dev_forward_change(idev);
+                        }
+                } else {
                         ipv6_devconf_dflt.forwarding = ipv6_devconf.forwarding;
-                addrconf_forward_change(idev);
+                        addrconf_forward_change();
+                }

                 if (*valp)
                         rt6_purge_dflt_routers(0);

@@ -3077,15 +3081,19 @@ static int addrconf_sysctl_forward_strategy(ctl_table *table,
         }

         if (valp != &ipv6_devconf_dflt.forwarding) {
-                struct inet6_dev *idev;
                 if (valp != &ipv6_devconf.forwarding) {
-                        idev = (struct inet6_dev *)table->extra1;
+                        struct inet6_dev *idev = (struct inet6_dev *)table->extra1;
+                        int changed;
                         if (unlikely(idev == NULL))
                                 return -ENODEV;
-                } else
-                        idev = NULL;
-                *valp = new;
-                addrconf_forward_change(idev);
+                        changed = (!*valp) ^ (!new);
+                        *valp = new;
+                        if (changed)
+                                dev_forward_change(idev);
+                } else {
+                        *valp = new;
+                        addrconf_forward_change();
+                }

                 if (*valp)
                         rt6_purge_dflt_routers(0);
...
@@ -377,15 +377,10 @@ int ipv6_dev_ac_inc(struct net_device *dev, struct in6_addr *addr)
 /*
  *      device anycast group decrement
  */
-int ipv6_dev_ac_dec(struct net_device *dev, struct in6_addr *addr)
+int __ipv6_dev_ac_dec(struct inet6_dev *idev, struct in6_addr *addr)
 {
-        struct inet6_dev *idev;
         struct ifacaddr6 *aca, *prev_aca;

-        idev = in6_dev_get(dev);
-        if (idev == NULL)
-                return -ENODEV;
-
         write_lock_bh(&idev->lock);
         prev_aca = NULL;
         for (aca = idev->ac_list; aca; aca = aca->aca_next) {

@@ -395,12 +390,10 @@ int ipv6_dev_ac_dec(struct net_device *dev, struct in6_addr *addr)
         }
         if (!aca) {
                 write_unlock_bh(&idev->lock);
-                in6_dev_put(idev);
                 return -ENOENT;
         }
         if (--aca->aca_users > 0) {
                 write_unlock_bh(&idev->lock);
-                in6_dev_put(idev);
                 return 0;
         }
         if (prev_aca)

@@ -408,7 +401,7 @@ int ipv6_dev_ac_dec(struct net_device *dev, struct in6_addr *addr)
         else
                 idev->ac_list = aca->aca_next;
         write_unlock_bh(&idev->lock);
-        addrconf_leave_solict(dev, &aca->aca_addr);
+        addrconf_leave_solict(idev, &aca->aca_addr);

         dst_hold(&aca->aca_rt->u.dst);
         if (ip6_del_rt(aca->aca_rt, NULL, NULL))

@@ -417,10 +410,20 @@ int ipv6_dev_ac_dec(struct net_device *dev, struct in6_addr *addr)
                 dst_release(&aca->aca_rt->u.dst);

         aca_put(aca);
-        in6_dev_put(idev);
         return 0;
 }

+int ipv6_dev_ac_dec(struct net_device *dev, struct in6_addr *addr)
+{
+        int ret;
+        struct inet6_dev *idev = in6_dev_get(dev);
+
+        if (idev == NULL)
+                return -ENODEV;
+        ret = __ipv6_dev_ac_dec(idev, addr);
+        in6_dev_put(idev);
+        return ret;
+}
+
 /*
  *      check if the interface has this anycast address
  */
...
@@ -128,6 +128,8 @@ static rwlock_t ipv6_sk_mc_lock = RW_LOCK_UNLOCKED;
 static struct socket *igmp6_socket;

+int __ipv6_dev_mc_dec(struct inet6_dev *idev, struct in6_addr *addr);
+
 static void igmp6_join_group(struct ifmcaddr6 *ma);
 static void igmp6_leave_group(struct ifmcaddr6 *ma);
 static void igmp6_timer_handler(unsigned long data);

@@ -256,9 +258,9 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, struct in6_addr *addr)
                         if (idev) {
                                 (void) ip6_mc_leave_src(sk,mc_lst,idev);
+                                __ipv6_dev_mc_dec(idev, &mc_lst->addr);
                                 in6_dev_put(idev);
                         }
-                        ipv6_dev_mc_dec(dev, &mc_lst->addr);
                         dev_put(dev);
                 }
                 sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));

@@ -322,9 +324,9 @@ void ipv6_sock_mc_close(struct sock *sk)
                 if (idev) {
                         (void) ip6_mc_leave_src(sk, mc_lst, idev);
+                        __ipv6_dev_mc_dec(idev, &mc_lst->addr);
                         in6_dev_put(idev);
                 }
-                ipv6_dev_mc_dec(dev, &mc_lst->addr);
                 dev_put(dev);
         }

@@ -870,7 +872,7 @@ int ipv6_dev_mc_inc(struct net_device *dev, struct in6_addr *addr)
 /*
  *      device multicast group del
  */
-static int __ipv6_dev_mc_dec(struct net_device *dev, struct inet6_dev *idev, struct in6_addr *addr)
+int __ipv6_dev_mc_dec(struct inet6_dev *idev, struct in6_addr *addr)
 {
         struct ifmcaddr6 *ma, **map;

@@ -903,7 +905,7 @@ int ipv6_dev_mc_dec(struct net_device *dev, struct in6_addr *addr)
         if (!idev)
                 return -ENODEV;

-        err = __ipv6_dev_mc_dec(dev, idev, addr);
+        err = __ipv6_dev_mc_dec(idev, addr);

         in6_dev_put(idev);

@@ -2108,7 +2110,12 @@ void ipv6_mc_destroy_dev(struct inet6_dev *idev)
          * addrconf.c has NULL'd out dev->ip6_ptr so in6_dev_get() will
          * fail.
          */
-        __ipv6_dev_mc_dec(idev->dev, idev, &maddr);
+        __ipv6_dev_mc_dec(idev, &maddr);
+
+        if (idev->cnf.forwarding) {
+                ipv6_addr_all_routers(&maddr);
+                __ipv6_dev_mc_dec(idev, &maddr);
+        }

         write_lock_bh(&idev->lock);
         while ((i = idev->mc_list) != NULL) {
...
@@ -66,6 +66,7 @@
 #include <linux/if_arp.h>
 #include <linux/ipv6.h>
 #include <linux/icmpv6.h>
+#include <linux/jhash.h>

 #include <net/sock.h>
 #include <net/snmp.h>

@@ -270,15 +271,14 @@ int ndisc_mc_map(struct in6_addr *addr, char *buf, struct net_device *dev, int d
 static u32 ndisc_hash(const void *pkey, const struct net_device *dev)
 {
-        u32 hash_val;
+        const u32 *p32 = pkey;
+        u32 addr_hash, i;

-        hash_val = *(u32*)(pkey + sizeof(struct in6_addr) - 4);
-        hash_val ^= (hash_val>>16);
-        hash_val ^= hash_val>>8;
-        hash_val ^= hash_val>>3;
-        hash_val = (hash_val^dev->ifindex)&NEIGH_HASHMASK;
+        addr_hash = 0;
+        for (i = 0; i < (sizeof(struct in6_addr) / sizeof(u32)); i++)
+                addr_hash ^= *p32++;

-        return hash_val;
+        return jhash_2words(addr_hash, dev->ifindex, nd_tbl.hash_rnd);
 }

 static int ndisc_constructor(struct neighbour *neigh)
...